wgpu/api/buffer.rs
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::{
    error, fmt,
    ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};

use crate::util::Mutex;
use crate::*;

/// Handle to a GPU-accessible buffer.
///
/// Created with [`Device::create_buffer`] or
/// [`DeviceExt::create_buffer_init`](util::DeviceExt::create_buffer_init).
///
/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
///
/// A `Buffer`'s bytes have "interior mutability": functions like
/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
/// prevents simultaneous reads and writes of buffer contents using run-time
/// checks.
///
/// [mapping]: Buffer#mapping-buffers
///
/// # Mapping buffers
///
/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
/// `&mut [u8]` slice of bytes. Buffers created with the
/// [`mapped_at_creation`][mac] flag set are also mapped initially.
///
/// Depending on the hardware, the buffer could be memory shared between CPU and
/// GPU, so that the CPU has direct access to the same bytes the GPU will
/// consult; or it may be ordinary CPU memory, whose contents the system must
/// copy to/from the GPU as needed. This crate's API is designed to work the
/// same way in either case: at any given time, a buffer is either mapped and
/// available to the CPU, or unmapped and ready for use by the GPU, but never
/// both. This makes it impossible for either side to observe changes by the
/// other immediately, and any necessary transfers can be carried out when the
/// buffer transitions from one state to the other.
///
/// There are two ways to map a buffer:
///
/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
///   buffer is mapped when it is created. This is the easiest way to initialize
///   a new buffer; see the sketch after this list. You can set
///   `mapped_at_creation` on any kind of buffer, regardless of its [`usage`]
///   flags.
///
/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
///   to map the portion of `buffer` given by `range`. This waits for the GPU to
///   finish using the buffer, and invokes `callback` as soon as the buffer is
///   safe for the CPU to access.
///
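/// For example, here is a minimal sketch of initializing a buffer through
/// `mapped_at_creation` (the `device` binding and the 16-byte size are
/// assumptions for illustration):
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// // The buffer is mapped as soon as it is created; no `MAP_*` usage is
/// // required for this.
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: None,
///     size: 16,
///     usage: wgpu::BufferUsages::VERTEX,
///     mapped_at_creation: true,
/// });
/// // Write the initial contents through a mapped view, then unmap so the
/// // GPU can use the buffer. The view is a temporary, so it is dropped at
/// // the end of the statement, before `unmap` is called.
/// buffer.get_mapped_range_mut(..).fill(0);
/// buffer.unmap();
/// ```
///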
/// Once a buffer is mapped:
///
/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
///   the buffer's contents.
///
/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
///   read and write the buffer's contents.
///
/// The given `range` must fall within the mapped portion of the buffer. If you
/// attempt to access overlapping ranges, even for shared access only, these
/// methods panic.
///
/// While a buffer is mapped, you may not submit any commands to the GPU that
/// access it. You may record command buffers that use the buffer, but if you
/// submit them while the buffer is mapped, submission will panic.
///
/// When you are done using the buffer on the CPU, you must call
/// [`Buffer::unmap`] to make it available for use by the GPU again. All
/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
///
/// # Example
///
/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
/// with `f32` values like this:
///
/// ```
/// # #[cfg(feature = "noop")]
/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
/// # #[cfg(not(feature = "noop"))]
/// # let device: wgpu::Device = { return; };
/// #
/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
/// #     label: None,
/// #     size: 400,
/// #     usage: wgpu::BufferUsages::MAP_WRITE,
/// #     mapped_at_creation: false,
/// # });
/// let capturable = buffer.clone();
/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
///     if result.is_ok() {
///         let mut view = capturable.get_mapped_range_mut(..);
///         let floats: &mut [f32] = bytemuck::cast_slice_mut(&mut view);
///         floats.fill(42.0);
///         drop(view);
///         capturable.unmap();
///     }
/// });
/// ```
///
/// This code takes the following steps:
///
/// - First, it makes a cloned handle to the buffer for capture by the callback
///   passed to [`map_async`]. Since a [`map_async`] callback may be invoked
///   from another thread, interaction between the callback and the thread
///   calling [`map_async`] generally requires some sort of shared heap data
///   like this. In real code, there might be an [`Arc`] to some larger
///   structure that itself owns `buffer`.
///
/// - Then, it calls [`Buffer::map_async`] with `..` to request that the
///   buffer's entire contents be made accessible to the CPU ("mapped"); this is
///   shorthand for calling [`Buffer::slice`] and then [`BufferSlice::map_async`].
///   Mapping may entail waiting for previously enqueued operations on `buffer`
///   to finish. Although [`map_async`] itself always returns immediately, it
///   saves the callback function to be invoked later.
///
/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
///   shown in this example; see the sketch after this list) determines that the
///   buffer is mapped and ready for the CPU to use, it invokes the callback
///   function.
///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
///
/// - It then uses the [`bytemuck`] crate to turn the `&mut [u8]` into a
///   `&mut [f32]`, and calls the slice [`fill`] method to fill the buffer with
///   a useful value.
///
/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
///   the buffer. In real code, the callback would also need to do some sort of
///   synchronization to let the rest of the program know that it has completed
///   its work.
///
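/// The polling step can be as simple as the following sketch. This assumes the
/// blocking `PollType::Wait` mode of recent `wgpu` versions is available and
/// acceptable (for example, on a dedicated worker thread):
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// // Block until all outstanding GPU work and pending map requests have
/// // completed; map_async callbacks are invoked from this call.
/// device.poll(wgpu::PollType::Wait).unwrap();
/// ```
///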
/// If using [`map_async`] directly is awkward, you may find it more convenient to
/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
/// However, those each have their own tradeoffs; the asynchronous nature of GPU
/// execution makes it hard to avoid friction altogether.
///
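/// For instance, a rough equivalent of the example above using
/// [`Queue::write_buffer`] (a sketch; it assumes a `queue` binding and a
/// buffer created with `COPY_DST` usage instead of `MAP_WRITE`):
///
/// ```no_run
/// # let queue: wgpu::Queue = todo!();
/// # let buffer: wgpu::Buffer = todo!();
/// // Schedules the bytes to be copied into the buffer; the copy itself
/// // happens at the start of the next `queue.submit(..)`.
/// queue.write_buffer(&buffer, 0, bytemuck::cast_slice(&[42.0f32; 100]));
/// ```
///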
/// [`Arc`]: std::sync::Arc
/// [`map_async`]: BufferSlice::map_async
/// [`bytemuck`]: https://crates.io/crates/bytemuck
/// [`fill`]: slice::fill
///
/// ## Mapping buffers on the web
///
/// When compiled to WebAssembly and running in a browser content process,
/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
/// In this context, `wgpu` is further isolated from the GPU:
///
/// - Depending on the browser's WebGPU implementation, mapping and unmapping
///   buffers probably entail copies between WebAssembly linear memory and the
///   graphics driver's buffers.
///
/// - All modern web browsers isolate web content in its own sandboxed process,
///   which can only interact with the GPU via interprocess communication (IPC).
///   Although most browsers' IPC systems use shared memory for large data
///   transfers, there will still probably need to be copies into and out of the
///   shared memory buffers.
///
/// All of these copies contribute to the cost of buffer mapping in this
/// configuration.
///
/// [`usage`]: BufferDescriptor::usage
/// [mac]: BufferDescriptor::mapped_at_creation
/// [`MAP_READ`]: BufferUsages::MAP_READ
/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
#[derive(Debug, Clone)]
pub struct Buffer {
    pub(crate) inner: dispatch::DispatchBuffer,
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    pub(crate) size: wgt::BufferAddress,
    pub(crate) usage: BufferUsages,
    // TODO: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);

impl Buffer {
    /// Return the binding view of the entire buffer.
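    ///
    /// A sketch of typical use, supplying the buffer to a bind group entry
    /// (the `device`, `layout`, and `buffer` bindings are assumptions for
    /// illustration):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = todo!();
    /// # let layout: wgpu::BindGroupLayout = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
    ///     label: None,
    ///     layout: &layout,
    ///     entries: &[wgpu::BindGroupEntry {
    ///         binding: 0,
    ///         // Bind the whole buffer, with no offset and no size limit.
    ///         resource: buffer.as_entire_binding(),
    ///     }],
    /// });
    /// ```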
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the active hal backend's buffer
    /// type, [`A::Buffer`].
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: wgc::hal_api::HalApi>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// the start and end of `bounds` are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing
    /// via [`get_mapped_range()`](Self::get_mapped_range).
    /// It is available once the `callback` is called with an [`Ok`] response.
    ///
    /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls one of the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback; however, on native the
    /// call to the polling function will not return until the callback has returned, so prefer to keep callbacks
    /// short: use them to set flags, send messages, and the like.
    ///
    /// As long as a buffer is mapped, it is not available for use by any other commands;
    /// at all times, either the GPU or the CPU has exclusive access to the contents of the buffer.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer's [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView<'_> {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(
        &self,
        bounds: S,
    ) -> BufferViewMut<'_> {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns the custom implementation of the buffer, if this buffer was
    /// created by a custom backend whose inner type is `T`.
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
///
/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let slice = buffer.slice(10..20);
/// ```
///
/// This returns a slice referring to the second ten bytes of `buffer`. To get a
/// slice of the entire `Buffer`:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let whole_buffer_slice = buffer.slice(..);
/// ```
///
/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
/// a draw call should consult. You can also convert a slice to a
/// [`BufferBinding`] with `.into()`.
///
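/// For example (a sketch; the `buffer` and render-pass bindings are assumed
/// to exist, with the buffer having `VERTEX` usage and at least 256 bytes):
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// # let mut render_pass: wgpu::RenderPass<'static> = todo!();
/// // Use the first 256 bytes of `buffer` as the vertex data for slot 0.
/// render_pass.set_vertex_buffer(0, buffer.slice(..256));
/// ```
///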
/// To access the slice's contents on the CPU, you must first [map] the buffer,
/// and then call [`BufferSlice::get_mapped_range`] or
/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
/// contents. See the documentation on [mapping][map] for more details,
/// including example code.
///
/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
/// nobody else is modifying the `T` values to which it refers, a
/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
/// changing. You can still record and submit commands operating on the
/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
/// represents a certain range of the buffer's bytes.
///
/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
/// specification, an offset and size are instead specified as arguments to
/// each call working with the [`Buffer`].
///
/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    pub(crate) buffer: &'a Buffer,
    pub(crate) offset: BufferAddress,
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);

impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `slice.slice(..)` refers to the entire slice, and `slice.slice(n..)`
    /// refers to the portion starting at the `n`th byte of the slice and
    /// extending to its end.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size,                         // check_buffer_bounds ensures this fits within `self`
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing
    /// via [`get_mapped_range()`](Self::get_mapped_range).
    /// It is available once the `callback` is called with an [`Ok`] response.
    ///
    /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls one of the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback; however, on native the
    /// call to the polling function will not return until the callback has returned, so prefer to keep callbacks
    /// short: use them to set flags, send messages, and the like.
    ///
    /// As long as a buffer is mapped, it is not available for use by any other commands;
    /// at all times, either the GPU or the CPU has exclusive access to the contents of the buffer.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer's [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.initial_range = self.offset..end;

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range(&self) -> BufferView<'a> {
        let end = self.buffer.map_context.lock().add(self.offset, self.size);
        let range = self.buffer.inner.get_mapped_range(self.offset..end);
        BufferView {
            slice: *self,
            inner: range,
        }
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
        let end = self.buffer.map_context.lock().add(self.offset, self.size);
        let range = self.buffer.inner.get_mapped_range(self.offset..end);
        BufferViewMut {
            slice: *self,
            inner: range,
            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
        }
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it's possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] at which this slice starts.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        BufferBinding {
            buffer: value.buffer,
            offset: value.offset,
            size: Some(value.size),
        }
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        crate::BindingResource::Buffer(crate::BufferBinding::from(value))
    }
}

/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't overlap.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped).
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    pub(crate) initial_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These are non-overlapping, and are all contained
    /// within `initial_range`.
    sub_ranges: Vec<Range<BufferAddress>>,
}

impl MapContext {
    pub(crate) fn new() -> Self {
        Self {
            initial_range: 0..0,
            sub_ranges: Vec::new(),
        }
    }

    /// Record that the buffer is no longer mapped.
    fn reset(&mut self) {
        self.initial_range = 0..0;

        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Record that the `size` bytes of the buffer at `offset` are now viewed.
    ///
    /// Return the byte offset within the buffer of the end of the viewed range.
    ///
    /// # Panics
    ///
    /// This panics if the given range overlaps with any existing range.
    fn add(&mut self, offset: BufferAddress, size: BufferSize) -> BufferAddress {
        let end = offset + size.get();
        assert!(self.initial_range.start <= offset && end <= self.initial_range.end);
        // This check is essential for avoiding undefined behavior: it is the
        // only thing that ensures that `&mut` references to the buffer's
        // contents don't alias anything else.
        for sub in self.sub_ranges.iter() {
            assert!(
                end <= sub.start || offset >= sub.end,
                "Intersecting map range with {sub:?}"
            );
        }
        self.sub_ranges.push(offset..end);
        end
    }

    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range does not exactly match one previously
    /// passed to [`add`].
    ///
    /// [`add`]: MapContext::add
    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();

        let index = self
            .sub_ranges
            .iter()
            .position(|r| *r == (offset..end))
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}

/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);

/// Error occurred when trying to async map a buffer.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);

impl fmt::Display for BufferAsyncError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Error occurred when trying to async map a buffer")
    }
}

impl error::Error for BufferAsyncError {}

/// Type of buffer mapping.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading.
    Read,
    /// Map only for writing.
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);

/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
/// [`map_async`]: BufferSlice::map_async
#[derive(Debug)]
pub struct BufferView<'a> {
    slice: BufferSlice<'a>,
    inner: dispatch::DispatchBufferMappedRange,
}

#[cfg(webgpu)]
impl BufferView<'_> {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in JS.
    /// This can be much faster than dereferencing the view, which copies the data into
    /// the Rust / Wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}

impl core::ops::Deref for BufferView<'_> {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.inner.slice()
    }
}

impl AsRef<[u8]> for BufferView<'_> {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.slice()
    }
}

/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
/// Rust slice methods to access the buffer's contents. It also implements
/// `AsMut<[u8]>`, if that's more convenient.
///
/// It is possible to read the buffer using this view, but doing so is not
/// recommended, as it is likely to be slow.
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut<'a> {
    slice: BufferSlice<'a>,
    inner: dispatch::DispatchBufferMappedRange,
    readable: bool,
}

impl AsMut<[u8]> for BufferViewMut<'_> {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Deref for BufferViewMut<'_> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        if !self.readable {
            log::warn!("Reading from a BufferViewMut is slow and not recommended.");
        }

        self.inner.slice()
    }
}

impl DerefMut for BufferViewMut<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl Drop for BufferView<'_> {
    fn drop(&mut self) {
        // Mark this view's range as no longer in use, so that new views over
        // it may be created and `Buffer::unmap` may succeed.
        self.slice
            .buffer
            .map_context
            .lock()
            .remove(self.slice.offset, self.slice.size);
    }
}

impl Drop for BufferViewMut<'_> {
    fn drop(&mut self) {
        // Mark this view's range as no longer in use, so that new views over
        // it may be created and `Buffer::unmap` may succeed.
        self.slice
            .buffer
            .map_context
            .lock()
            .remove(self.slice.offset, self.slice.size);
    }
}

/// Panics if the slice given by `slice_offset` and `slice_size` does not fall
/// within a buffer of size `buffer_size`.
#[track_caller]
fn check_buffer_bounds(
    buffer_size: BufferAddress,
    slice_offset: BufferAddress,
    slice_size: BufferSize,
) {
    // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
    if slice_offset >= buffer_size {
        panic!(
            "slice offset {} is out of range for buffer of size {}",
            slice_offset, buffer_size
        );
    }

    // Detect integer overflow.
    let end = slice_offset.checked_add(slice_size.get());
    if end.is_none_or(|end| end > buffer_size) {
        panic!(
            "slice offset {} size {} is out of range for buffer of size {}",
            slice_offset, slice_size, buffer_size
        );
    }
}

/// Convert `bounds`, interpreted relative to a buffer (or slice) of size
/// `whole_size`, into an `(offset, size)` pair.
///
/// Panics if `bounds` describes an empty range.
#[track_caller]
fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
    whole_size: BufferAddress,
) -> (BufferAddress, BufferSize) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        Bound::Excluded(&bound) => bound + 1,
        Bound::Unbounded => 0,
    };
    let size = BufferSize::new(match bounds.end_bound() {
        Bound::Included(&bound) => bound + 1 - offset,
        Bound::Excluded(&bound) => bound - offset,
        Bound::Unbounded => whole_size - offset,
    })
    .expect("buffer slices can not be empty");

    (offset, size)
}

#[cfg(test)]
mod tests {
    use super::{check_buffer_bounds, range_to_offset_size, BufferAddress, BufferSize};

    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, bs(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, bs(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }
}