wgpu/api/queue.rs

use std::ops::{Deref, DerefMut};

use crate::*;

/// Handle to a command queue on a device.
///
/// A `Queue` executes recorded [`CommandBuffer`] objects and provides convenience methods
/// for writing to [buffers](Queue::write_buffer) and [textures](Queue::write_texture).
/// It can be created along with a [`Device`] by calling [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue).
#[derive(Debug, Clone)]
pub struct Queue {
    pub(crate) inner: dispatch::DispatchQueue,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Queue: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Queue => .inner);

/// Identifier for a particular call to [`Queue::submit`]. Can be used
/// as part of an argument to [`Device::poll`] to block until a particular
/// submission has finished.
///
/// This type is unique to the Rust API of `wgpu`.
/// There is no analogue in the WebGPU specification.
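///
/// A minimal sketch waiting for a specific submission to finish (the
/// `device` and the submitted work here are assumed to exist):
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// # let queue: wgpu::Queue = todo!();
/// let index = queue.submit([]);
/// // Block until that particular submission has completed on the GPU.
/// let _ = device.poll(wgpu::Maintain::WaitForSubmissionIndex(index));
/// ```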
#[derive(Debug, Clone)]
pub struct SubmissionIndex {
    #[cfg_attr(
        all(
            target_arch = "wasm32",
            not(target_os = "emscripten"),
            not(feature = "webgl"),
        ),
        expect(dead_code)
    )]
    pub(crate) index: u64,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);

pub use wgt::Maintain as MaintainBase;
/// Passed to [`Device::poll`] to control whether, and how, it should block.
pub type Maintain = wgt::Maintain<SubmissionIndex>;
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Maintain: Send, Sync);

/// A write-only view into a staging buffer.
///
/// Reading from this view won't yield the contents of the buffer on the
/// GPU and is likely to be slow. Because of this, although [`AsMut`] is
/// implemented for this type, [`AsRef`] is not.
pub struct QueueWriteBufferView<'a> {
    queue: &'a Queue,
    buffer: &'a Buffer,
    offset: BufferAddress,
    inner: dispatch::DispatchQueueWriteBuffer,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(QueueWriteBufferView<'_>: Send, Sync);

impl Deref for QueueWriteBufferView<'_> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        log::warn!("Reading from a QueueWriteBufferView won't yield the contents of the buffer and may be slow.");
        self.inner.slice()
    }
}

impl DerefMut for QueueWriteBufferView<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl AsMut<[u8]> for QueueWriteBufferView<'_> {
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Drop for QueueWriteBufferView<'_> {
    fn drop(&mut self) {
        // Dropping the view schedules the staged bytes to be copied into the
        // destination buffer; the copy executes on a subsequent `Queue::submit`.
        self.queue
            .inner
            .write_staging_buffer(&self.buffer.inner, self.offset, &self.inner);
    }
}

impl Queue {
    /// Schedule a data write into `buffer` starting at `offset`.
    ///
    /// This method fails if `data` overruns the size of `buffer` starting at `offset`.
    ///
    /// This does *not* submit the transfer to the GPU immediately. Calls to
    /// `write_buffer` begin execution only on the next call to
    /// [`Queue::submit`]. To get a set of scheduled transfers started
    /// immediately, it's fine to call `submit` with no command buffers at all:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// queue.submit([]);
    /// ```
    ///
    /// However, `data` will be immediately copied into staging memory, so the
    /// caller may discard it any time after this call completes.
    ///
    /// If possible, consider using [`Queue::write_buffer_with`] instead. That
    /// method avoids an intermediate copy and is often able to transfer data
    /// more efficiently than this one.
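    ///
    /// A minimal usage sketch (the `buffer` here is assumed to have been
    /// created with `BufferUsages::COPY_DST` and to be at least four bytes long):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// // Schedule the first four bytes of `buffer` to be overwritten.
    /// queue.write_buffer(&buffer, 0, &[0, 1, 2, 3]);
    /// queue.submit([]); // start the scheduled transfer immediately
    /// ```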
    pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
        self.inner.write_buffer(&buffer.inner, offset, data);
    }

    /// Write to a buffer via a directly mapped staging buffer.
    ///
    /// Returns a [`QueueWriteBufferView`] which, when dropped, schedules a copy
    /// of its contents into `buffer` at `offset`. The returned view
    /// dereferences to a `size`-byte long `&mut [u8]`, in which you should
    /// store the data you would like written to `buffer`.
    ///
    /// This method may perform transfers faster than [`Queue::write_buffer`],
    /// because the returned [`QueueWriteBufferView`] is actually the staging
    /// buffer for the write, mapped into the caller's address space. Writing
    /// your data directly into this staging buffer avoids the temporary
    /// CPU-side buffer needed by `write_buffer`.
    ///
    /// Reading from the returned view is slow, and will not yield the current
    /// contents of `buffer`.
    ///
    /// Note that dropping the [`QueueWriteBufferView`] does *not* submit the
    /// transfer to the GPU immediately. The transfer begins only on the next
    /// call to [`Queue::submit`] after the view is dropped. To get a set of
    /// scheduled transfers started immediately, it's fine to call `submit` with
    /// no command buffers at all:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// queue.submit([]);
    /// ```
    ///
    /// This method fails if `size` is greater than the size of `buffer` starting at `offset`.
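    ///
    /// A minimal usage sketch (the `buffer` here is assumed to be at least
    /// four bytes long and created with `BufferUsages::COPY_DST`):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// let size = wgpu::BufferSize::new(4).unwrap();
    /// if let Some(mut view) = queue.write_buffer_with(&buffer, 0, size) {
    ///     view.copy_from_slice(&[0, 1, 2, 3]);
    /// } // dropping `view` schedules the copy into `buffer`
    /// queue.submit([]);
    /// ```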
    #[must_use]
    pub fn write_buffer_with<'a>(
        &'a self,
        buffer: &'a Buffer,
        offset: BufferAddress,
        size: BufferSize,
    ) -> Option<QueueWriteBufferView<'a>> {
        profiling::scope!("Queue::write_buffer_with");
        self.inner
            .validate_write_buffer(&buffer.inner, offset, size)?;
        let staging_buffer = self.inner.create_staging_buffer(size)?;
        Some(QueueWriteBufferView {
            queue: self,
            buffer,
            offset,
            inner: staging_buffer,
        })
    }

    /// Schedule a write of some data into a texture.
    ///
    /// * `data` contains the texels to be written, which must be in
    ///   [the same format as the texture](TextureFormat).
    /// * `data_layout` describes the memory layout of `data`, which need not
    ///   have tightly packed rows.
    /// * `texture` specifies the texture to write into, and the location within the
    ///   texture (coordinate offset, mip level) that will be overwritten.
    /// * `size` is the size, in texels, of the region to be written.
    ///
    /// This method fails if `size` overruns the size of `texture`, or if `data` is too short.
    ///
    /// This does *not* submit the transfer to the GPU immediately. Calls to
    /// `write_texture` begin execution only on the next call to
    /// [`Queue::submit`]. To get a set of scheduled transfers started
    /// immediately, it's fine to call `submit` with no command buffers at all:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// queue.submit([]);
    /// ```
    ///
    /// However, `data` will be immediately copied into staging memory, so the
    /// caller may discard it any time after this call completes.
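    ///
    /// A minimal sketch writing one 256×256 RGBA8 mip level (the `texture`
    /// here is assumed to exist with a matching format, size, and
    /// `TextureUsages::COPY_DST`):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let texture: wgpu::Texture = todo!();
    /// let (width, height) = (256u32, 256u32);
    /// let data = vec![0u8; (4 * width * height) as usize]; // 4 bytes per RGBA8 texel
    /// queue.write_texture(
    ///     wgpu::TexelCopyTextureInfo {
    ///         texture: &texture,
    ///         mip_level: 0,
    ///         origin: wgpu::Origin3d::ZERO,
    ///         aspect: wgpu::TextureAspect::All,
    ///     },
    ///     &data,
    ///     wgpu::TexelCopyBufferLayout {
    ///         offset: 0,
    ///         bytes_per_row: Some(4 * width),
    ///         rows_per_image: Some(height),
    ///     },
    ///     wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
    /// );
    /// ```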
    pub fn write_texture(
        &self,
        texture: TexelCopyTextureInfo<'_>,
        data: &[u8],
        data_layout: TexelCopyBufferLayout,
        size: Extent3d,
    ) {
        self.inner.write_texture(texture, data, data_layout, size);
    }

    /// Schedule a copy of data from `source` into the texture given by `dest`.
    #[cfg(any(webgpu, webgl))]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        dest: wgt::CopyExternalImageDestInfo<&api::Texture>,
        size: Extent3d,
    ) {
        self.inner
            .copy_external_image_to_texture(source, dest, size);
    }

    /// Submits a series of finished command buffers for execution.
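    ///
    /// A minimal sketch recording an (empty) command buffer and submitting
    /// it (the `device` here is assumed to exist):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = todo!();
    /// # let queue: wgpu::Queue = todo!();
    /// let encoder =
    ///     device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
    /// // Record passes and copies on `encoder` here.
    /// queue.submit([encoder.finish()]);
    /// ```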
    pub fn submit<I: IntoIterator<Item = CommandBuffer>>(
        &self,
        command_buffers: I,
    ) -> SubmissionIndex {
        let mut command_buffers = command_buffers.into_iter().map(|comb| {
            comb.inner
                .lock()
                .take()
                .expect("Command buffer already submitted")
        });

        let index = self.inner.submit(&mut command_buffers);

        SubmissionIndex { index }
    }


    /// Gets the number of nanoseconds each tick of a timestamp query represents.
    ///
    /// Returns zero if timestamp queries are unsupported.
    ///
    /// Timestamp values are specified in nanoseconds on WebGPU (see
    /// <https://gpuweb.github.io/gpuweb/#timestamp>), so this is always 1.0 on the
    /// web, but on `wgpu-core` a manual conversion is required.
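    ///
    /// For example, to convert a raw timestamp-query delta (in ticks) into
    /// nanoseconds (the tick values here are hypothetical):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// let (start_ticks, end_ticks) = (1_000u64, 5_000u64);
    /// let elapsed_ns = (end_ticks - start_ticks) as f64 * queue.get_timestamp_period() as f64;
    /// ```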
    pub fn get_timestamp_period(&self) -> f32 {
        self.inner.get_timestamp_period()
    }

    /// Registers a callback to be invoked when the previous call to [`Queue::submit`]
    /// finishes running on the GPU. The callback being invoked implies that all
    /// mapped-buffer callbacks which were registered before this call will have been called.
    ///
    /// For the callback to run, either `queue.submit(..)`, `instance.poll_all(..)`, or
    /// `device.poll(..)` must be called elsewhere in the runtime, possibly integrated
    /// into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls one of the above
    /// functions after the GPU work has completed. There are no restrictions on the
    /// code you can run in the callback; however, on native, the call to the polling
    /// function will not return until the callback returns, so prefer keeping callbacks
    /// short and using them to set flags, send messages, etc.
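    ///
    /// A minimal sketch using a channel to wait for the most recent submission
    /// (assuming some other thread or event loop is calling `device.poll(..)`):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// let (tx, rx) = std::sync::mpsc::channel();
    /// queue.on_submitted_work_done(move || tx.send(()).unwrap());
    /// queue.submit([]); // flush so the callback is registered with a submission
    /// rx.recv().unwrap(); // blocks until the GPU work has finished
    /// ```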
    pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
        self.inner.on_submitted_work_done(Box::new(callback));
    }
}