wgpu/api/queue.rs
use std::{
    ops::{Deref, DerefMut},
    sync::Arc,
    thread,
};

use crate::context::{DynContext, QueueWriteBuffer};
use crate::*;

/// Handle to a command queue on a device.
///
/// A `Queue` executes recorded [`CommandBuffer`] objects and provides convenience methods
/// for writing to [buffers](Queue::write_buffer) and [textures](Queue::write_texture).
/// It can be created along with a [`Device`] by calling [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue).
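///
/// A minimal sketch of obtaining a `Queue` (assuming the default
/// [`DeviceDescriptor`] is sufficient for your use case):
///
/// ```no_run
/// # async fn example(adapter: wgpu::Adapter) {
/// let (device, queue) = adapter
///     .request_device(&wgpu::DeviceDescriptor::default(), None)
///     .await
///     .unwrap();
/// # }
/// ```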
#[derive(Debug)]
pub struct Queue {
    pub(crate) context: Arc<C>,
    pub(crate) data: Box<Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Queue: Send, Sync);

impl Drop for Queue {
    fn drop(&mut self) {
        if !thread::panicking() {
            self.context.queue_drop(self.data.as_ref());
        }
    }
}

/// Identifier for a particular call to [`Queue::submit`]. Can be used
/// as part of an argument to [`Device::poll`] to block for a particular
/// submission to finish.
///
/// This type is unique to the Rust API of `wgpu`.
/// There is no analogue in the WebGPU specification.
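///
/// A short sketch of blocking on a particular submission, using the
/// [`Maintain::WaitForSubmissionIndex`] variant directly:
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// # let queue: wgpu::Queue = todo!();
/// let index = queue.submit([]);
/// // Block until that submission has finished on the GPU.
/// device.poll(wgpu::Maintain::WaitForSubmissionIndex(index));
/// ```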
#[derive(Debug, Clone)]
pub struct SubmissionIndex {
    #[cfg_attr(not(native), allow(dead_code))]
    pub(crate) data: Arc<crate::Data>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);

pub use wgt::Maintain as MaintainBase;
/// Passed to [`Device::poll`] to control whether and how it should block.
pub type Maintain = wgt::Maintain<SubmissionIndex>;
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Maintain: Send, Sync);

/// A write-only view into a staging buffer.
///
/// Reading from this buffer won't yield the contents of the buffer from the
/// GPU and is likely to be slow. Because of this, although [`AsMut`] is
/// implemented for this type, [`AsRef`] is not.
pub struct QueueWriteBufferView<'a> {
    queue: &'a Queue,
    buffer: &'a Buffer,
    offset: BufferAddress,
    inner: Box<dyn QueueWriteBuffer>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(QueueWriteBufferView<'_>: Send, Sync);

impl Deref for QueueWriteBufferView<'_> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        log::warn!("Reading from a QueueWriteBufferView won't yield the contents of the buffer and may be slow.");
        self.inner.slice()
    }
}

impl DerefMut for QueueWriteBufferView<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl AsMut<[u8]> for QueueWriteBufferView<'_> {
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Drop for QueueWriteBufferView<'_> {
    fn drop(&mut self) {
        DynContext::queue_write_staging_buffer(
            &*self.queue.context,
            self.queue.data.as_ref(),
            self.buffer.data.as_ref(),
            self.offset,
            &*self.inner,
        );
    }
}

impl Queue {
    /// Schedule a data write into `buffer` starting at `offset`.
    ///
    /// This method fails if `data` overruns the size of `buffer` starting at `offset`.
    ///
    /// This does *not* submit the transfer to the GPU immediately. Calls to
    /// `write_buffer` begin execution only on the next call to
    /// [`Queue::submit`]. To get a set of scheduled transfers started
    /// immediately, it's fine to call `submit` with no command buffers at all:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// queue.submit([]);
    /// ```
    ///
    /// However, `data` will be immediately copied into staging memory, so the
    /// caller may discard it any time after this call completes.
    ///
    /// If possible, consider using [`Queue::write_buffer_with`] instead. That
    /// method avoids an intermediate copy and is often able to transfer data
    /// more efficiently than this one.
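    ///
    /// A small usage sketch (assuming `buffer` was created with
    /// [`BufferUsages::COPY_DST`] and is at least 16 bytes long):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// queue.write_buffer(&buffer, 0, &[0u8; 16]);
    /// ```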
    pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
        DynContext::queue_write_buffer(
            &*self.context,
            self.data.as_ref(),
            buffer.data.as_ref(),
            offset,
            data,
        )
    }

    /// Write to a buffer via a directly mapped staging buffer.
    ///
    /// Returns a [`QueueWriteBufferView`] which, when dropped, schedules a copy
    /// of its contents into `buffer` at `offset`. The returned view
    /// dereferences to a `size`-byte long `&mut [u8]`, in which you should
    /// store the data you would like written to `buffer`.
    ///
    /// This method may perform transfers faster than [`Queue::write_buffer`],
    /// because the returned [`QueueWriteBufferView`] is actually the staging
    /// buffer for the write, mapped into the caller's address space. Writing
    /// your data directly into this staging buffer avoids the temporary
    /// CPU-side buffer needed by `write_buffer`.
    ///
    /// Reading from the returned view is slow, and will not yield the current
    /// contents of `buffer`.
    ///
    /// Note that dropping the [`QueueWriteBufferView`] does *not* submit the
    /// transfer to the GPU immediately. The transfer begins only on the next
    /// call to [`Queue::submit`] after the view is dropped. To get a set of
    /// scheduled transfers started immediately, it's fine to call `submit` with
    /// no command buffers at all:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// queue.submit([]);
    /// ```
    ///
    /// This method fails if `size` is greater than the size of `buffer` starting at `offset`.
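    ///
    /// A short sketch of the intended pattern, writing 16 bytes at offset 0
    /// (again assuming `buffer` has [`BufferUsages::COPY_DST`]):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// let size = wgpu::BufferSize::new(16).unwrap();
    /// if let Some(mut view) = queue.write_buffer_with(&buffer, 0, size) {
    ///     view.copy_from_slice(&[0u8; 16]);
    /// } // Dropping `view` here schedules the copy into `buffer`.
    /// ```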
    #[must_use]
    pub fn write_buffer_with<'a>(
        &'a self,
        buffer: &'a Buffer,
        offset: BufferAddress,
        size: BufferSize,
    ) -> Option<QueueWriteBufferView<'a>> {
        profiling::scope!("Queue::write_buffer_with");
        DynContext::queue_validate_write_buffer(
            &*self.context,
            self.data.as_ref(),
            buffer.data.as_ref(),
            offset,
            size,
        )?;
        let staging_buffer =
            DynContext::queue_create_staging_buffer(&*self.context, self.data.as_ref(), size)?;
        Some(QueueWriteBufferView {
            queue: self,
            buffer,
            offset,
            inner: staging_buffer,
        })
    }

    /// Schedule a write of some data into a texture.
    ///
    /// * `data` contains the texels to be written, which must be in
    ///   [the same format as the texture](TextureFormat).
    /// * `data_layout` describes the memory layout of `data`, whose rows
    ///   do not necessarily need to be tightly packed.
    /// * `texture` specifies the texture to write into, and the location within the
    ///   texture (coordinate offset, mip level) that will be overwritten.
    /// * `size` is the size, in texels, of the region to be written.
    ///
    /// This method fails if `size` overruns the size of `texture`, or if `data` is too short.
    ///
    /// This does *not* submit the transfer to the GPU immediately. Calls to
    /// `write_texture` begin execution only on the next call to
    /// [`Queue::submit`]. To get a set of scheduled transfers started
    /// immediately, it's fine to call `submit` with no command buffers at all:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// queue.submit([]);
    /// ```
    ///
    /// However, `data` will be immediately copied into staging memory, so the
    /// caller may discard it any time after this call completes.
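    ///
    /// A sketch writing one full 64×64 RGBA8 mip level (assuming `texture`
    /// was created with [`TextureUsages::COPY_DST`] and a matching format):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let texture: wgpu::Texture = todo!();
    /// let data = vec![0u8; 64 * 64 * 4]; // Tightly packed RGBA8 texels.
    /// queue.write_texture(
    ///     texture.as_image_copy(),
    ///     &data,
    ///     wgpu::ImageDataLayout {
    ///         offset: 0,
    ///         bytes_per_row: Some(64 * 4),
    ///         rows_per_image: Some(64),
    ///     },
    ///     wgpu::Extent3d {
    ///         width: 64,
    ///         height: 64,
    ///         depth_or_array_layers: 1,
    ///     },
    /// );
    /// ```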
    pub fn write_texture(
        &self,
        texture: ImageCopyTexture<'_>,
        data: &[u8],
        data_layout: ImageDataLayout,
        size: Extent3d,
    ) {
        DynContext::queue_write_texture(
            &*self.context,
            self.data.as_ref(),
            texture,
            data,
            data_layout,
            size,
        )
    }

    /// Schedule a copy of data from `source` into `dest`.
    #[cfg(any(webgpu, webgl))]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::ImageCopyExternalImage,
        dest: crate::ImageCopyTextureTagged<'_>,
        size: Extent3d,
    ) {
        DynContext::queue_copy_external_image_to_texture(
            &*self.context,
            self.data.as_ref(),
            source,
            dest,
            size,
        )
    }

    /// Submits a series of finished command buffers for execution.
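    ///
    /// A minimal sketch of recording and submitting one command buffer:
    ///
    /// ```no_run
    /// # let device: wgpu::Device = todo!();
    /// # let queue: wgpu::Queue = todo!();
    /// let mut encoder =
    ///     device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
    /// // Record passes and copies with `encoder` here.
    /// queue.submit([encoder.finish()]);
    /// ```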
    pub fn submit<I: IntoIterator<Item = CommandBuffer>>(
        &self,
        command_buffers: I,
    ) -> SubmissionIndex {
        let mut command_buffers = command_buffers
            .into_iter()
            .map(|mut comb| comb.data.take().unwrap());

        let data =
            DynContext::queue_submit(&*self.context, self.data.as_ref(), &mut command_buffers);

        SubmissionIndex { data }
    }

    /// Returns the number of nanoseconds each tick of a timestamp query represents.
    ///
    /// Returns zero if timestamp queries are unsupported.
    ///
    /// Timestamp values are represented in nanoseconds on WebGPU (see
    /// <https://gpuweb.github.io/gpuweb/#timestamp>), so this is always 1.0 on the
    /// web, but on wgpu-core a manual conversion is required.
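    ///
    /// A sketch of that conversion; `start_ticks` and `end_ticks` stand in for
    /// raw values resolved from a timestamp [`QuerySet`]:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let (start_ticks, end_ticks): (u64, u64) = todo!();
    /// let period = queue.get_timestamp_period();
    /// let elapsed_ns = (end_ticks - start_ticks) as f64 * period as f64;
    /// ```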
    pub fn get_timestamp_period(&self) -> f32 {
        DynContext::queue_get_timestamp_period(&*self.context, self.data.as_ref())
    }

    /// Registers a callback that is invoked when the previous call to [`Queue::submit`]
    /// finishes running on the GPU. This callback being called implies that all mapped
    /// buffer callbacks which were registered before this call will have been called.
    ///
    /// For the callback to run, either `queue.submit(..)`, `instance.poll_all(..)`, or
    /// `device.poll(..)` must be called elsewhere in the runtime, possibly integrated
    /// into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls one of the above
    /// functions after the GPU work has completed. There are no restrictions on the
    /// code you can run in the callback; however, on native, the polling call will not
    /// return until the callback returns, so prefer keeping callbacks short: set
    /// flags, send messages, and so on.
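    ///
    /// A small sketch using a channel to observe completion (the device must
    /// still be polled elsewhere for the callback to fire):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// let (sender, receiver) = std::sync::mpsc::channel();
    /// queue.submit([]);
    /// queue.on_submitted_work_done(move || sender.send(()).unwrap());
    /// // Elsewhere, after `device.poll(..)` observes completion:
    /// receiver.recv().unwrap();
    /// ```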
    pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
        DynContext::queue_on_submitted_work_done(
            &*self.context,
            self.data.as_ref(),
            Box::new(callback),
        )
    }
}