wgpu_core/device/queue.rs

1#[cfg(feature = "trace")]
2use crate::device::trace::Action;
3use crate::{
4    api_log,
5    command::{
6        extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
7        ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide,
8        TexelCopyTextureInfo, TransferError,
9    },
10    conv,
11    device::{DeviceError, WaitIdleError},
12    get_lowest_common_denom,
13    global::Global,
14    id::{self, QueueId},
15    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
16    lock::{rank, Mutex, MutexGuard, RwLockWriteGuard},
17    resource::{
18        Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
19        DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled,
20        ParentDevice, ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable,
21    },
22    resource_log,
23    snatch::SnatchGuard,
24    track::{self, Tracker, TrackerIndex},
25    FastHashMap, SubmissionIndex,
26};
27
28use smallvec::SmallVec;
29
30use crate::scratch::ScratchBuffer;
31use std::{
32    iter,
33    mem::{self, ManuallyDrop},
34    ptr::NonNull,
35    sync::{atomic::Ordering, Arc},
36};
37use thiserror::Error;
38
39use super::{life::LifetimeTracker, Device};
40
41pub struct Queue {
42    raw: Box<dyn hal::DynQueue>,
43    pub(crate) pending_writes: Mutex<PendingWrites>,
44    life_tracker: Mutex<LifetimeTracker>,
45    // The device needs to be dropped last (`Device.zero_buffer` might be referenced by the encoder in pending writes).
46    pub(crate) device: Arc<Device>,
47}
48
49impl Queue {
50    pub(crate) fn new(
51        device: Arc<Device>,
52        raw: Box<dyn hal::DynQueue>,
53    ) -> Result<Self, DeviceError> {
54        let pending_encoder = device
55            .command_allocator
56            .acquire_encoder(device.raw(), raw.as_ref())
57            .map_err(DeviceError::from_hal)?;
65
66        let mut pending_writes = PendingWrites::new(pending_encoder);
67
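        // Record commands that zero-fill `device.zero_buffer` and leave it in
        // the COPY_SRC state; it is used later as the copy source when clearing
        // textures (see the `clear_texture` calls further down in this file).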
68        let zero_buffer = device.zero_buffer.as_ref();
69        pending_writes.activate();
70        unsafe {
71            pending_writes
72                .command_encoder
73                .transition_buffers(&[hal::BufferBarrier {
74                    buffer: zero_buffer,
75                    usage: hal::StateTransition {
76                        from: hal::BufferUses::empty(),
77                        to: hal::BufferUses::COPY_DST,
78                    },
79                }]);
80            pending_writes
81                .command_encoder
82                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
83            pending_writes
84                .command_encoder
85                .transition_buffers(&[hal::BufferBarrier {
86                    buffer: zero_buffer,
87                    usage: hal::StateTransition {
88                        from: hal::BufferUses::COPY_DST,
89                        to: hal::BufferUses::COPY_SRC,
90                    },
91                }]);
92        }
93
94        Ok(Queue {
95            raw,
96            device,
97            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
98            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
99        })
100    }
101
102    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
103        self.raw.as_ref()
104    }
105
106    #[track_caller]
107    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
108        self.life_tracker.lock()
109    }
110
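    /// Check which submissions up to `submission_index` have completed.
    ///
    /// Returns the submitted-work-done closures for those submissions, the
    /// buffer-mapping closures that are now ready to run, and whether the
    /// queue has no more work in flight. The closures must be fired by the
    /// caller once all locks have been released.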
111    pub(crate) fn maintain(
112        &self,
113        submission_index: u64,
114        snatch_guard: &SnatchGuard,
115    ) -> (
116        SmallVec<[SubmittedWorkDoneClosure; 1]>,
117        Vec<super::BufferMapPendingClosure>,
118        bool,
119    ) {
120        let mut life_tracker = self.lock_life();
121        let submission_closures = life_tracker.triage_submissions(submission_index);
122
123        let mapping_closures = life_tracker.handle_mapping(snatch_guard);
124
125        let queue_empty = life_tracker.queue_empty();
126
127        (submission_closures, mapping_closures, queue_empty)
128    }
129}
130
131crate::impl_resource_type!(Queue);
132// TODO: https://github.com/gfx-rs/wgpu/issues/4014
133impl Labeled for Queue {
134    fn label(&self) -> &str {
135        ""
136    }
137}
138crate::impl_parent_device!(Queue);
139crate::impl_storage_item!(Queue);
140
141impl Drop for Queue {
142    fn drop(&mut self) {
143        resource_log!("Drop {}", self.error_ident());
144
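        // Before tearing the queue down, wait for everything submitted so far
        // to finish on the GPU; otherwise we would destroy resources that the
        // GPU may still be using (see the UB note below).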
145        let last_successful_submission_index = self
146            .device
147            .last_successful_submission_index
148            .load(Ordering::Acquire);
149
150        let fence = self.device.fence.read();
151
152        // Try waiting on the last submission using the following sequence of timeouts
153        let timeouts_in_ms = [100, 200, 400, 800, 1600, 3200];
154
155        for (i, timeout_ms) in timeouts_in_ms.into_iter().enumerate() {
156            let is_last_iter = i == timeouts_in_ms.len() - 1;
157
158            api_log!(
159                "Waiting on last submission. try: {}/{}. timeout: {}ms",
160                i + 1,
161                timeouts_in_ms.len(),
162                timeout_ms
163            );
164
165            let wait_res = unsafe {
166                self.device.raw().wait(
167                    fence.as_ref(),
168                    last_successful_submission_index,
169                    #[cfg(not(target_arch = "wasm32"))]
170                    timeout_ms,
171                    #[cfg(target_arch = "wasm32")]
172                    0, // WebKit and Chromium don't support a non-0 timeout
173                )
174            };
175            // Note: If we don't panic below we are in UB land (destroying resources while they are still in use by the GPU).
176            match wait_res {
177                Ok(true) => break,
178                Ok(false) => {
179                    // It's fine that we timed out on WebGL; GL objects can be deleted early as they
180                    // will be kept around by the driver if GPU work hasn't finished.
181                    // Moreover, the way we emulate read mappings on WebGL allows us to execute map_buffer earlier than on other
182                    // backends since getBufferSubData is synchronous with respect to the other previously enqueued GL commands.
183                    // Relying on this behavior breaks the clean abstraction wgpu-hal tries to maintain and
184                    // we should find ways to improve this. See https://github.com/gfx-rs/wgpu/issues/6538.
185                    #[cfg(target_arch = "wasm32")]
186                    {
187                        break;
188                    }
189                    #[cfg(not(target_arch = "wasm32"))]
190                    {
191                        if is_last_iter {
192                            panic!(
193                                "We timed out while waiting on the last successful submission to complete!"
194                            );
195                        }
196                    }
197                }
198                Err(e) => match e {
199                    hal::DeviceError::OutOfMemory => {
200                        if is_last_iter {
201                            panic!(
202                                "We ran into an OOM error while waiting on the last successful submission to complete!"
203                            );
204                        }
205                    }
206                    hal::DeviceError::Lost => {
207                        self.device.handle_hal_error(e); // will lose the device
208                        break;
209                    }
210                    hal::DeviceError::ResourceCreationFailed => unreachable!(),
211                    hal::DeviceError::Unexpected => {
212                        panic!(
213                            "We ran into an unexpected error while waiting on the last successful submission to complete!"
214                        );
215                    }
216                },
217            }
218        }
219        drop(fence);
220
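        // Run one final `maintain` so that any outstanding submitted-work-done
        // and buffer-mapping closures are resolved and fired before the queue
        // goes away.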
221        let snatch_guard = self.device.snatchable_lock.read();
222        let (submission_closures, mapping_closures, queue_empty) =
223            self.maintain(last_successful_submission_index, &snatch_guard);
224        drop(snatch_guard);
225
226        assert!(queue_empty);
227
228        let closures = crate::device::UserClosures {
229            mappings: mapping_closures,
230            submissions: submission_closures,
231            device_lost_invocations: SmallVec::new(),
232        };
233
234        closures.fire();
235    }
236}
237
238#[cfg(send_sync)]
239pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
240#[cfg(not(send_sync))]
241pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;
242
243/// A texture or buffer to be freed soon.
244///
245/// This is just a tagged raw texture or buffer, generally about to be added to
246/// some other more specific container like:
247///
248/// - `PendingWrites::temp_resources`: resources used by queue writes and
249///   unmaps, waiting to be folded in with the next queue submission
250///
251/// - `ActiveSubmission::temp_resources`: temporary resources used by a queue
252///   submission, to be freed when it completes
253#[derive(Debug)]
254pub enum TempResource {
255    StagingBuffer(FlushedStagingBuffer),
256    ScratchBuffer(ScratchBuffer),
257    DestroyedBuffer(DestroyedBuffer),
258    DestroyedTexture(DestroyedTexture),
259}
260
261/// A series of raw [`CommandBuffer`]s that have been submitted to a
262/// queue, and the [`wgpu_hal::CommandEncoder`] that built them.
263///
264/// [`CommandBuffer`]: hal::Api::CommandBuffer
265/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
266pub(crate) struct EncoderInFlight {
267    inner: crate::command::CommandEncoder,
268    pub(crate) trackers: Tracker,
269    pub(crate) temp_resources: Vec<TempResource>,
270
271    /// These are the buffers that have been tracked by `PendingWrites`.
272    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
273    /// These are the textures that have been tracked by `PendingWrites`.
274    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
275}
276
277/// A private command encoder for writes made directly on the device
278/// or queue.
279///
280/// Operations like `buffer_unmap`, `queue_write_buffer`, and
281/// `queue_write_texture` need to copy data to the GPU. At the hal
282/// level, this must be done by encoding and submitting commands, but
283/// these operations are not associated with any specific wgpu command
284/// buffer.
285///
286/// Instead, `Device::pending_writes` owns one of these values, which
287/// has its own hal command encoder and resource lists. The commands
288/// accumulated here are automatically submitted to the queue the next
289/// time the user submits a wgpu command buffer, ahead of the user's
290/// commands.
291///
292/// Important: when locking `pending_writes`, make sure the device's
293/// trackers are not already locked, and hold any tracker locks for the
294/// shortest span possible.
295///
296/// All uses of [`StagingBuffer`]s end up here.
297#[derive(Debug)]
298pub(crate) struct PendingWrites {
299    // The command encoder needs to be destroyed before any other resource in pending writes.
300    pub command_encoder: Box<dyn hal::DynCommandEncoder>,
301
302    /// True if `command_encoder` is in the "recording" state, as
303    /// described in the docs for the [`wgpu_hal::CommandEncoder`]
304    /// trait.
305    ///
306    /// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
307    pub is_recording: bool,
308
309    temp_resources: Vec<TempResource>,
310    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
311    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
312}
313
314impl PendingWrites {
315    pub fn new(command_encoder: Box<dyn hal::DynCommandEncoder>) -> Self {
316        Self {
317            command_encoder,
318            is_recording: false,
319            temp_resources: Vec::new(),
320            dst_buffers: FastHashMap::default(),
321            dst_textures: FastHashMap::default(),
322        }
323    }
324
325    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
326        self.dst_buffers
327            .insert(buffer.tracker_index(), buffer.clone());
328    }
329
330    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
331        self.dst_textures
332            .insert(texture.tracker_index(), texture.clone());
333    }
334
335    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
336        self.dst_buffers.contains_key(&buffer.tracker_index())
337    }
338
339    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
340        self.dst_textures.contains_key(&texture.tracker_index())
341    }
342
343    pub fn consume_temp(&mut self, resource: TempResource) {
344        self.temp_resources.push(resource);
345    }
346
347    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
348        self.temp_resources
349            .push(TempResource::StagingBuffer(buffer));
350    }
351
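    /// If any commands have been recorded, close the current encoder and
    /// package it, together with the accumulated temporary resources and the
    /// tracked destination buffers/textures, as an [`EncoderInFlight`] to be
    /// submitted ahead of the user's command buffers; a fresh encoder is
    /// installed for future pending writes. Returns `Ok(None)` if nothing was
    /// recorded since the last submission.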
352    fn pre_submit(
353        &mut self,
354        command_allocator: &CommandAllocator,
355        device: &Arc<Device>,
356        queue: &Queue,
357    ) -> Result<Option<EncoderInFlight>, DeviceError> {
358        if self.is_recording {
359            let pending_buffers = mem::take(&mut self.dst_buffers);
360            let pending_textures = mem::take(&mut self.dst_textures);
361
362            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
363                .map_err(|e| device.handle_hal_error(e))?;
364            self.is_recording = false;
365
366            let new_encoder = command_allocator
367                .acquire_encoder(device.raw(), queue.raw())
368                .map_err(|e| device.handle_hal_error(e))?;
369
370            let encoder = EncoderInFlight {
371                inner: crate::command::CommandEncoder {
372                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
373                    list: vec![cmd_buf],
374                    device: device.clone(),
375                    is_open: false,
376                    hal_label: None,
377                },
378                trackers: Tracker::new(),
379                temp_resources: mem::take(&mut self.temp_resources),
380                pending_buffers,
381                pending_textures,
382            };
383            Ok(Some(encoder))
384        } else {
385            self.dst_buffers.clear();
386            self.dst_textures.clear();
387            Ok(None)
388        }
389    }
390
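    /// Ensure the internal command encoder is in the "recording" state,
    /// beginning a new encoding pass if necessary, and return it.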
391    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
392        if !self.is_recording {
393            unsafe {
394                self.command_encoder
395                    .begin_encoding(Some("(wgpu internal) PendingWrites"))
396                    .unwrap();
397            }
398            self.is_recording = true;
399        }
400        self.command_encoder.as_mut()
401    }
402}
403
404impl Drop for PendingWrites {
405    fn drop(&mut self) {
406        unsafe {
407            if self.is_recording {
408                self.command_encoder.discard_encoding();
409            }
410        }
411    }
412}
413
414#[derive(Clone, Debug, Error)]
415#[non_exhaustive]
416pub enum QueueWriteError {
417    #[error(transparent)]
418    Queue(#[from] DeviceError),
419    #[error(transparent)]
420    Transfer(#[from] TransferError),
421    #[error(transparent)]
422    MemoryInitFailure(#[from] ClearError),
423    #[error(transparent)]
424    DestroyedResource(#[from] DestroyedResourceError),
425    #[error(transparent)]
426    InvalidResource(#[from] InvalidResourceError),
427}
428
429#[derive(Clone, Debug, Error)]
430#[non_exhaustive]
431pub enum QueueSubmitError {
432    #[error(transparent)]
433    Queue(#[from] DeviceError),
434    #[error(transparent)]
435    DestroyedResource(#[from] DestroyedResourceError),
436    #[error(transparent)]
437    Unmap(#[from] BufferAccessError),
438    #[error("{0} is still mapped")]
439    BufferStillMapped(ResourceErrorIdent),
440    #[error(transparent)]
441    InvalidResource(#[from] InvalidResourceError),
442    #[error(transparent)]
443    CommandEncoder(#[from] CommandEncoderError),
444    #[error(transparent)]
445    ValidateBlasActionsError(#[from] crate::ray_tracing::ValidateBlasActionsError),
446    #[error(transparent)]
447    ValidateTlasActionsError(#[from] crate::ray_tracing::ValidateTlasActionsError),
448}
449
450//TODO: move out common parts of write_xxx.
451
452impl Queue {
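    /// Schedule `data` to be written into `buffer` at `buffer_offset`.
    ///
    /// The data is copied into a staging buffer and the transfer is recorded
    /// on the pending-writes encoder, so it is submitted ahead of the next
    /// queue submission. Writes of length zero are ignored.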
453    pub fn write_buffer(
454        &self,
455        buffer: Fallible<Buffer>,
456        buffer_offset: wgt::BufferAddress,
457        data: &[u8],
458    ) -> Result<(), QueueWriteError> {
459        profiling::scope!("Queue::write_buffer");
460        api_log!("Queue::write_buffer");
461
462        let buffer = buffer.get()?;
463
464        let data_size = data.len() as wgt::BufferAddress;
465
466        self.same_device_as(buffer.as_ref())?;
467
468        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
469            data_size
470        } else {
471            log::trace!("Ignoring write_buffer of size 0");
472            return Ok(());
473        };
474
475        let snatch_guard = self.device.snatchable_lock.read();
476
477        // Platform validation requires that the staging buffer always be
478        // freed, even if an error occurs. All paths from here must call
479        // `pending_writes.consume`.
480        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;
481        let mut pending_writes = self.pending_writes.lock();
482
483        let staging_buffer = {
484            profiling::scope!("copy");
485            staging_buffer.write(data);
486            staging_buffer.flush()
487        };
488
489        let result = self.write_staging_buffer_impl(
490            &snatch_guard,
491            &mut pending_writes,
492            &staging_buffer,
493            buffer,
494            buffer_offset,
495        );
496
497        pending_writes.consume(staging_buffer);
498        result
499    }
500
501    pub fn create_staging_buffer(
502        &self,
503        buffer_size: wgt::BufferSize,
504    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
505        profiling::scope!("Queue::create_staging_buffer");
506        resource_log!("Queue::create_staging_buffer");
507
508        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
509        let ptr = unsafe { staging_buffer.ptr() };
510
511        Ok((staging_buffer, ptr))
512    }
513
514    pub fn write_staging_buffer(
515        &self,
516        buffer: Fallible<Buffer>,
517        buffer_offset: wgt::BufferAddress,
518        staging_buffer: StagingBuffer,
519    ) -> Result<(), QueueWriteError> {
520        profiling::scope!("Queue::write_staging_buffer");
521
522        let buffer = buffer.get()?;
523
524        let snatch_guard = self.device.snatchable_lock.read();
525        let mut pending_writes = self.pending_writes.lock();
526
527        // At this point, we have taken ownership of the staging_buffer from the
528        // user. Platform validation requires that the staging buffer always
529        // be freed, even if an error occurs. All paths from here must call
530        // `pending_writes.consume`.
531        let staging_buffer = staging_buffer.flush();
532
533        let result = self.write_staging_buffer_impl(
534            &snatch_guard,
535            &mut pending_writes,
536            &staging_buffer,
537            buffer,
538            buffer_offset,
539        );
540
541        pending_writes.consume(staging_buffer);
542        result
543    }
544
545    pub fn validate_write_buffer(
546        &self,
547        buffer: Fallible<Buffer>,
548        buffer_offset: u64,
549        buffer_size: wgt::BufferSize,
550    ) -> Result<(), QueueWriteError> {
551        profiling::scope!("Queue::validate_write_buffer");
552
553        let buffer = buffer.get()?;
554
555        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;
556
557        Ok(())
558    }
559
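    /// Check that `buffer` can be the destination of a `buffer_size` write at
    /// `buffer_offset`: its usage must include `COPY_DST`, the offset and size
    /// must be multiples of `COPY_BUFFER_ALIGNMENT`, and the write must not
    /// run past the end of the buffer.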
560    fn validate_write_buffer_impl(
561        &self,
562        buffer: &Buffer,
563        buffer_offset: u64,
564        buffer_size: wgt::BufferSize,
565    ) -> Result<(), TransferError> {
566        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
567        if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
568            return Err(TransferError::UnalignedCopySize(buffer_size.get()));
569        }
570        if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
571            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
572        }
573        if buffer_offset + buffer_size.get() > buffer.size {
574            return Err(TransferError::BufferOverrun {
575                start_offset: buffer_offset,
576                end_offset: buffer_offset + buffer_size.get(),
577                buffer_size: buffer.size,
578                side: CopySide::Destination,
579            });
580        }
581
582        Ok(())
583    }
584
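    /// Encode a copy from `staging_buffer` into `buffer` at `buffer_offset` on
    /// the pending-writes encoder, recording the necessary buffer barriers and
    /// marking the overwritten range as initialized. The caller is responsible
    /// for consuming `staging_buffer` into the pending writes afterwards.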
585    fn write_staging_buffer_impl(
586        &self,
587        snatch_guard: &SnatchGuard,
588        pending_writes: &mut PendingWrites,
589        staging_buffer: &FlushedStagingBuffer,
590        buffer: Arc<Buffer>,
591        buffer_offset: u64,
592    ) -> Result<(), QueueWriteError> {
593        let transition = {
594            let mut trackers = self.device.trackers.lock();
595            trackers
596                .buffers
597                .set_single(&buffer, hal::BufferUses::COPY_DST)
598        };
599
600        let dst_raw = buffer.try_raw(snatch_guard)?;
601
602        self.same_device_as(buffer.as_ref())?;
603
604        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;
605
606        let region = hal::BufferCopy {
607            src_offset: 0,
608            dst_offset: buffer_offset,
609            size: staging_buffer.size,
610        };
611        let barriers = iter::once(hal::BufferBarrier {
612            buffer: staging_buffer.raw(),
613            usage: hal::StateTransition {
614                from: hal::BufferUses::MAP_WRITE,
615                to: hal::BufferUses::COPY_SRC,
616            },
617        })
618        .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
619        .collect::<Vec<_>>();
620        let encoder = pending_writes.activate();
621        unsafe {
622            encoder.transition_buffers(&barriers);
623            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
624        }
625
626        pending_writes.insert_buffer(&buffer);
627
628        // Ensure the overwritten bytes are marked as initialized so
629        // they don't need to be nulled prior to mapping or binding.
630        {
631            buffer
632                .initialization_status
633                .write()
634                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
635        }
636
637        Ok(())
638    }
639
640    pub fn write_texture(
641        &self,
642        destination: wgt::TexelCopyTextureInfo<Fallible<Texture>>,
643        data: &[u8],
644        data_layout: &wgt::TexelCopyBufferLayout,
645        size: &wgt::Extent3d,
646    ) -> Result<(), QueueWriteError> {
647        profiling::scope!("Queue::write_texture");
648        api_log!("Queue::write_texture");
649
650        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
651            log::trace!("Ignoring write_texture of size 0");
652            return Ok(());
653        }
654
655        let dst = destination.texture.get()?;
656        let destination = wgt::TexelCopyTextureInfo {
657            texture: (),
658            mip_level: destination.mip_level,
659            origin: destination.origin,
660            aspect: destination.aspect,
661        };
662
663        self.same_device_as(dst.as_ref())?;
664
665        dst.check_usage(wgt::TextureUsages::COPY_DST)
666            .map_err(TransferError::MissingTextureUsage)?;
667
668        // Note: Doing the copy range validation early is important because it ensures
669        // that the dimensions will not cause overflow in other parts of the validation.
670        let (hal_copy_size, array_layer_count) =
671            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;
672
673        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;
674
675        if !dst_base.aspect.is_one() {
676            return Err(TransferError::CopyAspectNotOne.into());
677        }
678
679        if !conv::is_valid_copy_dst_texture_format(dst.desc.format, destination.aspect) {
680            return Err(TransferError::CopyToForbiddenTextureFormat {
681                format: dst.desc.format,
682                aspect: destination.aspect,
683            }
684            .into());
685        }
686
687        // Note: `_source_bytes_per_array_layer` is ignored since we have a
688        // staging copy, whose layout can use a different value.
689        let (required_bytes_in_copy, _source_bytes_per_array_layer) = validate_linear_texture_data(
690            data_layout,
691            dst.desc.format,
692            destination.aspect,
693            data.len() as wgt::BufferAddress,
694            CopySide::Source,
695            size,
696            false,
697        )?;
698
699        if dst.desc.format.is_depth_stencil_format() {
700            self.device
701                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
702                .map_err(TransferError::from)?;
703        }
704
705        let snatch_guard = self.device.snatchable_lock.read();
706
707        let mut pending_writes = self.pending_writes.lock();
708        let encoder = pending_writes.activate();
709
710        // If the copy does not fully cover the layers, we need to initialize to
711        // zero *first*, as we don't keep track of partial texture layer inits.
712        //
713        // Strictly speaking we only need to clear the areas of a layer that are
714        // left untouched, but this would get increasingly messy.
715        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
716            // volume textures don't have a layer range as array volumes aren't supported
717            0..1
718        } else {
719            destination.origin.z..destination.origin.z + size.depth_or_array_layers
720        };
721        let mut dst_initialization_status = dst.initialization_status.write();
722        if dst_initialization_status.mips[destination.mip_level as usize]
723            .check(init_layer_range.clone())
724            .is_some()
725        {
726            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
727                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
728                    .drain(init_layer_range)
729                    .collect::<Vec<std::ops::Range<u32>>>()
730                {
731                    let mut trackers = self.device.trackers.lock();
732                    crate::command::clear_texture(
733                        &dst,
734                        TextureInitRange {
735                            mip_range: destination.mip_level..(destination.mip_level + 1),
736                            layer_range,
737                        },
738                        encoder,
739                        &mut trackers.textures,
740                        &self.device.alignments,
741                        self.device.zero_buffer.as_ref(),
742                        &snatch_guard,
743                    )
744                    .map_err(QueueWriteError::from)?;
745                }
746            } else {
747                dst_initialization_status.mips[destination.mip_level as usize]
748                    .drain(init_layer_range);
749            }
750        }
751
752        let dst_raw = dst.try_raw(&snatch_guard)?;
753
754        let (block_width, block_height) = dst.desc.format.block_dimensions();
755        let width_in_blocks = size.width / block_width;
756        let height_in_blocks = size.height / block_height;
757
758        let block_size = dst
759            .desc
760            .format
761            .block_copy_size(Some(destination.aspect))
762            .unwrap();
763        let bytes_in_last_row = width_in_blocks * block_size;
764
765        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
766        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);
767
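        // Rows in the staging buffer must satisfy both the device's buffer
        // copy pitch alignment and the format's block size, so round the row
        // stride up to a value that meets both requirements.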
768        let bytes_per_row_alignment = get_lowest_common_denom(
769            self.device.alignments.buffer_copy_pitch.get() as u32,
770            block_size,
771        );
772        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);
773
774        // Platform validation requires that the staging buffer always be
775        // freed, even if an error occurs. All paths from here must call
776        // `pending_writes.consume`.
777        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
778            profiling::scope!("copy aligned");
779            // Fast path if the data is already optimally aligned.
780            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
781            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
782            staging_buffer.write(&data[data_layout.offset as usize..]);
783            staging_buffer
784        } else {
785            profiling::scope!("copy chunked");
786            // Copy row by row into the optimal alignment.
787            let block_rows_in_copy =
788                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
789            let stage_size =
790                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
791                    .unwrap();
792            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
793            let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
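            // The staging buffer holds `rows_per_image` row slots per layer,
            // each padded to `stage_bytes_per_row`; only the meaningful
            // `copy_bytes_per_row` bytes of each source row are copied.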
794            for layer in 0..size.depth_or_array_layers {
795                let rows_offset = layer * rows_per_image;
796                for row in rows_offset..rows_offset + height_in_blocks {
797                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
798                    let dst_offset = row * stage_bytes_per_row;
799                    unsafe {
800                        staging_buffer.write_with_offset(
801                            data,
802                            src_offset as isize,
803                            dst_offset as isize,
804                            copy_bytes_per_row,
805                        )
806                    }
807                }
808            }
809            staging_buffer
810        };
811
812        let staging_buffer = staging_buffer.flush();
813
814        let regions = (0..array_layer_count)
815            .map(|array_layer_offset| {
816                let mut texture_base = dst_base.clone();
817                texture_base.array_layer += array_layer_offset;
818                hal::BufferTextureCopy {
819                    buffer_layout: wgt::TexelCopyBufferLayout {
820                        offset: array_layer_offset as u64
821                            * rows_per_image as u64
822                            * stage_bytes_per_row as u64,
823                        bytes_per_row: Some(stage_bytes_per_row),
824                        rows_per_image: Some(rows_per_image),
825                    },
826                    texture_base,
827                    size: hal_copy_size,
828                }
829            })
830            .collect::<Vec<_>>();
831
832        {
833            let buffer_barrier = hal::BufferBarrier {
834                buffer: staging_buffer.raw(),
835                usage: hal::StateTransition {
836                    from: hal::BufferUses::MAP_WRITE,
837                    to: hal::BufferUses::COPY_SRC,
838                },
839            };
840
841            let mut trackers = self.device.trackers.lock();
842            let transition =
843                trackers
844                    .textures
845                    .set_single(&dst, selector, hal::TextureUses::COPY_DST);
846            let texture_barriers = transition
847                .map(|pending| pending.into_hal(dst_raw))
848                .collect::<Vec<_>>();
849
850            unsafe {
851                encoder.transition_textures(&texture_barriers);
852                encoder.transition_buffers(&[buffer_barrier]);
853                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
854            }
855        }
856
857        pending_writes.consume(staging_buffer);
858        pending_writes.insert_texture(&dst);
859
860        Ok(())
861    }
862
863    #[cfg(webgl)]
864    pub fn copy_external_image_to_texture(
865        &self,
866        source: &wgt::CopyExternalImageSourceInfo,
867        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
868        size: wgt::Extent3d,
869    ) -> Result<(), QueueWriteError> {
870        profiling::scope!("Queue::copy_external_image_to_texture");
871
872        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
873            log::trace!("Ignoring copy_external_image_to_texture of size 0");
874            return Ok(());
875        }
876
877        let mut needs_flag = false;
878        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
879        needs_flag |= source.origin != wgt::Origin2d::ZERO;
880        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
881        #[allow(clippy::bool_comparison)]
882        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
883            needs_flag |= source.flip_y != false;
884            needs_flag |= destination.premultiplied_alpha != false;
885        }
886
887        if needs_flag {
888            self.device
889                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
890                .map_err(TransferError::from)?;
891        }
892
893        let src_width = source.source.width();
894        let src_height = source.source.height();
895
896        let dst = destination.texture.get()?;
897        let premultiplied_alpha = destination.premultiplied_alpha;
898        let destination = wgt::TexelCopyTextureInfo {
899            texture: (),
900            mip_level: destination.mip_level,
901            origin: destination.origin,
902            aspect: destination.aspect,
903        };
904
905        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
906            return Err(
907                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
908            );
909        }
910        if dst.desc.dimension != wgt::TextureDimension::D2 {
911            return Err(TransferError::InvalidDimensionExternal.into());
912        }
913        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
914            .map_err(TransferError::MissingTextureUsage)?;
915        if dst.desc.sample_count != 1 {
916            return Err(TransferError::InvalidSampleCount {
917                sample_count: dst.desc.sample_count,
918            }
919            .into());
920        }
921
922        if source.origin.x + size.width > src_width {
923            return Err(TransferError::TextureOverrun {
924                start_offset: source.origin.x,
925                end_offset: source.origin.x + size.width,
926                texture_size: src_width,
927                dimension: crate::resource::TextureErrorDimension::X,
928                side: CopySide::Source,
929            }
930            .into());
931        }
932        if source.origin.y + size.height > src_height {
933            return Err(TransferError::TextureOverrun {
934                start_offset: source.origin.y,
935                end_offset: source.origin.y + size.height,
936                texture_size: src_height,
937                dimension: crate::resource::TextureErrorDimension::Y,
938                side: CopySide::Source,
939            }
940            .into());
941        }
942        if size.depth_or_array_layers != 1 {
943            return Err(TransferError::TextureOverrun {
944                start_offset: 0,
945                end_offset: size.depth_or_array_layers,
946                texture_size: 1,
947                dimension: crate::resource::TextureErrorDimension::Z,
948                side: CopySide::Source,
949            }
950            .into());
951        }
952
953        // Note: Doing the copy range validation early is important because it ensures
954        // that the dimensions will not cause overflow in other parts of the validation.
955        let (hal_copy_size, _) =
956            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;
957
958        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;
959
960        let mut pending_writes = self.pending_writes.lock();
961        let encoder = pending_writes.activate();
962
963        // If the copy does not fully cover the layers, we need to initialize to
964        // zero *first*, as we don't keep track of partial texture layer inits.
965        //
966        // Strictly speaking we only need to clear the areas of a layer that are
967        // left untouched, but this would get increasingly messy.
968        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
969            // volume textures don't have a layer range as array volumes aren't supported
970            0..1
971        } else {
972            destination.origin.z..destination.origin.z + size.depth_or_array_layers
973        };
974        let mut dst_initialization_status = dst.initialization_status.write();
975        if dst_initialization_status.mips[destination.mip_level as usize]
976            .check(init_layer_range.clone())
977            .is_some()
978        {
979            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
980                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
981                    .drain(init_layer_range)
982                    .collect::<Vec<std::ops::Range<u32>>>()
983                {
984                    let mut trackers = self.device.trackers.lock();
985                    crate::command::clear_texture(
986                        &dst,
987                        TextureInitRange {
988                            mip_range: destination.mip_level..(destination.mip_level + 1),
989                            layer_range,
990                        },
991                        encoder,
992                        &mut trackers.textures,
993                        &self.device.alignments,
994                        self.device.zero_buffer.as_ref(),
995                        &self.device.snatchable_lock.read(),
996                    )
997                    .map_err(QueueWriteError::from)?;
998                }
999            } else {
1000                dst_initialization_status.mips[destination.mip_level as usize]
1001                    .drain(init_layer_range);
1002            }
1003        }
1004
1005        let snatch_guard = self.device.snatchable_lock.read();
1006        let dst_raw = dst.try_raw(&snatch_guard)?;
1007
1008        let regions = hal::TextureCopy {
1009            src_base: hal::TextureCopyBase {
1010                mip_level: 0,
1011                array_layer: 0,
1012                origin: source.origin.to_3d(0),
1013                aspect: hal::FormatAspects::COLOR,
1014            },
1015            dst_base,
1016            size: hal_copy_size,
1017        };
1018
1019        let mut trackers = self.device.trackers.lock();
1020        let transitions = trackers
1021            .textures
1022            .set_single(&dst, selector, hal::TextureUses::COPY_DST);
1023
1024        // `copy_external_image_to_texture` is exclusive to the WebGL backend.
1025        // Don't go through the `DynCommandEncoder` abstraction and directly to the WebGL backend.
1026        let encoder_webgl = encoder
1027            .as_any_mut()
1028            .downcast_mut::<hal::gles::CommandEncoder>()
1029            .unwrap();
1030        let dst_raw_webgl = dst_raw
1031            .as_any()
1032            .downcast_ref::<hal::gles::Texture>()
1033            .unwrap();
1034        let transitions_webgl = transitions.map(|pending| {
1035            let dyn_transition = pending.into_hal(dst_raw);
1036            hal::TextureBarrier {
1037                texture: dst_raw_webgl,
1038                range: dyn_transition.range,
1039                usage: dyn_transition.usage,
1040            }
1041        });
1042
1043        use hal::CommandEncoder as _;
1044        unsafe {
1045            encoder_webgl.transition_textures(transitions_webgl);
1046            encoder_webgl.copy_external_image_to_texture(
1047                source,
1048                dst_raw_webgl,
1049                premultiplied_alpha,
1050                iter::once(regions),
1051            );
1052        }
1053
1054        Ok(())
1055    }
1056
1057    pub fn submit(
1058        &self,
1059        command_buffers: &[Arc<CommandBuffer>],
1060    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
1061        profiling::scope!("Queue::submit");
1062        api_log!("Queue::submit");
1063
1064        let submit_index;
1065
1066        let res = 'error: {
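            // Any validation or HAL failure below breaks out of this labeled
            // block with an `Err`. Cleanup and user callbacks run after the
            // block, once no locks are held.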
1067            let snatch_guard = self.device.snatchable_lock.read();
1068
1069            // Fence lock must be acquired after the snatch lock everywhere to avoid deadlocks.
1070            let mut fence = self.device.fence.write();
1071            submit_index = self
1072                .device
1073                .active_submission_index
1074                .fetch_add(1, Ordering::SeqCst)
1075                + 1;
1076            let mut active_executions = Vec::new();
1077
1078            let mut used_surface_textures = track::TextureUsageScope::default();
1079
1080            // Use a hashmap here to deduplicate the surface textures that are used in the command buffers.
1081            // This avoids Vulkan deadlocks caused by the same surface texture being submitted multiple times.
1082            let mut submit_surface_textures_owned = FastHashMap::default();
1083
1084            {
1085                if !command_buffers.is_empty() {
1086                    profiling::scope!("prepare");
1087
1088                    let mut first_error = None;
1089
1090                    //TODO: if multiple command buffers are submitted, we can re-use the last
1091                    // native command buffer of the previous chain instead of always creating
1092                    // a temporary one, since the chains are not finished.
1093
1094                    // finish all the command buffers first
1095                    for command_buffer in command_buffers {
1096                        profiling::scope!("process command buffer");
1097
1098                        // We reset `used_surface_textures` every time we use it,
1099                        // so make sure to call `set_size` on it first.
1100                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());
1101
1102                        // Note that we are required to invalidate all command buffers in both the success and failure paths.
1103                        // This is why we `continue` and don't early return via `?`.
1104                        #[allow(unused_mut)]
1105                        let mut cmd_buf_data = command_buffer.take_finished();
1106
1107                        #[cfg(feature = "trace")]
1108                        if let Some(ref mut trace) = *self.device.trace.lock() {
1109                            if let Ok(ref mut cmd_buf_data) = cmd_buf_data {
1110                                trace.add(Action::Submit(
1111                                    submit_index,
1112                                    cmd_buf_data.commands.take().unwrap(),
1113                                ));
1114                            }
1115                        }
1116
1117                        if first_error.is_some() {
1118                            continue;
1119                        }
1120
1121                        let mut baked = match cmd_buf_data {
1122                            Ok(cmd_buf_data) => {
1123                                let res = validate_command_buffer(
1124                                    command_buffer,
1125                                    self,
1126                                    &cmd_buf_data,
1127                                    &snatch_guard,
1128                                    &mut submit_surface_textures_owned,
1129                                    &mut used_surface_textures,
1130                                );
1131                                if let Err(err) = res {
1132                                    first_error.get_or_insert(err);
1133                                    continue;
1134                                }
1135                                cmd_buf_data.into_baked_commands()
1136                            }
1137                            Err(err) => {
1138                                first_error.get_or_insert(err.into());
1139                                continue;
1140                            }
1141                        };
1142
1143                        // execute resource transitions
1144                        if let Err(e) = baked.encoder.open_pass(Some("(wgpu internal) Transit")) {
1145                            break 'error Err(e.into());
1146                        }
1147
1148                        //Note: locking the trackers has to be done after the storages
1149                        let mut trackers = self.device.trackers.lock();
1150                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
1151                        {
1152                            break 'error Err(e.into());
1153                        }
1154                        if let Err(e) = baked.initialize_texture_memory(
1155                            &mut trackers,
1156                            &self.device,
1157                            &snatch_guard,
1158                        ) {
1159                            break 'error Err(e.into());
1160                        }
1161
1162                        //Note: stateless trackers are not merged:
1163                        // device already knows these resources exist.
1164                        CommandBuffer::insert_barriers_from_device_tracker(
1165                            baked.encoder.raw.as_mut(),
1166                            &mut trackers,
1167                            &baked.trackers,
1168                            &snatch_guard,
1169                        );
1170
1171                        if let Err(e) = baked.encoder.close_and_push_front() {
1172                            break 'error Err(e.into());
1173                        }
1174
1175                        // Transition surface textures into the `Present` state.
1176                        // Note: we could technically do it after all of the command buffers,
1177                        // but here we have a command encoder at hand, so it's easier to use it.
1178                        if !used_surface_textures.is_empty() {
1179                            if let Err(e) = baked.encoder.open_pass(Some("(wgpu internal) Present"))
1180                            {
1181                                break 'error Err(e.into());
1182                            }
1183                            let texture_barriers = trackers
1184                                .textures
1185                                .set_from_usage_scope_and_drain_transitions(
1186                                    &used_surface_textures,
1187                                    &snatch_guard,
1188                                )
1189                                .collect::<Vec<_>>();
1190                            unsafe {
1191                                baked.encoder.raw.transition_textures(&texture_barriers);
1192                            };
1193                            if let Err(e) = baked.encoder.close() {
1194                                break 'error Err(e.into());
1195                            }
1196                            used_surface_textures = track::TextureUsageScope::default();
1197                        }
1198
1199                        // done
1200                        active_executions.push(EncoderInFlight {
1201                            inner: baked.encoder,
1202                            trackers: baked.trackers,
1203                            temp_resources: baked.temp_resources,
1204                            pending_buffers: FastHashMap::default(),
1205                            pending_textures: FastHashMap::default(),
1206                        });
1207                    }
1208
1209                    if let Some(first_error) = first_error {
1210                        break 'error Err(first_error);
1211                    }
1212                }
1213            }
1214
1215            let mut pending_writes = self.pending_writes.lock();
1216
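            // Surface textures written to via the pending-writes encoder also
            // need to be transitioned to `PRESENT` and handed to the HAL
            // `submit` call, just like the ones used by the command buffers
            // above.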
1217            {
1218                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
1219                for texture in pending_writes.dst_textures.values() {
1220                    match texture.try_inner(&snatch_guard) {
1221                        Ok(TextureInner::Native { .. }) => {}
1222                        Ok(TextureInner::Surface { .. }) => {
1223                            // Compare the Arcs by pointer as Textures don't implement Eq
1224                            submit_surface_textures_owned
1225                                .insert(Arc::as_ptr(texture), texture.clone());
1226
1227                            unsafe {
1228                                used_surface_textures
1229                                    .merge_single(texture, None, hal::TextureUses::PRESENT)
1230                                    .unwrap()
1231                            };
1232                        }
1233                        Err(e) => break 'error Err(e.into()),
1234                    }
1235                }
1236
1237                if !used_surface_textures.is_empty() {
1238                    let mut trackers = self.device.trackers.lock();
1239
1240                    let texture_barriers = trackers
1241                        .textures
1242                        .set_from_usage_scope_and_drain_transitions(
1243                            &used_surface_textures,
1244                            &snatch_guard,
1245                        )
1246                        .collect::<Vec<_>>();
1247                    unsafe {
1248                        pending_writes
1249                            .command_encoder
1250                            .transition_textures(&texture_barriers);
1251                    };
1252                }
1253            }
1254
1255            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
1256                Ok(Some(pending_execution)) => {
1257                    active_executions.insert(0, pending_execution);
1258                }
1259                Ok(None) => {}
1260                Err(e) => break 'error Err(e.into()),
1261            }
1262            let hal_command_buffers = active_executions
1263                .iter()
1264                .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
1265                .collect::<Vec<_>>();
1266
1267            {
1268                let mut submit_surface_textures =
1269                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
1270                        submit_surface_textures_owned.len(),
1271                    );
1272
1273                for texture in submit_surface_textures_owned.values() {
1274                    let raw = match texture.inner.get(&snatch_guard) {
1275                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
1276                        _ => unreachable!(),
1277                    };
1278                    submit_surface_textures.push(raw);
1279                }
1280
1281                if let Err(e) = unsafe {
1282                    self.raw().submit(
1283                        &hal_command_buffers,
1284                        &submit_surface_textures,
1285                        (fence.as_mut(), submit_index),
1286                    )
1287                }
1288                .map_err(|e| self.device.handle_hal_error(e))
1289                {
1290                    break 'error Err(e.into());
1291                }
1292
1293                // Advance the successful submission index.
1294                self.device
1295                    .last_successful_submission_index
1296                    .fetch_max(submit_index, Ordering::SeqCst);
1297            }
1298
1299            profiling::scope!("cleanup");
1300
1301            // This will register the new submission with the lifetime tracker.
1302            self.lock_life()
1303                .track_submission(submit_index, active_executions);
1304            drop(pending_writes);
1305
1306            // This will schedule destruction of all resources that are no longer needed
1307            // by the user but used in the command stream, among other things.
1308            let fence_guard = RwLockWriteGuard::downgrade(fence);
1309            let (closures, _) =
1310                match self
1311                    .device
1312                    .maintain(fence_guard, wgt::Maintain::Poll, snatch_guard)
1313                {
1314                    Ok(closures) => closures,
1315                    Err(WaitIdleError::Device(err)) => {
1316                        break 'error Err(QueueSubmitError::Queue(err))
1317                    }
1318                    Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
1319                };
1320
1321            Ok(closures)
1322        };
1323
1324        let callbacks = match res {
1325            Ok(ok) => ok,
1326            Err(e) => return Err((submit_index, e)),
1327        };
1328
1329        // the closures should execute with nothing locked!
1330        callbacks.fire();
1331
1332        api_log!("Queue::submit returned submit index {submit_index}");
1333
1334        Ok(submit_index)
1335    }
1336
1337    pub fn get_timestamp_period(&self) -> f32 {
1338        unsafe { self.raw().get_timestamp_period() }
1339    }
1340
1341    /// `closure` is guaranteed to be called.
1342    pub fn on_submitted_work_done(
1343        &self,
1344        closure: SubmittedWorkDoneClosure,
1345    ) -> Option<SubmissionIndex> {
1346        api_log!("Queue::on_submitted_work_done");
1347        //TODO: flush pending writes
1348        self.lock_life().add_work_done_closure(closure)
1349    }
1350}
1351
1352impl Global {
    pub fn queue_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        // If API tracing is enabled, persist the data blob and record the write action.
        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteBuffer {
                id: buffer_id,
                data: data_path,
                range: buffer_offset..buffer_offset + data.len() as u64,
                queued: true,
            });
        }

        let buffer = self.hub.buffers.get(buffer_id);
        queue.write_buffer(buffer, buffer_offset, data)
    }

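    /// Creates a staging buffer of `buffer_size` bytes and returns its id along
    /// with a pointer to its mapped memory.
    ///
    /// This is the first half of the two-step write path: the caller fills the
    /// mapping and then hands the staging buffer back via
    /// `queue_write_staging_buffer`, which consumes it. Hedged sketch of the
    /// round trip (ids and `bytes` are assumed to exist on the caller's side):
    ///
    /// ```ignore
    /// let (staging_id, ptr) = global.queue_create_staging_buffer(queue_id, size, None)?;
    /// unsafe {
    ///     std::ptr::copy_nonoverlapping(bytes.as_ptr(), ptr.as_ptr(), bytes.len());
    /// }
    /// global.queue_write_staging_buffer(queue_id, buffer_id, 0, staging_id)?;
    /// ```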
    pub fn queue_create_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_size: wgt::BufferSize,
        id_in: Option<id::StagingBufferId>,
    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;

        let fid = self.hub.staging_buffers.prepare(id_in);
        let id = fid.assign(staging_buffer);

        Ok((id, ptr))
    }

    pub fn queue_write_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        staging_buffer_id: id::StagingBufferId,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        // The staging buffer is removed from the hub: it is consumed by this call.
        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
    }

    pub fn queue_validate_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
    }

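    /// Schedules a write of `data` into a texture, interpreting it according to
    /// `data_layout` (`offset`, `bytes_per_row`, `rows_per_image`).
    ///
    /// Hedged sketch for a single 256x256 RGBA8 mip level (the concrete numbers
    /// are illustrative only):
    ///
    /// ```ignore
    /// let layout = wgt::TexelCopyBufferLayout {
    ///     offset: 0,
    ///     bytes_per_row: Some(256 * 4),
    ///     rows_per_image: Some(256),
    /// };
    /// let extent = wgt::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 };
    /// global.queue_write_texture(queue_id, &destination, &pixels, &layout, &extent)?;
    /// ```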
    pub fn queue_write_texture(
        &self,
        queue_id: QueueId,
        destination: &TexelCopyTextureInfo,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        // If API tracing is enabled, persist the data blob and record the write action.
        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteTexture {
                to: *destination,
                data: data_path,
                layout: *data_layout,
                size: *size,
            });
        }

        // Resolve the texture id into a resource for the internal copy destination.
        let destination = wgt::TexelCopyTextureInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };
        queue.write_texture(destination, data, data_layout, size)
    }

    #[cfg(webgl)]
    pub fn queue_copy_external_image_to_texture(
        &self,
        queue_id: QueueId,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: crate::command::CopyExternalImageDestInfo,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let destination = wgt::CopyExternalImageDestInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
            color_space: destination.color_space,
            premultiplied_alpha: destination.premultiplied_alpha,
        };
        queue.copy_external_image_to_texture(source, destination, size)
    }

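    /// Submits the given finished command buffers and returns the submission
    /// index on success; on failure, the index is returned alongside the error.
    ///
    /// Hedged sketch (the command buffer is assumed to have been recorded and
    /// finished beforehand):
    ///
    /// ```ignore
    /// let submit_index = global
    ///     .queue_submit(queue_id, &[command_buffer_id])
    ///     .map_err(|(_index, error)| error)?;
    /// // `submit_index` can later be used when polling the device to wait for this work.
    /// ```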
    pub fn queue_submit(
        &self,
        queue_id: QueueId,
        command_buffer_ids: &[id::CommandBufferId],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        let queue = self.hub.queues.get(queue_id);
        let command_buffer_guard = self.hub.command_buffers.read();
        let command_buffers = command_buffer_ids
            .iter()
            .map(|id| command_buffer_guard.get(*id))
            .collect::<Vec<_>>();
        // Release the registry read lock before handing the buffers to the queue.
        drop(command_buffer_guard);
        queue.submit(&command_buffers)
    }

    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
        let queue = self.hub.queues.get(queue_id);
        queue.get_timestamp_period()
    }

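    /// Registers `closure` to run once all work submitted to this queue so far
    /// has completed on the GPU. Returns the submission index to wait for; `0`
    /// means no wait is necessary.
    ///
    /// Hedged sketch (how `closure` is constructed depends on the caller):
    ///
    /// ```ignore
    /// let index = global.queue_on_submitted_work_done(queue_id, closure);
    /// if index != 0 {
    ///     // Poll the device up to `index` to drive the callback.
    /// }
    /// ```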
    pub fn queue_on_submitted_work_done(
        &self,
        queue_id: QueueId,
        closure: SubmittedWorkDoneClosure,
    ) -> SubmissionIndex {
        api_log!("Queue::on_submitted_work_done {queue_id:?}");

        //TODO: flush pending writes
        let queue = self.hub.queues.get(queue_id);
        let result = queue.on_submitted_work_done(closure);
        result.unwrap_or(0) // '0' means no wait is necessary
    }
}

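/// Pre-submit validation for a single command buffer: checks that it was
/// recorded on `queue`'s device, that no buffer it uses is destroyed or still
/// mapped, collects any surface textures it touches for presentation tracking,
/// and validates its recorded BLAS/TLAS actions.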
fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &SnatchGuard,
    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;

                match *buffer.map_state.lock() {
                    BufferMapState::Idle => (),
                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let should_extend = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        // Compare the Arcs by pointer as Textures don't implement Eq.
                        submit_surface_textures_owned.insert(Arc::as_ptr(texture), texture.clone());

                        true
                    }
                };
                if should_extend {
                    unsafe {
                        used_surface_textures
                            .merge_single(texture, None, hal::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }

        if let Err(e) = cmd_buf_data.validate_blas_actions() {
            return Err(e.into());
        }
        if let Err(e) = cmd_buf_data.validate_tlas_actions(snatch_guard) {
            return Err(e.into());
        }
    }
    Ok(())
}
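
// The surface-texture bookkeeping above keys a map by `Arc::as_ptr` because the
// texture type has identity but no `Eq`/`Hash`; clones of the same `Arc` collapse
// into a single entry while keeping the resource alive. The test below is a
// minimal, std-only sketch of that pattern and is not part of wgpu-core's API.
#[cfg(test)]
mod arc_ptr_dedup_sketch {
    use std::collections::HashMap;
    use std::sync::Arc;

    /// Hypothetical stand-in for a resource with identity but no `Eq`.
    struct Resource;

    #[test]
    fn clones_of_one_arc_dedup_to_a_single_entry() {
        let a = Arc::new(Resource);
        let a_again = Arc::clone(&a);
        let b = Arc::new(Resource);

        let mut owned: HashMap<*const Resource, Arc<Resource>> = HashMap::new();
        for res in [&a, &a_again, &b] {
            // Key by pointer identity, mirroring `submit_surface_textures_owned`.
            owned.insert(Arc::as_ptr(res), Arc::clone(res));
        }

        // `a` and `a_again` point at the same allocation, so only two entries remain.
        assert_eq!(owned.len(), 2);
    }
}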