#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
        ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide,
        ImageCopyTexture, TransferError,
    },
    conv,
    device::{DeviceError, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    hal_label,
    id::{self, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::RwLockWriteGuard,
    resource::{
        Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
        DestroyedTexture, Fallible, FlushedStagingBuffer, InvalidResourceError, Labeled,
        ParentDevice, ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable,
    },
    resource_log,
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};

use smallvec::SmallVec;

use std::{
    iter,
    mem::{self, ManuallyDrop},
    ptr::NonNull,
    sync::{atomic::Ordering, Arc},
};
use thiserror::Error;

use super::Device;

pub struct Queue {
    raw: ManuallyDrop<Box<dyn hal::DynQueue>>,
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(device: Arc<Device>, raw: Box<dyn hal::DynQueue>) -> Self {
        Queue {
            raw: ManuallyDrop::new(raw),
            device,
        }
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }
}

crate::impl_resource_type!(Queue);
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());
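        // SAFETY: `drop` runs at most once, so `self.raw` is taken exactly
        // once and never touched again.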
        let queue = unsafe { ManuallyDrop::take(&mut self.raw) };
        self.device.release_queue(queue);
    }
}

#[repr(C)]
pub struct SubmittedWorkDoneClosureC {
    pub callback: unsafe extern "C" fn(user_data: *mut u8),
    pub user_data: *mut u8,
}

#[cfg(send_sync)]
unsafe impl Send for SubmittedWorkDoneClosureC {}

pub struct SubmittedWorkDoneClosure {
    inner: SubmittedWorkDoneClosureInner,
}

#[cfg(send_sync)]
type SubmittedWorkDoneCallback = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
type SubmittedWorkDoneCallback = Box<dyn FnOnce() + 'static>;

enum SubmittedWorkDoneClosureInner {
    Rust { callback: SubmittedWorkDoneCallback },
    C { inner: SubmittedWorkDoneClosureC },
}

impl SubmittedWorkDoneClosure {
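    /// Wrap a boxed Rust callback. A minimal usage sketch (the surrounding
    /// queue setup is assumed):
    ///
    /// ```ignore
    /// let closure = SubmittedWorkDoneClosure::from_rust(Box::new(|| {
    ///     println!("submitted work done");
    /// }));
    /// queue.on_submitted_work_done(closure);
    /// ```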
    pub fn from_rust(callback: SubmittedWorkDoneCallback) -> Self {
        Self {
            inner: SubmittedWorkDoneClosureInner::Rust { callback },
        }
    }

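    /// # Safety
    ///
    /// - `inner.callback` must be safe to call with `inner.user_data`.
    /// - Both pointers must remain valid until the callback is invoked,
    ///   which may happen at an unspecified later time.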
    pub unsafe fn from_c(inner: SubmittedWorkDoneClosureC) -> Self {
        Self {
            inner: SubmittedWorkDoneClosureInner::C { inner },
        }
    }

    pub(crate) fn call(self) {
        match self.inner {
            SubmittedWorkDoneClosureInner::Rust { callback } => callback(),
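            // SAFETY: upheld by the contract of `from_c`.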
            SubmittedWorkDoneClosureInner::C { inner } => unsafe {
                (inner.callback)(inner.user_data)
            },
        }
    }
}

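/// A resource that must be kept alive until its last queue submission has
/// finished executing on the GPU, at which point it can be freed.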
#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

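/// A series of raw [`hal::DynCommandBuffer`]s that have been submitted to a
/// queue, together with the trackers and resources that must stay alive until
/// the GPU has finished executing them.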
pub(crate) struct EncoderInFlight {
    raw: Box<dyn hal::DynCommandEncoder>,
    cmd_buffers: Vec<Box<dyn hal::DynCommandBuffer>>,
    pub(crate) trackers: Tracker,

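    /// Buffers and textures written by `PendingWrites` that must stay alive
    /// for this submission.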
    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
}

impl EncoderInFlight {
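    /// Free our command buffers, returning the raw command encoder, reset and
    /// ready for reuse.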
    pub(crate) unsafe fn land(mut self) -> Box<dyn hal::DynCommandEncoder> {
        unsafe { self.raw.reset_all(self.cmd_buffers) };
        {
            profiling::scope!("drop command buffer trackers");
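            // Dropping the trackers and pending maps releases a reference to
            // every resource used by the command buffers, which can be
            // expensive.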
            drop(self.trackers);
            drop(self.pending_buffers);
            drop(self.pending_textures);
        }
        self.raw
    }
}

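/// A command encoder owned by the device that accumulates internal writes,
/// such as `queue_write_buffer` and `queue_write_texture`; its commands are
/// flushed ahead of the next user submission.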
#[derive(Debug)]
pub(crate) struct PendingWrites {
    pub command_encoder: Box<dyn hal::DynCommandEncoder>,

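    /// True while `command_encoder` is in the recording state.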
    pub is_recording: bool,

    temp_resources: Vec<TempResource>,
    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
}

impl PendingWrites {
    pub fn new(command_encoder: Box<dyn hal::DynCommandEncoder>) -> Self {
        Self {
            command_encoder,
            is_recording: false,
            temp_resources: Vec::new(),
            dst_buffers: FastHashMap::default(),
            dst_textures: FastHashMap::default(),
        }
    }

    pub fn dispose(mut self, device: &dyn hal::DynDevice) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
            device.destroy_command_encoder(self.command_encoder);
        }

        self.temp_resources.clear();
    }

    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers
            .insert(buffer.tracker_index(), buffer.clone());
    }

    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
        self.dst_textures
            .insert(texture.tracker_index(), texture.clone());
    }

    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index())
    }

    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
        self.dst_textures.contains_key(&texture.tracker_index())
    }

    pub fn consume_temp(&mut self, resource: TempResource) {
        self.temp_resources.push(resource);
    }

    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
        self.temp_resources
            .push(TempResource::StagingBuffer(buffer));
    }

    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Device,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                raw: mem::replace(&mut self.command_encoder, new_encoder),
                cmd_buffers: vec![cmd_buf],
                trackers: Tracker::new(),
                pending_buffers,
                pending_textures,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            Ok(None)
        }
    }

    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(Some("(wgpu internal) PendingWrites"))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }

    pub fn deactivate(&mut self) {
        if self.is_recording {
            unsafe {
                self.command_encoder.discard_encoding();
            }
            self.is_recording = false;
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    Unmap(#[from] BufferAccessError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error("Surface output was dropped before the command buffer got submitted")]
    SurfaceOutputDropped,
    #[error("Surface was unconfigured before the command buffer got submitted")]
    SurfaceUnconfigured,
    #[error("GPU got stuck :(")]
    StuckGpu,
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
}

impl Queue {
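    /// Schedule `data` to be written into `buffer` starting at `buffer_offset`.
    /// The data is staged now and the copy is flushed with the next submission.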
    pub fn write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        let buffer = buffer.get()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;
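        // The staging buffer must be consumed on every path below, even on
        // error, so it is freed on the correct submission timeline.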
        let mut pending_writes = self.device.pending_writes.lock();

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let result = self.write_staging_buffer_impl(
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn create_staging_buffer(
        &self,
        buffer_size: wgt::BufferSize,
    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
        profiling::scope!("Queue::create_staging_buffer");
        resource_log!("Queue::create_staging_buffer");

        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
        let ptr = unsafe { staging_buffer.ptr() };

        Ok((staging_buffer, ptr))
    }

    pub fn write_staging_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        staging_buffer: StagingBuffer,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_staging_buffer");

        let buffer = buffer.get()?;

        let mut pending_writes = self.device.pending_writes.lock();

        let staging_buffer = staging_buffer.flush();

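        // From here on, the flushed staging buffer belongs to the queue and
        // must reach `pending_writes.consume`, even if the write fails.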
        let result = self.write_staging_buffer_impl(
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn validate_write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::validate_write_buffer");

        let buffer = buffer.get()?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;

        Ok(())
    }

    fn validate_write_buffer_impl(
        &self,
        buffer: &Buffer,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), TransferError> {
        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
        if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedCopySize(buffer_size.get()));
        }
        if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
        }
        if buffer_offset + buffer_size.get() > buffer.size {
            return Err(TransferError::BufferOverrun {
                start_offset: buffer_offset,
                end_offset: buffer_offset + buffer_size.get(),
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }

        Ok(())
    }

    fn write_staging_buffer_impl(
        &self,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, hal::BufferUses::COPY_DST)
        };

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = buffer.try_raw(&snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, &snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        {
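            // Mark the destination range as initialized, so it won't be
            // zeroed out before a later read or mapping.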
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

    pub fn write_texture(
        &self,
        destination: wgt::ImageCopyTexture<Fallible<Texture>>,
        data: &[u8],
        data_layout: &wgt::ImageDataLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let dst = destination.texture.get()?;
        let destination = wgt::ImageCopyTexture {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        if !dst_base.aspect.is_one() {
            return Err(TransferError::CopyAspectNotOne.into());
        }

        if !conv::is_valid_copy_dst_texture_format(dst.desc.format, destination.aspect) {
            return Err(TransferError::CopyToForbiddenTextureFormat {
                format: dst.desc.format,
                aspect: destination.aspect,
            }
            .into());
        }

        let (required_bytes_in_copy, _source_bytes_per_array_layer) = validate_linear_texture_data(
            data_layout,
            dst.desc.format,
            destination.aspect,
            data.len() as wgt::BufferAddress,
            CopySide::Source,
            size,
            false,
        )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let mut pending_writes = self.device.pending_writes.lock();
        let encoder = pending_writes.activate();

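        // If the copy only partially covers a mip level, the untouched part
        // must be zero-initialized first: the init tracker records whole
        // layers only.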
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
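            // Volume textures have no array layers, so track a single init
            // layer covering all depth slices.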
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<std::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let dst_raw = dst.try_raw(&snatch_guard)?;

        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);

        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
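            // Fast path: the source rows already have the required pitch, so
            // the whole payload can be written to the staging buffer at once.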
            profiling::scope!("copy aligned");
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
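            // Slow path: copy row by row, padding each row out to the
            // device-required row pitch.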
            let block_rows_in_copy =
                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
            let stage_size =
                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
                    .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
            for layer in 0..size.depth_or_array_layers {
                let rows_offset = layer * rows_per_image;
                for row in rows_offset..rows_offset + height_in_blocks {
                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
                    let dst_offset = row * stage_bytes_per_row;
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            copy_bytes_per_row,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::ImageDataLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, hal::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::ImageCopyExternalImage,
        destination: wgt::ImageCopyTextureTagged<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::copy_external_image_to_texture");

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::ImageCopyTexture {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x + size.width > src_width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x + size.width,
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y + size.height > src_height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y + size.height,
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        let mut pending_writes = self.device.pending_writes.lock();
        let encoder = pending_writes.activate();

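        // As in `write_texture`: a partially covered mip level must be
        // zero-initialized first, since the init tracker records whole layers.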
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
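            // Volume textures have no array layers, so track a single init
            // layer covering all depth slices.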
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<std::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, hal::TextureUses::COPY_DST);

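        // This path is WebGL-only: downcast the dyn encoder and texture to
        // their GLES types so the external-image copy entry point can be used.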
        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

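    /// Submit `command_buffers` for execution, returning this submission's
    /// index. Any accumulated pending writes are flushed in an encoder that
    /// runs ahead of the user's command buffers.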
    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

        let submit_index;

        let res = 'error: {
            let snatch_guard = self.device.snatchable_lock.read();

            let mut fence = self.device.fence.write();
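            // The fence must be acquired after the snatch lock everywhere, to
            // keep lock ordering consistent and avoid deadlocks.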
            submit_index = self
                .device
                .active_submission_index
                .fetch_add(1, Ordering::SeqCst)
                + 1;
            let mut active_executions = Vec::new();

            let mut used_surface_textures = track::TextureUsageScope::default();

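            // A hashmap keyed on the texture pointer deduplicates surface
            // textures used by several command buffers, so each one is handed
            // to the hal submit exactly once.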
            let mut submit_surface_textures_owned = FastHashMap::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    for command_buffer in command_buffers {
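                        // Keep draining the remaining command buffers even
                        // after an error, so each one is taken and its
                        // resources are reclaimed.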
                        profiling::scope!("process command buffer");

                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

                        #[allow(unused_mut)]
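                        // `mut` is only exercised when the `trace` feature
                        // takes the recorded commands out below.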
                        let mut cmd_buf_data = command_buffer.try_take();

                        #[cfg(feature = "trace")]
                        if let Some(ref mut trace) = *self.device.trace.lock() {
                            if let Ok(ref mut cmd_buf_data) = cmd_buf_data {
                                trace.add(Action::Submit(
                                    submit_index,
                                    cmd_buf_data.commands.take().unwrap(),
                                ));
                            }
                        }

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &snatch_guard,
                                    &mut submit_surface_textures_owned,
                                    &mut used_surface_textures,
                                );
                                if let Err(err) = res {
                                    first_error.get_or_insert(err);
                                    cmd_buf_data.destroy(&command_buffer.device);
                                    continue;
                                }
                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        if first_error.is_some() {
                            continue;
                        }

                        if let Err(e) = unsafe {
                            baked.encoder.begin_encoding(hal_label(
                                Some("(wgpu internal) Transit"),
                                self.device.instance_flags,
                            ))
                        }
                        .map_err(|e| self.device.handle_hal_error(e))
                        {
                            break 'error Err(e.into());
                        }

                        let mut trackers = self.device.trackers.lock();
                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
                        {
                            break 'error Err(e.into());
                        }
                        if let Err(e) = baked.initialize_texture_memory(
                            &mut trackers,
                            &self.device,
                            &snatch_guard,
                        ) {
                            break 'error Err(e.into());
                        }

                        CommandBuffer::insert_barriers_from_device_tracker(
                            baked.encoder.as_mut(),
                            &mut trackers,
                            &baked.trackers,
                            &snatch_guard,
                        );

                        let transit = unsafe { baked.encoder.end_encoding().unwrap() };
                        baked.list.insert(0, transit);

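                        // A command buffer that drew to a surface needs a
                        // trailing encoder transitioning those textures to
                        // PRESENT before the output can be presented.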
                        if !used_surface_textures.is_empty() {
                            if let Err(e) = unsafe {
                                baked.encoder.begin_encoding(hal_label(
                                    Some("(wgpu internal) Present"),
                                    self.device.instance_flags,
                                ))
                            }
                            .map_err(|e| self.device.handle_hal_error(e))
                            {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            let present = unsafe {
                                baked.encoder.transition_textures(&texture_barriers);
                                baked.encoder.end_encoding().unwrap()
                            };
                            baked.list.push(present);
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        active_executions.push(EncoderInFlight {
                            raw: baked.encoder,
                            cmd_buffers: baked.list,
                            trackers: baked.trackers,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

            let mut pending_writes = self.device.pending_writes.lock();

            {
                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
                for texture in pending_writes.dst_textures.values() {
                    match texture.try_inner(&snatch_guard) {
                        Ok(TextureInner::Native { .. }) => {}
                        Ok(TextureInner::Surface { .. }) => {
                            submit_surface_textures_owned
                                .insert(Arc::as_ptr(texture), texture.clone());

                            unsafe {
                                used_surface_textures
                                    .merge_single(texture, None, hal::TextureUses::PRESENT)
                                    .unwrap()
                            };
                        }
                        Err(e) => break 'error Err(e.into()),
                    }
                }

                if !used_surface_textures.is_empty() {
                    let mut trackers = self.device.trackers.lock();

                    let texture_barriers = trackers
                        .textures
                        .set_from_usage_scope_and_drain_transitions(
                            &used_surface_textures,
                            &snatch_guard,
                        )
                        .collect::<Vec<_>>();
                    unsafe {
                        pending_writes
                            .command_encoder
                            .transition_textures(&texture_barriers);
                    };
                }
            }

            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
                Ok(Some(pending_execution)) => {
                    active_executions.insert(0, pending_execution);
                }
                Ok(None) => {}
                Err(e) => break 'error Err(e.into()),
            }
            let hal_command_buffers = active_executions
                .iter()
                .flat_map(|e| e.cmd_buffers.iter().map(|b| b.as_ref()))
                .collect::<Vec<_>>();

            {
                let mut submit_surface_textures =
                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
                        submit_surface_textures_owned.len(),
                    );

                for texture in submit_surface_textures_owned.values() {
                    let raw = match texture.inner.get(&snatch_guard) {
                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
                        _ => unreachable!(),
                    };
                    submit_surface_textures.push(raw);
                }

                if let Err(e) = unsafe {
                    self.raw().submit(
                        &hal_command_buffers,
                        &submit_surface_textures,
                        (fence.as_mut(), submit_index),
                    )
                }
                .map_err(|e| self.device.handle_hal_error(e))
                {
                    break 'error Err(e.into());
                }

                self.device
                    .last_successful_submission_index
                    .fetch_max(submit_index, Ordering::SeqCst);
            }

            profiling::scope!("cleanup");

            self.device.lock_life().track_submission(
                submit_index,
                pending_writes.temp_resources.drain(..),
                active_executions,
            );
            drop(pending_writes);

            let fence_guard = RwLockWriteGuard::downgrade(fence);
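            // Poll the device once so submissions that have already finished
            // get their callbacks collected before we return.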
            let (closures, _) =
                match self
                    .device
                    .maintain(fence_guard, wgt::Maintain::Poll, snatch_guard)
                {
                    Ok(closures) => closures,
                    Err(WaitIdleError::Device(err)) => {
                        break 'error Err(QueueSubmitError::Queue(err))
                    }
                    Err(WaitIdleError::StuckGpu) => break 'error Err(QueueSubmitError::StuckGpu),
                    Err(WaitIdleError::WrongSubmissionIndex(..)) => unreachable!(),
                };

            Ok(closures)
        };

        let callbacks = match res {
            Ok(ok) => ok,
            Err(e) => return Err((submit_index, e)),
        };

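        // Fire the callbacks only after all locks have been released, so user
        // code cannot deadlock against the queue.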
        callbacks.fire();

        api_log!("Queue::submit returned submit index {submit_index}");

        Ok(submit_index)
    }

    pub fn get_timestamp_period(&self) -> f32 {
        unsafe { self.raw().get_timestamp_period() }
    }

    pub fn on_submitted_work_done(&self, closure: SubmittedWorkDoneClosure) {
        api_log!("Queue::on_submitted_work_done");
        self.device.lock_life().add_work_done_closure(closure);
    }
}

impl Global {
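    /// A minimal usage sketch of these id-based entry points (ids are assumed
    /// to come from the usual device and resource creation calls):
    ///
    /// ```ignore
    /// global.queue_write_buffer(queue_id, buffer_id, 0, &data)?;
    /// global.queue_submit(queue_id, &[])?;
    /// ```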
    pub fn queue_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteBuffer {
                id: buffer_id,
                data: data_path,
                range: buffer_offset..buffer_offset + data.len() as u64,
                queued: true,
            });
        }

        let buffer = self.hub.buffers.get(buffer_id);
        queue.write_buffer(buffer, buffer_offset, data)
    }

    pub fn queue_create_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_size: wgt::BufferSize,
        id_in: Option<id::StagingBufferId>,
    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;

        let fid = self.hub.staging_buffers.prepare(id_in);
        let id = fid.assign(staging_buffer);

        Ok((id, ptr))
    }

    pub fn queue_write_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        staging_buffer_id: id::StagingBufferId,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
    }

    pub fn queue_validate_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
    }

    pub fn queue_write_texture(
        &self,
        queue_id: QueueId,
        destination: &ImageCopyTexture,
        data: &[u8],
        data_layout: &wgt::ImageDataLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteTexture {
                to: *destination,
                data: data_path,
                layout: *data_layout,
                size: *size,
            });
        }

        let destination = wgt::ImageCopyTexture {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };
        queue.write_texture(destination, data, data_layout, size)
    }

    #[cfg(webgl)]
    pub fn queue_copy_external_image_to_texture(
        &self,
        queue_id: QueueId,
        source: &wgt::ImageCopyExternalImage,
        destination: crate::command::ImageCopyTextureTagged,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let destination = wgt::ImageCopyTextureTagged {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
            color_space: destination.color_space,
            premultiplied_alpha: destination.premultiplied_alpha,
        };
        queue.copy_external_image_to_texture(source, destination, size)
    }

    pub fn queue_submit(
        &self,
        queue_id: QueueId,
        command_buffer_ids: &[id::CommandBufferId],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        let queue = self.hub.queues.get(queue_id);
        let command_buffer_guard = self.hub.command_buffers.read();
        let command_buffers = command_buffer_ids
            .iter()
            .map(|id| command_buffer_guard.get(*id))
            .collect::<Vec<_>>();
        drop(command_buffer_guard);
        queue.submit(&command_buffers)
    }

    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
        let queue = self.hub.queues.get(queue_id);
        queue.get_timestamp_period()
    }

    pub fn queue_on_submitted_work_done(
        &self,
        queue_id: QueueId,
        closure: SubmittedWorkDoneClosure,
    ) {
        let queue = self.hub.queues.get(queue_id);
        queue.on_submitted_work_done(closure);
    }
}

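/// Validate a command buffer ahead of submission: it must belong to this
/// queue's device, be finished, and use no destroyed or still-mapped buffers;
/// any surface textures it uses are collected for presentation transitions.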
fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &crate::snatch::SnatchGuard<'_>,
    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;
    cmd_buf_data.check_finished()?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;

                match *buffer.map_state.lock() {
                    BufferMapState::Idle => (),
                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let should_extend = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        submit_surface_textures_owned
                            .insert(Arc::as_ptr(&texture), texture.clone());

                        true
                    }
                };
                if should_extend {
                    unsafe {
                        used_surface_textures
                            .merge_single(&texture, None, hal::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }
    }
    Ok(())
}