wgpu/util/mod.rs

//! Utility structures and functions that are built on top of the main `wgpu` API.
//!
//! Nothing in this module is part of the WebGPU API specification;
//! these utilities are unique to the `wgpu` library.

mod belt;
mod device;
mod encoder;
mod init;

use std::sync::Arc;
use std::{
    borrow::Cow,
    mem::{align_of, size_of},
    ptr::copy_nonoverlapping,
};

pub use belt::StagingBelt;
pub use device::{BufferInitDescriptor, DeviceExt, TextureDataOrder};
pub use encoder::RenderEncoder;
pub use init::*;
pub use wgt::{math::*, DispatchIndirectArgs, DrawIndexedIndirectArgs, DrawIndirectArgs};

/// Treat the given byte slice as a SPIR-V module.
///
/// # Panic
///
/// This function panics if:
///
/// - Input length isn't a multiple of 4
/// - Input is longer than [`usize::MAX`]
/// - Input is empty
/// - The SPIR-V magic number is missing from the beginning of the stream
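///
/// # Examples
///
/// A minimal sketch of loading a binary SPIR-V file and turning it into a
/// shader module; the `shader.spv` path is hypothetical and `device` is an
/// existing `wgpu::Device`:
///
/// ``` ignore
/// let source = wgpu::util::make_spirv(include_bytes!("shader.spv"));
/// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
///     label: Some("spirv shader"),
///     source,
/// });
/// ```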
#[cfg(feature = "spirv")]
pub fn make_spirv(data: &[u8]) -> super::ShaderSource<'_> {
    super::ShaderSource::SpirV(make_spirv_raw(data))
}

/// Version of `make_spirv` intended for use with [`Device::create_shader_module_spirv`].
/// Returns a raw slice of words instead of a `ShaderSource`.
///
/// Panics under the same conditions as `make_spirv`.
///
/// [`Device::create_shader_module_spirv`]: crate::Device::create_shader_module_spirv
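///
/// # Examples
///
/// A minimal sketch of SPIR-V passthrough, assuming an existing `device` with
/// `Features::SPIRV_SHADER_PASSTHROUGH` enabled and a hypothetical
/// `shader.spv` file:
///
/// ``` ignore
/// let source = wgpu::util::make_spirv_raw(include_bytes!("shader.spv"));
/// // Safety: passthrough SPIR-V is not validated by wgpu, so it must already be valid.
/// let module = unsafe {
///     device.create_shader_module_spirv(&wgpu::ShaderModuleDescriptorSpirV {
///         label: Some("spirv shader"),
///         source,
///     })
/// };
/// ```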
pub fn make_spirv_raw(data: &[u8]) -> Cow<'_, [u32]> {
    const MAGIC_NUMBER: u32 = 0x0723_0203;
    assert_eq!(
        data.len() % size_of::<u32>(),
        0,
        "data size is not a multiple of 4"
    );
    assert_ne!(data.len(), 0, "data size must be larger than zero");

    // If the data happens to be aligned, directly use the byte array,
    // otherwise copy the byte array in an owned vector and use that instead.
    let mut words = if data.as_ptr().align_offset(align_of::<u32>()) == 0 {
        let (pre, words, post) = unsafe { data.align_to::<u32>() };
        debug_assert!(pre.is_empty());
        debug_assert!(post.is_empty());
        Cow::from(words)
    } else {
        let mut words = vec![0u32; data.len() / size_of::<u32>()];
        unsafe {
            copy_nonoverlapping(data.as_ptr(), words.as_mut_ptr() as *mut u8, data.len());
        }
        Cow::from(words)
    };

    // Before checking if the data starts with the magic, check if it starts
    // with the magic in non-native endianness, own & swap the data if so.
    if words[0] == MAGIC_NUMBER.swap_bytes() {
        for word in Cow::to_mut(&mut words) {
            *word = word.swap_bytes();
        }
    }

    assert_eq!(
        words[0], MAGIC_NUMBER,
        "wrong magic word {:x}. Make sure you are using a binary SPIRV file.",
        words[0]
    );

    words
}

/// CPU accessible buffer used to download data back from the GPU.
pub struct DownloadBuffer {
    _gpu_buffer: Arc<super::Buffer>,
    mapped_range: Box<dyn crate::context::BufferMappedRange>,
}

impl DownloadBuffer {
    /// Asynchronously read the contents of a buffer.
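    ///
    /// The callback fires once the copy has completed and the staging buffer is
    /// mapped; on native targets this generally requires polling the device.
    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming an existing `device`, `queue`, and a `buffer`
    /// created with `BufferUsages::COPY_SRC`:
    ///
    /// ``` no_run
    /// # let (device, queue, buffer): (wgpu::Device, wgpu::Queue, wgpu::Buffer) = todo!();
    /// wgpu::util::DownloadBuffer::read_buffer(&device, &queue, &buffer.slice(..), |result| {
    ///     match result {
    ///         // `DownloadBuffer` dereferences to the mapped bytes.
    ///         Ok(download) => println!("read {} bytes", download.len()),
    ///         Err(e) => eprintln!("buffer read failed: {e:?}"),
    ///     }
    /// });
    /// // Wait for the map to complete so the callback is invoked.
    /// device.poll(wgpu::Maintain::Wait);
    /// ```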
    pub fn read_buffer(
        device: &super::Device,
        queue: &super::Queue,
        buffer: &super::BufferSlice<'_>,
        callback: impl FnOnce(Result<Self, super::BufferAsyncError>) + Send + 'static,
    ) {
        let size = match buffer.size {
            Some(size) => size.into(),
            None => buffer.buffer.map_context.lock().total_size - buffer.offset,
        };

        #[allow(clippy::arc_with_non_send_sync)] // False positive on emscripten
        let download = Arc::new(device.create_buffer(&super::BufferDescriptor {
            size,
            usage: super::BufferUsages::COPY_DST | super::BufferUsages::MAP_READ,
            mapped_at_creation: false,
            label: None,
        }));

        let mut encoder =
            device.create_command_encoder(&super::CommandEncoderDescriptor { label: None });
        encoder.copy_buffer_to_buffer(buffer.buffer, buffer.offset, &download, 0, size);
        let command_buffer: super::CommandBuffer = encoder.finish();
        queue.submit(Some(command_buffer));

        download
            .clone()
            .slice(..)
            .map_async(super::MapMode::Read, move |result| {
                if let Err(e) = result {
                    callback(Err(e));
                    return;
                }

                let mapped_range = crate::context::DynContext::buffer_get_mapped_range(
                    &*download.context,
                    download.data.as_ref(),
                    0..size,
                );
                callback(Ok(Self {
                    _gpu_buffer: download,
                    mapped_range,
                }));
            });
    }
}

impl std::ops::Deref for DownloadBuffer {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        self.mapped_range.slice()
    }
}

/// A recommended key for storing [`PipelineCache`]s for the adapter
/// associated with the given [`AdapterInfo`](wgt::AdapterInfo).
/// This key will define a class of adapters for which the same cache
/// might be valid.
///
/// If this returns `None`, the adapter doesn't support [`PipelineCache`].
/// This may be because the API doesn't support application-managed caches
/// (such as browser WebGPU), or because `wgpu` hasn't implemented it for
/// that API yet.
///
/// This key could be used as a filename, as seen in the example below.
///
/// # Examples
///
/// ``` no_run
/// # use std::path::PathBuf;
/// # let adapter_info = todo!();
/// let cache_dir: PathBuf = PathBuf::new();
/// let filename = wgpu::util::pipeline_cache_key(&adapter_info);
/// if let Some(filename) = filename {
///     let cache_file = cache_dir.join(&filename);
///     let cache_data = std::fs::read(&cache_file);
///     let pipeline_cache: wgpu::PipelineCache = todo!("Use data (if present) to create a pipeline cache");
///
///     let data = pipeline_cache.get_data();
///     if let Some(data) = data {
///         let temp_file = cache_file.with_extension("temp");
///         std::fs::write(&temp_file, &data)?;
///         std::fs::rename(&temp_file, &cache_file)?;
///     }
/// }
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// [`PipelineCache`]: super::PipelineCache
pub fn pipeline_cache_key(adapter_info: &wgt::AdapterInfo) -> Option<String> {
    match adapter_info.backend {
        wgt::Backend::Vulkan => Some(format!(
            // The vendor/device should uniquely define a driver
            // We/the driver will also later validate that the vendor/device and driver
            // version match, which may lead to clearing an outdated
            // cache for the same device.
            "wgpu_pipeline_cache_vulkan_{}_{}",
            adapter_info.vendor, adapter_info.device
        )),
        _ => None,
    }
}