wgpu_hal/vulkan/
mod.rs

/*!
# Vulkan API internals.

## Stack memory

Ash expects slices, which we don't generally have available.
We cope with this requirement using a combination of the following approaches:
  - temporarily allocating `Vec` on the heap, where the overhead is permitted
  - growing temporary local storage

## Framebuffers and Render passes

Render passes are cached on the device and kept forever.

Framebuffers are also cached on the device, but they are removed when
any of the image views they reference gets removed.
If Vulkan supports image-less framebuffers,
then the actual views are excluded from the framebuffer key.

## Fences

If timeline semaphores are available, they are used 1:1 with wgpu-hal fences.
Otherwise, we manage a pool of `VkFence` objects behind each `hal::Fence`.

!*/
26
27mod adapter;
28mod command;
29mod conv;
30mod device;
31mod instance;
32
33use std::{
34    borrow::Borrow,
35    collections::HashSet,
36    ffi::{CStr, CString},
37    fmt, mem,
38    num::NonZeroU32,
39    sync::Arc,
40};
41
42use arrayvec::ArrayVec;
43use ash::{ext, khr, vk};
44use parking_lot::{Mutex, RwLock};
45use wgt::InternalCounter;
46
/// Conversion factor from milliseconds to nanoseconds, for Vulkan wait/timeout APIs.
const MILLIS_TO_NANOS: u64 = 1_000_000;
/// Upper bound on attachments in one render pass: each color target may carry a
/// resolve attachment (`* 2`), plus one depth/stencil attachment (`+ 1`).
const MAX_TOTAL_ATTACHMENTS: usize = crate::MAX_COLOR_ATTACHMENTS * 2 + 1;
49
/// Marker type selecting the Vulkan backend implementation of the wgpu-hal API.
#[derive(Clone, Debug)]
pub struct Api;
52
// Maps every wgpu-hal associated type onto its Vulkan backend implementation.
impl crate::Api for Api {
    type Instance = Instance;
    type Surface = Surface;
    type Adapter = Adapter;
    type Device = Device;

    type Queue = Queue;
    type CommandEncoder = CommandEncoder;
    type CommandBuffer = CommandBuffer;

    type Buffer = Buffer;
    type Texture = Texture;
    type SurfaceTexture = SurfaceTexture;
    type TextureView = TextureView;
    type Sampler = Sampler;
    type QuerySet = QuerySet;
    type Fence = Fence;
    type AccelerationStructure = AccelerationStructure;
    type PipelineCache = PipelineCache;

    type BindGroupLayout = BindGroupLayout;
    type BindGroup = BindGroup;
    type PipelineLayout = PipelineLayout;
    type ShaderModule = ShaderModule;
    type RenderPipeline = RenderPipeline;
    type ComputePipeline = ComputePipeline;
}
80
// Hook each backend type into wgpu-hal's type-erased (`dyn`) resource
// machinery; see `impl_dyn_resource!` in the crate root for what is generated.
crate::impl_dyn_resource!(
    Adapter,
    AccelerationStructure,
    BindGroup,
    BindGroupLayout,
    Buffer,
    CommandBuffer,
    CommandEncoder,
    ComputePipeline,
    Device,
    Fence,
    Instance,
    PipelineCache,
    PipelineLayout,
    QuerySet,
    Queue,
    RenderPipeline,
    Sampler,
    ShaderModule,
    Surface,
    SurfaceTexture,
    Texture,
    TextureView
);
105
/// Instance-level `VK_EXT_debug_utils` state: the loaded extension functions,
/// the installed messenger, and the messenger's callback user data.
struct DebugUtils {
    /// Loaded `VK_EXT_debug_utils` instance-level function pointers.
    extension: ext::debug_utils::Instance,
    /// The installed debug messenger handle.
    messenger: vk::DebugUtilsMessengerEXT,

    /// Owning pointer to the debug messenger callback user data.
    ///
    /// `InstanceShared::drop` destroys the debug messenger before
    /// dropping this, so the callback should never receive a dangling
    /// user data pointer.
    #[allow(dead_code)]
    callback_data: Box<DebugUtilsMessengerUserData>,
}
118
/// Parameters for installing a debug-utils messenger on a new instance.
pub struct DebugUtilsCreateInfo {
    /// Message severities the callback should receive.
    severity: vk::DebugUtilsMessageSeverityFlagsEXT,
    /// Message types the callback should receive.
    message_type: vk::DebugUtilsMessageTypeFlagsEXT,
    /// User data handed to the callback; see [`DebugUtilsMessengerUserData`].
    callback_data: Box<DebugUtilsMessengerUserData>,
}
124
/// Properties of the validation layer that the debug-utils messenger
/// needs in order to apply its layer-specific workarounds.
#[derive(Debug)]
struct ValidationLayerProperties {
    /// Validation layer description, from `vk::LayerProperties`.
    layer_description: CString,

    /// Validation layer specification version, from `vk::LayerProperties`.
    layer_spec_version: u32,
}
135
/// User data needed by `instance::debug_utils_messenger_callback`.
///
/// When we create the [`vk::DebugUtilsMessengerEXT`], the `pUserData`
/// pointer refers to one of these values.
#[derive(Debug)]
pub struct DebugUtilsMessengerUserData {
    /// The properties related to the validation layer, if present
    validation_layer_properties: Option<ValidationLayerProperties>,

    /// If the OBS layer is present. OBS never increments the version of their layer,
    /// so there's no reason to have the version.
    has_obs_layer: bool,
}
149
/// Instance-level state shared between the [`Instance`] and everything
/// created from it (surfaces, adapters, devices).
pub struct InstanceShared {
    /// The raw `ash` instance handle.
    raw: ash::Instance,
    /// Instance extensions that were enabled at creation.
    extensions: Vec<&'static CStr>,
    /// Guard controlling ownership of `raw`; see `crate::DropGuard`.
    drop_guard: Option<crate::DropGuard>,
    flags: wgt::InstanceFlags,
    /// Debug-utils state, if the messenger was installed.
    debug_utils: Option<DebugUtils>,
    /// Loaded `VK_KHR_get_physical_device_properties2` functions, if available.
    get_physical_device_properties: Option<khr::get_physical_device_properties2::Instance>,
    entry: ash::Entry,
    /// True if the NVIDIA Optimus configuration was detected.
    has_nv_optimus: bool,
    /// Android SDK version; presumably meaningful only on Android — TODO confirm.
    android_sdk_version: u32,
    /// The instance API version.
    ///
    /// Which is the version of Vulkan supported for instance-level functionality.
    ///
    /// It is associated with a `VkInstance` and its children,
    /// except for a `VkPhysicalDevice` and its children.
    instance_api_version: u32,
}
168
/// The wgpu-hal `Instance` implementation for Vulkan.
pub struct Instance {
    shared: Arc<InstanceShared>,
}
172
/// The semaphores needed to use one image in a swapchain.
#[derive(Debug)]
struct SwapchainImageSemaphores {
    /// A semaphore that is signaled when this image is safe for us to modify.
    ///
    /// When [`vkAcquireNextImageKHR`] returns the index of the next swapchain
    /// image that we should use, that image may actually still be in use by the
    /// presentation engine, and is not yet safe to modify. However, that
    /// function does accept a semaphore that it will signal when the image is
    /// indeed safe to begin messing with.
    ///
    /// This semaphore is:
    ///
    /// - waited for by the first queue submission to operate on this image
    ///   since it was acquired, and
    ///
    /// - signaled by [`vkAcquireNextImageKHR`] when the acquired image is ready
    ///   for us to use.
    ///
    /// [`vkAcquireNextImageKHR`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkAcquireNextImageKHR
    acquire: vk::Semaphore,

    /// True if the next command submission operating on this image should wait
    /// for [`acquire`].
    ///
    /// We must wait for `acquire` before drawing to this swapchain image, but
    /// because `wgpu-hal` queue submissions are always strongly ordered, only
    /// the first submission that works with a swapchain image actually needs to
    /// wait. We set this flag when this image is acquired, and clear it the
    /// first time it's passed to [`Queue::submit`] as a surface texture.
    ///
    /// [`acquire`]: SwapchainImageSemaphores::acquire
    /// [`Queue::submit`]: crate::Queue::submit
    should_wait_for_acquire: bool,

    /// A pool of semaphores for ordering presentation after drawing.
    ///
    /// The first [`present_index`] semaphores in this vector are:
    ///
    /// - all waited on by the call to [`vkQueuePresentKHR`] that presents this
    ///   image, and
    ///
    /// - each signaled by some [`vkQueueSubmit`] queue submission that draws to
    ///   this image, when the submission finishes execution.
    ///
    /// This vector accumulates one semaphore per submission that writes to this
    /// image. This is awkward, but hard to avoid: [`vkQueuePresentKHR`]
    /// requires a semaphore to order it with respect to drawing commands, and
    /// we can't attach new completion semaphores to a command submission after
    /// it's been submitted. This means that, at submission time, we must create
    /// the semaphore we might need if the caller's next action is to enqueue a
    /// presentation of this image.
    ///
    /// An alternative strategy would be for presentation to enqueue an empty
    /// submit, ordered relative to other submits in the usual way, and
    /// signaling a single presentation semaphore. But we suspect that submits
    /// are usually expensive enough, and semaphores usually cheap enough, that
    /// performance-sensitive users will avoid making many submits, so that the
    /// cost of accumulated semaphores will usually be less than the cost of an
    /// additional submit.
    ///
    /// Only the first [`present_index`] semaphores in the vector are actually
    /// going to be signalled by submitted commands, and need to be waited for
    /// by the next present call. Any semaphores beyond that index were created
    /// for prior presents and are simply being retained for recycling.
    ///
    /// [`present_index`]: SwapchainImageSemaphores::present_index
    /// [`vkQueuePresentKHR`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueuePresentKHR
    /// [`vkQueueSubmit`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueueSubmit
    present: Vec<vk::Semaphore>,

    /// The number of semaphores in [`present`] to be signalled for this submission.
    ///
    /// [`present`]: SwapchainImageSemaphores::present
    present_index: usize,

    /// The fence value of the last command submission that wrote to this image.
    ///
    /// The next time we try to acquire this image, we'll block until
    /// this submission finishes, proving that [`acquire`] is ready to
    /// pass to `vkAcquireNextImageKHR` again.
    ///
    /// [`acquire`]: SwapchainImageSemaphores::acquire
    previously_used_submission_index: crate::FenceValue,
}
258
259impl SwapchainImageSemaphores {
260    fn new(device: &DeviceShared) -> Result<Self, crate::DeviceError> {
261        Ok(Self {
262            acquire: device.new_binary_semaphore()?,
263            should_wait_for_acquire: true,
264            present: Vec::new(),
265            present_index: 0,
266            previously_used_submission_index: 0,
267        })
268    }
269
270    fn set_used_fence_value(&mut self, value: crate::FenceValue) {
271        self.previously_used_submission_index = value;
272    }
273
274    /// Return the semaphore that commands drawing to this image should wait for, if any.
275    ///
276    /// This only returns `Some` once per acquisition; see
277    /// [`SwapchainImageSemaphores::should_wait_for_acquire`] for details.
278    fn get_acquire_wait_semaphore(&mut self) -> Option<vk::Semaphore> {
279        if self.should_wait_for_acquire {
280            self.should_wait_for_acquire = false;
281            Some(self.acquire)
282        } else {
283            None
284        }
285    }
286
287    /// Return a semaphore that a submission that writes to this image should
288    /// signal when it's done.
289    ///
290    /// See [`SwapchainImageSemaphores::present`] for details.
291    fn get_submit_signal_semaphore(
292        &mut self,
293        device: &DeviceShared,
294    ) -> Result<vk::Semaphore, crate::DeviceError> {
295        // Try to recycle a semaphore we created for a previous presentation.
296        let sem = match self.present.get(self.present_index) {
297            Some(sem) => *sem,
298            None => {
299                let sem = device.new_binary_semaphore()?;
300                self.present.push(sem);
301                sem
302            }
303        };
304
305        self.present_index += 1;
306
307        Ok(sem)
308    }
309
310    /// Return the semaphores that a presentation of this image should wait on.
311    ///
312    /// Return a slice of semaphores that the call to [`vkQueueSubmit`] that
313    /// ends this image's acquisition should wait for. See
314    /// [`SwapchainImageSemaphores::present`] for details.
315    ///
316    /// Reset `self` to be ready for the next acquisition cycle.
317    ///
318    /// [`vkQueueSubmit`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueueSubmit
319    fn get_present_wait_semaphores(&mut self) -> &[vk::Semaphore] {
320        let old_index = self.present_index;
321
322        // Since this marks the end of this acquire/draw/present cycle, take the
323        // opportunity to reset `self` in preparation for the next acquisition.
324        self.present_index = 0;
325        self.should_wait_for_acquire = true;
326
327        &self.present[0..old_index]
328    }
329
330    unsafe fn destroy(&self, device: &ash::Device) {
331        unsafe {
332            device.destroy_semaphore(self.acquire, None);
333            for sem in &self.present {
334                device.destroy_semaphore(*sem, None);
335            }
336        }
337    }
338}
339
/// A configured swapchain plus the per-image state needed to present it.
struct Swapchain {
    raw: vk::SwapchainKHR,
    /// Flags the swapchain was created with.
    raw_flags: vk::SwapchainCreateFlagsKHR,
    /// Loaded `VK_KHR_swapchain` device-level functions.
    functor: khr::swapchain::Device,
    device: Arc<DeviceShared>,
    images: Vec<vk::Image>,
    config: crate::SurfaceConfiguration,
    view_formats: Vec<wgt::TextureFormat>,
    /// One wait semaphore per swapchain image. This will be associated with the
    /// surface texture, and later collected during submission.
    ///
    /// We need this to be `Arc<Mutex<>>` because we need to be able to pass this
    /// data into the surface texture, so submit/present can use it.
    surface_semaphores: Vec<Arc<Mutex<SwapchainImageSemaphores>>>,
    /// The index of the next semaphore to use. Ideally we would use the same
    /// index as the image index, but we need to specify the semaphore as an argument
    /// to the acquire_next_image function which is what tells us which image to use.
    next_semaphore_index: usize,
    /// The present timing information which will be set in the next call to [`present()`](crate::Queue::present()).
    ///
    /// # Safety
    ///
    /// This must only be set if [`wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING`] is enabled, and
    /// so the VK_GOOGLE_display_timing extension is present.
    next_present_time: Option<vk::PresentTimeGOOGLE>,
}
366
367impl Swapchain {
368    fn advance_surface_semaphores(&mut self) {
369        let semaphore_count = self.surface_semaphores.len();
370        self.next_semaphore_index = (self.next_semaphore_index + 1) % semaphore_count;
371    }
372
373    fn get_surface_semaphores(&self) -> Arc<Mutex<SwapchainImageSemaphores>> {
374        self.surface_semaphores[self.next_semaphore_index].clone()
375    }
376}
377
/// A Vulkan surface and, once configured, its swapchain.
pub struct Surface {
    raw: vk::SurfaceKHR,
    /// Loaded `VK_KHR_surface` instance-level functions.
    functor: khr::surface::Instance,
    instance: Arc<InstanceShared>,
    /// `None` until the surface has been configured.
    swapchain: RwLock<Option<Swapchain>>,
}
384
385impl Surface {
386    /// Get the raw Vulkan swapchain associated with this surface.
387    ///
388    /// Returns [`None`] if the surface is not configured.
389    pub fn raw_swapchain(&self) -> Option<vk::SwapchainKHR> {
390        let read = self.swapchain.read();
391        read.as_ref().map(|it| it.raw)
392    }
393
394    /// Set the present timing information which will be used for the next [presentation](crate::Queue::present()) of this surface,
395    /// using [VK_GOOGLE_display_timing].
396    ///
397    /// This can be used to give an id to presentations, for future use of [`vk::PastPresentationTimingGOOGLE`].
398    /// Note that `wgpu-hal` does *not* provide a way to use that API - you should manually access this through [`ash`].
399    ///
400    /// This can also be used to add a "not before" timestamp to the presentation.
401    ///
402    /// The exact semantics of the fields are also documented in the [specification](https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPresentTimeGOOGLE.html) for the extension.
403    ///
404    /// # Panics
405    ///
406    /// - If the surface hasn't been configured.
407    /// - If the device doesn't [support present timing](wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING).
408    ///
409    /// [VK_GOOGLE_display_timing]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_GOOGLE_display_timing.html
410    #[track_caller]
411    pub fn set_next_present_time(&self, present_timing: vk::PresentTimeGOOGLE) {
412        let mut swapchain = self.swapchain.write();
413        let swapchain = swapchain
414            .as_mut()
415            .expect("Surface should have been configured");
416        let features = wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING;
417        if swapchain.device.features.contains(features) {
418            swapchain.next_present_time = Some(present_timing);
419        } else {
420            // Ideally we'd use something like `device.required_features` here, but that's in `wgpu-core`, which we are a dependency of
421            panic!(
422                concat!(
423                    "Tried to set display timing properties ",
424                    "without the corresponding feature ({:?}) enabled."
425                ),
426                features
427            );
428        }
429    }
430}
431
/// A swapchain image acquired for rendering, with the per-image semaphores
/// used to order drawing and presentation.
#[derive(Debug)]
pub struct SurfaceTexture {
    /// Index of this image within the swapchain.
    index: u32,
    /// The wrapped swapchain image.
    texture: Texture,
    /// Shared with the owning [`Swapchain`]; collected during submit/present.
    surface_semaphores: Arc<Mutex<SwapchainImageSemaphores>>,
}

impl crate::DynSurfaceTexture for SurfaceTexture {}

// Let a `SurfaceTexture` be used wherever a `Texture` is expected.
impl Borrow<Texture> for SurfaceTexture {
    fn borrow(&self) -> &Texture {
        &self.texture
    }
}

impl Borrow<dyn crate::DynTexture> for SurfaceTexture {
    fn borrow(&self) -> &dyn crate::DynTexture {
        &self.texture
    }
}
452
/// A Vulkan physical device together with the capability analysis derived from it.
pub struct Adapter {
    raw: vk::PhysicalDevice,
    instance: Arc<InstanceShared>,
    //queue_families: Vec<vk::QueueFamilyProperties>,
    known_memory_flags: vk::MemoryPropertyFlags,
    /// Properties reported by the physical device.
    phd_capabilities: adapter::PhysicalDeviceProperties,
    //phd_features: adapter::PhysicalDeviceFeatures,
    downlevel_flags: wgt::DownlevelFlags,
    private_caps: PrivateCapabilities,
    /// Driver-specific workarounds to apply; see [`Workarounds`].
    workarounds: Workarounds,
}
464
// TODO there's no reason why this can't be unified--the function pointers should all be the same--it's not clear how to do this with `ash`.
/// How a device-level extension's entry points are reached: via the loaded
/// extension struct, or via core Vulkan after promotion.
enum ExtensionFn<T> {
    /// The loaded function pointer struct for an extension.
    Extension(T),
    /// The extension was promoted to a core version of Vulkan and the functions on `ash`'s `DeviceV1_x` traits should be used.
    Promoted,
}
472
/// Loaded function pointers for the optional device extensions we use.
struct DeviceExtensionFunctions {
    debug_utils: Option<ext::debug_utils::Device>,
    draw_indirect_count: Option<khr::draw_indirect_count::Device>,
    timeline_semaphore: Option<ExtensionFn<khr::timeline_semaphore::Device>>,
    ray_tracing: Option<RayTracingDeviceExtensionFunctions>,
}

/// Function pointers for the extensions needed for ray tracing.
struct RayTracingDeviceExtensionFunctions {
    acceleration_structure: khr::acceleration_structure::Device,
    buffer_device_address: khr::buffer_device_address::Device,
}
484
/// Set of internal capabilities, which don't show up in the exposed
/// device geometry, but affect the code paths taken internally.
#[derive(Clone, Debug)]
struct PrivateCapabilities {
    /// Y-flipping is implemented with either `VK_AMD_negative_viewport_height` or `VK_KHR_maintenance1`/1.1+. The AMD extension for negative viewport height does not require a Y shift.
    ///
    /// This flag is `true` if the device has `VK_KHR_maintenance1`/1.1+ and `false` otherwise (i.e. in the case of `VK_AMD_negative_viewport_height`).
    flip_y_requires_shift: bool,
    /// Whether image-less framebuffers can be used; when true, image views are
    /// excluded from the framebuffer cache key (see the module docs).
    imageless_framebuffers: bool,
    /// Whether view usage can be narrowed at view creation — presumably via
    /// `VkImageViewUsageCreateInfo`; TODO confirm in `device.rs`.
    image_view_usage: bool,
    /// Whether timeline semaphores are available; see the module docs on fences.
    timeline_semaphores: bool,
    /// Whether a 24-bit depth-only texture format is supported.
    texture_d24: bool,
    /// Whether a combined 24-bit depth + 8-bit stencil format is supported.
    texture_d24_s8: bool,
    /// Whether a stencil-only 8-bit format is supported.
    texture_s8: bool,
    /// Ability to present contents to any screen. Only needed to work around broken platform configurations.
    can_present: bool,
    /// Alignment mask for mapped memory ranges on non-coherent memory —
    /// presumably derived from `nonCoherentAtomSize`; TODO confirm.
    non_coherent_map_mask: wgt::BufferAddress,

    /// True if this adapter advertises the [`robustBufferAccess`][vrba] feature.
    ///
    /// Note that Vulkan's `robustBufferAccess` is not sufficient to implement
    /// `wgpu_hal`'s guarantee that shaders will not access buffer contents via
    /// a given bindgroup binding outside that binding's [accessible
    /// region][ar]. Enabling `robustBufferAccess` does ensure that
    /// out-of-bounds reads and writes are not undefined behavior (that's good),
    /// but still permits out-of-bounds reads to return data from anywhere
    /// within the buffer, not just the accessible region.
    ///
    /// [ar]: ../struct.BufferBinding.html#accessible-region
    /// [vrba]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#features-robustBufferAccess
    robust_buffer_access: bool,

    /// True if robust image access is advertised (bounds-checked image reads).
    robust_image_access: bool,

    /// True if this adapter supports the [`VK_EXT_robustness2`] extension's
    /// [`robustBufferAccess2`] feature.
    ///
    /// This is sufficient to implement `wgpu_hal`'s [required bounds-checking][ar] of
    /// shader accesses to buffer contents. If this feature is not available,
    /// this backend must have Naga inject bounds checks in the generated
    /// SPIR-V.
    ///
    /// [`VK_EXT_robustness2`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_robustness2.html
    /// [`robustBufferAccess2`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPhysicalDeviceRobustness2FeaturesEXT.html#features-robustBufferAccess2
    /// [ar]: ../struct.BufferBinding.html#accessible-region
    robust_buffer_access2: bool,

    /// `robustImageAccess2` counterpart of `robust_buffer_access2`.
    robust_image_access2: bool,
    /// Whether workgroup memory is zero-initialized — presumably via
    /// `VK_KHR_zero_initialize_workgroup_memory`; TODO confirm.
    zero_initialize_workgroup_memory: bool,
    /// Whether image format lists (`VK_KHR_image_format_list`/1.2+) can be used.
    image_format_list: bool,
    /// Whether Win32 external-memory import/export is available.
    #[cfg(windows)]
    external_memory_win32: bool,
}
538
bitflags::bitflags!(
    /// Workaround flags.
    #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
    pub struct Workarounds: u32 {
        /// Only generate SPIR-V for one entry point at a time.
        const SEPARATE_ENTRY_POINTS = 0x1;
        /// Qualcomm OOMs when there are zero color attachments but a non-null pointer
        /// to a subpass resolve attachment array. This nulls out that pointer in that case.
        const EMPTY_RESOLVE_ATTACHMENT_LISTS = 0x2;
        /// If the following code returns false, then nvidia will end up filling the wrong range.
        ///
        /// ```skip
        /// fn nvidia_succeeds() -> bool {
        ///   # let (copy_length, start_offset) = (0, 0);
        ///     if copy_length >= 4096 {
        ///         if start_offset % 16 != 0 {
        ///             if copy_length == 4096 {
        ///                 return true;
        ///             }
        ///             if copy_length % 16 == 0 {
        ///                 return false;
        ///             }
        ///         }
        ///     }
        ///     true
        /// }
        /// ```
        ///
        /// As such, we need to make sure all calls to vkCmdFillBuffer are aligned to 16 bytes
        /// if they cover a range of 4096 bytes or more.
        const FORCE_FILL_BUFFER_WITH_SIZE_GREATER_4096_ALIGNED_OFFSET_16 = 0x4;
    }
);
572
573#[derive(Clone, Debug, Eq, Hash, PartialEq)]
574struct AttachmentKey {
575    format: vk::Format,
576    layout: vk::ImageLayout,
577    ops: crate::AttachmentOps,
578}
579
580impl AttachmentKey {
581    /// Returns an attachment key for a compatible attachment.
582    fn compatible(format: vk::Format, layout: vk::ImageLayout) -> Self {
583        Self {
584            format,
585            layout,
586            ops: crate::AttachmentOps::all(),
587        }
588    }
589}
590
/// Cache key for a color attachment and its optional resolve target.
#[derive(Clone, Eq, Hash, PartialEq)]
struct ColorAttachmentKey {
    base: AttachmentKey,
    resolve: Option<AttachmentKey>,
}

/// Cache key for a depth/stencil attachment.
#[derive(Clone, Eq, Hash, PartialEq)]
struct DepthStencilAttachmentKey {
    base: AttachmentKey,
    /// Ops for the stencil aspect, tracked separately from the depth aspect.
    stencil_ops: crate::AttachmentOps,
}

/// Cache key for render passes; see the module docs on render pass caching.
#[derive(Clone, Eq, Default, Hash, PartialEq)]
struct RenderPassKey {
    colors: ArrayVec<Option<ColorAttachmentKey>, { crate::MAX_COLOR_ATTACHMENTS }>,
    depth_stencil: Option<DepthStencilAttachmentKey>,
    sample_count: u32,
    multiview: Option<NonZeroU32>,
}
610
/// Description of one framebuffer attachment, used in the framebuffer cache key.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct FramebufferAttachment {
    /// Can be NULL if the framebuffer is image-less
    raw: vk::ImageView,
    raw_image_flags: vk::ImageCreateFlags,
    view_usage: crate::TextureUses,
    view_format: wgt::TextureFormat,
    raw_view_formats: Vec<vk::Format>,
}

/// Cache key for framebuffers; see the module docs on framebuffer caching.
#[derive(Clone, Eq, Hash, PartialEq)]
struct FramebufferKey {
    attachments: ArrayVec<FramebufferAttachment, { MAX_TOTAL_ATTACHMENTS }>,
    extent: wgt::Extent3d,
    sample_count: u32,
}
627
/// Device-level state shared between the [`Device`], its [`Queue`], and the
/// resources created from them.
struct DeviceShared {
    raw: ash::Device,
    /// Index of the queue family `raw_queue` belongs to.
    family_index: u32,
    /// Index of `raw_queue` within its family.
    queue_index: u32,
    raw_queue: vk::Queue,
    /// Guard controlling ownership of `raw`; see `crate::DropGuard`.
    drop_guard: Option<crate::DropGuard>,
    instance: Arc<InstanceShared>,
    physical_device: vk::PhysicalDevice,
    /// Device extensions that were enabled at creation.
    enabled_extensions: Vec<&'static CStr>,
    extension_fns: DeviceExtensionFunctions,
    vendor_id: u32,
    pipeline_cache_validation_key: [u8; 16],
    /// Nanoseconds per timestamp-query tick, from the device limits.
    timestamp_period: f32,
    private_caps: PrivateCapabilities,
    workarounds: Workarounds,
    /// Features the device was opened with.
    features: wgt::Features,
    /// Render pass cache; entries are kept for the device's lifetime (see module docs).
    render_passes: Mutex<rustc_hash::FxHashMap<RenderPassKey, vk::RenderPass>>,
    /// Framebuffer cache; entries are removed when one of their image views is
    /// destroyed (see module docs).
    framebuffers: Mutex<rustc_hash::FxHashMap<FramebufferKey, vk::Framebuffer>>,
    memory_allocations_counter: InternalCounter,
}
648
/// The wgpu-hal `Device` implementation: shared device state plus the memory
/// and descriptor allocators.
pub struct Device {
    shared: Arc<DeviceShared>,
    /// Suballocator for device memory backing buffers and textures.
    mem_allocator: Mutex<gpu_alloc::GpuAllocator<vk::DeviceMemory>>,
    /// Allocator handing out descriptor sets from descriptor pools.
    desc_allocator:
        Mutex<gpu_descriptor::DescriptorAllocator<vk::DescriptorPool, vk::DescriptorSet>>,
    /// Bitmask of usable memory types — presumably one bit per `ash` memory
    /// type index; TODO confirm against `device.rs`.
    valid_ash_memory_types: u32,
    /// Options used when Naga generates SPIR-V for shader modules.
    naga_options: naga::back::spv::Options<'static>,
    #[cfg(feature = "renderdoc")]
    render_doc: crate::auxil::renderdoc::RenderDoc,
    counters: wgt::HalCounters,
}
660
/// Semaphores for forcing queue submissions to run in order.
///
/// The [`wgpu_hal::Queue`] trait promises that if two calls to [`submit`] are
/// ordered, then the first submission will finish on the GPU before the second
/// submission begins. To get this behavior on Vulkan we need to pass semaphores
/// to [`vkQueueSubmit`] for the commands to wait on before beginning execution,
/// and to signal when their execution is done.
///
/// Normally this can be done with a single semaphore, waited on and then
/// signalled for each submission. At any given time there's exactly one
/// submission that would signal the semaphore, and exactly one waiting on it,
/// as Vulkan requires.
///
/// However, as of Oct 2021, bug [#5508] in the Mesa ANV drivers caused them to
/// hang if we use a single semaphore. The workaround is to alternate between
/// two semaphores. The bug has been fixed in Mesa, but we should probably keep
/// the workaround until, say, Oct 2026.
///
/// [`wgpu_hal::Queue`]: crate::Queue
/// [`submit`]: crate::Queue::submit
/// [`vkQueueSubmit`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueueSubmit
/// [#5508]: https://gitlab.freedesktop.org/mesa/mesa/-/issues/5508
#[derive(Clone)]
struct RelaySemaphores {
    /// The semaphore the next submission should wait on before beginning
    /// execution on the GPU. This is `None` for the first submission, which
    /// should not wait on anything at all.
    wait: Option<vk::Semaphore>,

    /// The semaphore the next submission should signal when it has finished
    /// execution on the GPU.
    signal: vk::Semaphore,
}
694
695impl RelaySemaphores {
696    fn new(device: &DeviceShared) -> Result<Self, crate::DeviceError> {
697        Ok(Self {
698            wait: None,
699            signal: device.new_binary_semaphore()?,
700        })
701    }
702
703    /// Advances the semaphores, returning the semaphores that should be used for a submission.
704    fn advance(&mut self, device: &DeviceShared) -> Result<Self, crate::DeviceError> {
705        let old = self.clone();
706
707        // Build the state for the next submission.
708        match self.wait {
709            None => {
710                // The `old` values describe the first submission to this queue.
711                // The second submission should wait on `old.signal`, and then
712                // signal a new semaphore which we'll create now.
713                self.wait = Some(old.signal);
714                self.signal = device.new_binary_semaphore()?;
715            }
716            Some(ref mut wait) => {
717                // What this submission signals, the next should wait.
718                mem::swap(wait, &mut self.signal);
719            }
720        };
721
722        Ok(old)
723    }
724
725    /// Destroys the semaphores.
726    unsafe fn destroy(&self, device: &ash::Device) {
727        unsafe {
728            if let Some(wait) = self.wait {
729                device.destroy_semaphore(wait, None);
730            }
731            device.destroy_semaphore(self.signal, None);
732        }
733    }
734}
735
/// The wgpu-hal `Queue` implementation for Vulkan.
pub struct Queue {
    raw: vk::Queue,
    /// Loaded `VK_KHR_swapchain` device-level functions, used for presentation.
    swapchain_fn: khr::swapchain::Device,
    device: Arc<DeviceShared>,
    family_index: u32,
    /// See [`RelaySemaphores`] for how inter-submission ordering is enforced.
    relay_semaphores: Mutex<RelaySemaphores>,
}
743
/// A Vulkan buffer and, when owned by our allocator, its backing memory block.
#[derive(Debug)]
pub struct Buffer {
    raw: vk::Buffer,
    block: Option<Mutex<gpu_alloc::MemoryBlock<vk::DeviceMemory>>>,
}

impl crate::DynBuffer for Buffer {}
751
/// A ray-tracing acceleration structure, its backing buffer, and that buffer's memory.
#[derive(Debug)]
pub struct AccelerationStructure {
    raw: vk::AccelerationStructureKHR,
    buffer: vk::Buffer,
    block: Mutex<gpu_alloc::MemoryBlock<vk::DeviceMemory>>,
}

impl crate::DynAccelerationStructure for AccelerationStructure {}
760
/// A Vulkan image together with its backing memory (when owned).
#[derive(Debug)]
pub struct Texture {
    raw: vk::Image,
    /// Guard controlling ownership of `raw`; see `crate::DropGuard`.
    drop_guard: Option<crate::DropGuard>,
    /// Backing memory when the image was created from imported external memory.
    external_memory: Option<vk::DeviceMemory>,
    /// Backing allocation when the image's memory came from our allocator.
    block: Option<gpu_alloc::MemoryBlock<vk::DeviceMemory>>,
    usage: crate::TextureUses,
    format: wgt::TextureFormat,
    /// Flags the image was created with.
    raw_flags: vk::ImageCreateFlags,
    copy_size: crate::CopyExtent,
    view_formats: Vec<wgt::TextureFormat>,
}

impl crate::DynTexture for Texture {}

impl Texture {
    /// # Safety
    ///
    /// - The image handle must not be manually destroyed
    pub unsafe fn raw_handle(&self) -> vk::Image {
        self.raw
    }
}
784
/// A Vulkan image view plus the metadata needed to use it as a framebuffer attachment.
#[derive(Debug)]
pub struct TextureView {
    raw: vk::ImageView,
    /// Number of array layers covered by this view.
    layers: NonZeroU32,
    /// Attachment description used when this view participates in a framebuffer key.
    attachment: FramebufferAttachment,
}

impl crate::DynTextureView for TextureView {}

impl TextureView {
    /// # Safety
    ///
    /// - The image view handle must not be manually destroyed
    pub unsafe fn raw_handle(&self) -> vk::ImageView {
        self.raw
    }
}
802
/// A Vulkan sampler.
#[derive(Debug)]
pub struct Sampler {
    raw: vk::Sampler,
}

impl crate::DynSampler for Sampler {}
809
/// A Vulkan descriptor set layout plus the bookkeeping needed to allocate
/// descriptor sets from it.
#[derive(Debug)]
pub struct BindGroupLayout {
    raw: vk::DescriptorSetLayout,
    /// Per-type descriptor counts, used when allocating sets.
    desc_count: gpu_descriptor::DescriptorTotalCount,
    /// Descriptor type and count for each entry in the layout.
    types: Box<[(vk::DescriptorType, u32)]>,
    /// Map of binding index to array size.
    binding_arrays: Vec<(u32, NonZeroU32)>,
}

impl crate::DynBindGroupLayout for BindGroupLayout {}
820
/// A Vulkan pipeline layout plus the binding-array mapping Naga needs when
/// generating SPIR-V against it.
#[derive(Debug)]
pub struct PipelineLayout {
    raw: vk::PipelineLayout,
    binding_arrays: naga::back::spv::BindingMap,
}

impl crate::DynPipelineLayout for PipelineLayout {}
828
/// A descriptor set allocated through the shared descriptor allocator.
#[derive(Debug)]
pub struct BindGroup {
    set: gpu_descriptor::DescriptorSet<vk::DescriptorSet>,
}

impl crate::DynBindGroup for BindGroup {}
835
/// Miscellaneous allocation recycling pool for `CommandAllocator`.
#[derive(Default)]
struct Temp {
    /// Scratch buffer for building NUL-terminated debug marker strings.
    marker: Vec<u8>,
    /// Recycled storage for buffer memory barriers.
    buffer_barriers: Vec<vk::BufferMemoryBarrier<'static>>,
    /// Recycled storage for image memory barriers.
    image_barriers: Vec<vk::ImageMemoryBarrier<'static>>,
}
843
844impl Temp {
845    fn clear(&mut self) {
846        self.marker.clear();
847        self.buffer_barriers.clear();
848        self.image_barriers.clear();
849    }
850
851    fn make_c_str(&mut self, name: &str) -> &CStr {
852        self.marker.clear();
853        self.marker.extend_from_slice(name.as_bytes());
854        self.marker.push(0);
855        unsafe { CStr::from_bytes_with_nul_unchecked(&self.marker) }
856    }
857}
858
/// The [`Api::CommandEncoder`] type.
///
/// [`Api::CommandEncoder`]: crate::Api::CommandEncoder
pub struct CommandEncoder {
    /// The command pool that `active`, `free`, and `discarded` buffers
    /// belong to.
    raw: vk::CommandPool,

    /// The device this encoder's pool was created on.
    device: Arc<DeviceShared>,

    /// The current command buffer, if `self` is in the ["recording"]
    /// state.
    ///
    /// ["recording"]: crate::CommandEncoder
    ///
    /// If non-`null`, the buffer is in the Vulkan "recording" state.
    active: vk::CommandBuffer,

    /// What kind of pass we are currently within: compute or render.
    bind_point: vk::PipelineBindPoint,

    /// Allocation recycling pool for this encoder.
    temp: Temp,

    /// A pool of available command buffers.
    ///
    /// These are all in the Vulkan "initial" state.
    free: Vec<vk::CommandBuffer>,

    /// A pool of discarded command buffers.
    ///
    /// These could be in any Vulkan state except "pending".
    discarded: Vec<vk::CommandBuffer>,

    /// If this is true, the active renderpass enabled a debug span,
    /// and needs to be disabled on renderpass close.
    rpass_debug_marker_active: bool,

    /// If set, the end of the next render/compute pass will write a timestamp at
    /// the given pool & location.
    end_of_pass_timer_query: Option<(vk::QueryPool, u32)>,
}
895
impl CommandEncoder {
    /// Return the raw `VkCommandBuffer` currently being recorded.
    ///
    /// # Safety
    ///
    /// - The command buffer handle must not be manually destroyed
    pub unsafe fn raw_handle(&self) -> vk::CommandBuffer {
        self.active
    }
}
904
905impl fmt::Debug for CommandEncoder {
906    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
907        f.debug_struct("CommandEncoder")
908            .field("raw", &self.raw)
909            .finish()
910    }
911}
912
/// The [`Api::CommandBuffer`] type: a raw Vulkan command buffer handle.
///
/// [`Api::CommandBuffer`]: crate::Api::CommandBuffer
#[derive(Debug)]
pub struct CommandBuffer {
    raw: vk::CommandBuffer,
}

impl crate::DynCommandBuffer for CommandBuffer {}
919
/// The [`Api::ShaderModule`] type.
///
/// Either an already-created `VkShaderModule`, or retained Naga IR kept
/// around for later compilation.
///
/// [`Api::ShaderModule`]: crate::Api::ShaderModule
#[derive(Debug)]
// `Intermediate` is much larger than `Raw`; the allow keeps clippy quiet
// without boxing the rare variant.
#[allow(clippy::large_enum_variant)]
pub enum ShaderModule {
    Raw(vk::ShaderModule),
    Intermediate {
        naga_shader: crate::NagaShader,
        // NOTE(review): presumably controls whether runtime bounds checks
        // are emitted when the IR is compiled — confirm at the use site.
        runtime_checks: bool,
    },
}

impl crate::DynShaderModule for ShaderModule {}
931
/// The [`Api::RenderPipeline`] type: a raw Vulkan pipeline handle.
///
/// [`Api::RenderPipeline`]: crate::Api::RenderPipeline
#[derive(Debug)]
pub struct RenderPipeline {
    raw: vk::Pipeline,
}

impl crate::DynRenderPipeline for RenderPipeline {}
938
/// The [`Api::ComputePipeline`] type: a raw Vulkan pipeline handle.
///
/// [`Api::ComputePipeline`]: crate::Api::ComputePipeline
#[derive(Debug)]
pub struct ComputePipeline {
    raw: vk::Pipeline,
}

impl crate::DynComputePipeline for ComputePipeline {}
945
/// The [`Api::PipelineCache`] type: a raw Vulkan pipeline cache handle.
///
/// [`Api::PipelineCache`]: crate::Api::PipelineCache
#[derive(Debug)]
pub struct PipelineCache {
    raw: vk::PipelineCache,
}

impl crate::DynPipelineCache for PipelineCache {}
952
/// The [`Api::QuerySet`] type: a raw Vulkan query pool handle.
///
/// [`Api::QuerySet`]: crate::Api::QuerySet
#[derive(Debug)]
pub struct QuerySet {
    raw: vk::QueryPool,
}

impl crate::DynQuerySet for QuerySet {}
959
/// The [`Api::Fence`] type for [`vulkan::Api`].
///
/// This is an `enum` because there are two possible implementations of
/// `wgpu-hal` fences on Vulkan: Vulkan fences, which work on any version of
/// Vulkan, and Vulkan timeline semaphores, which are easier and cheaper but
/// require non-1.0 features.
///
/// [`Device::create_fence`] returns a [`TimelineSemaphore`] if
/// [`VK_KHR_timeline_semaphore`] is available and enabled, and a [`FencePool`]
/// otherwise.
///
/// [`Api::Fence`]: crate::Api::Fence
/// [`vulkan::Api`]: Api
/// [`Device::create_fence`]: crate::Device::create_fence
/// [`TimelineSemaphore`]: Fence::TimelineSemaphore
/// [`VK_KHR_timeline_semaphore`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#VK_KHR_timeline_semaphore
/// [`FencePool`]: Fence::FencePool
#[derive(Debug)]
pub enum Fence {
    /// A Vulkan [timeline semaphore].
    ///
    /// These are simpler to use than Vulkan fences, since timeline semaphores
    /// work exactly the way [`wgpu_hal::Api::Fence`] is specified to work.
    ///
    /// [timeline semaphore]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#synchronization-semaphores
    /// [`wgpu_hal::Api::Fence`]: crate::Api::Fence
    TimelineSemaphore(vk::Semaphore),

    /// A collection of Vulkan [fence]s, each associated with a [`FenceValue`].
    ///
    /// The effective [`FenceValue`] of this variant is the greater of
    /// `last_completed` and the maximum value associated with a signalled fence
    /// in `active`.
    ///
    /// Fences are available in all versions of Vulkan, but since they only have
    /// two states, "signaled" and "unsignaled", we need to use a separate fence
    /// for each queue submission we might want to wait for, and remember which
    /// [`FenceValue`] each one represents.
    ///
    /// [fence]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#synchronization-fences
    /// [`FenceValue`]: crate::FenceValue
    FencePool {
        /// The highest fence value known to have completed.
        last_completed: crate::FenceValue,
        /// The pending fence values have to be ascending.
        active: Vec<(crate::FenceValue, vk::Fence)>,
        /// Unsignaled fences available for reuse by future submissions.
        free: Vec<vk::Fence>,
    },
}

impl crate::DynFence for Fence {}
1010
impl Fence {
    /// Return the highest [`FenceValue`] among the signalled fences in `active`.
    ///
    /// As an optimization, assume that we already know that the fence has
    /// reached `last_completed`, and don't bother checking fences whose values
    /// are less than that: those fences remain in the `active` array only
    /// because we haven't called `maintain` yet to clean them up.
    ///
    /// [`FenceValue`]: crate::FenceValue
    fn check_active(
        device: &ash::Device,
        mut last_completed: crate::FenceValue,
        active: &[(crate::FenceValue, vk::Fence)],
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        for &(value, raw) in active.iter() {
            unsafe {
                // Only query fences whose value would actually raise
                // `last_completed`; `get_fence_status` is a driver call
                // per fence.
                if value > last_completed
                    && device
                        .get_fence_status(raw)
                        .map_err(map_host_device_oom_and_lost_err)?
                {
                    last_completed = value;
                }
            }
        }
        Ok(last_completed)
    }

    /// Return the highest signalled [`FenceValue`] for `self`.
    ///
    /// [`FenceValue`]: crate::FenceValue
    fn get_latest(
        &self,
        device: &ash::Device,
        extension: Option<&ExtensionFn<khr::timeline_semaphore::Device>>,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        match *self {
            Self::TimelineSemaphore(raw) => unsafe {
                // The timeline-semaphore entry points live on the extension
                // struct, or on the device itself when promoted to core.
                Ok(match *extension.unwrap() {
                    ExtensionFn::Extension(ref ext) => ext
                        .get_semaphore_counter_value(raw)
                        .map_err(map_host_device_oom_and_lost_err)?,
                    ExtensionFn::Promoted => device
                        .get_semaphore_counter_value(raw)
                        .map_err(map_host_device_oom_and_lost_err)?,
                })
            },
            Self::FencePool {
                last_completed,
                ref active,
                free: _,
            } => Self::check_active(device, last_completed, active),
        }
    }

    /// Trim the internal state of this [`Fence`].
    ///
    /// This function has no externally visible effect, but you should call it
    /// periodically to keep this fence's resource consumption under control.
    ///
    /// For fences using the [`FencePool`] implementation, this function
    /// recycles fences that have been signaled. If you don't call this,
    /// [`Queue::submit`] will just keep allocating a new Vulkan fence every
    /// time it's called.
    ///
    /// [`FencePool`]: Fence::FencePool
    /// [`Queue::submit`]: crate::Queue::submit
    fn maintain(&mut self, device: &ash::Device) -> Result<(), crate::DeviceError> {
        match *self {
            Self::TimelineSemaphore(_) => {}
            Self::FencePool {
                ref mut last_completed,
                ref mut active,
                ref mut free,
            } => {
                let latest = Self::check_active(device, *last_completed, active)?;
                // Remember where the newly recycled fences start in `free`,
                // so we can reset just those below.
                let base_free = free.len();
                for &(value, raw) in active.iter() {
                    if value <= latest {
                        free.push(raw);
                    }
                }
                if free.len() != base_free {
                    active.retain(|&(value, _)| value > latest);
                    // Return the recycled fences to the "unsignaled" state
                    // so future submissions can reuse them.
                    unsafe { device.reset_fences(&free[base_free..]) }
                        .map_err(map_device_oom_err)?
                }
                *last_completed = latest;
            }
        }
        Ok(())
    }
}
1104
impl crate::Queue for Queue {
    type A = Api;

    /// Submit `command_buffers` to this queue, wiring up swapchain
    /// acquire/present semaphores, inter-submission relay semaphores, and
    /// `signal_fence` at `signal_value`.
    unsafe fn submit(
        &self,
        command_buffers: &[&CommandBuffer],
        surface_textures: &[&SurfaceTexture],
        (signal_fence, signal_value): (&mut Fence, crate::FenceValue),
    ) -> Result<(), crate::DeviceError> {
        // Stays null unless `signal_fence` is a `FencePool`.
        let mut fence_raw = vk::Fence::null();

        let mut wait_stage_masks = Vec::new();
        let mut wait_semaphores = Vec::new();
        let mut signal_semaphores = Vec::new();
        let mut signal_values = Vec::new();

        // Double check that the same swapchain image isn't being given to us multiple times,
        // as that will deadlock when we try to lock them all.
        debug_assert!(
            {
                let mut check = HashSet::with_capacity(surface_textures.len());
                // We compare the Arcs by pointer, as Eq isn't well defined for SurfaceSemaphores.
                for st in surface_textures {
                    check.insert(Arc::as_ptr(&st.surface_semaphores));
                }
                check.len() == surface_textures.len()
            },
            "More than one surface texture is being used from the same swapchain. This will cause a deadlock in release."
        );

        // Lock every involved swapchain image's semaphore state for the
        // duration of submission setup.
        let locked_swapchain_semaphores = surface_textures
            .iter()
            .map(|st| {
                st.surface_semaphores
                    .try_lock()
                    .expect("Failed to lock surface semaphore.")
            })
            .collect::<Vec<_>>();

        for mut swapchain_semaphore in locked_swapchain_semaphores {
            swapchain_semaphore.set_used_fence_value(signal_value);

            // If we're the first submission to operate on this image, wait on
            // its acquire semaphore, to make sure the presentation engine is
            // done with it.
            if let Some(sem) = swapchain_semaphore.get_acquire_wait_semaphore() {
                wait_stage_masks.push(vk::PipelineStageFlags::TOP_OF_PIPE);
                wait_semaphores.push(sem);
            }

            // Get a semaphore to signal when we're done writing to this surface
            // image. Presentation of this image will wait for this.
            let signal_semaphore = swapchain_semaphore.get_submit_signal_semaphore(&self.device)?;
            signal_semaphores.push(signal_semaphore);
            // `!0` is a placeholder; the value only matters for timeline
            // semaphores.
            signal_values.push(!0);
        }

        // In order for submissions to be strictly ordered, we encode a dependency between each submission
        // using a pair of semaphores. This adds a wait if it is needed, and signals the next semaphore.
        let semaphore_state = self.relay_semaphores.lock().advance(&self.device)?;

        if let Some(sem) = semaphore_state.wait {
            wait_stage_masks.push(vk::PipelineStageFlags::TOP_OF_PIPE);
            wait_semaphores.push(sem);
        }

        signal_semaphores.push(semaphore_state.signal);
        signal_values.push(!0);

        // We need to signal our wgpu::Fence if we have one, this adds it to the signal list.
        signal_fence.maintain(&self.device.raw)?;
        match *signal_fence {
            Fence::TimelineSemaphore(raw) => {
                signal_semaphores.push(raw);
                signal_values.push(signal_value);
            }
            Fence::FencePool {
                ref mut active,
                ref mut free,
                ..
            } => {
                // Reuse a recycled fence if one is available; otherwise
                // create a fresh one.
                fence_raw = match free.pop() {
                    Some(raw) => raw,
                    None => unsafe {
                        self.device
                            .raw
                            .create_fence(&vk::FenceCreateInfo::default(), None)
                            .map_err(map_host_device_oom_err)?
                    },
                };
                active.push((signal_value, fence_raw));
            }
        }

        let vk_cmd_buffers = command_buffers
            .iter()
            .map(|cmd| cmd.raw)
            .collect::<Vec<_>>();

        let mut vk_info = vk::SubmitInfo::default().command_buffers(&vk_cmd_buffers);

        vk_info = vk_info
            .wait_semaphores(&wait_semaphores)
            .wait_dst_stage_mask(&wait_stage_masks)
            .signal_semaphores(&signal_semaphores);

        // Declared out here so it outlives `vk_info`, which borrows it via
        // `push_next` below.
        let mut vk_timeline_info;

        if self.device.private_caps.timeline_semaphores {
            vk_timeline_info =
                vk::TimelineSemaphoreSubmitInfo::default().signal_semaphore_values(&signal_values);
            vk_info = vk_info.push_next(&mut vk_timeline_info);
        }

        profiling::scope!("vkQueueSubmit");
        unsafe {
            self.device
                .raw
                .queue_submit(self.raw, &[vk_info], fence_raw)
                .map_err(map_host_device_oom_and_lost_err)?
        };
        Ok(())
    }

    /// Present `texture` on `surface`, waiting on the semaphores signalled
    /// by earlier submissions that wrote to the image.
    unsafe fn present(
        &self,
        surface: &Surface,
        texture: SurfaceTexture,
    ) -> Result<(), crate::SurfaceError> {
        let mut swapchain = surface.swapchain.write();
        let ssc = swapchain.as_mut().unwrap();
        let mut swapchain_semaphores = texture.surface_semaphores.lock();

        let swapchains = [ssc.raw];
        let image_indices = [texture.index];
        let vk_info = vk::PresentInfoKHR::default()
            .swapchains(&swapchains)
            .image_indices(&image_indices)
            .wait_semaphores(swapchain_semaphores.get_present_wait_semaphores());

        // Both declared out here so they outlive the `vk_info` borrow below.
        let mut display_timing;
        let present_times;
        let vk_info = if let Some(present_time) = ssc.next_present_time.take() {
            debug_assert!(
                ssc.device
                    .features
                    .contains(wgt::Features::VULKAN_GOOGLE_DISPLAY_TIMING),
                "`next_present_time` should only be set if `VULKAN_GOOGLE_DISPLAY_TIMING` is enabled"
            );
            present_times = [present_time];
            display_timing = vk::PresentTimesInfoGOOGLE::default().times(&present_times);
            // SAFETY: We know that VK_GOOGLE_display_timing is present because of the safety contract on `next_present_time`.
            vk_info.push_next(&mut display_timing)
        } else {
            vk_info
        };

        let suboptimal = {
            profiling::scope!("vkQueuePresentKHR");
            unsafe { self.swapchain_fn.queue_present(self.raw, &vk_info) }.map_err(|error| {
                match error {
                    vk::Result::ERROR_OUT_OF_DATE_KHR => crate::SurfaceError::Outdated,
                    vk::Result::ERROR_SURFACE_LOST_KHR => crate::SurfaceError::Lost,
                    // We don't use VK_EXT_full_screen_exclusive
                    // VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT
                    _ => map_host_device_oom_and_lost_err(error).into(),
                }
            })?
        };
        if suboptimal {
            // We treat `VK_SUBOPTIMAL_KHR` as `VK_SUCCESS` on Android.
            // On Android 10+, libvulkan's `vkQueuePresentKHR` implementation returns `VK_SUBOPTIMAL_KHR` if not doing pre-rotation
            // (i.e `VkSwapchainCreateInfoKHR::preTransform` not being equal to the current device orientation).
            // This is always the case when the device orientation is anything other than the identity one, as we unconditionally use `VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR`.
            #[cfg(not(target_os = "android"))]
            log::warn!("Suboptimal present of frame {}", texture.index);
        }
        Ok(())
    }

    unsafe fn get_timestamp_period(&self) -> f32 {
        self.device.timestamp_period
    }
}
1289
1290/// Maps
1291///
1292/// - VK_ERROR_OUT_OF_HOST_MEMORY
1293/// - VK_ERROR_OUT_OF_DEVICE_MEMORY
1294fn map_host_device_oom_err(err: vk::Result) -> crate::DeviceError {
1295    match err {
1296        vk::Result::ERROR_OUT_OF_HOST_MEMORY | vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
1297            get_oom_err(err)
1298        }
1299        e => get_unexpected_err(e),
1300    }
1301}
1302
1303/// Maps
1304///
1305/// - VK_ERROR_OUT_OF_HOST_MEMORY
1306/// - VK_ERROR_OUT_OF_DEVICE_MEMORY
1307/// - VK_ERROR_DEVICE_LOST
1308fn map_host_device_oom_and_lost_err(err: vk::Result) -> crate::DeviceError {
1309    match err {
1310        vk::Result::ERROR_DEVICE_LOST => get_lost_err(),
1311        other => map_host_device_oom_err(other),
1312    }
1313}
1314
/// Maps
///
/// - VK_ERROR_OUT_OF_HOST_MEMORY
/// - VK_ERROR_OUT_OF_DEVICE_MEMORY
/// - VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR
fn map_host_device_oom_and_ioca_err(err: vk::Result) -> crate::DeviceError {
    // We don't use VK_KHR_buffer_device_address
    // VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR
    map_host_device_oom_err(err)
}
1325
1326/// Maps
1327///
1328/// - VK_ERROR_OUT_OF_HOST_MEMORY
1329fn map_host_oom_err(err: vk::Result) -> crate::DeviceError {
1330    match err {
1331        vk::Result::ERROR_OUT_OF_HOST_MEMORY => get_oom_err(err),
1332        e => get_unexpected_err(e),
1333    }
1334}
1335
1336/// Maps
1337///
1338/// - VK_ERROR_OUT_OF_DEVICE_MEMORY
1339fn map_device_oom_err(err: vk::Result) -> crate::DeviceError {
1340    match err {
1341        vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => get_oom_err(err),
1342        e => get_unexpected_err(e),
1343    }
1344}
1345
/// Maps
///
/// - VK_ERROR_OUT_OF_HOST_MEMORY
/// - VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR
fn map_host_oom_and_ioca_err(err: vk::Result) -> crate::DeviceError {
    // We don't use VK_KHR_buffer_device_address
    // VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR
    map_host_oom_err(err)
}
1355
/// Maps
///
/// - VK_ERROR_OUT_OF_HOST_MEMORY
/// - VK_ERROR_OUT_OF_DEVICE_MEMORY
/// - VK_PIPELINE_COMPILE_REQUIRED_EXT
/// - VK_ERROR_INVALID_SHADER_NV
fn map_pipeline_err(err: vk::Result) -> crate::DeviceError {
    // We don't use VK_EXT_pipeline_creation_cache_control
    // VK_PIPELINE_COMPILE_REQUIRED_EXT
    // We don't use VK_NV_glsl_shader
    // VK_ERROR_INVALID_SHADER_NV
    map_host_device_oom_err(err)
}
1369
/// Returns [`crate::DeviceError::Unexpected`] or panics if the `internal_error_panic`
/// feature flag is enabled.
fn get_unexpected_err(_err: vk::Result) -> crate::DeviceError {
    #[cfg(feature = "internal_error_panic")]
    panic!("Unexpected Vulkan error: {_err:?}");

    // The `allow` silences the lint in the panicking configuration, where
    // this expression is unreachable.
    #[allow(unreachable_code)]
    crate::DeviceError::Unexpected
}
1379
/// Returns [`crate::DeviceError::OutOfMemory`] or panics if the `oom_panic`
/// feature flag is enabled.
fn get_oom_err(_err: vk::Result) -> crate::DeviceError {
    #[cfg(feature = "oom_panic")]
    panic!("Out of memory ({_err:?})");

    // Unreachable only when the panicking configuration is active.
    #[allow(unreachable_code)]
    crate::DeviceError::OutOfMemory
}
1389
/// Returns [`crate::DeviceError::Lost`] or panics if the `device_lost_panic`
/// feature flag is enabled.
fn get_lost_err() -> crate::DeviceError {
    #[cfg(feature = "device_lost_panic")]
    panic!("Device lost");

    // Unreachable only when the panicking configuration is active.
    #[allow(unreachable_code)]
    crate::DeviceError::Lost
}