wgpu/api/device.rs
use alloc::{boxed::Box, string::String, sync::Arc, vec};
#[cfg(wgpu_core)]
use core::ops::Deref;
use core::{error, fmt, future::Future};

use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::util::Mutex;
use crate::*;

/// Open connection to a graphics and/or compute device.
///
/// Responsible for the creation of most rendering and compute resources.
/// These are then used in commands, which are submitted to a [`Queue`].
///
/// A device may be requested from an adapter with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
#[derive(Debug, Clone)]
pub struct Device {
    pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);

/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);

impl Device {
    #[cfg(custom)]
    /// Returns the custom implementation of the device, if this device uses a custom backend
    /// whose inner type is `T`.
    pub fn as_custom<T: custom::DeviceInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }

    #[cfg(custom)]
    /// Creates a [`Device`] from a custom implementation.
    pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
        Self {
            inner: dispatch::DispatchDevice::custom(device),
        }
    }

    /// Constructs a stub device for testing using [`Backend::Noop`].
    ///
    /// This is a convenience function which avoids the configuration, `async`, and fallibility
    /// aspects of constructing a device through `Instance`.
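    ///
    /// # Example
    ///
    /// A minimal sketch of how this could be used in a unit test. It assumes the
    /// `noop` cargo feature is enabled; no real GPU is involved.
    ///
    /// ```ignore
    /// // Create a stub device and queue (requires the `noop` cargo feature).
    /// let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    ///
    /// // Resources can then be created as usual, e.g. for tests that only
    /// // need valid handles rather than real GPU work.
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("test buffer"),
    ///     size: 16,
    ///     usage: wgpu::BufferUsages::COPY_DST,
    ///     mapped_at_creation: false,
    /// });
    /// ```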
    #[cfg(feature = "noop")]
    pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
        use core::future::Future as _;
        use core::pin::pin;
        use core::task;
        let ctx = &mut task::Context::from_waker(waker::noop_waker_ref());

        let instance = Instance::new(&InstanceDescriptor {
            backends: Backends::NOOP,
            backend_options: BackendOptions {
                noop: NoopBackendOptions { enable: true },
                ..Default::default()
            },
            ..Default::default()
        });

        // Both of these futures are trivial and should complete instantaneously,
        // so we do not need an executor and can just poll them once.
        let task::Poll::Ready(Ok(adapter)) =
            pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
        else {
            unreachable!()
        };
        let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
        else {
            unreachable!()
        };
        device_and_queue
    }

    /// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
    ///
    /// Returns a [`PollStatus`] describing whether the queue is empty or whether queue
    /// submissions are still in flight. (Note that, unless access to the [`Queue`] is
    /// coordinated somehow, this information could be out of date by the time
    /// the caller receives it. `Queue`s can be shared between threads, so
    /// other threads could submit new work at any time.)
    ///
    /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
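    ///
    /// # Example
    ///
    /// A minimal sketch of blocking until all submitted work has completed,
    /// assuming `device` has outstanding submissions:
    ///
    /// ```no_run
    /// fn wait_for_gpu(device: &wgpu::Device) {
    ///     // `PollType::Wait` blocks until the queue is empty; an error is
    ///     // returned if the wait fails (for example, on device loss).
    ///     match device.poll(wgpu::PollType::Wait) {
    ///         Ok(status) => println!("poll finished: {status:?}"),
    ///         Err(err) => eprintln!("poll failed: {err}"),
    ///     }
    /// }
    /// ```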
    pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
        self.inner.poll(poll_type.map_index(|s| s.index))
    }

    /// The features which can be used on this device.
    ///
    /// No additional features can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn features(&self) -> Features {
        self.inner.features()
    }

    /// The limits which can be used on this device.
    ///
    /// No better limits can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn limits(&self) -> Limits {
        self.inner.limits()
    }

    /// Creates a shader module.
    ///
    /// <div class="warning">
    // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
    // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
    ///
    /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
    /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
    /// However, on some build profiles and platforms, the default stack size for a thread may be
    /// exceeded before this limit is reached during parsing. Callers should ensure that there is
    /// enough stack space for this, particularly if calls to this method are exposed to user
    /// input.
    ///
    /// </div>
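    ///
    /// # Example
    ///
    /// A minimal sketch compiling an inline WGSL source (the shader shown is a
    /// placeholder with an empty compute entry point):
    ///
    /// ```no_run
    /// fn make_module(device: &wgpu::Device) -> wgpu::ShaderModule {
    ///     device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///         label: Some("example shader"),
    ///         // Inline WGSL; `include_wgsl!` can be used to load from a file instead.
    ///         source: wgpu::ShaderSource::Wgsl(
    ///             "@compute @workgroup_size(1) fn main() {}".into(),
    ///         ),
    ///     })
    /// }
    /// ```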
    #[must_use]
    pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
        let module = self
            .inner
            .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
        ShaderModule { inner: module }
    }

    /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
    ///
    /// # Safety
    ///
    /// See [`create_shader_module_trusted`][csmt].
    ///
    /// [csmt]: Self::create_shader_module_trusted
    #[deprecated(
        since = "24.0.0",
        note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
    )]
    #[must_use]
    pub unsafe fn create_shader_module_unchecked(
        &self,
        desc: ShaderModuleDescriptor<'_>,
    ) -> ShaderModule {
        unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
    }

    /// Creates a shader module with flags to dictate runtime checks.
    ///
    /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
    ///
    /// # Safety
    ///
    /// In contrast with [`create_shader_module`][csm], this function creates a shader module
    /// with user-customizable runtime checks, which allow shaders to perform operations that
    /// can lead to undefined behavior, such as indexing out of bounds. It is therefore the
    /// caller's responsibility to pass a shader which does not perform any of these operations.
    ///
    /// See the documentation for [`ShaderRuntimeChecks`][src] for more information about specific checks.
    ///
    /// [csm]: Self::create_shader_module
    /// [src]: crate::ShaderRuntimeChecks
    #[must_use]
    pub unsafe fn create_shader_module_trusted(
        &self,
        desc: ShaderModuleDescriptor<'_>,
        runtime_checks: crate::ShaderRuntimeChecks,
    ) -> ShaderModule {
        let module = self.inner.create_shader_module(desc, runtime_checks);
        ShaderModule { inner: module }
    }

    /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
    ///
    /// # Safety
    ///
    /// This function passes data to the backend as-is and can potentially result in a
    /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
    #[must_use]
    pub unsafe fn create_shader_module_passthrough(
        &self,
        desc: ShaderModuleDescriptorPassthrough<'_>,
    ) -> ShaderModule {
        let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
        ShaderModule { inner: module }
    }

    /// Creates an empty [`CommandEncoder`].
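    ///
    /// # Example
    ///
    /// A minimal sketch of recording and submitting an (empty) command buffer,
    /// assuming a `queue` obtained alongside this device:
    ///
    /// ```no_run
    /// fn submit_empty(device: &wgpu::Device, queue: &wgpu::Queue) {
    ///     let encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
    ///         label: Some("example encoder"),
    ///     });
    ///     // Render or compute passes and copies would be recorded on the encoder here.
    ///     queue.submit([encoder.finish()]);
    /// }
    /// ```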
    #[must_use]
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
        let encoder = self.inner.create_command_encoder(desc);
        CommandEncoder { inner: encoder }
    }

    /// Creates an empty [`RenderBundleEncoder`].
    #[must_use]
    pub fn create_render_bundle_encoder<'a>(
        &self,
        desc: &RenderBundleEncoderDescriptor<'_>,
    ) -> RenderBundleEncoder<'a> {
        let encoder = self.inner.create_render_bundle_encoder(desc);
        RenderBundleEncoder {
            inner: encoder,
            _p: core::marker::PhantomData,
        }
    }

    /// Creates a new [`BindGroup`].
    #[must_use]
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
        let group = self.inner.create_bind_group(desc);
        BindGroup { inner: group }
    }

    /// Creates a [`BindGroupLayout`].
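    ///
    /// # Example
    ///
    /// A minimal sketch of a layout with a single uniform buffer visible to the
    /// fragment stage:
    ///
    /// ```no_run
    /// fn make_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
    ///     device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
    ///         label: Some("example layout"),
    ///         entries: &[wgpu::BindGroupLayoutEntry {
    ///             binding: 0,
    ///             visibility: wgpu::ShaderStages::FRAGMENT,
    ///             ty: wgpu::BindingType::Buffer {
    ///                 ty: wgpu::BufferBindingType::Uniform,
    ///                 has_dynamic_offset: false,
    ///                 min_binding_size: None,
    ///             },
    ///             count: None,
    ///         }],
    ///     })
    /// }
    /// ```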
    #[must_use]
    pub fn create_bind_group_layout(
        &self,
        desc: &BindGroupLayoutDescriptor<'_>,
    ) -> BindGroupLayout {
        let layout = self.inner.create_bind_group_layout(desc);
        BindGroupLayout { inner: layout }
    }

    /// Creates a [`PipelineLayout`].
    #[must_use]
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
        let layout = self.inner.create_pipeline_layout(desc);
        PipelineLayout { inner: layout }
    }

    /// Creates a [`RenderPipeline`].
    #[must_use]
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_render_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a [`ComputePipeline`].
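    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a WGSL module with a `@compute` entry point
    /// named `main` and letting the pipeline layout be inferred from the shader:
    ///
    /// ```no_run
    /// fn make_pipeline(
    ///     device: &wgpu::Device,
    ///     module: &wgpu::ShaderModule,
    /// ) -> wgpu::ComputePipeline {
    ///     device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
    ///         label: Some("example compute pipeline"),
    ///         layout: None, // infer the layout from the shader
    ///         module,
    ///         entry_point: Some("main"),
    ///         compilation_options: Default::default(),
    ///         cache: None,
    ///     })
    /// }
    /// ```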
    #[must_use]
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
        let pipeline = self.inner.create_compute_pipeline(desc);
        ComputePipeline { inner: pipeline }
    }

    /// Creates a [`Buffer`].
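    ///
    /// # Example
    ///
    /// A minimal sketch of a buffer that is mapped at creation so initial data can
    /// be written before its first use on the GPU:
    ///
    /// ```no_run
    /// fn make_buffer(device: &wgpu::Device) -> wgpu::Buffer {
    ///     let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///         label: Some("example buffer"),
    ///         size: 256,
    ///         usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
    ///         mapped_at_creation: true,
    ///     });
    ///     // Fill the mapped range, then unmap before using the buffer on the GPU.
    ///     buffer.slice(..).get_mapped_range_mut().fill(0);
    ///     buffer.unmap();
    ///     buffer
    /// }
    /// ```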
    #[must_use]
    pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
        let mut map_context = MapContext::new();
        if desc.mapped_at_creation {
            map_context.initial_range = 0..desc.size;
        }

        let buffer = self.inner.create_buffer(desc);

        Buffer {
            inner: buffer,
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Texture`].
    ///
    /// `desc` specifies the general format of the texture.
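    ///
    /// # Example
    ///
    /// A minimal sketch of a small 2D texture usable as a sampled binding and as a
    /// copy destination:
    ///
    /// ```no_run
    /// fn make_texture(device: &wgpu::Device) -> wgpu::Texture {
    ///     device.create_texture(&wgpu::TextureDescriptor {
    ///         label: Some("example texture"),
    ///         size: wgpu::Extent3d {
    ///             width: 256,
    ///             height: 256,
    ///             depth_or_array_layers: 1,
    ///         },
    ///         mip_level_count: 1,
    ///         sample_count: 1,
    ///         dimension: wgpu::TextureDimension::D2,
    ///         format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///         usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
    ///         view_formats: &[],
    ///     })
    /// }
    /// ```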
    #[must_use]
    pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
        let texture = self.inner.create_texture(desc);

        Texture {
            inner: texture,
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Texture`] from a wgpu-hal Texture.
    ///
    /// # Safety
    ///
    /// - `hal_texture` must be created from this device's internal handle
    /// - `hal_texture` must be created respecting `desc`
    /// - `hal_texture` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_texture_from_hal<A: wgc::hal_api::HalApi>(
        &self,
        hal_texture: A::Texture,
        desc: &TextureDescriptor<'_>,
    ) -> Texture {
        let texture = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_texture_from_hal::<A>(hal_texture, core_device, desc)
        };
        Texture {
            inner: texture.into(),
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Buffer`] from a wgpu-hal Buffer.
    ///
    /// # Safety
    ///
    /// - `hal_buffer` must be created from this device's internal handle
    /// - `hal_buffer` must be created respecting `desc`
    /// - `hal_buffer` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_buffer_from_hal<A: wgc::hal_api::HalApi>(
        &self,
        hal_buffer: A::Buffer,
        desc: &BufferDescriptor<'_>,
    ) -> Buffer {
        let mut map_context = MapContext::new();
        if desc.mapped_at_creation {
            map_context.initial_range = 0..desc.size;
        }

        let buffer = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
        };

        Buffer {
            inner: buffer.into(),
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Sampler`].
    ///
    /// `desc` specifies the behavior of the sampler.
    #[must_use]
    pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
        let sampler = self.inner.create_sampler(desc);
        Sampler { inner: sampler }
    }

    /// Creates a new [`QuerySet`].
    #[must_use]
    pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
        let query_set = self.inner.create_query_set(desc);
        QuerySet { inner: query_set }
    }

    /// Set a callback for errors that are not handled in error scopes.
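    ///
    /// # Example
    ///
    /// A minimal sketch that logs uncaptured errors instead of letting them
    /// escalate to the default handling:
    ///
    /// ```no_run
    /// fn install_handler(device: &wgpu::Device) {
    ///     device.on_uncaptured_error(Box::new(|error: wgpu::Error| {
    ///         eprintln!("uncaptured wgpu error: {error}");
    ///     }));
    /// }
    /// ```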
    pub fn on_uncaptured_error(&self, handler: Box<dyn UncapturedErrorHandler>) {
        self.inner.on_uncaptured_error(handler)
    }

    /// Push an error scope.
    pub fn push_error_scope(&self, filter: ErrorFilter) {
        self.inner.push_error_scope(filter)
    }

    /// Pop an error scope.
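    ///
    /// # Example
    ///
    /// A minimal sketch of catching validation errors around resource creation.
    /// The returned future is awaited in an `async` context:
    ///
    /// ```no_run
    /// async fn checked_create(device: &wgpu::Device) -> Option<wgpu::Error> {
    ///     device.push_error_scope(wgpu::ErrorFilter::Validation);
    ///     let _buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///         label: Some("example buffer"),
    ///         size: 16,
    ///         usage: wgpu::BufferUsages::UNIFORM,
    ///         mapped_at_creation: false,
    ///     });
    ///     // `Some(error)` if the creation above raised a validation error.
    ///     device.pop_error_scope().await
    /// }
    /// ```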
    pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
        self.inner.pop_error_scope()
    }

    /// Starts a capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
    /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should not be any other captures currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - Debuggers need to capture both the recording of the commands and the
    ///   submission of the commands to the GPU. Try to wrap all of your
    ///   gpu work in a capture.
    /// - If you encounter issues, try waiting for the GPU to finish all work
    ///   before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "start_renderdoc_capture")]
    #[doc(alias = "start_xcode_capture")]
    pub unsafe fn start_graphics_debugger_capture(&self) {
        unsafe { self.inner.start_graphics_debugger_capture() }
    }

    /// Stops the current capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
    /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should be a capture currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - If you encounter issues, try submitting all work to the GPU and waiting
    ///   for that work to finish before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "stop_renderdoc_capture")]
    #[doc(alias = "stop_xcode_capture")]
    pub unsafe fn stop_graphics_debugger_capture(&self) {
        unsafe { self.inner.stop_graphics_debugger_capture() }
    }

    /// Query internal counters from the native backend for debugging purposes.
    ///
    /// Some backends may not set all counters, or may not set any counter at all.
    /// The `counters` cargo feature must be enabled for any counter to be set.
    ///
    /// If a counter is not set, it contains its default value (zero).
    #[must_use]
    pub fn get_internal_counters(&self) -> wgt::InternalCounters {
        self.inner.get_internal_counters()
    }

    /// Generate a GPU memory allocation report if the underlying backend supports it.
    ///
    /// Backends that do not support producing these reports return `None`. A backend may
    /// support it and still return `None` if it is not performing sub-allocation,
    /// for example as a workaround for driver issues.
    #[must_use]
    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
        self.inner.generate_allocator_report()
    }

    /// Get the [`wgpu_hal`] device from this `Device`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Device`].
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The device is not from the backend specified by `A`.
    /// - The device is from the `webgpu` or `custom` backend.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Device`]: hal::Api::Device
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: wgc::hal_api::HalApi>(
        &self,
    ) -> Option<impl Deref<Target = A::Device> + WasmNotSendSync> {
        let device = self.inner.as_core_opt()?;
        unsafe { device.context.device_as_hal::<A>(device) }
    }

    /// Destroy this device.
    pub fn destroy(&self) {
        self.inner.destroy()
    }

    /// Set a DeviceLostCallback on this device.
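    ///
    /// # Example
    ///
    /// A minimal sketch that logs the reason when the device is lost:
    ///
    /// ```no_run
    /// fn install_lost_callback(device: &wgpu::Device) {
    ///     device.set_device_lost_callback(|reason, message| {
    ///         eprintln!("device lost ({reason:?}): {message}");
    ///     });
    /// }
    /// ```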
    pub fn set_device_lost_callback(
        &self,
        callback: impl Fn(DeviceLostReason, String) + Send + 'static,
    ) {
        self.inner.set_device_lost_callback(Box::new(callback))
    }

    /// Create a [`PipelineCache`] with initial data.
    ///
    /// This can be passed to [`Device::create_compute_pipeline`]
    /// and [`Device::create_render_pipeline`] to either accelerate these
    /// or add the cache results from those.
    ///
    /// # Safety
    ///
    /// If the `data` field of `desc` is set, it must have previously been returned from a call
    /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
    /// from an adapter with the same [`util::pipeline_cache_key`].
    /// This *is* compatible across wgpu versions, as any data format change will
    /// be accounted for.
    ///
    /// It is *not* supported to bring caches from previous direct uses of backend APIs
    /// into this method.
    ///
    /// # Errors
    ///
    /// Returns an error value if:
    /// * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
    /// * this device is invalid; or
    /// * the device is out of memory
    ///
    /// This method also returns an error value if:
    /// * The `fallback` field on `desc` is false; and
    /// * the `data` provided would not be used[^data_not_used]
    ///
    /// If an error value is used in subsequent calls, default caching will be used.
    ///
    /// [^saving]: We do recognise that saving this data to disk means this condition
    /// is impossible to fully prove. Consider the risks for your own application in this case.
    ///
    /// [^data_not_used]: This data may not be used if: the data was produced by a prior
    /// version of wgpu; or was created for an incompatible adapter, or there was a GPU driver
    /// update. In some cases, the data might not be used and a real value is returned;
    /// this is left to the discretion of GPU drivers.
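    ///
    /// # Example
    ///
    /// A minimal sketch of restoring a cache from bytes previously returned by
    /// [`PipelineCache::get_data`]. How those bytes are stored and loaded is up to
    /// the application; the descriptor fields shown follow the `data` and
    /// `fallback` fields described above.
    ///
    /// ```ignore
    /// fn restore_cache(device: &wgpu::Device, cache_bytes: Option<&[u8]>) -> wgpu::PipelineCache {
    ///     // SAFETY: `cache_bytes` must come from `PipelineCache::get_data` on an
    ///     // adapter with the same `util::pipeline_cache_key`.
    ///     unsafe {
    ///         device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
    ///             label: Some("example pipeline cache"),
    ///             data: cache_bytes,
    ///             fallback: true,
    ///         })
    ///     }
    /// }
    /// ```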
    #[must_use]
    pub unsafe fn create_pipeline_cache(
        &self,
        desc: &PipelineCacheDescriptor<'_>,
    ) -> PipelineCache {
        let cache = unsafe { self.inner.create_pipeline_cache(desc) };
        PipelineCache { inner: cache }
    }
}

/// [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`] must be enabled on the device in order to call these functions.
impl Device {
    /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`] enabled.
    /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must be satisfied:
    /// - For every geometry descriptor (called `geo_desc` below) of `sizes.descriptors`, the following must be satisfied:
    ///   - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
    ///     may be queried with [`Features::allowed_vertex_formats_for_blas`]).
    ///   - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
    ///
    /// [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`]: wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
    /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
    #[must_use]
    pub fn create_blas(
        &self,
        desc: &CreateBlasDescriptor<'_>,
        sizes: BlasGeometrySizeDescriptors,
    ) -> Blas {
        let (handle, blas) = self.inner.create_blas(desc, sizes);

        Blas {
            inner: blas,
            handle,
        }
    }

    /// Create a top level acceleration structure, used for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`] enabled.
    ///
    /// [`Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE`]: wgt::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
    #[must_use]
    pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
        let tlas = self.inner.create_tlas(desc);

        Tlas {
            inner: tlas,
            instances: vec![None; desc.max_instances as usize],
            lowest_unmodified: 0,
        }
    }
}

/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {
    pub(crate) inner: RequestDeviceErrorKind,
}
#[derive(Clone, Debug)]
pub(crate) enum RequestDeviceErrorKind {
    /// Error from [`wgpu_core`].
    // must match dependency cfg
    #[cfg(wgpu_core)]
    Core(wgc::instance::RequestDeviceError),

    /// Error from web API that was called by `wgpu` to request a device.
    ///
    /// (This is currently never used by the webgl backend, but it could be.)
    #[cfg(webgpu)]
    WebGpu(String),
}

static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);

impl fmt::Display for RequestDeviceError {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.fmt(_f),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(error) => {
                write!(_f, "{error}")
            }
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

impl error::Error for RequestDeviceError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.source(),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(_) => None,
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

#[cfg(wgpu_core)]
impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
    fn from(error: wgc::instance::RequestDeviceError) -> Self {
        Self {
            inner: RequestDeviceErrorKind::Core(error),
        }
    }
}

/// Type of the callback used to handle errors that are not captured by an error scope.
pub trait UncapturedErrorHandler: Fn(Error) + Send + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + 'static {}

/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
    /// Catch only internal errors.
    Internal,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);

/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(send_sync)]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(not(send_sync))]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + 'static>;

/// Errors resulting from usage of GPU APIs.
///
/// By default, errors translate into panics. Depending on the backend and circumstances,
/// errors may occur synchronously or asynchronously. When errors need to be handled, use
/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
#[derive(Debug)]
pub enum Error {
    /// Out of memory.
    OutOfMemory {
        /// Lower level source of the error.
        source: ErrorSource,
    },
    /// Validation error, signifying a bug in code or data provided to `wgpu`.
    Validation {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the validation error.
        description: String,
    },
    /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
    ///
    /// These could be due to internal implementation or system limits being reached.
    Internal {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the internal GPU error.
        description: String,
    },
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Error: Send, Sync);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
            Error::Internal { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
            Error::Internal { description, .. } => f.write_str(description),
        }
    }
}

// Copied from [`futures::task::noop_waker`].
// Needed until MSRV is 1.85 with `task::Waker::noop()` available
#[cfg(feature = "noop")]
mod waker {
    use core::ptr::null;
    use core::task::{RawWaker, RawWakerVTable, Waker};

    unsafe fn noop_clone(_data: *const ()) -> RawWaker {
        noop_raw_waker()
    }

    unsafe fn noop(_data: *const ()) {}

    const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);

    const fn noop_raw_waker() -> RawWaker {
        RawWaker::new(null(), &NOOP_WAKER_VTABLE)
    }

    /// Get a static reference to a [`Waker`] which
    /// does nothing when `wake()` is called on it.
    #[inline]
    pub fn noop_waker_ref() -> &'static Waker {
        struct SyncRawWaker(RawWaker);
        unsafe impl Sync for SyncRawWaker {}

        static NOOP_WAKER_INSTANCE: SyncRawWaker = SyncRawWaker(noop_raw_waker());

        // SAFETY: `Waker` is #[repr(transparent)] over its `RawWaker`.
        unsafe { &*(&NOOP_WAKER_INSTANCE.0 as *const RawWaker as *const Waker) }
    }
}