wgpu_core/device/life.rs
use crate::{
    device::{
        queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
        DeviceError, DeviceLostClosure,
    },
    resource::{self, Buffer, Texture, Trackable},
    snatch::SnatchGuard,
    SubmissionIndex,
};
use smallvec::SmallVec;

use std::sync::Arc;
use thiserror::Error;

/// A command submitted to the GPU for execution.
///
/// ## Keeping resources alive while the GPU is using them
///
/// [`wgpu_hal`] requires that, when a command is submitted to a queue, all the
/// resources it uses must remain alive until it has finished executing.
///
/// [`wgpu_hal`]: hal
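///
/// To satisfy this, each submission's resources are parked in an
/// `ActiveSubmission` until the GPU is done with them. A sketch of the
/// round trip through the owning [`LifetimeTracker`], with illustrative
/// variable names:
///
/// ```ignore
/// // At submit time: stash everything the submitted commands use.
/// life.track_submission(index, temp_resources, encoders_in_flight);
/// // Later, once `Device::fence` has reached `index`: retire the
/// // submission and collect its "on_submitted_work_done" closures.
/// let closures = life.triage_submissions(index, &command_allocator);
/// ```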
struct ActiveSubmission {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Temporary resources to be freed once this queue submission has completed.
    temp_resources: Vec<TempResource>,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<Arc<Buffer>>,

    /// Command buffers used by this submission, and the encoder that owns them.
    ///
    /// [`wgpu_hal::Queue::submit`] requires the submitted command buffers to
    /// remain alive until the submission has completed execution. Command
    /// encoders double as allocation pools for command buffers, so holding them
    /// here and cleaning them up in [`LifetimeTracker::triage_submissions`]
    /// satisfies that requirement.
    ///
    /// Once this submission has completed, the command buffers are reset and
    /// the command encoder is recycled.
    ///
    /// [`wgpu_hal::Queue::submit`]: hal::Queue::submit
    encoders: Vec<EncoderInFlight>,

    /// List of queue "on_submitted_work_done" closures to be called once this
    /// submission has completed.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

impl ActiveSubmission {
    /// Returns true if this submission contains the given buffer.
    ///
    /// This only uses constant-time operations per encoder.
    pub fn contains_buffer(&self, buffer: &Buffer) -> bool {
        for encoder in &self.encoders {
            // The ownership location of buffers depends on where the command encoder
            // came from. If it is the staging command encoder on the queue, it is
            // in the pending buffer list. If it came from a user command encoder,
            // it is in the tracker.

            if encoder.trackers.buffers.contains(buffer) {
                return true;
            }

            if encoder
                .pending_buffers
                .contains_key(&buffer.tracker_index())
            {
                return true;
            }
        }

        false
    }

    /// Returns true if this submission contains the given texture.
    ///
    /// This only uses constant-time operations per encoder.
    pub fn contains_texture(&self, texture: &Texture) -> bool {
        for encoder in &self.encoders {
            // The ownership location of textures depends on where the command encoder
            // came from. If it is the staging command encoder on the queue, it is
            // in the pending texture list. If it came from a user command encoder,
            // it is in the tracker.

            if encoder.trackers.textures.contains(texture) {
                return true;
            }

            if encoder
                .pending_textures
                .contains_key(&texture.tracker_index())
            {
                return true;
            }
        }

        false
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Tried to wait using a submission index ({0}) that has not been returned by a successful submission (last successful submission: {1})")]
    WrongSubmissionIndex(SubmissionIndex, SubmissionIndex),
    #[error("GPU got stuck :(")]
    StuckGpu,
}

/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// - Each buffer's `ResourceInfo::submission_index` records the index of the
///   most recent queue submission that uses that buffer.
///
/// - Calling `Global::buffer_map_async` adds the buffer to
///   `self.mapped`, and changes `Buffer::map_state` to prevent it
///   from being used in any new submissions.
///
/// - When the device is polled, the following `LifetimeTracker` methods decide
///   what should happen next:
///
///     1) `triage_mapped` drains `self.mapped`, checking the submission index
///        of each buffer against the queue submissions that have finished
///        execution. Buffers used by submissions still in flight go in
///        `self.active[index].mapped`, and the rest go into
///        `self.ready_to_map`.
///
///     2) `triage_submissions` moves entries in `self.active[i]` for completed
///        submissions to `self.ready_to_map`. At this point, both
///        `self.active` and `self.ready_to_map` are up to date with the given
///        submission index.
///
///     3) `handle_mapping` drains `self.ready_to_map` and actually maps the
///        buffers, collecting a list of notification closures to call.
///
/// Only `Global::buffer_map_async` clones a new `Arc` for the buffer; that
/// `Arc` is dropped only by `handle_mapping`.
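///
/// A minimal sketch of that polling sequence, in the order described above.
/// `device`, `last_done`, and `snatch_guard` are illustrative stand-ins
/// rather than the exact crate API:
///
/// ```ignore
/// let mut life = device.lock_life(); // hypothetical accessor
/// // 1) Sort buffers with pending map requests by submission status.
/// life.triage_mapped();
/// // 2) Retire submissions up through `last_done`, collecting their
/// //    "on_submitted_work_done" closures.
/// let work_done = life.triage_submissions(last_done, &device.command_allocator);
/// // 3) Map the buffers that are now ready, collecting their callbacks.
/// let callbacks = life.handle_mapping(device.raw(), &snatch_guard);
/// // Fire the mapping callbacks before the work-done closures.
/// ```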
pub(crate) struct LifetimeTracker {
    /// Buffers for which a call to [`Buffer::map_async`] has succeeded, but
    /// which haven't been examined by `triage_mapped` yet to decide when they
    /// can be mapped.
    mapped: Vec<Arc<Buffer>>,

    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<Arc<Buffer>>,

    /// Queue "on_submitted_work_done" closures that were registered while there
    /// were no pending submissions. These cannot be invoked immediately, as they
    /// must fire only _after_ all pending mapped-buffer callbacks have fired, so
    /// we defer them here until the next time the device is maintained.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,

    /// Closure to be called when the device is lost ("lose the device"). This
    /// is invoked directly by `device.lose`, or by the `UserCallbacks` returned
    /// from `maintain` once the device has been destroyed and its queues are
    /// empty.
    pub device_lost_closure: Option<DeviceLostClosure>,
}

impl LifetimeTracker {
    pub fn new() -> Self {
        Self {
            mapped: Vec::new(),
            active: Vec::new(),
            ready_to_map: Vec::new(),
            work_done_closures: SmallVec::new(),
            device_lost_closure: None,
        }
    }

    /// Returns true if there are no queue submissions still in flight.
    pub fn queue_empty(&self) -> bool {
        self.active.is_empty()
    }

    /// Start tracking resources associated with a new queue submission.
    pub fn track_submission(
        &mut self,
        index: SubmissionIndex,
        temp_resources: impl Iterator<Item = TempResource>,
        encoders: Vec<EncoderInFlight>,
    ) {
        self.active.push(ActiveSubmission {
            index,
            temp_resources: temp_resources.collect(),
            mapped: Vec::new(),
            encoders,
            work_done_closures: SmallVec::new(),
        });
    }

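    /// Add `value` to the list of buffers waiting to be mapped.
    /// `triage_mapped` will later decide whether it must wait for an
    /// in-flight submission or is already safe to map.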
    pub(crate) fn map(&mut self, value: &Arc<Buffer>) {
        self.mapped.push(value.clone());
    }

    /// Returns the submission index of the most recent submission that uses the
    /// given buffer.
    pub fn get_buffer_latest_submission_index(&self, buffer: &Buffer) -> Option<SubmissionIndex> {
        // We iterate in reverse order, so that we can bail out early as soon
        // as we find a hit.
        self.active.iter().rev().find_map(|submission| {
            if submission.contains_buffer(buffer) {
                Some(submission.index)
            } else {
                None
            }
        })
    }

    /// Returns the submission index of the most recent submission that uses the
    /// given texture.
    pub fn get_texture_latest_submission_index(
        &self,
        texture: &Texture,
    ) -> Option<SubmissionIndex> {
        // We iterate in reverse order, so that we can bail out early as soon
        // as we find a hit.
        self.active.iter().rev().find_map(|submission| {
            if submission.contains_texture(texture) {
                Some(submission.index)
            } else {
                None
            }
        })
    }

    /// Sort out the consequences of completed submissions.
    ///
    /// Assume that all submissions up through `last_done` have completed.
    ///
    /// - Buffers used by those submissions are now ready to map, if requested.
    ///   Add any buffers in the submission's [`mapped`] list to
    ///   [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`]
    ///   will find them.
    ///
    /// Returns a list of [`SubmittedWorkDoneClosure`]s to run.
    ///
    /// [`mapped`]: ActiveSubmission::mapped
    /// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
    /// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
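    ///
    /// A concrete sketch of the retirement rule, with illustrative indices:
    ///
    /// ```ignore
    /// // With active submission indices [3, 5, 8] and `last_done == 5`,
    /// // the entries for 3 and 5 are retired: their buffers move to
    /// // `ready_to_map` and their encoders are recycled. 8 stays in flight.
    /// let closures = life.triage_submissions(5, &command_allocator);
    /// ```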
    #[must_use]
    pub fn triage_submissions(
        &mut self,
        last_done: SubmissionIndex,
        command_allocator: &crate::command::CommandAllocator,
    ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
        profiling::scope!("triage_submissions");

        //TODO: enable when `is_sorted_by_key` is stable
        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
        let done_count = self
            .active
            .iter()
            .position(|a| a.index > last_done)
            .unwrap_or(self.active.len());

        let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect();
        for a in self.active.drain(..done_count) {
            self.ready_to_map.extend(a.mapped);
            for encoder in a.encoders {
                let raw = unsafe { encoder.land() };
                command_allocator.release_encoder(raw);
            }
            drop(a.temp_resources);
            work_done_closures.extend(a.work_done_closures);
        }
        work_done_closures
    }

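    /// Add `temp_resource` to the [`ActiveSubmission`] whose index is
    /// `last_submit_index`, so that it is freed when that submission
    /// completes.
    ///
    /// If no submission with that index is still in flight, the resource is
    /// dropped immediately.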
    pub fn schedule_resource_destruction(
        &mut self,
        temp_resource: TempResource,
        last_submit_index: SubmissionIndex,
    ) {
        let resources = self
            .active
            .iter_mut()
            .find(|a| a.index == last_submit_index)
            .map(|a| &mut a.temp_resources);
        if let Some(resources) = resources {
            resources.push(temp_resource);
        }
    }

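    /// Register `closure` to run once the most recent submission completes,
    /// or on the next device maintenance if no submission is in flight.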
    pub fn add_work_done_closure(&mut self, closure: SubmittedWorkDoneClosure) {
        match self.active.last_mut() {
            Some(active) => {
                active.work_done_closures.push(closure);
            }
            // We must defer the closure until all previously occurring map_async closures
            // have fired. This is required by the spec.
            None => {
                self.work_done_closures.push(closure);
            }
        }
    }

    /// Determine which buffers are ready to map, and which must wait for the
    /// GPU.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    pub(crate) fn triage_mapped(&mut self) {
        if self.mapped.is_empty() {
            return;
        }

        for buffer in self.mapped.drain(..) {
            let submission = self
                .active
                .iter_mut()
                .rev()
                .find(|a| a.contains_buffer(&buffer));

            submission
                .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
                .push(buffer);
        }
    }

    /// Map the buffers in `self.ready_to_map`.
    ///
    /// Returns a list of mapping notifications to send.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    #[must_use]
    pub(crate) fn handle_mapping(
        &mut self,
        raw: &dyn hal::DynDevice,
        snatch_guard: &SnatchGuard,
    ) -> Vec<super::BufferMapPendingClosure> {
        if self.ready_to_map.is_empty() {
            return Vec::new();
        }
        let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
            Vec::with_capacity(self.ready_to_map.len());

        for buffer in self.ready_to_map.drain(..) {
            // This _cannot_ be inlined into the match. If it is, the lock will be held
            // open through the whole match, resulting in a deadlock when we try to re-lock
            // the buffer back to active.
            let mapping = std::mem::replace(
                &mut *buffer.map_state.lock(),
                resource::BufferMapState::Idle,
            );
            let pending_mapping = match mapping {
                resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
                // Mapping cancelled
                resource::BufferMapState::Idle => continue,
                // Mapping queued at least twice by map -> unmap -> map
                // and was already successfully mapped below
                resource::BufferMapState::Active { .. } => {
                    *buffer.map_state.lock() = mapping;
                    continue;
                }
                _ => panic!("No pending mapping."),
            };
            let status = if pending_mapping.range.start != pending_mapping.range.end {
                let host = pending_mapping.op.host;
                let size = pending_mapping.range.end - pending_mapping.range.start;
                match super::map_buffer(
                    raw,
                    &buffer,
                    pending_mapping.range.start,
                    size,
                    host,
                    snatch_guard,
                ) {
                    Ok(mapping) => {
                        *buffer.map_state.lock() = resource::BufferMapState::Active {
                            mapping,
                            range: pending_mapping.range.clone(),
                            host,
                        };
                        Ok(())
                    }
                    Err(e) => {
                        log::error!("Mapping failed: {e}");
                        Err(e)
                    }
                }
            } else {
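                // An empty mapped range needs no backing memory; record an
                // `Active` state with a dangling pointer that is never
                // dereferenced.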
                *buffer.map_state.lock() = resource::BufferMapState::Active {
                    mapping: hal::BufferMapping {
                        ptr: std::ptr::NonNull::dangling(),
                        is_coherent: true,
                    },
                    range: pending_mapping.range,
                    host: pending_mapping.op.host,
                };
                Ok(())
            };
            pending_callbacks.push((pending_mapping.op, status));
        }
        pending_callbacks
    }
418}