wgpu_core/device/life.rs

use alloc::{sync::Arc, vec::Vec};

use smallvec::SmallVec;
use thiserror::Error;

use crate::{
    device::{
        queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
        DeviceError,
    },
    resource::{Buffer, Texture, Trackable},
    snatch::SnatchGuard,
    SubmissionIndex,
};

/// A command submitted to the GPU for execution.
///
/// ## Keeping resources alive while the GPU is using them
///
/// [`wgpu_hal`] requires that, when a command is submitted to a queue, all the
/// resources it uses must remain alive until it has finished executing.
///
/// [`wgpu_hal`]: hal
/// [`ResourceInfo::submission_index`]: crate::resource::ResourceInfo
struct ActiveSubmission {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<Arc<Buffer>>,

    /// Command buffers used by this submission, and the encoder that owns them.
    ///
    /// [`wgpu_hal::Queue::submit`] requires the submitted command buffers to
    /// remain alive until the submission has completed execution. Command
    /// encoders double as allocation pools for command buffers, so holding them
    /// here and cleaning them up in [`LifetimeTracker::triage_submissions`]
    /// satisfies that requirement.
    ///
    /// Once this submission has completed, the command buffers are reset and
    /// the command encoder is recycled.
    ///
    /// [`wgpu_hal::Queue::submit`]: hal::Queue::submit
    encoders: Vec<EncoderInFlight>,

    /// List of queue "on_submitted_work_done" closures to be called once this
    /// submission has completed.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

impl ActiveSubmission {
    /// Returns true if this submission contains the given buffer.
    ///
    /// This only uses constant-time operations.
    pub fn contains_buffer(&self, buffer: &Buffer) -> bool {
        for encoder in &self.encoders {
            // The ownership location of buffers depends on where the command encoder
            // came from. If it is the staging command encoder on the queue, it is
            // in the pending buffer list. If it came from a user command encoder,
            // it is in the tracker.

            if encoder.trackers.buffers.contains(buffer) {
                return true;
            }

            if encoder
                .pending_buffers
                .contains_key(&buffer.tracker_index())
            {
                return true;
            }
        }

        false
    }

    /// Returns true if this submission contains the given texture.
    ///
    /// This only uses constant-time operations.
    pub fn contains_texture(&self, texture: &Texture) -> bool {
        for encoder in &self.encoders {
            // The ownership location of textures depends on where the command encoder
            // came from. If it is the staging command encoder on the queue, it is
            // in the pending texture list. If it came from a user command encoder,
            // it is in the tracker.

            if encoder.trackers.textures.contains(texture) {
                return true;
            }

            if encoder
                .pending_textures
                .contains_key(&texture.tracker_index())
            {
                return true;
            }
        }

        false
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Tried to wait using a submission index ({0}) that has not been returned by a successful submission (last successful submission: {1})")]
    WrongSubmissionIndex(SubmissionIndex, SubmissionIndex),
    #[error("Timed out trying to wait for the given submission index.")]
    Timeout,
}

impl WaitIdleError {
    pub fn to_poll_error(&self) -> Option<wgt::PollError> {
        match self {
            WaitIdleError::Timeout => Some(wgt::PollError::Timeout),
            _ => None,
        }
    }
}

/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// -   Each buffer's `ResourceInfo::submission_index` records the index of the
///     most recent queue submission that uses that buffer.
///
/// -   When the device is polled, the following `LifetimeTracker` methods decide
///     what should happen next:
///
///     1)  `triage_submissions` moves entries in `self.active[i]` for completed
///         submissions to `self.ready_to_map`. At this point, both
///         `self.active` and `self.ready_to_map` are up to date with the given
///         submission index.
///
///     2)  `handle_mapping` drains `self.ready_to_map` and actually maps the
///         buffers, collecting a list of notification closures to call.
///
/// Only calling `Global::buffer_map_async` clones a new `Arc` for the
/// buffer. This new `Arc` is only dropped by `handle_mapping`.
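///
/// As a rough sketch of how a device poll might drive this type (illustrative
/// only, not the actual device-maintenance code; the names `tracker`,
/// `completed_index`, and `snatch_guard` are assumed locals):
///
/// ```ignore
/// // `completed_index` is the highest submission index the GPU has finished
/// // executing, e.g. the value most recently signaled on `Device::fence`.
/// let work_done_closures = tracker.triage_submissions(completed_index);
/// let mapping_closures = tracker.handle_mapping(&snatch_guard);
/// // The caller is then expected to fire `mapping_closures` before
/// // `work_done_closures`, per the ordering described on
/// // `LifetimeTracker::work_done_closures`.
/// ```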
pub(crate) struct LifetimeTracker {
    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<Arc<Buffer>>,

    /// Queue "on_submitted_work_done" closures that were registered while there were
    /// no pending submissions. These cannot be invoked immediately, as they must run
    /// _after_ all mapped buffer callbacks have been called, so we defer them here
    /// until the next time the device is maintained.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

impl LifetimeTracker {
    pub fn new() -> Self {
        Self {
            active: Vec::new(),
            ready_to_map: Vec::new(),
            work_done_closures: SmallVec::new(),
        }
    }

    /// Return true if there are no queue submissions still in flight.
    pub fn queue_empty(&self) -> bool {
        self.active.is_empty()
    }

    /// Start tracking resources associated with a new queue submission.
    pub fn track_submission(&mut self, index: SubmissionIndex, encoders: Vec<EncoderInFlight>) {
        self.active.push(ActiveSubmission {
            index,
            mapped: Vec::new(),
            encoders,
            work_done_closures: SmallVec::new(),
        });
    }

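    /// Register a buffer to be mapped once the GPU is done with it.
    ///
    /// If some in-flight submission still uses `buffer`, the buffer is added to
    /// that submission's [`mapped`] list and the submission's index is returned,
    /// telling the caller which submission must complete before the mapping can
    /// proceed. Otherwise the buffer goes straight to [`ready_to_map`] and
    /// `None` is returned.
    ///
    /// [`mapped`]: ActiveSubmission::mapped
    /// [`ready_to_map`]: LifetimeTracker::ready_to_map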
    pub(crate) fn map(&mut self, buffer: &Arc<Buffer>) -> Option<SubmissionIndex> {
        // Determine whether this buffer can be mapped right away, or must wait
        // for the GPU to finish an in-flight submission that still uses it.
        let submission = self
            .active
            .iter_mut()
            .rev()
            .find(|a| a.contains_buffer(buffer));

        let maybe_submission_index = submission.as_ref().map(|s| s.index);

        submission
            .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
            .push(buffer.clone());

        maybe_submission_index
    }

    /// Returns the submission index of the most recent submission that uses the
    /// given buffer.
    pub fn get_buffer_latest_submission_index(&self, buffer: &Buffer) -> Option<SubmissionIndex> {
        // We iterate in reverse order, so that we can bail out early as soon
        // as we find a hit.
        self.active.iter().rev().find_map(|submission| {
            if submission.contains_buffer(buffer) {
                Some(submission.index)
            } else {
                None
            }
        })
    }

    /// Returns the submission index of the most recent submission that uses the
    /// given texture.
    pub fn get_texture_latest_submission_index(
        &self,
        texture: &Texture,
    ) -> Option<SubmissionIndex> {
        // We iterate in reverse order, so that we can bail out early as soon
        // as we find a hit.
        self.active.iter().rev().find_map(|submission| {
            if submission.contains_texture(texture) {
                Some(submission.index)
            } else {
                None
            }
        })
    }

    /// Sort out the consequences of completed submissions.
    ///
    /// Assume that all submissions up through `last_done` have completed.
    ///
    /// -   Buffers used by those submissions are now ready to map, if requested.
    ///     Add any buffers in the submission's [`mapped`] list to
    ///     [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`]
    ///     will find them.
    ///
    /// Return a list of [`SubmittedWorkDoneClosure`]s to run.
    ///
    /// [`mapped`]: ActiveSubmission::mapped
    /// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
    /// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
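    ///
    /// For example (illustrative only): if `self.active` holds submissions with
    /// indices `[3, 4, 5]` and `last_done` is `4`, the entries for `3` and `4`
    /// are drained: their `mapped` buffers move to `self.ready_to_map`, their
    /// encoders are dropped, and their work-done closures (together with any
    /// closures deferred in `self.work_done_closures`) are returned. The entry
    /// for `5` stays in `self.active`.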
    #[must_use]
    pub fn triage_submissions(
        &mut self,
        last_done: SubmissionIndex,
    ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
        profiling::scope!("triage_submissions");

        //TODO: enable when `is_sorted_by_key` is stable
        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
        let done_count = self
            .active
            .iter()
            .position(|a| a.index > last_done)
            .unwrap_or(self.active.len());

        let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect();
        for a in self.active.drain(..done_count) {
            self.ready_to_map.extend(a.mapped);
            for encoder in a.encoders {
                // This involves actually decrementing the ref count of all command buffer
                // resources, so can be _very_ expensive.
                profiling::scope!("drop command buffer trackers");
                drop(encoder);
            }
            work_done_closures.extend(a.work_done_closures);
        }
        work_done_closures
    }

    pub fn schedule_resource_destruction(
        &mut self,
        temp_resource: TempResource,
        last_submit_index: SubmissionIndex,
    ) {
        let resources = self
            .active
            .iter_mut()
            .find(|a| a.index == last_submit_index)
            .map(|a| {
                // Because this resource's `last_submit_index` matches `a.index`,
                // we know that we must have done something with the resource,
                // so `a.encoders` should not be empty.
                &mut a.encoders.last_mut().unwrap().temp_resources
            });
        if let Some(resources) = resources {
            resources.push(temp_resource);
        }
    }

    pub fn add_work_done_closure(
        &mut self,
        closure: SubmittedWorkDoneClosure,
    ) -> Option<SubmissionIndex> {
        match self.active.last_mut() {
            Some(active) => {
                active.work_done_closures.push(closure);
                Some(active.index)
            }
            // We must defer the closure until all previously occurring map_async closures
            // have fired. This is required by the spec.
            None => {
                self.work_done_closures.push(closure);
                None
            }
        }
    }

    /// Map the buffers in `self.ready_to_map`.
    ///
    /// Return a list of mapping notifications to send.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    #[must_use]
    pub(crate) fn handle_mapping(
        &mut self,
        snatch_guard: &SnatchGuard,
    ) -> Vec<super::BufferMapPendingClosure> {
        if self.ready_to_map.is_empty() {
            return Vec::new();
        }
        let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
            Vec::with_capacity(self.ready_to_map.len());

        for buffer in self.ready_to_map.drain(..) {
            match buffer.map(snatch_guard) {
                Some(cb) => pending_callbacks.push(cb),
                None => continue,
            }
        }
        pending_callbacks
    }
}