Eagerly release GPU resources when we lose the device. #4851

Merged · 3 commits · Dec 20, 2023
Changes from 1 commit
Eagerly release GPU resources when we lose the device.
bradwerth committed Dec 8, 2023
commit 6814c55db7986a34455045ca1e6267f6cf7c6227
45 changes: 44 additions & 1 deletion wgpu-core/src/device/life.rs
@@ -236,7 +236,7 @@ pub(crate) struct LifetimeTracker<A: HalApi> {
     ready_to_map: Vec<Arc<Buffer<A>>>,
 
     /// Queue "on_submitted_work_done" closures that were initiated for while there is no
-    /// currently pending submissions. These cannot be immeidately invoked as they
+    /// currently pending submissions. These cannot be immediately invoked as they
     /// must happen _after_ all mapped buffer callbacks are mapped, so we defer them
     /// here until the next time the device is maintained.
     work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
@@ -947,4 +947,47 @@ impl<A: HalApi> LifetimeTracker<A> {
         }
         pending_callbacks
     }
+
+    pub(crate) fn release_gpu_resources(&mut self) {
+        // This is called when the device is lost, which makes every associated
+        // resource invalid and unusable. This is an opportunity to release all of
+        // the underlying gpu resources, even though the objects remain visible to
+        // the user agent. We purge this memory naturally when resources have been
+        // moved into the appropriate buckets, so this function just needs to
+        // initiate movement into those buckets, and it can do that by calling
+        // "destroy" on all the resources we know about which aren't already marked
+        // for cleanup.
+
+        // During these iterations, we discard all errors. We don't care!
+
+        // Destroy all the mapped buffers.
+        for buffer in &self.mapped {
+            let _ = buffer.destroy();
+        }
+
+        // Destroy all the unmapped buffers.
+        for buffer in &self.ready_to_map {
+            let _ = buffer.destroy();
+        }
+
+        // Destroy all the future_suspected_buffers.
+        for buffer in &self.future_suspected_buffers {
+            let _ = buffer.destroy();
+        }
+
+        // Destroy the buffers in all active submissions.
+        for submission in &self.active {
+            for buffer in &submission.mapped {
+                let _ = buffer.destroy();
+            }
+        }
+
+        // Destroy all the future_suspected_textures.
+        // TODO: texture.destroy is not implemented
+        /*
+        for texture in &self.future_suspected_textures {
+            let _ = texture.destroy();
+        }
+        */
+    }
 }
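The new `release_gpu_resources` above walks every bucket of resources the tracker still knows about, calls `destroy()` on each, and discards the returned `Result`, since a resource that is already destroyed is exactly the end state we want. The self-contained sketch below illustrates that pattern with hypothetical `Tracker` and `FakeBuffer` stand-ins rather than wgpu-core's real types.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

/// Stand-in for a GPU buffer: `destroy` is fallible, returning an error
/// if the underlying resource has already been released.
struct FakeBuffer {
    label: &'static str,
    destroyed: AtomicBool,
}

impl FakeBuffer {
    fn destroy(&self) -> Result<(), String> {
        if self.destroyed.swap(true, Ordering::AcqRel) {
            Err(format!("{} already destroyed", self.label))
        } else {
            println!("released GPU memory for {}", self.label);
            Ok(())
        }
    }
}

/// Stand-in for the lifetime tracker: several buckets of still-tracked resources.
struct Tracker {
    mapped: Vec<Arc<FakeBuffer>>,
    ready_to_map: Vec<Arc<FakeBuffer>>,
}

impl Tracker {
    /// Same approach as the diff: destroy everything we still know about and
    /// ignore errors, so GPU memory is freed even though the user-visible
    /// handles stay alive.
    fn release_gpu_resources(&mut self) {
        for buffer in self.mapped.iter().chain(self.ready_to_map.iter()) {
            let _ = buffer.destroy();
        }
    }
}

fn main() {
    let shared = Arc::new(FakeBuffer {
        label: "staging",
        destroyed: AtomicBool::new(false),
    });
    let mut tracker = Tracker {
        mapped: vec![shared.clone()],
        // The same buffer may sit in more than one bucket; the second destroy
        // fails and is silently ignored, just like in the diff above.
        ready_to_map: vec![
            shared,
            Arc::new(FakeBuffer {
                label: "uniforms",
                destroyed: AtomicBool::new(false),
            }),
        ],
    };
    tracker.release_gpu_resources();
}
```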
31 changes: 21 additions & 10 deletions wgpu-core/src/device/resource.rs
@@ -378,15 +378,19 @@ impl<A: HalApi> Device<A> {
         // our caller. This will complete the steps for both destroy and for
         // "lose the device".
         let mut device_lost_invocations = SmallVec::new();
-        if !self.is_valid()
-            && life_tracker.queue_empty()
-            && life_tracker.device_lost_closure.is_some()
-        {
-            device_lost_invocations.push(DeviceLostInvocation {
-                closure: life_tracker.device_lost_closure.take().unwrap(),
-                reason: DeviceLostReason::Destroyed,
-                message: String::new(),
-            });
+        if !self.is_valid() && life_tracker.queue_empty() {
+            // We can release gpu resources associated with this device.
+            life_tracker.release_gpu_resources();
+
+            // If we have a DeviceLostClosure, build an invocation with the
+            // reason DeviceLostReason::Destroyed and no message.
+            if life_tracker.device_lost_closure.is_some() {
+                device_lost_invocations.push(DeviceLostInvocation {
+                    closure: life_tracker.device_lost_closure.take().unwrap(),
+                    reason: DeviceLostReason::Destroyed,
+                    message: String::new(),
+                });
+            }
         }
 
         let closures = UserClosures {
@@ -3339,9 +3343,13 @@ impl<A: HalApi> Device<A> {
         self.valid.store(false, Ordering::Release);
 
         // 1) Resolve the GPUDevice device.lost promise.
-        let closure = self.lock_life().device_lost_closure.take();
+        let mut life_lock = self.lock_life();
+        let closure = life_lock.device_lost_closure.take();
         if let Some(device_lost_closure) = closure {
+            // It's important to not hold the lock while calling the closure.
+            drop(life_lock);
             device_lost_closure.call(DeviceLostReason::Unknown, message.to_string());
+            life_lock = self.lock_life();
         }
 
         // 2) Complete any outstanding mapAsync() steps.
@@ -3351,6 +3359,9 @@ impl<A: HalApi> Device<A> {
         // since that will prevent any new work from being added to the queues.
         // Future calls to poll_devices will continue to check the work queues
         // until they are cleared, and then drop the device.
+
+        // Eagerly release GPU resources.
+        life_lock.release_gpu_resources();
     }
 }
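The `Device::lose` change above is mostly about lock discipline: the device-lost closure is user code, so the life-tracker lock is dropped before invoking it and reacquired afterwards so that `release_gpu_resources` can still run under the lock. Below is a minimal, std-only sketch of that take/drop/reacquire pattern; the `LifeTracker` and `Device` types here are hypothetical stand-ins, not wgpu-core's actual structures.

```rust
use std::sync::{Mutex, MutexGuard};

type LostCallback = Box<dyn FnOnce(&str) + Send>;

/// Hypothetical stand-in for the state guarded by the device's life lock.
struct LifeTracker {
    device_lost_closure: Option<LostCallback>,
    resources_released: bool,
}

struct Device {
    life: Mutex<LifeTracker>,
}

impl Device {
    fn lock_life(&self) -> MutexGuard<'_, LifeTracker> {
        self.life.lock().unwrap()
    }

    fn lose(&self, message: &str) {
        let mut life_lock = self.lock_life();
        // Take the closure out while holding the lock...
        let closure = life_lock.device_lost_closure.take();
        if let Some(device_lost_closure) = closure {
            // ...but never run user code while the lock is held: it could
            // call back into the device and deadlock on the same mutex.
            drop(life_lock);
            device_lost_closure(message);
            life_lock = self.lock_life();
        }
        // Back under the lock, eagerly release the tracked GPU resources.
        life_lock.resources_released = true;
    }
}

fn main() {
    let device = Device {
        life: Mutex::new(LifeTracker {
            device_lost_closure: Some(Box::new(|msg| println!("device lost: {msg}"))),
            resources_released: false,
        }),
    };
    device.lose("adapter removed");
    assert!(device.lock_life().resources_released);
}
```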
