diff --git a/wgpu-bindings/wgpu.h b/wgpu-bindings/wgpu.h
index 7a1055302..c8dfac8ab 100644
--- a/wgpu-bindings/wgpu.h
+++ b/wgpu-bindings/wgpu.h
@@ -387,3 +387,7 @@ WGPUTextureViewId wgpu_texture_create_default_texture_view(WGPUTextureId texture
 
 WGPUTextureViewId wgpu_texture_create_texture_view(WGPUTextureId texture_id,
                                                    const WGPUTextureViewDescriptor *desc);
+
+void wgpu_texture_destroy(WGPUTextureId texture_id);
+
+void wgpu_texture_view_destroy(WGPUTextureViewId _texture_view_id);
diff --git a/wgpu-native/src/command/allocator.rs b/wgpu-native/src/command/allocator.rs
index d37b4e5e4..c9b9b7ddd 100644
--- a/wgpu-native/src/command/allocator.rs
+++ b/wgpu-native/src/command/allocator.rs
@@ -1,5 +1,5 @@
 use super::CommandBuffer;
-use {DeviceId, Stored};
+use {DeviceId, LifeGuard, Stored};
 use track::{Tracker};
 
 use hal::command::RawCommandBuffer;
@@ -61,8 +61,8 @@ impl<B: hal::Backend> CommandAllocator<B> {
         }
     }
 
-    pub fn allocate(
-        &self, device_id: DeviceId, device: &B::Device
+    pub(crate) fn allocate(
+        &self, device_id: Stored<DeviceId>, device: &B::Device
     ) -> CommandBuffer<B> {
         let thread_id = thread::current().id();
         let mut inner = self.inner.lock().unwrap();
@@ -90,7 +90,8 @@ impl<B: hal::Backend> CommandAllocator<B> {
             raw: vec![init],
             fence,
             recorded_thread_id: thread_id,
-            device_id: Stored(device_id),
+            device_id,
+            life_guard: LifeGuard::new(),
             buffer_tracker: Tracker::new(),
             texture_tracker: Tracker::new(),
         }
diff --git a/wgpu-native/src/command/compute.rs b/wgpu-native/src/command/compute.rs
index 05bcc7703..9f72c54f4 100644
--- a/wgpu-native/src/command/compute.rs
+++ b/wgpu-native/src/command/compute.rs
@@ -16,10 +16,10 @@ pub struct ComputePass<B: hal::Backend> {
 }
 
 impl<B: hal::Backend> ComputePass<B> {
-    pub fn new(raw: B::CommandBuffer, cmb_id: CommandBufferId) -> Self {
+    pub(crate) fn new(raw: B::CommandBuffer, cmb_id: Stored<CommandBufferId>) -> Self {
         ComputePass {
             raw,
-            cmb_id: Stored(cmb_id),
+            cmb_id,
         }
     }
 }
@@ -34,10 +34,10 @@ pub extern "C" fn wgpu_compute_pass_end_pass(
 
     HUB.command_buffers
         .lock()
-        .get_mut(pass.cmb_id.0)
+        .get_mut(pass.cmb_id.value)
         .raw
         .push(pass.raw);
-    pass.cmb_id.0
+    pass.cmb_id.value
 }
 
 pub extern "C" fn wgpu_compute_pass_set_bind_group(
diff --git a/wgpu-native/src/command/mod.rs b/wgpu-native/src/command/mod.rs
index ec348ee93..feb10fe7c 100644
--- a/wgpu-native/src/command/mod.rs
+++ b/wgpu-native/src/command/mod.rs
@@ -2,7 +2,7 @@ mod allocator;
 mod compute;
 mod render;
 
-pub use self::allocator::CommandAllocator;
+pub(crate) use self::allocator::CommandAllocator;
 pub use self::compute::*;
 pub use self::render::*;
 
@@ -10,7 +10,7 @@ use hal::{self, Device};
 use hal::command::RawCommandBuffer;
 
 use {
-    B, Color, Origin3d, Stored, BufferUsageFlags, TextureUsageFlags,
+    B, Color, LifeGuard, Origin3d, Stored, BufferUsageFlags, TextureUsageFlags, WeaklyStored,
     BufferId, CommandBufferId, ComputePassId, DeviceId, RenderPassId, TextureId, TextureViewId,
 };
 use conv;
@@ -83,6 +83,7 @@ pub struct CommandBuffer<B: hal::Backend> {
     fence: B::Fence,
     recorded_thread_id: ThreadId,
     device_id: Stored<DeviceId>,
+    life_guard: LifeGuard,
     pub(crate) buffer_tracker: BufferTracker,
     pub(crate) texture_tracker: TextureTracker,
 }
@@ -139,7 +140,7 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
     let mut cmb_guard = HUB.command_buffers.lock();
     let cmb = cmb_guard.get_mut(command_buffer_id);
     let device_guard = HUB.devices.lock();
-    let device = device_guard.get(cmb.device_id.0);
+    let device = device_guard.get(cmb.device_id.value);
     let view_guard = HUB.texture_views.lock();
 
     let mut current_comb = device.com_allocator.extend(cmb);
@@ -160,7 +161,7 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
             } else {
                 extent = Some(view.extent);
             }
-            let query = tracker.query(view.texture_id.0, TextureUsageFlags::empty());
+            let query = tracker.query(&view.texture_id, TextureUsageFlags::empty());
             let (_, layout) = conv::map_texture_state(
                 query.usage,
                 hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
@@ -185,7 +186,7 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
             } else {
                 extent = Some(view.extent);
             }
-            let query = tracker.query(view.texture_id.0, TextureUsageFlags::empty());
+            let query = tracker.query(&view.texture_id, TextureUsageFlags::empty());
             let (_, layout) = conv::map_texture_state(query.usage, hal::format::Aspects::COLOR);
             hal::pass::Attachment {
                 format: Some(conv::map_texture_format(view.format)),
@@ -234,8 +235,8 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
     let fb_key = FramebufferKey {
         attachments: desc.color_attachments
             .iter()
-            .map(|at| Stored(at.attachment))
-            .chain(desc.depth_stencil_attachment.as_ref().map(|at| Stored(at.attachment)))
+            .map(|at| WeaklyStored(at.attachment))
+            .chain(desc.depth_stencil_attachment.as_ref().map(|at| WeaklyStored(at.attachment)))
             .collect(),
     };
     let framebuffer = match framebuffer_cache.entry(fb_key) {
@@ -246,7 +247,7 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
                 .key()
                 .attachments
                 .iter()
-                .map(|&Stored(id)| &view_guard.get(id).raw);
+                .map(|&WeaklyStored(id)| &view_guard.get(id).raw);
 
             device.raw
                 .create_framebuffer(&render_pass, attachments, extent.unwrap())
@@ -289,7 +290,10 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
         .lock()
         .register(RenderPass::new(
             current_comb,
-            command_buffer_id,
+            Stored {
+                value: command_buffer_id,
+                ref_count: cmb.life_guard.ref_count.clone(),
+            },
         ))
 }
 
@@ -301,8 +305,12 @@ pub extern "C" fn wgpu_command_buffer_begin_compute_pass(
     let cmb = cmb_guard.get_mut(command_buffer_id);
 
     let raw = cmb.raw.pop().unwrap();
+    let stored = Stored {
+        value: command_buffer_id,
+        ref_count: cmb.life_guard.ref_count.clone(),
+    };
 
     HUB.compute_passes
         .lock()
-        .register(ComputePass::new(raw, command_buffer_id))
+        .register(ComputePass::new(raw, stored))
 }
diff --git a/wgpu-native/src/command/render.rs b/wgpu-native/src/command/render.rs
index 417373173..9a3840159 100644
--- a/wgpu-native/src/command/render.rs
+++ b/wgpu-native/src/command/render.rs
@@ -17,13 +17,13 @@ pub struct RenderPass<B: hal::Backend> {
 }
 
 impl<B: hal::Backend> RenderPass<B> {
-    pub fn new(
+    pub(crate) fn new(
         raw: B::CommandBuffer,
-        cmb_id: CommandBufferId,
+        cmb_id: Stored<CommandBufferId>,
     ) -> Self {
         RenderPass {
             raw,
-            cmb_id: Stored(cmb_id),
+            cmb_id,
             buffer_tracker: BufferTracker::new(),
             texture_tracker: TextureTracker::new(),
         }
@@ -40,7 +40,7 @@ pub extern "C" fn wgpu_render_pass_end_pass(
     pass.raw.end_render_pass();
 
     let mut cmb_guard = HUB.command_buffers.lock();
-    let cmb = cmb_guard.get_mut(pass.cmb_id.0);
+    let cmb = cmb_guard.get_mut(pass.cmb_id.value);
 
     if let Some(ref mut last) = cmb.raw.last_mut() {
         CommandBuffer::insert_barriers(
@@ -52,5 +52,5 @@ pub extern "C" fn wgpu_render_pass_end_pass(
     }
 
     cmb.raw.push(pass.raw);
-    pass.cmb_id.0
+    pass.cmb_id.value
 }
diff --git a/wgpu-native/src/device.rs b/wgpu-native/src/device.rs
index ee53e951c..156f9ef45 100644
--- a/wgpu-native/src/device.rs
+++ b/wgpu-native/src/device.rs
@@ -1,9 +1,10 @@
 use {back, binding_model, command, conv, pipeline, resource};
-use registry::{HUB, Items, Registry};
+use registry::{HUB, Items, ItemsGuard, Registry};
 use track::{BufferTracker, TextureTracker};
 use {
-    CommandBuffer, Stored, TextureUsageFlags,
-    BindGroupLayoutId, BlendStateId, CommandBufferId, DepthStencilStateId,
+    CommandBuffer, LifeGuard, RefCount, Stored, SubmissionIndex, WeaklyStored,
+    TextureUsageFlags,
+    BindGroupLayoutId, BlendStateId, BufferId, CommandBufferId, DepthStencilStateId,
     DeviceId, PipelineLayoutId, QueueId, RenderPipelineId, ShaderModuleId, TextureId, TextureViewId,
 };
 
@@ -16,6 +17,7 @@ use rendy_memory::{allocator, Config, Heaps};
 use std::{ffi, slice};
 use std::collections::hash_map::{Entry, HashMap};
 use std::sync::Mutex;
+use std::sync::atomic::Ordering;
 
 
 #[derive(Hash, PartialEq)]
@@ -26,21 +28,115 @@ impl Eq for RenderPassKey {}
 
 #[derive(Hash, PartialEq)]
 pub(crate) struct FramebufferKey {
-    pub attachments: Vec<Stored<TextureViewId>>,
+    pub attachments: Vec<WeaklyStored<TextureViewId>>,
 }
 impl Eq for FramebufferKey {}
 
+enum ResourceId {
+    Buffer(BufferId),
+    Texture(TextureId),
+}
+
+enum Resource<B: hal::Backend> {
+    Buffer(resource::Buffer<B>),
+    Texture(resource::Texture<B>),
+}
+
+struct ActiveFrame<B: hal::Backend> {
+    submission_index: SubmissionIndex,
+    fence: B::Fence,
+    resources: Vec<Resource<B>>,
+}
+
+struct DestroyedResources<B: hal::Backend> {
+    /// Resources that are destroyed by the user but still referenced by
+    /// other objects or command buffers.
+    referenced: Vec<(ResourceId, RefCount)>,
+    /// Resources that are not referenced any more but still used by GPU.
+    /// Grouped by frames associated with a fence and a submission index.
+    active: Vec<ActiveFrame<B>>,
+    /// Resources that are neither referenced nor used, just pending
+    /// actual deletion.
+    free: Vec<Resource<B>>,
+}
+
+unsafe impl<B: hal::Backend> Send for DestroyedResources<B> {}
+unsafe impl<B: hal::Backend> Sync for DestroyedResources<B> {}
+
+impl<B: hal::Backend> DestroyedResources<B> {
+    fn add(&mut self, resource_id: ResourceId, life_guard: &LifeGuard) {
+        self.referenced.push((resource_id, life_guard.ref_count.clone()));
+    }
+
+    fn triage_referenced(
+        &mut self,
+        buffer_guard: &mut ItemsGuard<resource::Buffer<B>>,
+        texture_guard: &mut ItemsGuard<resource::Texture<B>>,
+    ) {
+        for i in (0 .. self.referenced.len()).rev() {
+            // one in resource itself, and one here in this list
+            let num_refs = self.referenced[i].1.load();
+            if num_refs <= 2 {
+                assert_eq!(num_refs, 2);
+                let resource_id = self.referenced.swap_remove(i).0;
+                let (submit_index, resource) = match resource_id {
+                    ResourceId::Buffer(id) => {
+                        let buf = buffer_guard.take(id);
+                        let si = buf.life_guard.submission_index.load(Ordering::Acquire);
+                        (si, Resource::Buffer(buf))
+                    }
+                    ResourceId::Texture(id) => {
+                        let tex = texture_guard.take(id);
+                        let si = tex.life_guard.submission_index.load(Ordering::Acquire);
+                        (si, Resource::Texture(tex))
+                    }
+                };
+                match self.active
+                    .iter_mut()
+                    .find(|af| af.submission_index == submit_index)
+                {
+                    Some(af) => af.resources.push(resource),
+                    None => self.free.push(resource),
+                }
+            }
+        }
+    }
+
+    fn cleanup(&mut self, raw: &B::Device) {
+        for i in (0 .. self.active.len()).rev() {
+            if raw.get_fence_status(&self.active[i].fence) {
+                let af = self.active.swap_remove(i);
+                self.free.extend(af.resources);
+                raw.destroy_fence(af.fence);
+            }
+        }
+
+        for resource in self.free.drain(..) {
+            match resource {
+                Resource::Buffer(buf) => {
+                    raw.destroy_buffer(buf.raw);
+                }
+                Resource::Texture(tex) => {
+                    raw.destroy_image(tex.raw);
+                }
+            }
+        }
+    }
+}
 
 pub struct Device<B: hal::Backend> {
     pub(crate) raw: B::Device,
     queue_group: hal::QueueGroup<B, hal::General>,
     mem_allocator: Heaps<B::Memory>,
     pub(crate) com_allocator: command::CommandAllocator<B>,
+    life_guard: LifeGuard,
     buffer_tracker: Mutex<BufferTracker>,
     texture_tracker: Mutex<TextureTracker>,
     mem_props: hal::MemoryProperties,
     pub(crate) render_passes: Mutex<HashMap<RenderPassKey, B::RenderPass>>,
     pub(crate) framebuffers: Mutex<HashMap<FramebufferKey, B::Framebuffer>>,
+    last_submission_index: SubmissionIndex,
+    destroyed: Mutex<DestroyedResources<B>>,
 }
 
 impl<B: hal::Backend> Device<B> {
@@ -75,11 +171,18 @@ impl<B: hal::Backend> Device<B> {
             mem_allocator,
             com_allocator: command::CommandAllocator::new(queue_group.family()),
             queue_group,
+            life_guard: LifeGuard::new(),
             buffer_tracker: Mutex::new(BufferTracker::new()),
             texture_tracker: Mutex::new(TextureTracker::new()),
             mem_props,
             render_passes: Mutex::new(HashMap::new()),
             framebuffers: Mutex::new(HashMap::new()),
+            last_submission_index: 0,
+            destroyed: Mutex::new(DestroyedResources {
+                referenced: Vec::new(),
+                active: Vec::new(),
+                free: Vec::new(),
+            }),
         }
     }
 }
@@ -136,19 +239,28 @@ pub extern "C" fn wgpu_device_create_texture(
         layers: 0 .. 1, //TODO
     };
 
+    let life_guard = LifeGuard::new();
+    let ref_count = life_guard.ref_count.clone();
     let id = HUB.textures
         .lock()
         .register(resource::Texture {
             raw: bound_image,
-            device_id: Stored(device_id),
+            device_id: Stored {
+                value: device_id,
+                ref_count: device.life_guard.ref_count.clone(),
+            },
             kind,
             format: desc.format,
             full_range,
+            life_guard,
         });
     let query = device.texture_tracker
         .lock()
         .unwrap()
-        .query(id, TextureUsageFlags::WRITE_ALL);
+        .query(
+            &Stored { value: id, ref_count },
+            TextureUsageFlags::WRITE_ALL,
+        );
     assert!(query.initialized);
 
     id
@@ -164,7 +276,7 @@ pub extern "C" fn wgpu_texture_create_texture_view(
 
     let raw = HUB.devices
         .lock()
-        .get(texture.device_id.0)
+        .get(texture.device_id.value)
         .raw
         .create_image_view(
             &texture.raw,
@@ -183,10 +295,14 @@ pub extern "C" fn wgpu_texture_create_texture_view(
         .lock()
         .register(resource::TextureView {
             raw,
-            texture_id: Stored(texture_id),
+            texture_id: Stored {
+                value: texture_id,
+                ref_count: texture.life_guard.ref_count.clone(),
+            },
             format: texture.format,
             extent: texture.kind.extent(),
             samples: texture.kind.num_samples(),
+            life_guard: LifeGuard::new(),
         })
 }
 
@@ -205,7 +321,7 @@ pub extern "C" fn wgpu_texture_create_default_texture_view(
 
     let raw = HUB.devices
         .lock()
-        .get(texture.device_id.0)
+        .get(texture.device_id.value)
         .raw
         .create_image_view(
             &texture.raw,
@@ -220,13 +336,38 @@ pub extern "C" fn wgpu_texture_create_default_texture_view(
         .lock()
         .register(resource::TextureView {
             raw,
-            texture_id: Stored(texture_id),
+            texture_id: Stored {
+                value: texture_id,
+                ref_count: texture.life_guard.ref_count.clone(),
+            },
             format: texture.format,
             extent: texture.kind.extent(),
             samples: texture.kind.num_samples(),
+            life_guard: LifeGuard::new(),
         })
 }
+
+#[no_mangle]
+pub extern "C" fn wgpu_texture_destroy(
+    texture_id: TextureId,
+) {
+    let texture_guard = HUB.textures.lock();
+    let texture = texture_guard.get(texture_id);
+    let device_guard = HUB.devices.lock();
+    device_guard
+        .get(texture.device_id.value)
+        .destroyed
+        .lock()
+        .unwrap()
+        .add(ResourceId::Texture(texture_id), &texture.life_guard);
+}
+
+#[no_mangle]
+pub extern "C" fn wgpu_texture_view_destroy(
+    _texture_view_id: TextureViewId,
+) {
+    unimplemented!()
+}
 
 #[no_mangle]
 pub extern "C" fn wgpu_device_create_bind_group_layout(
@@ -338,7 +479,11 @@ pub extern "C" fn wgpu_device_create_command_buffer(
     let device_guard = HUB.devices.lock();
     let device = device_guard.get(device_id);
 
-    let mut cmd_buf = device.com_allocator.allocate(device_id, &device.raw);
+    let dev_stored = Stored {
+        value: device_id,
+        ref_count: device.life_guard.ref_count.clone(),
+    };
+    let mut cmd_buf = device.com_allocator.allocate(dev_stored, &device.raw);
     cmd_buf.raw.last_mut().unwrap().begin(
         hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
         hal::command::CommandBufferInheritanceInfo::default(),
@@ -367,6 +512,10 @@ pub extern "C" fn wgpu_queue_submit(
         slice::from_raw_parts(command_buffer_ptr, command_buffer_count)
     };
 
+    let mut buffer_guard = HUB.buffers.lock();
+    let mut texture_guard = HUB.textures.lock();
+    let old_submit_index = device.life_guard.submission_index.fetch_add(1, Ordering::Relaxed);
+
     //TODO: if multiple command buffers are submitted, we can re-use the last
     // native command buffer of the previous chain instead of always creating
     // a temporary one, since the chains are not finished.
@@ -374,11 +523,21 @@ pub extern "C" fn wgpu_queue_submit(
     // finish all the command buffers first
     for &cmb_id in command_buffer_ids {
         let comb = command_buffer_guard.get_mut(cmb_id);
+        // update submission IDs
+        for id in comb.buffer_tracker.used() {
+            buffer_guard.get(id).life_guard.submission_index.store(old_submit_index, Ordering::Release);
+        }
+        for id in comb.texture_tracker.used() {
+            texture_guard.get(id).life_guard.submission_index.store(old_submit_index, Ordering::Release);
+        }
+
+        // execute resource transitions
         let mut transit = device.com_allocator.extend(comb);
         transit.begin(
             hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
             hal::command::CommandBufferInheritanceInfo::default(),
         );
+        //TODO: fix the consume
         CommandBuffer::insert_barriers(
             &mut transit,
             buffer_tracker.consume(&comb.buffer_tracker),
@@ -393,6 +552,7 @@ pub extern "C" fn wgpu_queue_submit(
     }
 
     // now prepare the GPU submission
+    let fence = device.raw.create_fence(false);
     {
         let submission = hal::queue::RawSubmission {
             cmd_buffers: command_buffer_ids
@@ -406,10 +566,21 @@ pub extern "C" fn wgpu_queue_submit(
         unsafe {
             device.queue_group.queues[0]
                 .as_raw_mut()
-                .submit_raw(submission, None);
+                .submit_raw(submission, Some(&fence));
         }
     }
 
+    if let Ok(mut destroyed) = device.destroyed.lock() {
+        destroyed.triage_referenced(&mut buffer_guard, &mut texture_guard);
+        destroyed.cleanup(&device.raw);
+
+        destroyed.active.push(ActiveFrame {
+            submission_index: old_submit_index + 1,
+            fence,
+            resources: Vec::new(),
+        });
+    }
+
     // finally, return the command buffers to the allocator
     for &cmb_id in command_buffer_ids {
         let cmd_buf = command_buffer_guard.take(cmb_id);
diff --git a/wgpu-native/src/lib.rs b/wgpu-native/src/lib.rs
index 924d7460e..206510ef0 100644
--- a/wgpu-native/src/lib.rs
+++ b/wgpu-native/src/lib.rs
@@ -43,12 +43,78 @@ pub use self::resource::*;
 use back::Backend as B;
 use registry::Id;
 
-#[derive(Debug, Hash, PartialEq, Eq)]
-struct Stored<T>(T);
-#[cfg(not(feature = "remote"))]
-unsafe impl<T> Sync for Stored<T> {}
-#[cfg(not(feature = "remote"))]
+use std::ptr;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+
+//#[cfg(not(feature = "remote"))]
+//unsafe impl<T> Sync for Stored<T> {}
+//#[cfg(not(feature = "remote"))]
+//unsafe impl<T> Send for Stored<T> {}
+
+type SubmissionIndex = usize;
+
+#[derive(Debug)]
+struct RefCount(ptr::NonNull<AtomicUsize>);
+
+impl RefCount {
+    const MAX: usize = 1 << 24;
+
+    fn load(&self) -> usize {
+        unsafe { self.0.as_ref() }.load(Ordering::Acquire)
+    }
+}
+
+impl Clone for RefCount {
+    fn clone(&self) -> Self {
+        let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::Relaxed);
+        assert!(old_size < Self::MAX);
+        RefCount(self.0)
+    }
+}
+
+impl Drop for RefCount {
+    fn drop(&mut self) {
+        if unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::Relaxed) == 1 {
+            let _ = unsafe { Box::from_raw(self.0.as_ptr()) };
+        }
+    }
+}
+
+struct LifeGuard {
+    ref_count: RefCount,
+    submission_index: AtomicUsize,
+}
+
+//TODO: reconsider this
+unsafe impl Send for LifeGuard {}
+unsafe impl Sync for LifeGuard {}
+
+impl LifeGuard {
+    fn new() -> Self {
+        let bx = Box::new(AtomicUsize::new(1));
+        LifeGuard {
+            ref_count: RefCount(ptr::NonNull::new(Box::into_raw(bx)).unwrap()),
+            submission_index: AtomicUsize::new(0),
+        }
+    }
+}
+
+#[derive(Debug)]
+struct Stored<T> {
+    value: T,
+    ref_count: RefCount,
+}
+
 unsafe impl<T> Send for Stored<T> {}
+unsafe impl<T> Sync for Stored<T> {}
+
+#[derive(Debug, Hash, PartialEq, Eq)]
+struct WeaklyStored<T>(T);
+
+unsafe impl<T> Send for WeaklyStored<T> {}
+unsafe impl<T> Sync for WeaklyStored<T> {}
+
 
 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
diff --git a/wgpu-native/src/resource.rs b/wgpu-native/src/resource.rs
index f914da45a..bfe0903e6 100644
--- a/wgpu-native/src/resource.rs
+++ b/wgpu-native/src/resource.rs
@@ -1,5 +1,5 @@
 use {
-    Extent3d, Stored,
+    Extent3d, LifeGuard, Stored,
     DeviceId, TextureId,
 };
 
@@ -32,6 +32,7 @@ pub(crate) struct Buffer<B: hal::Backend> {
     //pub raw: B::UnboundBuffer,
     pub raw: B::Buffer,
     pub memory_properties: hal::memory::Properties,
+    pub life_guard: LifeGuard,
     // TODO: mapping, unmap()
 }
@@ -82,6 +83,7 @@ pub(crate) struct Texture<B: hal::Backend> {
     pub kind: hal::image::Kind,
     pub format: TextureFormat,
     pub full_range: hal::image::SubresourceRange,
+    pub life_guard: LifeGuard,
 }
 
@@ -122,6 +124,7 @@ pub(crate) struct TextureView<B: hal::Backend> {
     pub format: TextureFormat,
     pub extent: hal::image::Extent,
     pub samples: hal::image::NumSamples,
+    pub life_guard: LifeGuard,
 }
 
diff --git a/wgpu-native/src/track.rs b/wgpu-native/src/track.rs
index a7428b384..bfa9a80a8 100644
--- a/wgpu-native/src/track.rs
+++ b/wgpu-native/src/track.rs
@@ -1,8 +1,9 @@
-use {Stored, BufferId, TextureId};
+use {RefCount, Stored, WeaklyStored, BufferId, TextureId};
 use resource::{BufferUsageFlags, TextureUsageFlags};
 
 use std::collections::hash_map::{Entry, HashMap};
 use std::hash::Hash;
+use std::mem;
 use std::ops::{BitOr, Range};
 
@@ -42,10 +43,19 @@ impl GenericUsage for TextureUsageFlags {
     }
 }
 
+#[derive(Clone)]
+struct Track<U> {
+    ref_count: RefCount,
+    init: U,
+    last: U,
+}
+
+unsafe impl<U> Send for Track<U> {}
+unsafe impl<U> Sync for Track<U> {}
 
 //TODO: consider having `I` as an associated type of `U`?
 pub struct Tracker<I, U> {
-    map: HashMap<Stored<I>, Range<U>>,
+    map: HashMap<WeaklyStored<I>, Track<U>>,
 }
 pub type BufferTracker = Tracker<BufferId, BufferUsageFlags>;
 pub type TextureTracker = Tracker<TextureId, TextureUsageFlags>;
@@ -54,16 +64,23 @@ impl<
     I: Clone + Hash + Eq,
     U: Copy + GenericUsage + BitOr<Output = U> + PartialEq,
 > Tracker<I, U> {
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Tracker {
             map: HashMap::new(),
         }
     }
 
-    pub fn query(&mut self, id: I, default: U) -> Query<U> {
-        match self.map.entry(Stored(id)) {
+    /// Get the last usage on a resource.
+    pub(crate) fn query(
+        &mut self, stored: &Stored<I>, default: U
+    ) -> Query<U> {
+        match self.map.entry(WeaklyStored(stored.value.clone())) {
             Entry::Vacant(e) => {
-                e.insert(default .. default);
+                e.insert(Track {
+                    ref_count: stored.ref_count.clone(),
+                    init: default,
+                    last: default,
+                });
                 Query {
                     usage: default,
                     initialized: true,
@@ -71,28 +88,35 @@ impl<
                 }
             }
             Entry::Occupied(e) => {
                 Query {
-                    usage: e.get().end,
+                    usage: e.get().last,
                     initialized: false,
                 }
             }
         }
     }
-    pub fn transit(&mut self, id: I, usage: U, permit: TrackPermit) -> Result<Tracktion<U>, U> {
-        match self.map.entry(Stored(id)) {
+    /// Transit a specified resource into a different usage.
+    pub(crate) fn transit(
+        &mut self, id: I, ref_count: &RefCount, usage: U, permit: TrackPermit
+    ) -> Result<Tracktion<U>, U> {
+        match self.map.entry(WeaklyStored(id)) {
             Entry::Vacant(e) => {
-                e.insert(usage .. usage);
+                e.insert(Track {
+                    ref_count: ref_count.clone(),
+                    init: usage,
+                    last: usage,
+                });
                 Ok(Tracktion::Init)
             }
             Entry::Occupied(mut e) => {
-                let old = e.get().end;
+                let old = e.get().last;
                 if usage == old {
                     Ok(Tracktion::Keep)
                 } else if permit.contains(TrackPermit::EXTEND) && !(old | usage).is_exclusive() {
-                    e.get_mut().end = old | usage;
+                    e.get_mut().last = old | usage;
                     Ok(Tracktion::Extend { old })
                 } else if permit.contains(TrackPermit::REPLACE) {
-                    e.get_mut().end = usage;
+                    e.get_mut().last = usage;
                     Ok(Tracktion::Replace { old })
                 } else {
                     Err(old)
@@ -101,15 +125,30 @@ impl<
         }
     }
 
-    pub fn consume<'a>(&'a mut self, other: &'a Self) -> impl 'a + Iterator<Item = (I, Range<U>)> {
+    /// Consume another tracker, adding its transitions to `self`.
+    pub fn consume<'a>(
+        &'a mut self, other: &'a Self
+    ) -> impl 'a + Iterator<Item = (I, Range<U>)> {
         other.map
             .iter()
-            .flat_map(move |(id, new)| match self.transit(id.0.clone(), new.end, TrackPermit::REPLACE) {
-                Ok(Tracktion::Init) |
-                Ok(Tracktion::Keep) => None,
-                Ok(Tracktion::Replace { old }) => Some((id.0.clone(), old .. new.end)),
-                Ok(Tracktion::Extend { .. }) |
-                Err(_) => panic!("Unable to consume a resource transition!"),
+            .flat_map(move |(id, new)| match self.map.entry(WeaklyStored(id.0.clone())) {
+                Entry::Vacant(e) => {
+                    e.insert(new.clone());
+                    None
+                }
+                Entry::Occupied(mut e) => {
+                    let old = mem::replace(&mut e.get_mut().last, new.last);
+                    if old == new.init {
+                        None
+                    } else {
+                        Some((id.0.clone(), old .. new.last))
+                    }
+                }
             })
     }
+
+    /// Return an iterator over used resource keys.
+    pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = I> {
+        self.map.keys().map(|&WeaklyStored(ref id)| id.clone())
+    }
 }
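
Not part of the patch, for review context only: a minimal C sketch of how the new deferred-destruction entry point is meant to be driven. It assumes a device, queue, texture descriptor and recorded command buffers obtained through the existing wgpu.h calls; the wgpu_device_create_texture and wgpu_queue_submit signatures are inferred from the Rust side above rather than copied from the header.

#include <stdint.h>
#include "wgpu.h"

/* Hypothetical helper, illustration only: `device`, `queue`, `desc` and the
 * command buffers are assumed to come from the usual wgpu.h setup calls. */
static void create_submit_destroy(WGPUDeviceId device,
                                  WGPUQueueId queue,
                                  const WGPUTextureDescriptor *desc,
                                  const WGPUCommandBufferId *cmd_bufs,
                                  uintptr_t cmd_buf_count) {
    WGPUTextureId texture = wgpu_device_create_texture(device, desc);

    /* Work recorded in `cmd_bufs` may use the texture; submitting stamps the
     * texture's life_guard with the new submission index and attaches a fence
     * to that submission. */
    wgpu_queue_submit(queue, cmd_bufs, cmd_buf_count);

    /* Only requests destruction: the texture is parked in
     * DestroyedResources::referenced until no other object holds a clone of
     * its RefCount, then waits on the submission's fence and is actually
     * freed during a later wgpu_queue_submit(). */
    wgpu_texture_destroy(texture);
}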