mirror of https://github.com/gfx-rs/wgpu.git (synced 2025-12-08 21:26:17 +00:00)
use StagingBuffer instead of Buffer for mapped_at_creation Buffers
parent 5266bd1f08
commit fabbca294a
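This change makes Device::create_buffer use the queue's StagingBuffer machinery for buffers created with mapped_at_creation: instead of building an internal wgpu-core Buffer as a staging area, it calls the (now pub(crate)) queue::prepare_staging_buffer helper, keeps the returned StagingBuffer in BufferMapState::Init, and on unmap flushes it and hands it to pending_writes as a TempResource::StagingBuffer. For orientation, below is a minimal sketch of the user-facing path that exercises this code, written against wgpu's public API; it is illustrative only and not part of the diff (the function name, label, size, and usage are made up, and an already-created wgpu::Device is assumed):

// Illustrative sketch only (not part of this commit). Assumes a wgpu::Device
// obtained through the usual instance/adapter setup.
fn create_prefilled_buffer(device: &wgpu::Device) -> wgpu::Buffer {
    // `mapped_at_creation: true` on a non-MAP_WRITE buffer is what drives the
    // internal staging path reworked in this commit.
    let buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("mapped-at-creation example"), // hypothetical label
        size: 256,                                 // multiple of COPY_BUFFER_ALIGNMENT
        usage: wgpu::BufferUsages::VERTEX,
        mapped_at_creation: true,
    });
    // Write the initial contents through the mapped range.
    buffer
        .slice(..)
        .get_mapped_range_mut()
        .copy_from_slice(&[0u8; 256]);
    // Unmapping schedules the copy from the staging memory into the real
    // buffer; that is the BufferMapState::Init handling changed below.
    buffer.unmap();
    buffer
}
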
@@ -317,7 +317,7 @@ impl<A: HalApi> PendingWrites<A> {
     }
 }
 
-fn prepare_staging_buffer<A: HalApi>(
+pub(crate) fn prepare_staging_buffer<A: HalApi>(
     device: &Arc<Device<A>>,
     size: wgt::BufferAddress,
     instance_flags: wgt::InstanceFlags,
@@ -587,33 +587,17 @@ impl<A: HalApi> Device<A> {
             };
             hal::BufferUses::MAP_WRITE
         } else {
-            // buffer needs staging area for initialization only
-            let stage_desc = wgt::BufferDescriptor {
-                label: Some(Cow::Borrowed(
-                    "(wgpu internal) initializing unmappable buffer",
-                )),
-                size: desc.size,
-                usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC,
-                mapped_at_creation: false,
-            };
-            let stage = self.create_buffer_impl(&stage_desc, true)?;
-
-            let snatch_guard = self.snatchable_lock.read();
-            let stage_raw = stage.raw(&snatch_guard).unwrap();
-            let mapping = unsafe { self.raw().map_buffer(stage_raw, 0..stage.size) }
-                .map_err(DeviceError::from)?;
+            let (staging_buffer, staging_buffer_ptr) =
+                queue::prepare_staging_buffer(self, desc.size, self.instance_flags)?;
 
-            assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
-            // Zero initialize memory and then mark both staging and buffer as initialized
+            // Zero initialize memory and then mark the buffer as initialized
             // (it's guaranteed that this is the case by the time the buffer is usable)
-            unsafe { std::ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) };
+            unsafe { std::ptr::write_bytes(staging_buffer_ptr.as_ptr(), 0, buffer.size as usize) };
             buffer.initialization_status.write().drain(0..buffer.size);
-            stage.initialization_status.write().drain(0..buffer.size);
 
             *buffer.map_state.lock() = resource::BufferMapState::Init {
-                ptr: mapping.ptr,
-                needs_flush: !mapping.is_coherent,
-                stage_buffer: stage,
+                staging_buffer: Arc::new(staging_buffer),
+                ptr: staging_buffer_ptr,
             };
             hal::BufferUses::COPY_DST
         };
@@ -260,9 +260,8 @@ pub enum BufferMapAsyncStatus {
 pub(crate) enum BufferMapState<A: HalApi> {
     /// Mapped at creation.
     Init {
+        staging_buffer: Arc<StagingBuffer<A>>,
         ptr: NonNull<u8>,
-        stage_buffer: Arc<Buffer<A>>,
-        needs_flush: bool,
     },
     /// Waiting for GPU to be done before mapping
     Waiting(BufferPendingMapping<A>),
@@ -657,9 +656,8 @@ impl<A: HalApi> Buffer<A> {
         log::debug!("{} map state -> Idle", self.error_ident());
         match mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle) {
             BufferMapState::Init {
+                staging_buffer,
                 ptr,
-                stage_buffer,
-                needs_flush,
             } => {
                 #[cfg(feature = "trace")]
                 if let Some(ref mut trace) = *device.trace.lock() {
@@ -674,12 +672,14 @@ impl<A: HalApi> Buffer<A> {
                     });
                 }
                 let _ = ptr;
-                if needs_flush {
+
+                let raw_staging_buffer_guard = staging_buffer.raw.lock();
+                let raw_staging_buffer = raw_staging_buffer_guard.as_ref().unwrap();
+                if !staging_buffer.is_coherent {
                     unsafe {
-                        device.raw().flush_mapped_ranges(
-                            stage_buffer.raw(&snatch_guard).unwrap(),
-                            iter::once(0..self.size),
-                        );
+                        device
+                            .raw()
+                            .flush_mapped_ranges(raw_staging_buffer, iter::once(0..self.size));
                     }
                 }
 
@@ -690,7 +690,7 @@ impl<A: HalApi> Buffer<A> {
                     size,
                 });
                 let transition_src = hal::BufferBarrier {
-                    buffer: stage_buffer.raw(&snatch_guard).unwrap(),
+                    buffer: raw_staging_buffer,
                     usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
                 };
                 let transition_dst = hal::BufferBarrier {
@@ -706,13 +706,14 @@ impl<A: HalApi> Buffer<A> {
                     );
                     if self.size > 0 {
                         encoder.copy_buffer_to_buffer(
-                            stage_buffer.raw(&snatch_guard).unwrap(),
+                            raw_staging_buffer,
                             raw_buf,
                             region.into_iter(),
                         );
                     }
                 }
-                pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer));
+                drop(raw_staging_buffer_guard);
+                pending_writes.consume_temp(queue::TempResource::StagingBuffer(staging_buffer));
                 pending_writes.insert_buffer(self);
             }
             BufferMapState::Idle => {