Deferred error reporting for other command encoder operations
* clear commands
* query set functions
* command_encoder_as_hal_mut
* ray_tracing
parent e702d1c116
commit 3a5d0f2747
@@ -27,6 +27,13 @@ pub use run::{execute_test, TestingContext};
pub use wgpu_macros::gpu_test;

/// Run some code in an error scope and assert that validation fails.
///
/// Note that errors related to commands for the GPU (i.e. raised by methods on
/// GPUCommandEncoder, GPURenderPassEncoder, GPUComputePassEncoder,
/// GPURenderBundleEncoder) are usually not raised immediately. They are raised
/// only when `finish()` is called on the command encoder. Tests of such error
/// cases should call `fail` with a closure that calls `finish()`, not with a
/// closure that encodes the actual command.
pub fn fail<T>(
    device: &wgpu::Device,
    callback: impl FnOnce() -> T,
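A minimal sketch of the testing pattern this doc comment prescribes. The `device` and `buffer` bindings are assumed to come from the surrounding test context, and the offset `1` is just one way to provoke a deferred validation error (it is not `COPY_BUFFER_ALIGNMENT`-aligned); the expected-message fragment is illustrative, not the exact text:

```rust
// Sketch only: `device` and `buffer` come from the test context.
let mut encoder = device.create_command_encoder(&Default::default());
// The invalid command is encoded eagerly; its error is deferred.
encoder.clear_buffer(&buffer, 1, None); // 1 is not COPY_BUFFER_ALIGNMENT-aligned
fail(
    &device,
    || encoder.finish(), // the deferred error surfaces here
    Some("unaligned"),   // illustrative message fragment
);
```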
@@ -344,13 +344,12 @@ static CLEAR_OFFSET_OUTSIDE_RESOURCE_BOUNDS: GpuTestConfiguration = GpuTestConfi

    let out_of_bounds = size.checked_add(wgpu::COPY_BUFFER_ALIGNMENT).unwrap();

    let mut encoder = ctx.device.create_command_encoder(&Default::default());
    encoder.clear_buffer(&buffer, out_of_bounds, None);

    wgpu_test::fail(
        &ctx.device,
        || {
            ctx.device
                .create_command_encoder(&Default::default())
                .clear_buffer(&buffer, out_of_bounds, None)
        },
        || encoder.finish(),
        Some("Clear of 20..20 would end up overrunning the bounds of the buffer of size 16"),
    );
});
@@ -370,17 +369,16 @@ static CLEAR_OFFSET_PLUS_SIZE_OUTSIDE_U64_BOUNDS: GpuTestConfiguration =

    let max_valid_offset = u64::MAX - (u64::MAX % wgpu::COPY_BUFFER_ALIGNMENT);
    let smallest_aligned_invalid_size = wgpu::COPY_BUFFER_ALIGNMENT;

    let mut encoder = ctx.device.create_command_encoder(&Default::default());
    encoder.clear_buffer(
        &buffer,
        max_valid_offset,
        Some(smallest_aligned_invalid_size),
    );

    wgpu_test::fail(
        &ctx.device,
        || {
            ctx.device
                .create_command_encoder(&Default::default())
                .clear_buffer(
                    &buffer,
                    max_valid_offset,
                    Some(smallest_aligned_invalid_size),
                )
        },
        || encoder.finish(),
        Some(concat!(
            "Clear starts at offset 18446744073709551612 with size of 4, ",
            "but these added together exceed `u64::MAX`"
@@ -330,20 +330,19 @@ static DEVICE_DESTROY_THEN_MORE: GpuTestConfiguration = GpuTestConfiguration::ne
    );

    // Texture clear should fail.
    encoder_for_clear.clear_texture(
        &texture_for_write,
        &wgpu::ImageSubresourceRange {
            aspect: wgpu::TextureAspect::All,
            base_mip_level: 0,
            mip_level_count: None,
            base_array_layer: 0,
            array_layer_count: None,
        },
    );
    fail(
        &ctx.device,
        || {
            encoder_for_clear.clear_texture(
                &texture_for_write,
                &wgpu::ImageSubresourceRange {
                    aspect: wgpu::TextureAspect::All,
                    base_mip_level: 0,
                    mip_level_count: None,
                    base_array_layer: 0,
                    array_layer_count: None,
                },
            );
        },
        || encoder_for_clear.finish(),
        Some("device with '' label is invalid"),
    );
@@ -188,11 +188,8 @@ fn blas_compaction(ctx: TestingContext) {
    let mut build_entry = as_ctx.blas_build_entry();
    build_entry.blas = &compacted;

    fail(
        &ctx.device,
        || fail_encoder.build_acceleration_structures([&build_entry], []),
        None,
    );
    fail_encoder.build_acceleration_structures([&build_entry], []);
    fail(&ctx.device, || fail_encoder.finish(), None);
}

#[gpu_test]
@@ -733,13 +730,8 @@ fn only_tlas_vertex_return(ctx: TestingContext) {
        label: Some("TLAS 1"),
    });

    fail(
        &ctx.device,
        || {
            encoder_tlas.build_acceleration_structures([], [&as_ctx.tlas]);
        },
        None,
    );
    encoder_tlas.build_acceleration_structures([], [&as_ctx.tlas]);
    fail(&ctx.device, || encoder_tlas.finish(), None);
}

#[gpu_test]
@@ -817,30 +809,29 @@ fn test_as_build_format_stride(
        .create_command_encoder(&CommandEncoderDescriptor {
            label: Some("BLAS_1"),
        });
    fail_if(
        command_encoder.build_acceleration_structures(
            &[BlasBuildEntry {
                blas: &blas,
                geometry: BlasGeometries::TriangleGeometries(vec![BlasTriangleGeometry {
                    size: &blas_size,
                    vertex_buffer: &vertices,
                    first_vertex: 0,
                    vertex_stride: stride,
                    index_buffer: None,
                    first_index: None,
                    transform_buffer: None,
                    transform_buffer_offset: None,
                }]),
            }],
            &[],
        );
    let command_buffer = fail_if(
        &ctx.device,
        invalid_combination,
        || {
            command_encoder.build_acceleration_structures(
                &[BlasBuildEntry {
                    blas: &blas,
                    geometry: BlasGeometries::TriangleGeometries(vec![BlasTriangleGeometry {
                        size: &blas_size,
                        vertex_buffer: &vertices,
                        first_vertex: 0,
                        vertex_stride: stride,
                        index_buffer: None,
                        first_index: None,
                        transform_buffer: None,
                        transform_buffer_offset: None,
                    }]),
                }],
                &[],
            )
        },
        || command_encoder.finish(),
        None,
    );
    if !invalid_combination {
        ctx.queue.submit([command_encoder.finish()]);
        ctx.queue.submit([command_buffer]);
    }
}
@@ -86,7 +86,7 @@ impl Global {
        dst: BufferId,
        offset: BufferAddress,
        size: Option<BufferAddress>,
    ) -> Result<(), ClearError> {
    ) -> Result<(), EncoderStateError> {
        profiling::scope!("CommandEncoder::clear_buffer");
        api_log!("CommandEncoder::clear_buffer {dst:?}");
@@ -96,77 +96,74 @@ impl Global {
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());
        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), ClearError> {
            #[cfg(feature = "trace")]
            if let Some(ref mut list) = cmd_buf_data.commands {
                list.push(TraceCommand::ClearBuffer { dst, offset, size });
            }

            let dst_buffer = hub.buffers.get(dst).get()?;

            dst_buffer.same_device_as(cmd_buf.as_ref())?;

            let dst_pending = cmd_buf_data
                .trackers
                .buffers
                .set_single(&dst_buffer, wgt::BufferUses::COPY_DST);

            let snatch_guard = dst_buffer.device.snatchable_lock.read();
            let dst_raw = dst_buffer.try_raw(&snatch_guard)?;
            dst_buffer.check_usage(BufferUsages::COPY_DST)?;

            // Check if offset & size are valid.
            if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
                return Err(ClearError::UnalignedBufferOffset(offset));
            }

            let size = size.unwrap_or(dst_buffer.size.saturating_sub(offset));
            if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
                return Err(ClearError::UnalignedFillSize(size));
            }
            let end_offset =
                offset
                    .checked_add(size)
                    .ok_or(ClearError::OffsetPlusSizeExceeds64BitBounds {
                        start_offset: offset,
                        requested_size: size,
                    })?;
            if end_offset > dst_buffer.size {
                return Err(ClearError::BufferOverrun {
                    start_offset: offset,
                    end_offset,
                    buffer_size: dst_buffer.size,
                });
            }

            if offset == end_offset {
                log::trace!("Ignoring fill_buffer of size 0");
                cmd_buf_data_guard.mark_successful();
                return Ok(());
            }

            // Mark dest as initialized.
            cmd_buf_data.buffer_memory_init_actions.extend(
                dst_buffer.initialization_status.read().create_action(
                    &dst_buffer,
                    offset..end_offset,
                    MemoryInitKind::ImplicitlyInitialized,
                ),
            );

            // actual hal barrier & operation
            let dst_barrier =
                dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard));
            let cmd_buf_raw = cmd_buf_data.encoder.open()?;
            unsafe {
                cmd_buf_raw.transition_buffers(dst_barrier.as_slice());
                cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset);
            }

            cmd_buf_data_guard.mark_successful();
            Ok(())
        })
    }

    pub fn command_encoder_clear_texture(
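For reference, the bounds math above is exactly what the earlier CLEAR_OFFSET_OUTSIDE_RESOURCE_BOUNDS test exercises; a self-contained sketch with the constants inlined (`COPY_BUFFER_ALIGNMENT` is 4 in wgpu-types):

```rust
fn main() {
    const ALIGN: u64 = 4; // wgpu::COPY_BUFFER_ALIGNMENT
    let (buffer_size, offset) = (16u64, 20u64);
    assert_eq!(offset % ALIGN, 0); // alignment check passes
    // size defaults to dst_buffer.size.saturating_sub(offset) == 0
    let size = buffer_size.saturating_sub(offset);
    let end_offset = offset.checked_add(size).unwrap(); // 20, no u64 overflow
    // 20 > 16 -> ClearError::BufferOverrun: "Clear of 20..20 would end up
    // overrunning the bounds of the buffer of size 16"
    assert!(end_offset > buffer_size);
}
```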
@@ -174,7 +171,7 @@ impl Global {
        command_encoder_id: CommandEncoderId,
        dst: TextureId,
        subresource_range: &ImageSubresourceRange,
    ) -> Result<(), ClearError> {
    ) -> Result<(), EncoderStateError> {
        profiling::scope!("CommandEncoder::clear_texture");
        api_log!("CommandEncoder::clear_texture {dst:?}");
@@ -184,79 +181,78 @@ impl Global {
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());
        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), ClearError> {
            #[cfg(feature = "trace")]
            if let Some(ref mut list) = cmd_buf_data.commands {
                list.push(TraceCommand::ClearTexture {
                    dst,
                    subresource_range: *subresource_range,
                });
            }

            if !cmd_buf.support_clear_texture {
                return Err(ClearError::MissingClearTextureFeature);
            }

            let dst_texture = hub.textures.get(dst).get()?;

            dst_texture.same_device_as(cmd_buf.as_ref())?;

            // Check if subresource aspects are valid.
            let clear_aspects =
                hal::FormatAspects::new(dst_texture.desc.format, subresource_range.aspect);
            if clear_aspects.is_empty() {
                return Err(ClearError::MissingTextureAspect {
                    texture_format: dst_texture.desc.format,
                    subresource_range_aspects: subresource_range.aspect,
                });
            };

            // Check if subresource level range is valid
            let subresource_mip_range =
                subresource_range.mip_range(dst_texture.full_range.mips.end);
            if dst_texture.full_range.mips.start > subresource_mip_range.start
                || dst_texture.full_range.mips.end < subresource_mip_range.end
            {
                return Err(ClearError::InvalidTextureLevelRange {
                    texture_level_range: dst_texture.full_range.mips.clone(),
                    subresource_base_mip_level: subresource_range.base_mip_level,
                    subresource_mip_level_count: subresource_range.mip_level_count,
                });
            }
            // Check if subresource layer range is valid
            let subresource_layer_range =
                subresource_range.layer_range(dst_texture.full_range.layers.end);
            if dst_texture.full_range.layers.start > subresource_layer_range.start
                || dst_texture.full_range.layers.end < subresource_layer_range.end
            {
                return Err(ClearError::InvalidTextureLayerRange {
                    texture_layer_range: dst_texture.full_range.layers.clone(),
                    subresource_base_array_layer: subresource_range.base_array_layer,
                    subresource_array_layer_count: subresource_range.array_layer_count,
                });
            }

            let device = &cmd_buf.device;
            device.check_is_valid()?;
            let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker()?;

            let snatch_guard = device.snatchable_lock.read();
            clear_texture(
                &dst_texture,
                TextureInitRange {
                    mip_range: subresource_mip_range,
                    layer_range: subresource_layer_range,
                },
                encoder,
                &mut tracker.textures,
                &device.alignments,
                device.zero_buffer.as_ref(),
                &snatch_guard,
            )?;

            cmd_buf_data_guard.mark_successful();
            Ok(())
        })
    }
}
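The subresource checks above compare the requested range against the texture's full range; a small sketch of the mip-level case with assumed values:

```rust
fn main() {
    // Texture has mips 0..5; the request asks for base_mip_level 3 with
    // mip_level_count 4, i.e. levels 3..7.
    let full_mips = 0u32..5u32;
    let requested = 3u32..7u32;
    let out_of_range =
        full_mips.start > requested.start || full_mips.end < requested.end;
    assert!(out_of_range); // -> ClearError::InvalidTextureLevelRange
}
```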
@@ -37,7 +37,7 @@ use crate::lock::{rank, Mutex};
use crate::snatch::SnatchGuard;

use crate::init_tracker::BufferInitTrackerAction;
use crate::ray_tracing::AsAction;
use crate::ray_tracing::{AsAction, BuildAccelerationStructureError};
use crate::resource::{
    DestroyedResourceError, Fallible, InvalidResourceError, Labeled, ParentDevice as _, QuerySet,
};
@@ -106,17 +106,6 @@ pub(crate) enum CommandEncoderStatus {
}

impl CommandEncoderStatus {
    /// Checks that the encoder is in the [`Self::Recording`] state.
    pub(crate) fn record(&mut self) -> Result<RecordingGuard<'_>, EncoderStateError> {
        match self {
            Self::Recording(_) => Ok(RecordingGuard { inner: self }),
            Self::Locked(_) => Err(self.invalidate(EncoderStateError::Locked)),
            Self::Finished(_) => Err(EncoderStateError::Ended),
            Self::Error(_) => Err(EncoderStateError::Invalid),
            Self::Transitioning => unreachable!(),
        }
    }

    /// Record commands using the supplied closure.
    ///
    /// If the encoder is in the [`Self::Recording`] state, calls the closure to
@@ -138,29 +127,50 @@ impl CommandEncoderStatus {
        &mut self,
        f: F,
    ) -> Result<(), EncoderStateError> {
        let err = match self.record() {
            Ok(guard) => {
                guard.record(f);
                return Ok(());
            }
            Err(err) => err,
        };
        match err {
            err @ EncoderStateError::Locked => {
                // Invalidate the encoder and do not record anything, but do not
                // return an immediate validation error.
                self.invalidate(err);
        match self {
            Self::Recording(_) => {
                RecordingGuard { inner: self }.record(f);
                Ok(())
            }
            err @ EncoderStateError::Ended => {
                // Invalidate the encoder, do not record anything, and return an
                // immediate validation error.
                Err(self.invalidate(err))
            Self::Locked(_) => {
                // Invalidate the encoder and do not record anything, but do not
                // return an immediate validation error.
                self.invalidate(EncoderStateError::Locked);
                Ok(())
            }
            // Encoder is ended. Invalidate the encoder, do not record anything,
            // and return an immediate validation error.
            Self::Finished(_) => Err(self.invalidate(EncoderStateError::Ended)),
            // Encoder is already invalid. Do not record anything, but do not
            // return an immediate validation error.
            EncoderStateError::Invalid => Ok(()),
            EncoderStateError::Unlocked | EncoderStateError::Submitted => unreachable!(),
            Self::Error(_) => Ok(()),
            Self::Transitioning => unreachable!(),
        }
    }

    /// Special version of record used by `command_encoder_as_hal_mut`. This
    /// differs from the regular version in two ways:
    ///
    /// 1. The recording closure is infallible.
    /// 2. The recording closure takes `Option<&mut CommandBufferMutable>`, and
    ///    in the case that the encoder is not in a valid state for recording, the
    ///    closure is still called, with `None` as its argument.
    pub(crate) fn record_as_hal_mut<T, F: FnOnce(Option<&mut CommandBufferMutable>) -> T>(
        &mut self,
        f: F,
    ) -> T {
        match self {
            Self::Recording(_) => RecordingGuard { inner: self }.record_as_hal_mut(f),
            Self::Locked(_) => {
                self.invalidate(EncoderStateError::Locked);
                f(None)
            }
            Self::Finished(_) => {
                self.invalidate(EncoderStateError::Ended);
                f(None)
            }
            Self::Error(_) => f(None),
            Self::Transitioning => unreachable!(),
        }
    }
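A self-contained toy (illustrative names, not wgpu-core's real types) showing the behavior `record_with` implements above: a command error poisons the encoder silently and only surfaces from `finish()`, while further commands against a poisoned encoder are ignored:

```rust
enum Status {
    Recording,
    Error(String),
    Finished,
}

struct Encoder {
    status: Status,
}

impl Encoder {
    /// Analogue of `record_with`: run the command closure, deferring its error.
    fn record_with(&mut self, f: impl FnOnce() -> Result<(), String>) {
        if let Status::Recording = self.status {
            if let Err(e) = f() {
                self.status = Status::Error(e); // stored, not returned
            }
        }
        // Error state: silently ignore further commands, as above.
    }

    /// Analogue of `finish`: the deferred error is reported here.
    fn finish(&mut self) -> Result<(), String> {
        match core::mem::replace(&mut self.status, Status::Finished) {
            Status::Recording => Ok(()),
            Status::Error(e) => Err(e),
            Status::Finished => Err("encoder already finished".into()),
        }
    }
}

fn main() {
    let mut enc = Encoder { status: Status::Recording };
    enc.record_with(|| Err("bad clear".into())); // no error yet
    enc.record_with(|| Ok(())); // ignored: encoder is poisoned
    assert_eq!(enc.finish(), Err("bad clear".to_string()));
}
```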
@@ -295,6 +305,17 @@ impl<'a> RecordingGuard<'a> {
            }
        }
    }

    /// Special version of record used by `command_encoder_as_hal_mut`. This
    /// version takes an infallible recording closure.
    pub(crate) fn record_as_hal_mut<T, F: FnOnce(Option<&mut CommandBufferMutable>) -> T>(
        mut self,
        f: F,
    ) -> T {
        let res = f(Some(&mut self));
        self.mark_successful();
        res
    }
}

impl<'a> Drop for RecordingGuard<'a> {
@@ -899,6 +920,10 @@ pub enum CommandEncoderError {
    #[error(transparent)]
    Clear(#[from] ClearError),
    #[error(transparent)]
    Query(#[from] QueryError),
    #[error(transparent)]
    BuildAccelerationStructure(#[from] BuildAccelerationStructureError),
    #[error(transparent)]
    TransitionResources(#[from] TransitionResourcesError),
    #[error(
        "begin and end indices of pass timestamp writes are both set to {idx}, which is not allowed"
@@ -317,38 +317,36 @@ impl Global {
        command_encoder_id: id::CommandEncoderId,
        query_set_id: id::QuerySetId,
        query_index: u32,
    ) -> Result<(), QueryError> {
    ) -> Result<(), EncoderStateError> {
        let hub = &self.hub;

        let cmd_buf = hub
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());
        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), QueryError> {
            cmd_buf
                .device
                .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?;

            #[cfg(feature = "trace")]
            if let Some(ref mut list) = cmd_buf_data.commands {
                list.push(TraceCommand::WriteTimestamp {
                    query_set_id,
                    query_index,
                });
            }

            let raw_encoder = cmd_buf_data.encoder.open()?;

            let query_set = hub.query_sets.get(query_set_id).get()?;

            query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?;

            cmd_buf_data.trackers.query_sets.insert_single(query_set);

            cmd_buf_data_guard.mark_successful();
            Ok(())
        })
    }

    pub fn command_encoder_resolve_query_set(
@@ -359,136 +357,135 @@ impl Global {
        query_count: u32,
        destination: id::BufferId,
        destination_offset: BufferAddress,
    ) -> Result<(), QueryError> {
    ) -> Result<(), EncoderStateError> {
        let hub = &self.hub;

        let cmd_buf = hub
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());
        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), QueryError> {
            #[cfg(feature = "trace")]
            if let Some(ref mut list) = cmd_buf_data.commands {
                list.push(TraceCommand::ResolveQuerySet {
                    query_set_id,
                    start_query,
                    query_count,
                    destination,
                    destination_offset,
                });
            }

            if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 {
                return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment));
            }

            let query_set = hub.query_sets.get(query_set_id).get()?;

            query_set.same_device_as(cmd_buf.as_ref())?;

            let dst_buffer = hub.buffers.get(destination).get()?;

            dst_buffer.same_device_as(cmd_buf.as_ref())?;

            let snatch_guard = dst_buffer.device.snatchable_lock.read();
            dst_buffer.check_destroyed(&snatch_guard)?;

            let dst_pending = cmd_buf_data
                .trackers
                .buffers
                .set_single(&dst_buffer, wgt::BufferUses::COPY_DST);

            let dst_barrier =
                dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard));

            dst_buffer
                .check_usage(wgt::BufferUsages::QUERY_RESOLVE)
                .map_err(ResolveError::MissingBufferUsage)?;

            let end_query = u64::from(start_query)
                .checked_add(u64::from(query_count))
                .expect("`u64` overflow from adding two `u32`s, should be unreachable");
            if end_query > u64::from(query_set.desc.count) {
                return Err(ResolveError::QueryOverrun {
                    start_query,
                    end_query,
                    query_set_size: query_set.desc.count,
                }
                .into());
            }
            let end_query = u32::try_from(end_query)
                .expect("`u32` overflow for `end_query`, which should be `u32`");

            let elements_per_query = match query_set.desc.ty {
                wgt::QueryType::Occlusion => 1,
                wgt::QueryType::PipelineStatistics(ps) => ps.bits().count_ones(),
                wgt::QueryType::Timestamp => 1,
            };
            let stride = elements_per_query * wgt::QUERY_SIZE;
            let bytes_used: BufferAddress = u64::from(stride)
                .checked_mul(u64::from(query_count))
                .expect("`stride` * `query_count` overflowed `u32`, should be unreachable");

            let buffer_start_offset = destination_offset;
            let buffer_end_offset = buffer_start_offset
                .checked_add(bytes_used)
                .filter(|buffer_end_offset| *buffer_end_offset <= dst_buffer.size)
                .ok_or(ResolveError::BufferOverrun {
                    start_query,
                    end_query,
                    stride,
                    buffer_size: dst_buffer.size,
                    buffer_start_offset,
                    bytes_used,
                })?;

            // TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state.
            cmd_buf_data.buffer_memory_init_actions.extend(
                dst_buffer.initialization_status.read().create_action(
                    &dst_buffer,
                    buffer_start_offset..buffer_end_offset,
                    MemoryInitKind::ImplicitlyInitialized,
                ),
            );

            let raw_dst_buffer = dst_buffer.try_raw(&snatch_guard)?;
            let raw_encoder = cmd_buf_data.encoder.open()?;
            unsafe {
                raw_encoder.transition_buffers(dst_barrier.as_slice());
                raw_encoder.copy_query_results(
                    query_set.raw(),
                    start_query..end_query,
                    raw_dst_buffer,
                    destination_offset,
                    wgt::BufferSize::new_unchecked(stride as u64),
                );
            }

            if matches!(query_set.desc.ty, wgt::QueryType::Timestamp) {
                // Timestamp normalization is only needed for timestamps.
                cmd_buf
                    .device
                    .timestamp_normalizer
                    .get()
                    .unwrap()
                    .normalize(
                        &snatch_guard,
                        raw_encoder,
                        &mut cmd_buf_data.trackers.buffers,
                        dst_buffer
                            .timestamp_normalization_bind_group
                            .get(&snatch_guard)
                            .unwrap(),
                        &dst_buffer,
                        destination_offset,
                        query_count,
                    );
            }

            cmd_buf_data.trackers.query_sets.insert_single(query_set);

            cmd_buf_data_guard.mark_successful();
            Ok(())
        })
    }
}
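A worked example of the resolve sizing above, with assumed query values (`wgt::QUERY_SIZE` is 8 bytes and `QUERY_RESOLVE_BUFFER_ALIGNMENT` is 256 in wgpu-types):

```rust
fn main() {
    const QUERY_SIZE: u64 = 8; // wgt::QUERY_SIZE in bytes
    const OFFSET_ALIGN: u64 = 256; // wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT

    let (start_query, query_count) = (2u64, 3u64);
    let end_query = start_query + query_count; // 5, must not exceed set size

    let elements_per_query = 1u64; // occlusion or timestamp queries
    let stride = elements_per_query * QUERY_SIZE; // 8 bytes per query
    let bytes_used = stride * query_count; // 24 bytes written

    let destination_offset = 256u64;
    assert_eq!(destination_offset % OFFSET_ALIGN, 0);
    let buffer_end_offset = destination_offset + bytes_used; // 280
    // The resolve is rejected unless buffer_end_offset <= destination size.
    assert_eq!(buffer_end_offset, 280);
}
```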
@@ -7,7 +7,6 @@ use core::{

use wgt::{math::align_to, BufferUsages, BufferUses, Features};

use crate::device::resource::CommandIndices;
use crate::lock::RwLockWriteGuard;
use crate::ray_tracing::{AsAction, AsBuild, TlasBuild, ValidateAsActionsError};
use crate::{
@@ -29,6 +28,7 @@ use crate::{
    snatch::SnatchGuard,
    track::PendingTransition,
};
use crate::{command::EncoderStateError, device::resource::CommandIndices};

use crate::id::{BlasId, TlasId};
@@ -64,7 +64,7 @@ impl Global {
        command_encoder_id: CommandEncoderId,
        blas_ids: &[BlasId],
        tlas_ids: &[TlasId],
    ) -> Result<(), BuildAccelerationStructureError> {
    ) -> Result<(), EncoderStateError> {
        profiling::scope!("CommandEncoder::mark_acceleration_structures_built");

        let hub = &self.hub;
@@ -73,34 +73,32 @@ impl Global {
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());

        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(
            |cmd_buf_data| -> Result<(), BuildAccelerationStructureError> {
                let device = &cmd_buf.device;
                device
                    .require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;

                let mut build_command = AsBuild::default();

                for blas in blas_ids {
                    let blas = hub.blas_s.get(*blas).get()?;
                    build_command.blas_s_built.push(blas);
                }

                for tlas in tlas_ids {
                    let tlas = hub.tlas_s.get(*tlas).get()?;
                    build_command.tlas_s_built.push(TlasBuild {
                        tlas,
                        dependencies: Vec::new(),
                    });
                }

                cmd_buf_data.as_actions.push(AsAction::Build(build_command));

                cmd_buf_data_guard.mark_successful();
                Ok(())
            },
        )
    }

    pub fn command_encoder_build_acceleration_structures<'a>(
@@ -108,7 +106,7 @@ impl Global {
        command_encoder_id: CommandEncoderId,
        blas_iter: impl Iterator<Item = BlasBuildEntry<'a>>,
        tlas_iter: impl Iterator<Item = TlasPackage<'a>>,
    ) -> Result<(), BuildAccelerationStructureError> {
    ) -> Result<(), EncoderStateError> {
        profiling::scope!("CommandEncoder::build_acceleration_structures");

        let hub = &self.hub;
@@ -117,10 +115,6 @@ impl Global {
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());

        let device = &cmd_buf.device;

        device.require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;

        let mut build_command = AsBuild::default();

        let trace_blas: Vec<TraceBlasBuildEntry> = blas_iter
@@ -171,14 +165,6 @@ impl Global {
            })
            .collect();

        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.data.lock().get_inner().commands {
            list.push(crate::device::trace::Command::BuildAccelerationStructures {
                blas: trace_blas.clone(),
                tlas: trace_tlas.clone(),
            });
        }

        let blas_iter = trace_blas.iter().map(|blas_entry| {
            let geometries = match &blas_entry.geometries {
                TraceBlasGeometries::TriangleGeometries(triangle_geometries) => {
@@ -217,298 +203,305 @@ impl Global {
            }
        });

        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(|cmd_buf_data| {
            #[cfg(feature = "trace")]
            if let Some(ref mut list) = cmd_buf_data.commands {
                list.push(crate::device::trace::Command::BuildAccelerationStructures {
                    blas: trace_blas.clone(),
                    tlas: trace_tlas.clone(),
                });
            }

            let device = &cmd_buf.device;
            device.require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;

            let mut buf_storage = Vec::new();
            iter_blas(
                blas_iter,
                cmd_buf_data,
                &mut build_command,
                &mut buf_storage,
                hub,
            )?;

            let snatch_guard = device.snatchable_lock.read();
            let mut input_barriers = Vec::<hal::BufferBarrier<dyn hal::DynBuffer>>::new();
            let mut scratch_buffer_blas_size = 0;
            let mut blas_storage = Vec::new();
            iter_buffers(
                &mut buf_storage,
                &snatch_guard,
                &mut input_barriers,
                cmd_buf_data,
                &mut scratch_buffer_blas_size,
                &mut blas_storage,
                hub,
                device.alignments.ray_tracing_scratch_buffer_alignment,
            )?;
            let mut tlas_lock_store = Vec::<(Option<TlasPackage>, Arc<Tlas>)>::new();

            for package in tlas_iter {
                let tlas = hub.tlas_s.get(package.tlas_id).get()?;

                cmd_buf_data.trackers.tlas_s.insert_single(tlas.clone());

                tlas_lock_store.push((Some(package), tlas))
            }

            let mut scratch_buffer_tlas_size = 0;
            let mut tlas_storage = Vec::<TlasStore>::new();
            let mut instance_buffer_staging_source = Vec::<u8>::new();

            for (package, tlas) in &mut tlas_lock_store {
                let package = package.take().unwrap();

                let scratch_buffer_offset = scratch_buffer_tlas_size;
                scratch_buffer_tlas_size += align_to(
                    tlas.size_info.build_scratch_size as u32,
                    device.alignments.ray_tracing_scratch_buffer_alignment,
                ) as u64;

                let first_byte_index = instance_buffer_staging_source.len();

                let mut dependencies = Vec::new();

                let mut instance_count = 0;
                for instance in package.instances.flatten() {
                    if instance.custom_data >= (1u32 << 24u32) {
                        return Err(BuildAccelerationStructureError::TlasInvalidCustomIndex(
                            tlas.error_ident(),
                        ));
                    }
                    let blas = hub.blas_s.get(instance.blas_id).get()?;

                    cmd_buf_data.trackers.blas_s.insert_single(blas.clone());

                    instance_buffer_staging_source.extend(device.raw().tlas_instance_to_bytes(
                        hal::TlasInstance {
                            transform: *instance.transform,
                            custom_data: instance.custom_data,
                            mask: instance.mask,
                            blas_address: blas.handle,
                        },
                    ));

                    if tlas
                        .flags
                        .contains(wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN)
                        && !blas.flags.contains(
                            wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN,
                        )
                    {
                        return Err(
                            BuildAccelerationStructureError::TlasDependentMissingVertexReturn(
                                tlas.error_ident(),
                                blas.error_ident(),
                            ),
                        );
                    }

                    instance_count += 1;

                    dependencies.push(blas.clone());
                }

                build_command.tlas_s_built.push(TlasBuild {
                    tlas: tlas.clone(),
                    dependencies,
                });

                if instance_count > tlas.max_instance_count {
                    return Err(BuildAccelerationStructureError::TlasInstanceCountExceeded(
                        tlas.error_ident(),
                        instance_count,
                        tlas.max_instance_count,
                    ));
                }

                tlas_storage.push(TlasStore {
                    internal: UnsafeTlasStore {
                        tlas: tlas.clone(),
                        entries: hal::AccelerationStructureEntries::Instances(
                            hal::AccelerationStructureInstances {
                                buffer: Some(tlas.instance_buffer.as_ref()),
                                offset: 0,
                                count: instance_count,
                            },
                        ),
                        scratch_buffer_offset,
                    },
                    range: first_byte_index..instance_buffer_staging_source.len(),
                });
            }

            let scratch_size =
                match wgt::BufferSize::new(max(scratch_buffer_blas_size, scratch_buffer_tlas_size)) {
            let Some(scratch_size) =
                wgt::BufferSize::new(max(scratch_buffer_blas_size, scratch_buffer_tlas_size))
            else {
                // if the size is zero there is nothing to build
                None => {
                    cmd_buf_data_guard.mark_successful();
                    return Ok(());
                }
                Some(size) => size,
                return Ok(());
            };

            let scratch_buffer = ScratchBuffer::new(device, scratch_size)?;

            let scratch_buffer_barrier = hal::BufferBarrier::<dyn hal::DynBuffer> {
                buffer: scratch_buffer.raw(),
                usage: hal::StateTransition {
                    from: BufferUses::ACCELERATION_STRUCTURE_SCRATCH,
                    to: BufferUses::ACCELERATION_STRUCTURE_SCRATCH,
                },
            };

            let mut tlas_descriptors = Vec::with_capacity(tlas_storage.len());

            for &TlasStore {
                internal:
                    UnsafeTlasStore {
                        ref tlas,
                        ref entries,
                        ref scratch_buffer_offset,
                    },
                ..
            } in &tlas_storage
            {
                if tlas.update_mode == wgt::AccelerationStructureUpdateMode::PreferUpdate {
                    log::info!("only rebuild implemented")
                }
                tlas_descriptors.push(hal::BuildAccelerationStructureDescriptor {
                    entries,
                    mode: hal::AccelerationStructureBuildMode::Build,
                    flags: tlas.flags,
                    source_acceleration_structure: None,
                    destination_acceleration_structure: tlas.try_raw(&snatch_guard)?,
                    scratch_buffer: scratch_buffer.raw(),
                    scratch_buffer_offset: *scratch_buffer_offset,
                })
            }

            let blas_present = !blas_storage.is_empty();
            let tlas_present = !tlas_storage.is_empty();

            let cmd_buf_raw = cmd_buf_data.encoder.open()?;

            let mut blas_s_compactable = Vec::new();
            let mut descriptors = Vec::new();

            for storage in &blas_storage {
                descriptors.push(map_blas(
                    storage,
                    scratch_buffer.raw(),
                    &snatch_guard,
                    &mut blas_s_compactable,
                )?);
            }

            build_blas(
                cmd_buf_raw,
                blas_present,
                tlas_present,
                input_barriers,
                &descriptors,
                scratch_buffer_barrier,
                blas_s_compactable,
            );

            if tlas_present {
                let staging_buffer = if !instance_buffer_staging_source.is_empty() {
                    let mut staging_buffer = StagingBuffer::new(
                        device,
                        wgt::BufferSize::new(instance_buffer_staging_source.len() as u64).unwrap(),
                    )?;
                    staging_buffer.write(&instance_buffer_staging_source);
                    let flushed = staging_buffer.flush();
                    Some(flushed)
                } else {
                    None
                };

                unsafe {
                    if let Some(ref staging_buffer) = staging_buffer {
                        cmd_buf_raw.transition_buffers(&[
                            hal::BufferBarrier::<dyn hal::DynBuffer> {
                                buffer: staging_buffer.raw(),
                                usage: hal::StateTransition {
                                    from: BufferUses::MAP_WRITE,
                                    to: BufferUses::COPY_SRC,
                                },
                            },
                        ]);
                    }
                }

                let mut instance_buffer_barriers = Vec::new();
                for &TlasStore {
                    internal: UnsafeTlasStore { ref tlas, .. },
                    ref range,
                } in &tlas_storage
                {
                    let size = match wgt::BufferSize::new((range.end - range.start) as u64) {
                        None => continue,
                        Some(size) => size,
                    };
                    instance_buffer_barriers.push(hal::BufferBarrier::<dyn hal::DynBuffer> {
                        buffer: tlas.instance_buffer.as_ref(),
                        usage: hal::StateTransition {
                            from: BufferUses::TOP_LEVEL_ACCELERATION_STRUCTURE_INPUT,
                            to: BufferUses::COPY_DST,
                            from: BufferUses::COPY_DST,
                            to: BufferUses::TOP_LEVEL_ACCELERATION_STRUCTURE_INPUT,
                        },
                    });
                    unsafe {
                        cmd_buf_raw.transition_buffers(&[
                            hal::BufferBarrier::<dyn hal::DynBuffer> {
                                buffer: tlas.instance_buffer.as_ref(),
                                usage: hal::StateTransition {
                                    from: BufferUses::TOP_LEVEL_ACCELERATION_STRUCTURE_INPUT,
                                    to: BufferUses::COPY_DST,
                                },
                            },
                        ]);
                        let temp = hal::BufferCopy {
                            src_offset: range.start as u64,
                            dst_offset: 0,
                            size,
                        };
                        cmd_buf_raw.copy_buffer_to_buffer(
                            // the range whose size we just checked end is at (at that point in time) instance_buffer_staging_source.len()
                            // and since instance_buffer_staging_source doesn't shrink we can unwrap this without a panic
                            staging_buffer.as_ref().unwrap().raw(),
                            tlas.instance_buffer.as_ref(),
                            &[temp],
                        );
                    }
                }

                unsafe {
                    cmd_buf_raw.transition_buffers(&instance_buffer_barriers);

                    cmd_buf_raw.build_acceleration_structures(&tlas_descriptors);

                    cmd_buf_raw.place_acceleration_structure_barrier(
                        hal::AccelerationStructureBarrier {
                            usage: hal::StateTransition {
                                from: hal::AccelerationStructureUses::BUILD_OUTPUT,
                                to: hal::AccelerationStructureUses::SHADER_INPUT,
                            },
                        },
                    );
                }

                if let Some(staging_buffer) = staging_buffer {
                    cmd_buf_data
                        .temp_resources
                        .push(TempResource::StagingBuffer(staging_buffer));
                }
            }

            cmd_buf_data
                .temp_resources
                .push(TempResource::ScratchBuffer(scratch_buffer));

            cmd_buf_data.as_actions.push(AsAction::Build(build_command));

            cmd_buf_data_guard.mark_successful();
            Ok(())
        })
    }
}
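How the scratch offsets above accumulate, as a self-contained sketch; the alignment value 256 is assumed, and `wgt::math::align_to` is re-implemented locally for the example:

```rust
fn main() {
    // Each acceleration structure gets a scratch region rounded up to the
    // device's ray_tracing_scratch_buffer_alignment (assume 256 here).
    fn align_to(v: u32, alignment: u32) -> u32 {
        v.div_ceil(alignment) * alignment
    }
    let mut scratch_buffer_tlas_size = 0u64;
    for build_scratch_size in [1000u32, 300, 4096] {
        let scratch_buffer_offset = scratch_buffer_tlas_size;
        scratch_buffer_tlas_size += align_to(build_scratch_size, 256) as u64;
        println!("build at scratch offset {scratch_buffer_offset}");
    }
    assert_eq!(scratch_buffer_tlas_size, 1024 + 512 + 4096);
}
```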
@@ -1,7 +1,7 @@
use thiserror::Error;

use crate::{
    command::{CommandBuffer, EncoderStateError},
    command::{CommandBuffer, CommandEncoderError, EncoderStateError},
    device::DeviceError,
    global::Global,
    id::{BufferId, CommandEncoderId, TextureId},
@@ -15,7 +15,7 @@ impl Global {
        command_encoder_id: CommandEncoderId,
        buffer_transitions: impl Iterator<Item = wgt::BufferTransition<BufferId>>,
        texture_transitions: impl Iterator<Item = wgt::TextureTransition<TextureId>>,
    ) -> Result<(), TransitionResourcesError> {
    ) -> Result<(), EncoderStateError> {
        profiling::scope!("CommandEncoder::transition_resources");

        let hub = &self.hub;
@@ -25,54 +25,51 @@ impl Global {
            .command_buffers
            .get(command_encoder_id.into_command_buffer_id());
        let mut cmd_buf_data = cmd_buf.data.lock();
        let mut cmd_buf_data_guard = cmd_buf_data.record()?;
        let cmd_buf_data = &mut *cmd_buf_data_guard;
        cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), CommandEncoderError> {
            // Get and lock device
            let device = &cmd_buf.device;
            device.check_is_valid()?;
            let snatch_guard = &device.snatchable_lock.read();

            let mut usage_scope = device.new_usage_scope();
            let indices = &device.tracker_indices;
            usage_scope.buffers.set_size(indices.buffers.size());
            usage_scope.textures.set_size(indices.textures.size());

            // Process buffer transitions
            for buffer_transition in buffer_transitions {
                let buffer = hub.buffers.get(buffer_transition.buffer).get()?;
                buffer.same_device_as(cmd_buf.as_ref())?;

                usage_scope
                    .buffers
                    .merge_single(&buffer, buffer_transition.state)?;
            }

            // Process texture transitions
            for texture_transition in texture_transitions {
                let texture = hub.textures.get(texture_transition.texture).get()?;
                texture.same_device_as(cmd_buf.as_ref())?;

                unsafe {
                    usage_scope.textures.merge_single(
                        &texture,
                        texture_transition.selector,
                        texture_transition.state,
                    )
                }?;
            }

            // Record any needed barriers based on tracker data
            let cmd_buf_raw = cmd_buf_data.encoder.open()?;
            CommandBuffer::insert_barriers_from_scope(
                cmd_buf_raw,
                &mut cmd_buf_data.trackers,
                &usage_scope,
                snatch_guard,
            );
            cmd_buf_data_guard.mark_successful();

            Ok(())
        })
    }
}
@@ -1383,20 +1383,15 @@ impl Global {

        let cmd_buf = hub.command_buffers.get(id.into_command_buffer_id());
        let mut cmd_buf_data = cmd_buf.data.lock();
        let cmd_buf_data_guard = cmd_buf_data.record();

        if let Ok(mut cmd_buf_data_guard) = cmd_buf_data_guard {
            let cmd_buf_raw = cmd_buf_data_guard
                .encoder
                .open()
                .ok()
                .and_then(|encoder| encoder.as_any_mut().downcast_mut());
            let ret = hal_command_encoder_callback(cmd_buf_raw);
            cmd_buf_data_guard.mark_successful();
            ret
        } else {
            hal_command_encoder_callback(None)
        }
        cmd_buf_data.record_as_hal_mut(|opt_cmd_buf| -> R {
            hal_command_encoder_callback(opt_cmd_buf.and_then(|cmd_buf| {
                cmd_buf
                    .encoder
                    .open()
                    .ok()
                    .and_then(|encoder| encoder.as_any_mut().downcast_mut())
            }))
        })
    }

    /// # Safety