Deferred error reporting for other command encoder operations

* clear commands
* query set functions
* command_encoder_as_hal_mut
* ray_tracing
Andy Leiserson 2025-06-06 12:31:14 -07:00 committed by Jim Blandy
parent e702d1c116
commit 3a5d0f2747
10 changed files with 706 additions and 708 deletions

View File

@ -27,6 +27,13 @@ pub use run::{execute_test, TestingContext};
pub use wgpu_macros::gpu_test;
/// Run some code in an error scope and assert that validation fails.
///
/// Note that errors related to GPU commands (i.e., errors raised by methods on
/// GPUCommandEncoder, GPURenderPassEncoder, GPUComputePassEncoder, or
/// GPURenderBundleEncoder) are usually not raised immediately. They are raised
/// only when `finish()` is called on the command encoder. Tests of such error
/// cases should call `fail` with a closure that calls `finish()`, not with a
/// closure that encodes the actual command.
pub fn fail<T>(
device: &wgpu::Device,
callback: impl FnOnce() -> T,
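For illustration, a minimal sketch of the pattern this comment describes, using the `fail` helper and modeled on the clear-buffer test updated below; the 16-byte `buffer`, the `ctx` testing context, and the exact message substring are assumptions:

    // A hypothetical 16-byte buffer; clearing at offset 20 is out of bounds.
    let mut encoder = ctx.device.create_command_encoder(&Default::default());
    // Recording the invalid command does not raise a validation error yet.
    encoder.clear_buffer(&buffer, 20, None);
    // The error is deferred; it surfaces only when the encoder is finished.
    wgpu_test::fail(
        &ctx.device,
        || encoder.finish(),
        Some("would end up overrunning the bounds of the buffer"),
    );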

View File

@ -344,13 +344,12 @@ static CLEAR_OFFSET_OUTSIDE_RESOURCE_BOUNDS: GpuTestConfiguration = GpuTestConfi
let out_of_bounds = size.checked_add(wgpu::COPY_BUFFER_ALIGNMENT).unwrap();
let mut encoder = ctx.device.create_command_encoder(&Default::default());
encoder.clear_buffer(&buffer, out_of_bounds, None);
wgpu_test::fail(
&ctx.device,
|| {
ctx.device
.create_command_encoder(&Default::default())
.clear_buffer(&buffer, out_of_bounds, None)
},
|| encoder.finish(),
Some("Clear of 20..20 would end up overrunning the bounds of the buffer of size 16"),
);
});
@ -370,17 +369,16 @@ static CLEAR_OFFSET_PLUS_SIZE_OUTSIDE_U64_BOUNDS: GpuTestConfiguration =
let max_valid_offset = u64::MAX - (u64::MAX % wgpu::COPY_BUFFER_ALIGNMENT);
let smallest_aligned_invalid_size = wgpu::COPY_BUFFER_ALIGNMENT;
wgpu_test::fail(
&ctx.device,
|| {
ctx.device
.create_command_encoder(&Default::default())
.clear_buffer(
let mut encoder = ctx.device.create_command_encoder(&Default::default());
encoder.clear_buffer(
&buffer,
max_valid_offset,
Some(smallest_aligned_invalid_size),
)
},
);
wgpu_test::fail(
&ctx.device,
|| encoder.finish(),
Some(concat!(
"Clear starts at offset 18446744073709551612 with size of 4, ",
"but these added together exceed `u64::MAX`"

View File

@ -330,9 +330,6 @@ static DEVICE_DESTROY_THEN_MORE: GpuTestConfiguration = GpuTestConfiguration::ne
);
// Texture clear should fail.
fail(
&ctx.device,
|| {
encoder_for_clear.clear_texture(
&texture_for_write,
&wgpu::ImageSubresourceRange {
@ -343,7 +340,9 @@ static DEVICE_DESTROY_THEN_MORE: GpuTestConfiguration = GpuTestConfiguration::ne
array_layer_count: None,
},
);
},
fail(
&ctx.device,
|| encoder_for_clear.finish(),
Some("device with '' label is invalid"),
);

View File

@ -188,11 +188,8 @@ fn blas_compaction(ctx: TestingContext) {
let mut build_entry = as_ctx.blas_build_entry();
build_entry.blas = &compacted;
fail(
&ctx.device,
|| fail_encoder.build_acceleration_structures([&build_entry], []),
None,
);
fail_encoder.build_acceleration_structures([&build_entry], []);
fail(&ctx.device, || fail_encoder.finish(), None);
}
#[gpu_test]
@ -733,13 +730,8 @@ fn only_tlas_vertex_return(ctx: TestingContext) {
label: Some("TLAS 1"),
});
fail(
&ctx.device,
|| {
encoder_tlas.build_acceleration_structures([], [&as_ctx.tlas]);
},
None,
);
fail(&ctx.device, || encoder_tlas.finish(), None);
}
#[gpu_test]
@ -817,10 +809,6 @@ fn test_as_build_format_stride(
.create_command_encoder(&CommandEncoderDescriptor {
label: Some("BLAS_1"),
});
fail_if(
&ctx.device,
invalid_combination,
|| {
command_encoder.build_acceleration_structures(
&[BlasBuildEntry {
blas: &blas,
@ -836,11 +824,14 @@ fn test_as_build_format_stride(
}]),
}],
&[],
)
},
);
let command_buffer = fail_if(
&ctx.device,
invalid_combination,
|| command_encoder.finish(),
None,
);
if !invalid_combination {
ctx.queue.submit([command_encoder.finish()]);
ctx.queue.submit([command_buffer]);
}
}

View File

@ -86,7 +86,7 @@ impl Global {
dst: BufferId,
offset: BufferAddress,
size: Option<BufferAddress>,
) -> Result<(), ClearError> {
) -> Result<(), EncoderStateError> {
profiling::scope!("CommandEncoder::clear_buffer");
api_log!("CommandEncoder::clear_buffer {dst:?}");
@ -96,9 +96,7 @@ impl Global {
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), ClearError> {
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::ClearBuffer { dst, offset, size });
@ -143,8 +141,6 @@ impl Global {
if offset == end_offset {
log::trace!("Ignoring fill_buffer of size 0");
cmd_buf_data_guard.mark_successful();
return Ok(());
}
@ -158,15 +154,16 @@ impl Global {
);
// actual hal barrier & operation
let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard));
let dst_barrier =
dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard));
let cmd_buf_raw = cmd_buf_data.encoder.open()?;
unsafe {
cmd_buf_raw.transition_buffers(dst_barrier.as_slice());
cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset);
}
cmd_buf_data_guard.mark_successful();
Ok(())
})
}
pub fn command_encoder_clear_texture(
@ -174,7 +171,7 @@ impl Global {
command_encoder_id: CommandEncoderId,
dst: TextureId,
subresource_range: &ImageSubresourceRange,
) -> Result<(), ClearError> {
) -> Result<(), EncoderStateError> {
profiling::scope!("CommandEncoder::clear_texture");
api_log!("CommandEncoder::clear_texture {dst:?}");
@ -184,9 +181,7 @@ impl Global {
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), ClearError> {
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::ClearTexture {
@ -214,7 +209,8 @@ impl Global {
};
// Check if subresource level range is valid
let subresource_mip_range = subresource_range.mip_range(dst_texture.full_range.mips.end);
let subresource_mip_range =
subresource_range.mip_range(dst_texture.full_range.mips.end);
if dst_texture.full_range.mips.start > subresource_mip_range.start
|| dst_texture.full_range.mips.end < subresource_mip_range.end
{
@ -255,8 +251,8 @@ impl Global {
&snatch_guard,
)?;
cmd_buf_data_guard.mark_successful();
Ok(())
})
}
}

View File

@ -37,7 +37,7 @@ use crate::lock::{rank, Mutex};
use crate::snatch::SnatchGuard;
use crate::init_tracker::BufferInitTrackerAction;
use crate::ray_tracing::AsAction;
use crate::ray_tracing::{AsAction, BuildAccelerationStructureError};
use crate::resource::{
DestroyedResourceError, Fallible, InvalidResourceError, Labeled, ParentDevice as _, QuerySet,
};
@ -106,17 +106,6 @@ pub(crate) enum CommandEncoderStatus {
}
impl CommandEncoderStatus {
/// Checks that the encoder is in the [`Self::Recording`] state.
pub(crate) fn record(&mut self) -> Result<RecordingGuard<'_>, EncoderStateError> {
match self {
Self::Recording(_) => Ok(RecordingGuard { inner: self }),
Self::Locked(_) => Err(self.invalidate(EncoderStateError::Locked)),
Self::Finished(_) => Err(EncoderStateError::Ended),
Self::Error(_) => Err(EncoderStateError::Invalid),
Self::Transitioning => unreachable!(),
}
}
/// Record commands using the supplied closure.
///
/// If the encoder is in the [`Self::Recording`] state, calls the closure to
@ -138,29 +127,50 @@ impl CommandEncoderStatus {
&mut self,
f: F,
) -> Result<(), EncoderStateError> {
let err = match self.record() {
Ok(guard) => {
guard.record(f);
return Ok(());
}
Err(err) => err,
};
match err {
err @ EncoderStateError::Locked => {
// Invalidate the encoder and do not record anything, but do not
// return an immediate validation error.
self.invalidate(err);
match self {
Self::Recording(_) => {
RecordingGuard { inner: self }.record(f);
Ok(())
}
err @ EncoderStateError::Ended => {
// Invalidate the encoder, do not record anything, and return an
// immediate validation error.
Err(self.invalidate(err))
Self::Locked(_) => {
// Invalidate the encoder and do not record anything, but do not
// return an immediate validation error.
self.invalidate(EncoderStateError::Locked);
Ok(())
}
// Encoder is ended. Invalidate the encoder, do not record anything,
// and return an immediate validation error.
Self::Finished(_) => Err(self.invalidate(EncoderStateError::Ended)),
// Encoder is already invalid. Do not record anything, but do not
// return an immediate validation error.
EncoderStateError::Invalid => Ok(()),
EncoderStateError::Unlocked | EncoderStateError::Submitted => unreachable!(),
Self::Error(_) => Ok(()),
Self::Transitioning => unreachable!(),
}
}
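To make the dispatch above concrete, here is a minimal, self-contained sketch of the same idea; `Status`, `StateError`, and the `String` command error are simplified stand-ins, not the wgpu-core definitions:

    enum Status {
        Recording,
        Locked,
        Finished,
        Error,
    }

    #[derive(Debug)]
    enum StateError {
        Locked,
        Ended,
    }

    impl Status {
        fn invalidate(&mut self, _cause: StateError) {
            *self = Status::Error;
        }

        /// Simplified analogue of `record_with`: run `f` only while recording;
        /// otherwise defer the error (or, for a finished encoder, report it now).
        fn record_with(
            &mut self,
            f: impl FnOnce() -> Result<(), String>,
        ) -> Result<(), StateError> {
            match *self {
                Status::Recording => {
                    if f().is_err() {
                        // A command-level error invalidates the encoder; the
                        // real code stores it so it can be reported at finish().
                        *self = Status::Error;
                    }
                    Ok(())
                }
                // Invalidate but stay silent; the error surfaces at finish().
                Status::Locked => {
                    self.invalidate(StateError::Locked);
                    Ok(())
                }
                // Recording after finish() is an immediate validation error.
                Status::Finished => {
                    self.invalidate(StateError::Ended);
                    Err(StateError::Ended)
                }
                // Already invalid: drop the command without another error.
                Status::Error => Ok(()),
            }
        }
    }

Only the `Finished` arm reports an error immediately; every other failure is deferred, which is why the updated tests assert errors by calling `finish()` rather than by encoding the offending command.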
/// Special version of record used by `command_encoder_as_hal_mut`. This
/// differs from the regular version in two ways:
///
/// 1. The recording closure is infallible.
/// 2. The recording closure takes `Option<&mut CommandBufferMutable>`. If the
/// encoder is not in a valid state for recording, the closure is still
/// called, with `None` as its argument.
pub(crate) fn record_as_hal_mut<T, F: FnOnce(Option<&mut CommandBufferMutable>) -> T>(
&mut self,
f: F,
) -> T {
match self {
Self::Recording(_) => RecordingGuard { inner: self }.record_as_hal_mut(f),
Self::Locked(_) => {
self.invalidate(EncoderStateError::Locked);
f(None)
}
Self::Finished(_) => {
self.invalidate(EncoderStateError::Ended);
f(None)
}
Self::Error(_) => f(None),
Self::Transitioning => unreachable!(),
}
}
@ -295,6 +305,17 @@ impl<'a> RecordingGuard<'a> {
}
}
}
/// Special version of record used by `command_encoder_as_hal_mut`. This
/// version takes an infallible recording closure.
pub(crate) fn record_as_hal_mut<T, F: FnOnce(Option<&mut CommandBufferMutable>) -> T>(
mut self,
f: F,
) -> T {
let res = f(Some(&mut self));
self.mark_successful();
res
}
}
impl<'a> Drop for RecordingGuard<'a> {
@ -899,6 +920,10 @@ pub enum CommandEncoderError {
#[error(transparent)]
Clear(#[from] ClearError),
#[error(transparent)]
Query(#[from] QueryError),
#[error(transparent)]
BuildAccelerationStructure(#[from] BuildAccelerationStructureError),
#[error(transparent)]
TransitionResources(#[from] TransitionResourcesError),
#[error(
"begin and end indices of pass timestamp writes are both set to {idx}, which is not allowed"

View File

@ -317,16 +317,14 @@ impl Global {
command_encoder_id: id::CommandEncoderId,
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), QueryError> {
) -> Result<(), EncoderStateError> {
let hub = &self.hub;
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), QueryError> {
cmd_buf
.device
.require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?;
@ -347,8 +345,8 @@ impl Global {
cmd_buf_data.trackers.query_sets.insert_single(query_set);
cmd_buf_data_guard.mark_successful();
Ok(())
})
}
pub fn command_encoder_resolve_query_set(
@ -359,16 +357,14 @@ impl Global {
query_count: u32,
destination: id::BufferId,
destination_offset: BufferAddress,
) -> Result<(), QueryError> {
) -> Result<(), EncoderStateError> {
let hub = &self.hub;
let cmd_buf = hub
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), QueryError> {
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(TraceCommand::ResolveQuerySet {
@ -400,7 +396,8 @@ impl Global {
.buffers
.set_single(&dst_buffer, wgt::BufferUses::COPY_DST);
let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard));
let dst_barrier =
dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard));
dst_buffer
.check_usage(wgt::BufferUsages::QUERY_RESOLVE)
@ -488,7 +485,7 @@ impl Global {
cmd_buf_data.trackers.query_sets.insert_single(query_set);
cmd_buf_data_guard.mark_successful();
Ok(())
})
}
}

View File

@ -7,7 +7,6 @@ use core::{
use wgt::{math::align_to, BufferUsages, BufferUses, Features};
use crate::device::resource::CommandIndices;
use crate::lock::RwLockWriteGuard;
use crate::ray_tracing::{AsAction, AsBuild, TlasBuild, ValidateAsActionsError};
use crate::{
@ -29,6 +28,7 @@ use crate::{
snatch::SnatchGuard,
track::PendingTransition,
};
use crate::{command::EncoderStateError, device::resource::CommandIndices};
use crate::id::{BlasId, TlasId};
@ -64,7 +64,7 @@ impl Global {
command_encoder_id: CommandEncoderId,
blas_ids: &[BlasId],
tlas_ids: &[TlasId],
) -> Result<(), BuildAccelerationStructureError> {
) -> Result<(), EncoderStateError> {
profiling::scope!("CommandEncoder::mark_acceleration_structures_built");
let hub = &self.hub;
@ -73,9 +73,12 @@ impl Global {
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
cmd_buf_data.record_with(
|cmd_buf_data| -> Result<(), BuildAccelerationStructureError> {
let device = &cmd_buf.device;
device.require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;
device
.require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;
let mut build_command = AsBuild::default();
@ -92,15 +95,10 @@ impl Global {
});
}
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.as_actions.push(AsAction::Build(build_command));
cmd_buf_data_guard.mark_successful();
Ok(())
},
)
}
pub fn command_encoder_build_acceleration_structures<'a>(
@ -108,7 +106,7 @@ impl Global {
command_encoder_id: CommandEncoderId,
blas_iter: impl Iterator<Item = BlasBuildEntry<'a>>,
tlas_iter: impl Iterator<Item = TlasPackage<'a>>,
) -> Result<(), BuildAccelerationStructureError> {
) -> Result<(), EncoderStateError> {
profiling::scope!("CommandEncoder::build_acceleration_structures");
let hub = &self.hub;
@ -117,10 +115,6 @@ impl Global {
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let device = &cmd_buf.device;
device.require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;
let mut build_command = AsBuild::default();
let trace_blas: Vec<TraceBlasBuildEntry> = blas_iter
@ -171,14 +165,6 @@ impl Global {
})
.collect();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf.data.lock().get_inner().commands {
list.push(crate::device::trace::Command::BuildAccelerationStructures {
blas: trace_blas.clone(),
tlas: trace_tlas.clone(),
});
}
let blas_iter = trace_blas.iter().map(|blas_entry| {
let geometries = match &blas_entry.geometries {
TraceBlasGeometries::TriangleGeometries(triangle_geometries) => {
@ -217,15 +203,20 @@ impl Global {
}
});
let mut input_barriers = Vec::<hal::BufferBarrier<dyn hal::DynBuffer>>::new();
let mut buf_storage = Vec::new();
let mut scratch_buffer_blas_size = 0;
let mut blas_storage = Vec::new();
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.record_with(|cmd_buf_data| {
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(crate::device::trace::Command::BuildAccelerationStructures {
blas: trace_blas.clone(),
tlas: trace_tlas.clone(),
});
}
let device = &cmd_buf.device;
device.require_features(Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE)?;
let mut buf_storage = Vec::new();
iter_blas(
blas_iter,
cmd_buf_data,
@ -235,6 +226,9 @@ impl Global {
)?;
let snatch_guard = device.snatchable_lock.read();
let mut input_barriers = Vec::<hal::BufferBarrier<dyn hal::DynBuffer>>::new();
let mut scratch_buffer_blas_size = 0;
let mut blas_storage = Vec::new();
iter_buffers(
&mut buf_storage,
&snatch_guard,
@ -292,13 +286,11 @@ impl Global {
},
));
if tlas
.flags
.contains(wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN)
&& !blas.flags.contains(
if tlas.flags.contains(
wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN,
)
{
) && !blas.flags.contains(
wgpu_types::AccelerationStructureFlags::ALLOW_RAY_HIT_VERTEX_RETURN,
) {
return Err(
BuildAccelerationStructureError::TlasDependentMissingVertexReturn(
tlas.error_ident(),
@ -341,14 +333,11 @@ impl Global {
});
}
let scratch_size =
match wgt::BufferSize::new(max(scratch_buffer_blas_size, scratch_buffer_tlas_size)) {
let Some(scratch_size) =
wgt::BufferSize::new(max(scratch_buffer_blas_size, scratch_buffer_tlas_size))
else {
// if the size is zero there is nothing to build
None => {
cmd_buf_data_guard.mark_successful();
return Ok(());
}
Some(size) => size,
};
let scratch_buffer = ScratchBuffer::new(device, scratch_size)?;
@ -429,13 +418,15 @@ impl Global {
unsafe {
if let Some(ref staging_buffer) = staging_buffer {
cmd_buf_raw.transition_buffers(&[hal::BufferBarrier::<dyn hal::DynBuffer> {
cmd_buf_raw.transition_buffers(&[
hal::BufferBarrier::<dyn hal::DynBuffer> {
buffer: staging_buffer.raw(),
usage: hal::StateTransition {
from: BufferUses::MAP_WRITE,
to: BufferUses::COPY_SRC,
},
}]);
},
]);
}
}
@ -457,13 +448,15 @@ impl Global {
},
});
unsafe {
cmd_buf_raw.transition_buffers(&[hal::BufferBarrier::<dyn hal::DynBuffer> {
cmd_buf_raw.transition_buffers(&[
hal::BufferBarrier::<dyn hal::DynBuffer> {
buffer: tlas.instance_buffer.as_ref(),
usage: hal::StateTransition {
from: BufferUses::TOP_LEVEL_ACCELERATION_STRUCTURE_INPUT,
to: BufferUses::COPY_DST,
},
}]);
},
]);
let temp = hal::BufferCopy {
src_offset: range.start as u64,
dst_offset: 0,
@ -507,8 +500,8 @@ impl Global {
cmd_buf_data.as_actions.push(AsAction::Build(build_command));
cmd_buf_data_guard.mark_successful();
Ok(())
})
}
}

View File

@ -1,7 +1,7 @@
use thiserror::Error;
use crate::{
command::{CommandBuffer, EncoderStateError},
command::{CommandBuffer, CommandEncoderError, EncoderStateError},
device::DeviceError,
global::Global,
id::{BufferId, CommandEncoderId, TextureId},
@ -15,7 +15,7 @@ impl Global {
command_encoder_id: CommandEncoderId,
buffer_transitions: impl Iterator<Item = wgt::BufferTransition<BufferId>>,
texture_transitions: impl Iterator<Item = wgt::TextureTransition<TextureId>>,
) -> Result<(), TransitionResourcesError> {
) -> Result<(), EncoderStateError> {
profiling::scope!("CommandEncoder::transition_resources");
let hub = &self.hub;
@ -25,9 +25,7 @@ impl Global {
.command_buffers
.get(command_encoder_id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
let mut cmd_buf_data_guard = cmd_buf_data.record()?;
let cmd_buf_data = &mut *cmd_buf_data_guard;
cmd_buf_data.record_with(|cmd_buf_data| -> Result<(), CommandEncoderError> {
// Get and lock device
let device = &cmd_buf.device;
device.check_is_valid()?;
@ -70,9 +68,8 @@ impl Global {
&usage_scope,
snatch_guard,
);
cmd_buf_data_guard.mark_successful();
Ok(())
})
}
}

View File

@ -1383,20 +1383,15 @@ impl Global {
let cmd_buf = hub.command_buffers.get(id.into_command_buffer_id());
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data_guard = cmd_buf_data.record();
if let Ok(mut cmd_buf_data_guard) = cmd_buf_data_guard {
let cmd_buf_raw = cmd_buf_data_guard
cmd_buf_data.record_as_hal_mut(|opt_cmd_buf| -> R {
hal_command_encoder_callback(opt_cmd_buf.and_then(|cmd_buf| {
cmd_buf
.encoder
.open()
.ok()
.and_then(|encoder| encoder.as_any_mut().downcast_mut());
let ret = hal_command_encoder_callback(cmd_buf_raw);
cmd_buf_data_guard.mark_successful();
ret
} else {
hal_command_encoder_callback(None)
}
.and_then(|encoder| encoder.as_any_mut().downcast_mut())
}))
})
}
/// # Safety