Lint all the things

commit 231bba03a1
parent 763c14a6d9
.github/workflows/ci.yml (vendored) | 16 ++++++++++++++++
@@ -113,3 +113,19 @@ jobs:
         run: cargo clippy
       - if: matrix.channel == 'nightly'
         run: cargo test -- --nocapture
+
+  lint:
+    name: Clippy
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+      - run: rustup component add clippy
+      - uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+          args: -- -D warnings
@@ -1158,8 +1158,6 @@ pub mod bundle_ffi {
     ///
     /// This function is unsafe as there is no guarantee that the given pointer is
     /// valid for `offset_length` elements.
-    // TODO: There might be other safety issues, such as using the unsafe
-    // `RawPass::encode` and `RawPass::encode_slice`.
    #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group(
         bundle: &mut RenderBundleEncoder,
@@ -1211,6 +1209,10 @@ pub mod bundle_ffi {
         });
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given pointer is
+    /// valid for `data` elements.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_bundle_set_push_constants(
         pass: &mut RenderBundleEncoder,

@@ -1315,6 +1317,10 @@ pub mod bundle_ffi {
         });
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// is a valid null-terminated string.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_bundle_push_debug_group(
         _bundle: &mut RenderBundleEncoder,
@@ -1325,11 +1331,15 @@ pub mod bundle_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) {
+    pub extern "C" fn wgpu_render_bundle_pop_debug_group(_bundle: &mut RenderBundleEncoder) {
         span!(_guard, DEBUG, "RenderBundle::pop_debug_group");
         //TODO
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// is a valid null-terminated string.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_bundle_insert_debug_marker(
         _bundle: &mut RenderBundleEncoder,
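The bundle_ffi hunks above set the pattern for the rest of the commit: `unsafe` is dropped from `extern "C"` functions whose bodies never touch raw input, and every function that stays `unsafe` gains a `/// # Safety` section, which is exactly what clippy's `missing_safety_doc` lint asks for. A minimal standalone sketch of the rule, with illustrative names that are not from wgpu:

```rust
/// Safe: takes no raw pointers, so callers have no contract to uphold.
#[no_mangle]
pub extern "C" fn ffi_noop(_handle: &mut u32) {}

/// # Safety
///
/// `ptr` must be valid for reads of `len` elements.
#[no_mangle]
pub unsafe extern "C" fn ffi_sum(ptr: *const u32, len: usize) -> u32 {
    // The caller's contract from the doc comment is what makes this sound.
    std::slice::from_raw_parts(ptr, len).iter().sum()
}
```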
@@ -657,8 +657,6 @@ pub mod compute_ffi {
     ///
     /// This function is unsafe as there is no guarantee that the given pointer is
     /// valid for `offset_length` elements.
-    // TODO: There might be other safety issues, such as using the unsafe
-    // `RawPass::encode` and `RawPass::encode_slice`.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_compute_pass_set_bind_group(
         pass: &mut ComputePass,
@@ -691,6 +689,10 @@ pub mod compute_ffi {
             .push(ComputeCommand::SetPipeline(pipeline_id));
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given pointer is
+    /// valid for `size_bytes` bytes.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_compute_pass_set_push_constant(
         pass: &mut ComputePass,

@@ -752,6 +754,10 @@ pub mod compute_ffi {
             .push(ComputeCommand::DispatchIndirect { buffer_id, offset });
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// is a valid null-terminated string.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_compute_pass_push_debug_group(
         pass: &mut ComputePass,

@@ -774,6 +780,10 @@ pub mod compute_ffi {
         pass.base.commands.push(ComputeCommand::PopDebugGroup);
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// is a valid null-terminated string.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_compute_pass_insert_debug_marker(
         pass: &mut ComputePass,
@@ -791,7 +801,7 @@ pub mod compute_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_compute_pass_write_timestamp(
+    pub extern "C" fn wgpu_compute_pass_write_timestamp(
         pass: &mut ComputePass,
         query_set_id: id::QuerySetId,
         query_index: u32,

@@ -805,7 +815,7 @@ pub mod compute_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_compute_pass_begin_pipeline_statistics_query(
+    pub extern "C" fn wgpu_compute_pass_begin_pipeline_statistics_query(
         pass: &mut ComputePass,
         query_set_id: id::QuerySetId,
         query_index: u32,

@@ -825,9 +835,7 @@ pub mod compute_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_compute_pass_end_pipeline_statistics_query(
-        pass: &mut ComputePass,
-    ) {
+    pub extern "C" fn wgpu_compute_pass_end_pipeline_statistics_query(pass: &mut ComputePass) {
         span!(_guard, DEBUG, "ComputePass::end_pipeline_statistics_query");
 
         pass.base
@@ -175,7 +175,7 @@ impl<C: Clone> BasePass<C> {
 
     pub fn as_ref(&self) -> BasePassRef<C> {
         BasePassRef {
-            label: self.label.as_ref().map(String::as_str),
+            label: self.label.as_deref(),
             commands: &self.commands,
             dynamic_offsets: &self.dynamic_offsets,
             string_data: &self.string_data,
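`Option::as_deref` is the idiomatic spelling of `as_ref().map(String::as_str)` and is what clippy's `option_as_ref_deref` lint suggests; it works for any `Option<T>` where `T: Deref`. A small sketch:

```rust
fn main() {
    let label: Option<String> = Some("pass".to_string());

    // Before: borrow the Option, then map &String to &str.
    let verbose: Option<&str> = label.as_ref().map(String::as_str);

    // After: as_deref goes Option<String> -> Option<&str> directly.
    let concise: Option<&str> = label.as_deref();

    assert_eq!(verbose, concise);
}
```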
@@ -39,12 +39,12 @@ impl<B: hal::Backend> QueryResetMap<B> {
         query: u32,
     ) -> bool {
         let (index, epoch, _) = id.unzip();
-        let (vec, _) = self
+        let vec_pair = self
             .map
             .entry(index)
             .or_insert_with(|| (vec![false; query_set.desc.count as usize], epoch));
 
-        std::mem::replace(&mut vec[query as usize], true)
+        std::mem::replace(&mut vec_pair.0[query as usize], true)
     }
 
     pub fn reset_queries(
@@ -62,9 +62,8 @@ impl<B: hal::Backend> QueryResetMap<B> {
         // Need to find all "runs" of values which need resets. If the state vector is:
         // [false, true, true, false, true], we want to reset [1..3, 4..5]. This minimizes
         // the amount of resets needed.
-        let mut state_iter = state.into_iter().chain(iter::once(false)).enumerate();
         let mut run_start: Option<u32> = None;
-        while let Some((idx, value)) = state_iter.next() {
+        for (idx, value) in state.into_iter().chain(iter::once(false)).enumerate() {
             match (run_start, value) {
                 // We're inside of a run, do nothing
                 (Some(..), true) => {}
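Driving an iterator by hand with `while let Some(..) = it.next()` is flagged by clippy's `while_let_on_iterator`, because a `for` loop expresses the same thing without the explicit binding and `next()` call, as the run-detection loop above shows. The same transformation in isolation:

```rust
fn main() {
    let state = vec![false, true, true, false, true];

    // Before: manual iterator driving.
    let mut iter = state.iter().enumerate();
    while let Some((idx, &value)) = iter.next() {
        if value {
            println!("reset needed at {}", idx);
        }
    }

    // After: the for loop owns the iterator and reads the same.
    for (idx, &value) in state.iter().enumerate() {
        if value {
            println!("reset needed at {}", idx);
        }
    }
}
```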
@@ -174,7 +173,7 @@ impl<B: GfxBackend> QuerySet<B> {
         if let Some(reset) = reset_state {
             let used = reset.use_query_set(query_set_id, self, query_index);
             if used {
-                return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index }.into());
+                return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index });
             }
         }
 

@@ -183,16 +182,14 @@ impl<B: GfxBackend> QuerySet<B> {
             return Err(QueryUseError::IncompatibleType {
                 query_type,
                 set_type: simple_set_type,
-            }
-            .into());
+            });
         }
 
         if query_index >= self.desc.count {
             return Err(QueryUseError::OutOfBounds {
                 query_index,
                 query_set_size: self.desc.count,
-            }
-            .into());
+            });
         }
 
         let hal_query = hal::query::Query::<B> {
@@ -249,8 +246,7 @@ impl<B: GfxBackend> QuerySet<B> {
             return Err(QueryUseError::AlreadyStarted {
                 active_query_index: old_idx,
                 new_query_index: query_index,
-            }
-            .into());
+            });
         }
 
         unsafe {
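The three QuerySet hunks above all remove a `.into()` that converted an error into what appears to be its own type; once the function's error type matches the constructed error, `From`/`Into` is an identity conversion, which clippy reports as `useless_conversion`. Sketched with a stand-in error type:

```rust
#[derive(Debug)]
struct QueryError {
    index: u32,
}

// The return type already names the error, so no conversion is needed.
fn check(index: u32, count: u32) -> Result<(), QueryError> {
    if index >= count {
        // Before: return Err(QueryError { index }.into());
        // After: the .into() was a no-op and is dropped.
        return Err(QueryError { index });
    }
    Ok(())
}

fn main() {
    assert!(check(5, 4).is_err());
}
```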
@@ -804,7 +804,7 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
             inputs: &[],
             preserves: &[],
         };
-        let all = entry.key().all().map(|(at, _)| at.clone());
+        let all = entry.key().all().map(|&(ref at, _)| at.clone());
 
         let pass = unsafe {
             device

@@ -848,7 +848,7 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
                 .raw
                 .create_framebuffer(
                     &render_pass,
-                    e.key().0.all().map(|fat| fat.clone()),
+                    e.key().0.all().cloned(),
                     conv::map_extent(&extent, wgt::TextureDimension::D3),
                 )
                 .or(Err(RenderPassErrorInner::OutOfMemory))?
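`Iterator::cloned` replaces a closure whose whole body is a clone (clippy's `map_clone`). The framebuffer call above is one instance; a minimal sketch:

```rust
fn main() {
    let attachments = vec![String::from("color0"), String::from("depth")];

    // Before: a closure that only clones.
    let copies: Vec<String> = attachments.iter().map(|a| a.clone()).collect();

    // After: .cloned() says the same thing directly.
    let cloned: Vec<String> = attachments.iter().cloned().collect();

    assert_eq!(copies, cloned);
}
```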
@@ -874,7 +874,7 @@ impl<'a, B: GfxBackend> RenderPassInfo<'a, B> {
                 .zip(&rp_key.colors)
                 .zip(raw_views.colors)
                 .map(
-                    |((at, (rat, _layout)), image_view)| hal::command::RenderAttachmentInfo {
+                    |((at, &(ref rat, _layout)), image_view)| hal::command::RenderAttachmentInfo {
                         image_view,
                         clear_value: match at.channel.load_op {
                             LoadOp::Load => Default::default(),

@@ -1945,7 +1945,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         if let Some(ref mut list) = cmd_buf.commands {
             list.push(crate::device::trace::Command::RunRenderPass {
                 base: BasePass::from_ref(base),
-                target_colors: color_attachments.iter().cloned().collect(),
+                target_colors: color_attachments.to_vec(),
                 target_depth_stencil: depth_stencil_attachment.cloned(),
             });
         }
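When the source is a slice, even `iter().cloned().collect()` can shrink further to `to_vec()` (clippy's `iter_cloned_collect`), as the `target_colors` change above does. A sketch:

```rust
fn main() {
    let color_attachments: &[u32] = &[1, 2, 3];

    // Before: collect a cloned iterator into a Vec.
    let a: Vec<u32> = color_attachments.iter().cloned().collect();

    // After: slices already know how to copy themselves into a Vec.
    let b: Vec<u32> = color_attachments.to_vec();

    assert_eq!(a, b);
}
```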
@@ -1990,8 +1990,6 @@ pub mod render_ffi {
     ///
     /// This function is unsafe as there is no guarantee that the given pointer is
     /// valid for `offset_length` elements.
-    // TODO: There might be other safety issues, such as using the unsafe
-    // `RawPass::encode` and `RawPass::encode_slice`.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_pass_set_bind_group(
         pass: &mut RenderPass,

@@ -2089,6 +2087,10 @@ pub mod render_ffi {
             .push(RenderCommand::SetScissor(Rect { x, y, w, h }));
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given pointer is
+    /// valid for `size_bytes` bytes.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_pass_set_push_constants(
         pass: &mut RenderPass,

@@ -2273,6 +2275,10 @@ pub mod render_ffi {
         });
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// is a valid null-terminated string.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_pass_push_debug_group(
         pass: &mut RenderPass,

@@ -2295,6 +2301,10 @@ pub mod render_ffi {
         pass.base.commands.push(RenderCommand::PopDebugGroup);
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given `label`
+    /// is a valid null-terminated string.
     #[no_mangle]
     pub unsafe extern "C" fn wgpu_render_pass_insert_debug_marker(
         pass: &mut RenderPass,

@@ -2312,7 +2322,7 @@ pub mod render_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_render_pass_write_timestamp(
+    pub extern "C" fn wgpu_render_pass_write_timestamp(
         pass: &mut RenderPass,
         query_set_id: id::QuerySetId,
         query_index: u32,

@@ -2326,7 +2336,7 @@ pub mod render_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_render_pass_begin_pipeline_statistics_query(
+    pub extern "C" fn wgpu_render_pass_begin_pipeline_statistics_query(
         pass: &mut RenderPass,
         query_set_id: id::QuerySetId,
         query_index: u32,

@@ -2342,7 +2352,7 @@ pub mod render_ffi {
     }
 
     #[no_mangle]
-    pub unsafe extern "C" fn wgpu_render_pass_end_pipeline_statistics_query(pass: &mut RenderPass) {
+    pub extern "C" fn wgpu_render_pass_end_pipeline_statistics_query(pass: &mut RenderPass) {
         span!(_guard, DEBUG, "RenderPass::end_pipeline_statistics_query");
 
         pass.base

@@ -2350,6 +2360,10 @@ pub mod render_ffi {
             .push(RenderCommand::EndPipelineStatisticsQuery);
     }
 
+    /// # Safety
+    ///
+    /// This function is unsafe as there is no guarantee that the given pointer is
+    /// valid for `render_bundle_ids_length` elements.
     #[no_mangle]
     pub unsafe fn wgpu_render_pass_execute_bundles(
         pass: &mut RenderPass,
@@ -313,7 +313,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         span!(_guard, INFO, "CommandEncoder::copy_buffer_to_buffer");
 
         if source == destination {
-            Err(TransferError::SameSourceDestinationBuffer)?
+            return Err(TransferError::SameSourceDestinationBuffer.into());
         }
         let hub = B::hub(self);
         let mut token = Token::root();
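`Err(e)?` in a function returning `Result` builds an `Err`, immediately re-propagates it through `?`, and runs the `From` conversion implicitly; clippy's `try_err` lint prefers the explicit `return Err(e.into());`, which is the rewrite applied to every validation branch in the transfer hunks here. A reduced sketch with stand-in error types:

```rust
#[derive(Debug)]
struct TransferError;

#[derive(Debug)]
struct CommandError(TransferError);

impl From<TransferError> for CommandError {
    fn from(e: TransferError) -> Self {
        CommandError(e)
    }
}

fn copy(size: u64) -> Result<(), CommandError> {
    if size % 4 != 0 {
        // Before: Err(TransferError)?  (the `?` hides both the return and the conversion)
        // After: the return and the From conversion are spelled out.
        return Err(TransferError.into());
    }
    Ok(())
}

fn main() {
    assert!(copy(3).is_err());
    assert!(copy(8).is_ok());
}
```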
@@ -343,7 +343,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidBuffer(source))?;
         if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
-            Err(TransferError::MissingCopySrcUsageFlag)?
+            return Err(TransferError::MissingCopySrcUsageFlag.into());
         }
         // expecting only a single barrier
         let src_barrier = src_pending

@@ -360,42 +360,41 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidBuffer(destination))?;
         if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
-            Err(TransferError::MissingCopyDstUsageFlag(
-                Some(destination),
-                None,
-            ))?
+            return Err(TransferError::MissingCopyDstUsageFlag(Some(destination), None).into());
         }
         let dst_barrier = dst_pending
             .map(|pending| pending.into_hal(dst_buffer))
             .next();
 
         if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            Err(TransferError::UnalignedCopySize(size))?
+            return Err(TransferError::UnalignedCopySize(size).into());
         }
         if source_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            Err(TransferError::UnalignedBufferOffset(source_offset))?
+            return Err(TransferError::UnalignedBufferOffset(source_offset).into());
         }
         if destination_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            Err(TransferError::UnalignedBufferOffset(destination_offset))?
+            return Err(TransferError::UnalignedBufferOffset(destination_offset).into());
         }
 
         let source_end_offset = source_offset + size;
         let destination_end_offset = destination_offset + size;
         if source_end_offset > src_buffer.size {
-            Err(TransferError::BufferOverrun {
+            return Err(TransferError::BufferOverrun {
                 start_offset: source_offset,
                 end_offset: source_end_offset,
                 buffer_size: src_buffer.size,
                 side: CopySide::Source,
-            })?
+            }
+            .into());
         }
         if destination_end_offset > dst_buffer.size {
-            Err(TransferError::BufferOverrun {
+            return Err(TransferError::BufferOverrun {
                 start_offset: destination_offset,
                 end_offset: destination_end_offset,
                 buffer_size: dst_buffer.size,
                 side: CopySide::Destination,
-            })?
+            }
+            .into());
         }
 
         if size == 0 {

@@ -484,7 +483,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidBuffer(source.buffer))?;
         if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
-            Err(TransferError::MissingCopySrcUsageFlag)?
+            return Err(TransferError::MissingCopySrcUsageFlag.into());
         }
         let src_barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
 

@@ -503,10 +502,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidTexture(destination.texture))?;
         if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
-            Err(TransferError::MissingCopyDstUsageFlag(
-                None,
-                Some(destination.texture),
-            ))?
+            return Err(
+                TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
+            );
         }
         let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_texture));
 

@@ -517,10 +515,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             / BITS_PER_BYTE;
         let src_bytes_per_row = source.layout.bytes_per_row;
         if bytes_per_row_alignment % bytes_per_block != 0 {
-            Err(TransferError::UnalignedBytesPerRow)?
+            return Err(TransferError::UnalignedBytesPerRow.into());
         }
         if src_bytes_per_row % bytes_per_row_alignment != 0 {
-            Err(TransferError::UnalignedBytesPerRow)?
+            return Err(TransferError::UnalignedBytesPerRow.into());
         }
         validate_texture_copy_range(
             destination,

@@ -551,9 +549,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         let (block_width, _) = dst_texture.format.describe().block_dimensions;
         if !conv::is_valid_copy_dst_texture_format(dst_texture.format) {
-            Err(TransferError::CopyToForbiddenTextureFormat(
-                dst_texture.format,
-            ))?
+            return Err(TransferError::CopyToForbiddenTextureFormat(dst_texture.format).into());
         }
 
         // WebGPU uses the physical size of the texture for copies whereas vulkan uses

@@ -640,7 +636,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidTexture(source.texture))?;
         if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
-            Err(TransferError::MissingCopySrcUsageFlag)?
+            return Err(TransferError::MissingCopySrcUsageFlag.into());
         }
         let src_barriers = src_pending.map(|pending| pending.into_hal(src_texture));
 

@@ -654,10 +650,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidBuffer(destination.buffer))?;
         if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
-            Err(TransferError::MissingCopyDstUsageFlag(
-                Some(destination.buffer),
-                None,
-            ))?
+            return Err(
+                TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(),
+            );
         }
         let dst_barrier = dst_barriers.map(|pending| pending.into_hal(dst_buffer));
 

@@ -668,10 +663,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             / BITS_PER_BYTE;
         let dst_bytes_per_row = destination.layout.bytes_per_row;
         if bytes_per_row_alignment % bytes_per_block != 0 {
-            Err(TransferError::UnalignedBytesPerRow)?
+            return Err(TransferError::UnalignedBytesPerRow.into());
        }
         if dst_bytes_per_row % bytes_per_row_alignment != 0 {
-            Err(TransferError::UnalignedBytesPerRow)?
+            return Err(TransferError::UnalignedBytesPerRow.into());
         }
         validate_texture_copy_range(
             source,

@@ -691,9 +686,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         let (block_width, _) = src_texture.format.describe().block_dimensions;
         if !conv::is_valid_copy_src_texture_format(src_texture.format) {
-            Err(TransferError::CopyFromForbiddenTextureFormat(
-                src_texture.format,
-            ))?
+            return Err(TransferError::CopyFromForbiddenTextureFormat(src_texture.format).into());
         }
 
         cmd_buf.buffer_memory_init_actions.extend(

@@ -769,7 +762,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         let (dst_layers, dst_selector, dst_offset) =
             texture_copy_view_to_hal(destination, copy_size, &*texture_guard)?;
         if src_layers.aspects != dst_layers.aspects {
-            Err(TransferError::MismatchedAspects)?
+            return Err(TransferError::MismatchedAspects.into());
         }
 
         #[cfg(feature = "trace")]

@@ -801,7 +794,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidTexture(source.texture))?;
         if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
-            Err(TransferError::MissingCopySrcUsageFlag)?
+            return Err(TransferError::MissingCopySrcUsageFlag.into());
         }
         //TODO: try to avoid this collection. It's needed because both
         // `src_pending` and `dst_pending` try to hold `trackers.textures` mutably.

@@ -824,10 +817,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidTexture(destination.texture))?;
         if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
-            Err(TransferError::MissingCopyDstUsageFlag(
-                None,
-                Some(destination.texture),
-            ))?
+            return Err(
+                TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
+            );
         }
         barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture)));
 
@@ -683,7 +683,7 @@ pub(crate) fn map_texture_state(
 }
 
 pub fn map_query_type(ty: &wgt::QueryType) -> (hal::query::Type, u32) {
-    match ty {
+    match *ty {
         wgt::QueryType::PipelineStatistics(pipeline_statistics) => {
             let mut ps = hal::query::PipelineStatistic::empty();
             ps.set(
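`match ty { ... }` on a `&QueryType` relies on match ergonomics to bind through the reference; with `clippy::pattern_type_mismatch` enabled crate-wide (see the `lib.rs` hunk near the end of this commit), the scrutinee is dereferenced explicitly instead. Illustrated on a stand-in enum:

```rust
#[derive(Clone, Copy)]
enum QueryType {
    Timestamp,
    PipelineStatistics(u32),
}

fn describe(ty: &QueryType) -> u32 {
    // Dereference the scrutinee so patterns match values, not references.
    match *ty {
        QueryType::Timestamp => 0,
        QueryType::PipelineStatistics(stats) => stats,
    }
}

fn main() {
    assert_eq!(describe(&QueryType::PipelineStatistics(7)), 7);
}
```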
@@ -169,7 +169,7 @@ impl<B: hal::Backend> MemoryBlock<B> {
     ) -> hal::memory::Segment {
         hal::memory::Segment {
             offset: self.0.offset() + inner_offset,
-            size: size.or(Some(self.0.size())),
+            size: size.or_else(|| Some(self.0.size())),
         }
     }
 
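`or(Some(self.0.size()))` evaluates its argument eagerly even when `size` is already `Some`; clippy's `or_fun_call` suggests the lazy `or_else` form so the fallback is only computed when needed. In isolation:

```rust
fn fallback() -> u64 {
    // Stand-in for self.0.size(): imagine this is not free to call.
    1024
}

fn main() {
    let size: Option<u64> = None;

    // Before: size.or(Some(fallback())) calls fallback() unconditionally.
    // After: the closure runs only if `size` is None.
    let resolved = size.or_else(|| Some(fallback()));

    assert_eq!(resolved, Some(1024));
}
```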
@@ -302,7 +302,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
         };
         tracing::debug!("...Done");
 
-        if status == false {
+        if !status {
             // We timed out while waiting for the fences
             return Err(WaitIdleError::StuckGpu);
         }
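Comparing a `bool` against a literal (`status == false`) is clippy's `bool_comparison`; plain negation says the same thing:

```rust
fn main() {
    let status = false; // e.g. "did the fence wait finish in time"

    // Before: if status == false { ... }
    // After:
    if !status {
        println!("timed out while waiting for the fences");
    }
}
```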
@@ -389,7 +389,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         while let Some(id) = self.suspected_resources.render_bundles.pop() {
             if trackers.bundles.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyRenderBundle(id.0));
+                }
 
                 if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
                     self.suspected_resources.add_trackers(&res.used);
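Using `Option::map` purely for a side effect produces an `Option<()>` that gets discarded; clippy's `option_map_unit_fn` asks for `if let`, which is the rewrite in the hunk above and in the nine resource-cleanup hunks that follow. A sketch:

```rust
fn main() {
    let mut trace: Option<Vec<&str>> = Some(Vec::new());

    // Before: trace.as_mut().map(|t| t.push("destroy")); (map used for a side effect)
    // After: `if let` makes the side effect explicit and produces no ignored value.
    if let Some(t) = trace.as_mut() {
        t.push("destroy");
    }

    assert_eq!(trace.unwrap(), vec!["destroy"]);
}
```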
@@ -405,7 +407,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         while let Some(id) = self.suspected_resources.bind_groups.pop() {
             if trackers.bind_groups.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyBindGroup(id.0));
+                }
 
                 if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
                     self.suspected_resources.add_trackers(&res.used);

@@ -429,7 +433,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         for id in self.suspected_resources.texture_views.drain(..) {
             if trackers.views.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyTextureView(id.0));
+                }
 
                 if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
                     let raw = match res.inner {

@@ -459,7 +465,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         for id in self.suspected_resources.textures.drain(..) {
             if trackers.textures.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyTexture(id.0));
+                }
 
                 if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
                     let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);

@@ -481,7 +489,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         for id in self.suspected_resources.samplers.drain(..) {
             if trackers.samplers.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroySampler(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroySampler(id.0));
+                }
 
                 if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
                     let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);

@@ -503,7 +513,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         for id in self.suspected_resources.buffers.drain(..) {
             if trackers.buffers.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyBuffer(id.0));
+                }
                 tracing::debug!("Buffer {:?} is detached", id);
 
                 if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {

@@ -536,7 +548,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         for id in self.suspected_resources.compute_pipelines.drain(..) {
             if trackers.compute_pipes.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyComputePipeline(id.0));
+                }
 
                 if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
                     let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);

@@ -558,7 +572,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
         for id in self.suspected_resources.render_pipelines.drain(..) {
             if trackers.render_pipes.remove_abandoned(id) {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyRenderPipeline(id.0));
+                }
 
                 if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
                     let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);

@@ -584,7 +600,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
             //Note: this has to happen after all the suspected pipelines are destroyed
             if ref_count.load() == 1 {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyPipelineLayout(id.0));
+                }
 
                 if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
                     self.suspected_resources

@@ -606,7 +624,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
             // encounter could drop the refcount to 0.
             if guard[id].multi_ref_count.dec_and_check_empty() {
                 #[cfg(feature = "trace")]
-                trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
+                if let Some(t) = trace {
+                    t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
+                }
                 if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
                     self.free_resources.descriptor_set_layouts.push(lay.raw);
                 }
@@ -155,8 +155,8 @@ impl RenderPassContext {
         if self.attachments.depth_stencil != other.attachments.depth_stencil {
             return Err(
                 RenderPassCompatibilityError::IncompatibleDepthStencilAttachment(
-                    self.attachments.depth_stencil.clone(),
-                    other.attachments.depth_stencil.clone(),
+                    self.attachments.depth_stencil,
+                    other.attachments.depth_stencil,
                 ),
             );
         }

@@ -298,6 +298,7 @@ pub enum CreateDeviceError {
 }
 
 impl<B: GfxBackend> Device<B> {
+    #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
         raw: B::Device,
         adapter_id: Stored<id::AdapterId>,

@@ -373,7 +374,7 @@ impl<B: GfxBackend> Device<B> {
             hal_limits,
             private_features,
             limits: desc.limits.clone(),
-            features: desc.features.clone(),
+            features: desc.features,
             spv_options,
             pending_writes: queue::PendingWrites::new(),
         })

@@ -860,7 +861,7 @@ impl<B: GfxBackend> Device<B> {
                     conv::map_texture_view_dimension(view_dim),
                     conv::map_texture_format(format, self.private_features),
                     hal::format::Swizzle::NO,
-                    range.clone(),
+                    range,
                 )
                 .or(Err(resource::CreateTextureViewError::OutOfMemory))?
         };
@@ -1106,8 +1107,8 @@ impl<B: GfxBackend> Device<B> {
         key: &RenderPassKey,
     ) -> Result<B::RenderPass, hal::device::OutOfMemory> {
         let mut color_ids = [(0, hal::image::Layout::ColorAttachmentOptimal); MAX_COLOR_TARGETS];
-        for i in 0..key.colors.len() {
-            color_ids[i].0 = i;
+        for (index, color) in color_ids[..key.colors.len()].iter_mut().enumerate() {
+            color.0 = index;
         }
         let depth_id = key.depth_stencil.as_ref().map(|_| {
             (
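Indexing with `for i in 0..len` when the loop body only touches `slice[i]` is clippy's `needless_range_loop`; iterating the slice mutably, with `enumerate` supplying the index, avoids the double indexing. Reduced:

```rust
fn main() {
    let mut color_ids = [(0usize, 'L'); 4];
    let used = 3;

    // Before:
    // for i in 0..used { color_ids[i].0 = i; }

    // After: iterate the prefix mutably and let enumerate provide the index.
    for (index, color) in color_ids[..used].iter_mut().enumerate() {
        color.0 = index;
    }

    assert_eq!(color_ids[2].0, 2);
    assert_eq!(color_ids[3].0, 0); // untouched past `used`
}
```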
@@ -1123,7 +1124,7 @@ impl<B: GfxBackend> Device<B> {
             resolves: &[],
             preserves: &[],
         };
-        let all = key.all().map(|(at, _)| at.clone());
+        let all = key.all().map(|&(ref at, _)| at.clone());
 
         unsafe {
             self.raw

@@ -1138,7 +1139,7 @@ impl<B: GfxBackend> Device<B> {
     ) -> Option<id::BindGroupLayoutId> {
         guard
             .iter(self_id.backend())
-            .find(|(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
+            .find(|&(_, ref bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
             .map(|(id, value)| {
                 value.multi_ref_count.inc();
                 id

@@ -1313,7 +1314,7 @@ impl<B: GfxBackend> Device<B> {
                 _ => {
                     return Err(Error::WrongBindingType {
                         binding,
-                        actual: decl.ty.clone(),
+                        actual: decl.ty,
                         expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
                     })
                 }

@@ -1425,7 +1426,7 @@ impl<B: GfxBackend> Device<B> {
                 _ => {
                     return Err(Error::WrongBindingType {
                         binding,
-                        actual: decl.ty.clone(),
+                        actual: decl.ty,
                         expected: "Sampler",
                     })
                 }

@@ -1528,7 +1529,7 @@ impl<B: GfxBackend> Device<B> {
                     }
                     _ => return Err(Error::WrongBindingType {
                         binding,
-                        actual: decl.ty.clone(),
+                        actual: decl.ty,
                         expected:
                             "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
                     }),

@@ -1598,7 +1599,7 @@ impl<B: GfxBackend> Device<B> {
                 _ => {
                     return Err(Error::WrongBindingType {
                         binding,
-                        actual: decl.ty.clone(),
+                        actual: decl.ty,
                         expected: "SampledTextureArray",
                     })
                 }

@@ -2230,8 +2231,8 @@ impl<B: GfxBackend> Device<B> {
             }
         };
 
-        let fragment = match &desc.fragment {
-            Some(fragment) => {
+        let fragment = match desc.fragment {
+            Some(ref fragment) => {
                 let entry_point_name = &fragment.stage.entry_point;
                 let flag = wgt::ShaderStage::FRAGMENT;
 

@@ -2385,9 +2386,7 @@ impl<B: GfxBackend> Device<B> {
             attachments: AttachmentData {
                 colors: color_states.iter().map(|state| state.format).collect(),
                 resolves: ArrayVec::new(),
-                depth_stencil: depth_stencil_state
-                    .as_ref()
-                    .map(|state| state.format.clone()),
+                depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
             },
             sample_count: samples,
         };

@@ -2814,8 +2813,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             });
         }
 
-        let (_, block) = buffer.raw.as_mut().unwrap();
-        block.write_bytes(&device.raw, offset, data)?;
+        buffer
+            .raw
+            .as_mut()
+            .unwrap()
+            .1
+            .write_bytes(&device.raw, offset, data)?;
 
         Ok(())
     }

@@ -2843,8 +2846,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         check_buffer_usage(buffer.usage, wgt::BufferUsage::MAP_READ)?;
         //assert!(buffer isn't used by the GPU);
 
-        let (_, block) = buffer.raw.as_mut().unwrap();
-        block.read_bytes(&device.raw, offset, data)?;
+        buffer
+            .raw
+            .as_mut()
+            .unwrap()
+            .1
+            .read_bytes(&device.raw, offset, data)?;
 
         Ok(())
     }
@@ -4291,7 +4298,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         let sc_id = surface_id.to_swap_chain_id(B::VARIANT);
         if let Some(sc) = swap_chain_guard.try_remove(sc_id) {
-            if !sc.acquired_view_id.is_none() {
+            if sc.acquired_view_id.is_some() {
                 return Err(swap_chain::CreateSwapChainError::SwapChainOutputExists);
             }
             unsafe {
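`!x.is_none()` is a double negative that clippy flags; `is_some()` reads directly:

```rust
fn main() {
    let acquired_view_id: Option<u32> = Some(7);

    // Before: if !acquired_view_id.is_none() { ... }
    // After:
    if acquired_view_id.is_some() {
        println!("swap chain output already exists");
    }
}
```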
@@ -233,26 +233,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .as_ref()
             .ok_or(TransferError::InvalidBuffer(buffer_id))?;
         if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
-            Err(TransferError::MissingCopyDstUsageFlag(
-                Some(buffer_id),
-                None,
-            ))?;
+            return Err(TransferError::MissingCopyDstUsageFlag(Some(buffer_id), None).into());
         }
         dst.life_guard.use_at(device.active_submission_index + 1);
 
         if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            Err(TransferError::UnalignedCopySize(data_size))?
+            return Err(TransferError::UnalignedCopySize(data_size).into());
         }
         if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            Err(TransferError::UnalignedBufferOffset(buffer_offset))?
+            return Err(TransferError::UnalignedBufferOffset(buffer_offset).into());
         }
         if buffer_offset + data_size > dst.size {
-            Err(TransferError::BufferOverrun {
+            return Err(TransferError::BufferOverrun {
                 start_offset: buffer_offset,
                 end_offset: buffer_offset + data_size,
                 buffer_size: dst.size,
                 side: CopySide::Destination,
-            })?
+            }
+            .into());
         }
 
         let region = hal::command::BufferCopy {

@@ -349,7 +347,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         let block_height = block_height as u32;
 
         if !conv::is_valid_copy_dst_texture_format(texture_format) {
-            Err(TransferError::CopyToForbiddenTextureFormat(texture_format))?
+            return Err(TransferError::CopyToForbiddenTextureFormat(texture_format).into());
         }
         let width_blocks = size.width / block_width;
         let height_blocks = size.height / block_width;

@@ -384,10 +382,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .ok_or(TransferError::InvalidTexture(destination.texture))?;
 
         if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
-            Err(TransferError::MissingCopyDstUsageFlag(
-                None,
-                Some(destination.texture),
-            ))?
+            return Err(
+                TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
+            );
         }
         validate_texture_copy_range(
             destination,

@@ -505,7 +502,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             .get(cmb_id)
             .map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
 
-        if cmdbuf.buffer_memory_init_actions.len() == 0 {
+        if cmdbuf.buffer_memory_init_actions.is_empty() {
             continue;
         }
 
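`len() == 0` triggers clippy's `len_zero`; `is_empty()` states the intent and exists on every standard collection that has `len()`:

```rust
fn main() {
    let buffer_memory_init_actions: Vec<u32> = Vec::new();

    // Before: if buffer_memory_init_actions.len() == 0 { continue; }
    // After:
    if buffer_memory_init_actions.is_empty() {
        println!("nothing to initialize, skip this command buffer");
    }
}
```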
@@ -685,7 +682,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             for id in cmdbuf.trackers.buffers.used() {
                 let buffer = &mut buffer_guard[id];
                 if buffer.raw.is_none() {
-                    return Err(QueueSubmitError::DestroyedBuffer(id.0))?;
+                    return Err(QueueSubmitError::DestroyedBuffer(id.0));
                 }
                 if !buffer.life_guard.use_at(submit_index) {
                     if let BufferMapState::Active { .. } = buffer.map_state {

@@ -703,7 +700,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
             for id in cmdbuf.trackers.textures.used() {
                 let texture = &texture_guard[id];
                 if texture.raw.is_none() {
-                    return Err(QueueSubmitError::DestroyedTexture(id.0))?;
+                    return Err(QueueSubmitError::DestroyedTexture(id.0));
                 }
                 if !texture.life_guard.use_at(submit_index) {
                     device.temp_suspected.textures.push(id);

@@ -26,6 +26,7 @@ pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
     }
 }
 
+#[allow(clippy::large_enum_variant)]
 #[derive(Debug)]
 #[cfg_attr(feature = "trace", derive(serde::Serialize))]
 #[cfg_attr(feature = "replay", derive(serde::Deserialize))]

@@ -218,7 +218,6 @@ impl<T, I: TypedId> Storage<T, I> {
                 }
                 _ => None,
             })
-            .into_iter()
     }
 }
 

@@ -586,7 +585,7 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
 
         let mut devices = self.devices.data.write();
         for element in devices.map.iter_mut() {
-            if let Element::Occupied(device, _) = element {
+            if let Element::Occupied(ref mut device, _) = *element {
                 device.prepare_to_die();
             }
         }

@@ -639,14 +639,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
     ) -> SurfaceId {
         span!(_guard, INFO, "Instance::instance_create_surface_metal");
 
-        let surface =
-            Surface {
-                #[cfg(feature = "gfx-backend-vulkan")]
-                vulkan: None, //TODO: create_surface_from_layer ?
-                metal: self.instance.metal.as_ref().map(|inst| {
-                    inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
-                }),
-            };
+        let surface = Surface {
+            #[cfg(feature = "gfx-backend-vulkan")]
+            vulkan: None, //TODO: create_surface_from_layer ?
+            metal: self.instance.metal.as_ref().map(|inst| {
+                // we don't want to link to metal-rs for this
+                #[allow(clippy::transmute_ptr_to_ref)]
+                inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
+            }),
+        };
 
         let mut token = Token::root();
         let id = self.surfaces.prepare(id_in).assign(surface, &mut token);

@@ -669,7 +670,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         backends_map! {
             let map = |(instance_field, backend, backend_info, backend_hub)| {
-                if let Some(inst) = instance_field {
+                if let Some(ref inst) = *instance_field {
                     let hub = backend_hub(self);
                     if let Some(id_backend) = inputs.find(backend) {
                         for raw in inst.enumerate_adapters() {

@@ -727,7 +728,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         backends_map! {
             let map = |(instance_backend, id_backend, surface_backend)| {
-                match instance_backend {
+                match *instance_backend {
                     Some(ref inst) if id_backend.is_some() => {
                         let mut adapters = inst.enumerate_adapters();
                         if let Some(surface_backend) = compatible_surface.and_then(surface_backend) {
@ -2,14 +2,26 @@
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#![allow(
    // We use loops for getting early-out of scope without closures.
    clippy::never_loop,
    // We don't use syntax sugar where it's not necessary.
    clippy::match_like_matches_macro,
    // Redundant matching is more explicit.
    clippy::redundant_pattern_matching,
    // Explicit lifetimes are often easier to reason about.
    clippy::needless_lifetimes,
    // No need for defaults in the internal types.
    clippy::new_without_default,
)]
#![warn(
    trivial_casts,
    trivial_numeric_casts,
    unused_extern_crates,
    unused_qualifications
    unused_qualifications,
    // We don't match on a reference, unless required.
    clippy::pattern_type_mismatch,
)]
// We use loops for getting early-out of scope without closures.
#![allow(clippy::never_loop)]

#[macro_use]
mod macros;
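Several of the allows above trade clippy's sugar for explicitness. For instance, `clippy::match_like_matches_macro` would normally rewrite a two-arm boolean `match` into `matches!`. A hedged illustration (the enum is made up, not from wgpu):

enum Status {
    Ready,
    Busy,
}

#[allow(clippy::match_like_matches_macro)]
fn is_ready(status: &Status) -> bool {
    // The crate prefers this explicit form...
    match *status {
        Status::Ready => true,
        _ => false,
    }
    // ...over clippy's suggestion: `matches!(*status, Status::Ready)`.
}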
@ -157,10 +157,10 @@ impl MemoryInitTracker {

    // Drains uninitialized ranges in a query range.
    #[must_use]
    pub(crate) fn drain<'a>(
    pub(crate) fn drain(
        &'a mut self,
        &mut self,
        drain_range: Range<wgt::BufferAddress>,
    ) -> MemoryInitTrackerDrain<'a> {
    ) -> MemoryInitTrackerDrain {
        let index = self.lower_bound(drain_range.start);
        MemoryInitTrackerDrain {
            drain_range,
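The `drain` signature change is a straight `clippy::needless_lifetimes` fix: the explicit `'a` adds nothing the elision rules don't already imply. A minimal sketch with stand-in types:

struct Tracker(Vec<u64>);
struct TrackerDrain<'a>(&'a mut Vec<u64>);

impl Tracker {
    // Before: pub(crate) fn drain<'a>(&'a mut self) -> TrackerDrain<'a>
    // After the lint fix the lifetime is elided; the borrow of `self`
    // still flows into the return type exactly as before.
    fn drain(&mut self) -> TrackerDrain {
        TrackerDrain(&mut self.0)
    }
}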
@ -172,11 +172,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
            Err(err) => (
                None,
                match err {
                    hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?,
                    hal::window::AcquireError::OutOfMemory(_) => {
                        return Err(DeviceError::OutOfMemory.into())
                    }
                    hal::window::AcquireError::NotReady { .. } => SwapChainStatus::Timeout,
                    hal::window::AcquireError::OutOfDate(_) => SwapChainStatus::Outdated,
                    hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
                    hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?,
                    hal::window::AcquireError::DeviceLost(_) => {
                        return Err(DeviceError::Lost.into())
                    }
                },
            ),
        };
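The two `Err(...)?` arms become explicit `return Err(...into())`. `Err(e)?` always returns early, so this is purely about making the control flow visible; naming the lint is an assumption on my part, since this is the shape clippy's `try_err` flags but the diff itself doesn't say. Stand-in types:

#[derive(Debug)]
struct DeviceError;

#[derive(Debug)]
struct SwapChainError;

impl From<DeviceError> for SwapChainError {
    fn from(_: DeviceError) -> Self {
        SwapChainError
    }
}

fn acquire(out_of_memory: bool) -> Result<u32, SwapChainError> {
    if out_of_memory {
        // Before: `Err(DeviceError)?;` -- the `?` here can only ever return.
        return Err(DeviceError.into());
    }
    Ok(0)
}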
@ -195,6 +195,10 @@ impl<S: ResourceState + fmt::Debug> fmt::Debug for ResourceTracker<S> {
    }
}

#[allow(
    // Explicit lifetimes are easier to reason about here.
    clippy::needless_lifetimes,
)]
impl<S: ResourceState> ResourceTracker<S> {
    /// Create a new empty tracker.
    pub fn new(backend: wgt::Backend) -> Self {
@ -66,6 +66,7 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
    }

    /// Merge the neighboring ranges together, where possible.
    #[allow(clippy::suspicious_operation_groupings)]
    pub fn coalesce(&mut self) {
        let mut num_removed = 0;
        let mut iter = self.ranges.iter_mut();
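`clippy::suspicious_operation_groupings` flags boolean chains whose field pairings look like copy-paste slips. In `coalesce` the "mismatched" pairing is intentional, hence the allow. An illustrative, made-up shape of the kind the lint dislikes:

struct Span {
    start: u32,
    end: u32,
}

#[allow(clippy::suspicious_operation_groupings)]
fn adjacent_same_width(a: &Span, b: &Span) -> bool {
    // Pairing `a.end` with `b.start` while the other comparison pairs
    // like-named fields resembles a typo to the lint, even though here
    // it is exactly the adjacency test we want.
    a.end == b.start && a.end - a.start == b.end - b.start
}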
@ -204,40 +205,40 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
    fn next(&mut self) -> Option<Self::Item> {
        match (self.sa.peek(), self.sb.peek()) {
            // we have both streams
            (Some(&(ref ra, va)), Some(&(ref rb, vb))) => {
            (Some(&&(ref ra, va)), Some(&&(ref rb, vb))) => {
                let (range, usage) = if ra.start < self.base {
                    // in the middle of the left stream
                    let (end, end_value) = if self.base == rb.start {
                        // right stream is starting
                        debug_assert!(self.base < ra.end);
                        (rb.end, Some(*vb))
                        (rb.end, Some(vb))
                    } else {
                        // right hasn't started yet
                        debug_assert!(self.base < rb.start);
                        (rb.start, None)
                    };
                    (self.base..ra.end.min(end), Some(*va)..end_value)
                    (self.base..ra.end.min(end), Some(va)..end_value)
                } else if rb.start < self.base {
                    // in the middle of the right stream
                    let (end, start_value) = if self.base == ra.start {
                        // left stream is starting
                        debug_assert!(self.base < rb.end);
                        (ra.end, Some(*va))
                        (ra.end, Some(va))
                    } else {
                        // left hasn't started yet
                        debug_assert!(self.base < ra.start);
                        (ra.start, None)
                    };
                    (self.base..rb.end.min(end), start_value..Some(*vb))
                    (self.base..rb.end.min(end), start_value..Some(vb))
                } else {
                    // no active streams
                    match ra.start.cmp(&rb.start) {
                        // both are starting
                        Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(*va)..Some(*vb)),
                        Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(va)..Some(vb)),
                        // only left is starting
                        Ordering::Less => (ra.start..rb.start.min(ra.end), Some(*va)..None),
                        Ordering::Less => (ra.start..rb.start.min(ra.end), Some(va)..None),
                        // only right is starting
                        Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(*vb)),
                        Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(vb)),
                    }
                };
                self.base = range.end;
@ -250,18 +251,18 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
                Some((range, usage))
            }
            // only right stream
            (None, Some(&(ref rb, vb))) => {
            (None, Some(&&(ref rb, vb))) => {
                let range = self.base.max(rb.start)..rb.end;
                self.base = rb.end;
                let _ = self.sb.next();
                Some((range, None..Some(*vb)))
                Some((range, None..Some(vb)))
            }
            // only left stream
            (Some(&(ref ra, va)), None) => {
            (Some(&&(ref ra, va)), None) => {
                let range = self.base.max(ra.start)..ra.end;
                self.base = ra.end;
                let _ = self.sa.next();
                Some((range, Some(*va)..None))
                Some((range, Some(va)..None))
            }
            // done
            (None, None) => None,
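The `&(..)` to `&&(..)` pattern changes in the two hunks above also follow from `clippy::pattern_type_mismatch`: `Peekable::peek` over an iterator of `&(I, T)` items yields `&&(I, T)`, so both reference layers are now spelled out, and the `*va`/`*vb` copies become plain `va`/`vb`. A minimal sketch:

fn peek_value(pairs: &[(std::ops::Range<u32>, u8)]) -> Option<u8> {
    let mut iter = pairs.iter().peekable();
    // `iter.peek()` is `Option<&&(Range<u32>, u8)>`; with the lint on we
    // destructure both `&` layers, borrow the non-Copy range with `ref`,
    // and copy the `u8` out directly instead of writing `*v`.
    match iter.peek() {
        Some(&&(ref _range, v)) => Some(v),
        None => None,
    }
}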
@ -735,7 +735,7 @@ impl Interface {

        let mut entry_points = FastHashMap::default();
        entry_points.reserve(module.entry_points.len());
        for (index, entry_point) in (&module.entry_points).into_iter().enumerate() {
        for (index, entry_point) in (&module.entry_points).iter().enumerate() {
            let info = analysis.get_entry_point(index);
            let mut ep = EntryPoint::default();
            for (var_handle, var) in module.global_variables.iter() {
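Calling `.into_iter()` on `&module.entry_points` never consumed anything; it just resolved to `.iter()`, and the rename (the rewrite `clippy::into_iter_on_ref` suggests) says so plainly. Sketch with a stand-in signature:

fn indices(entry_points: &[String]) -> Vec<usize> {
    // `(&entry_points).into_iter()` would iterate by reference anyway;
    // `.iter()` states that intent directly.
    entry_points.iter().enumerate().map(|(index, _)| index).collect()
}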
@ -837,7 +837,7 @@ impl Interface {
                    .ok_or(BindingError::Missing)
                    .and_then(|set| {
                        let ty = res.derive_binding_type(usage)?;
                        Ok(match set.entry(res.binding) {
                        match set.entry(res.binding) {
                            Entry::Occupied(e) if e.get().ty != ty => {
                                return Err(BindingError::InconsistentlyDerivedType)
                            }
@ -852,7 +852,8 @@ impl Interface {
                                count: None,
                            });
                        }
                        })
                        }
                        Ok(())
                    }),
            };
            if let Err(error) = result {
@ -6,11 +6,15 @@
 * This API is used for targeting both Web and Native.
 */

// The intra doc links to the wgpu crate in this crate actually successfully link to the types in the wgpu crate, when built from the wgpu crate.
// However when building from both the wgpu crate or this crate cargo doc will claim all the links cannot be resolved
// despite the fact that it works fine when it needs to.
// So we just disable those warnings.
#![allow(broken_intra_doc_links)]
#![allow(
    // The intra doc links to the wgpu crate in this crate actually successfully link to the types in the wgpu crate, when built from the wgpu crate.
    // However when building from both the wgpu crate or this crate cargo doc will claim all the links cannot be resolved
    // despite the fact that it works fine when it needs to.
    // So we just disable those warnings.
    broken_intra_doc_links,
    // We don't use syntax sugar where it's not necessary.
    clippy::match_like_matches_macro,
)]
#![warn(missing_docs)]

#[cfg(feature = "serde")]
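The `broken_intra_doc_links` allow exists because these docs link into the `wgpu` crate, which resolves when the docs are built from `wgpu` but not when this crate is documented alone. A hypothetical doc comment of the kind involved (the type and link target are illustrative, not from the diff):

/// Describes a device.
///
/// When built as part of `wgpu`, the link below resolves; built standalone,
/// rustdoc would warn about it without the crate-level allow.
///
/// See the corresponding [`wgpu::Device`] for how this is consumed.
pub struct ExampleDescriptor;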