hal: port the rest of wgpu-core

Dzmitry Malyshau 2021-06-08 00:45:41 -04:00
parent c61ee1262b
commit 0a82c232ba
26 changed files with 1599 additions and 1809 deletions

View File

@ -37,7 +37,7 @@ thiserror = "1"
[dependencies.naga]
git = "https://github.com/gfx-rs/naga"
tag = "gfx-25"
features = ["spv-in", "spv-out", "wgsl-in"]
features = ["spv-in", "wgsl-in"]
[dependencies.wgt]
path = "../wgpu-types"

View File

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
device::{descriptor::DescriptorSet, DeviceError, MissingFeatures, SHADER_STAGE_COUNT},
device::{DeviceError, MissingFeatures, SHADER_STAGE_COUNT},
hub::Resource,
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid},
memory_init_tracker::MemoryInitTrackerAction,
@ -633,7 +633,7 @@ pub struct BindGroupDynamicBindingData {
#[derive(Debug)]
pub struct BindGroup<A: hal::Api> {
pub(crate) raw: DescriptorSet<A>,
pub(crate) raw: A::BindGroup,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) layout_id: Valid<BindGroupLayoutId>,
pub(crate) life_guard: LifeGuard,
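
The change above is the pattern for the whole commit: gfx-hal's backend parameter `B` and wgpu-core wrapper types such as `DescriptorSet<A>` give way to associated types on the new `hal::Api` trait, so resources own their backend handles directly. A minimal self-contained sketch of that shape (names simplified; the real trait lives in wgpu-hal):

pub trait Api: Sized {
    type Device;
    type CommandBuffer;
    type BindGroup;
}

// Resources are generic over the whole API and hold the raw handle:
pub struct BindGroup<A: Api> {
    raw: A::BindGroup,
}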

View File

@ -7,7 +7,7 @@ use crate::{
device::SHADER_STAGE_COUNT,
hub::{HalApi, Storage},
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid},
Stored, MAX_BIND_GROUPS,
Stored,
};
use arrayvec::ArrayVec;
@ -42,7 +42,7 @@ mod compat {
#[derive(Debug)]
pub struct Manager<T> {
entries: [Entry<T>; crate::MAX_BIND_GROUPS],
entries: [Entry<T>; hal::MAX_BIND_GROUPS],
}
impl<T: Copy + PartialEq> Manager<T> {
@ -145,7 +145,7 @@ pub(super) struct EntryPayload {
pub(super) struct Binder {
pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored`
manager: compat::Manager<Valid<BindGroupLayoutId>>,
payloads: [EntryPayload; MAX_BIND_GROUPS],
payloads: [EntryPayload; hal::MAX_BIND_GROUPS],
}
impl Binder {

View File

@ -43,22 +43,20 @@ use crate::{
StateChange,
},
conv,
device::{
AttachmentData, Device, DeviceError, RenderPassContext, MAX_VERTEX_BUFFERS,
SHADER_STAGE_COUNT,
},
hal::BufferUse,
device::{AttachmentData, Device, DeviceError, RenderPassContext, SHADER_STAGE_COUNT},
hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Resource, Storage, Token},
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
track::{TrackerSet, UsageConflict},
validation::check_buffer_usage,
Label, LabelHelpers, LifeGuard, Stored, MAX_BIND_GROUPS,
Label, LabelHelpers, LifeGuard, Stored,
};
use arrayvec::ArrayVec;
use std::{borrow::Cow, iter, mem, ops::Range};
use std::{borrow::Cow, mem, ops::Range};
use thiserror::Error;
use hal::CommandBuffer as _;
/// Describes a [`RenderBundleEncoder`].
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
@ -105,7 +103,7 @@ impl RenderBundleEncoder {
if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
return Err(CreateRenderBundleError::InvalidSampleCount(sc));
}
sc as u8
sc
},
},
})
@ -150,10 +148,12 @@ impl RenderBundleEncoder {
let mut state = State {
trackers: TrackerSet::new(self.parent_id.backend()),
index: IndexState::new(),
vertex: (0..MAX_VERTEX_BUFFERS)
vertex: (0..hal::MAX_VERTEX_BUFFERS)
.map(|_| VertexState::new())
.collect(),
bind: (0..MAX_BIND_GROUPS).map(|_| BindState::new()).collect(),
bind: (0..hal::MAX_BIND_GROUPS)
.map(|_| BindState::new())
.collect(),
push_constant_ranges: PushConstantState::new(),
raw_dynamic_offsets: Vec::new(),
flat_dynamic_offsets: Vec::new(),
@ -260,7 +260,7 @@ impl RenderBundleEncoder {
let buffer = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUse::INDEX)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDEX)
.map_pass_err(scope)?;
@ -287,7 +287,7 @@ impl RenderBundleEncoder {
let buffer = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUse::VERTEX)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::VERTEX)
.map_pass_err(scope)?;
@ -408,7 +408,7 @@ impl RenderBundleEncoder {
let buffer = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUse::INDIRECT)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
.map_pass_err(scope)?;
@ -444,7 +444,7 @@ impl RenderBundleEncoder {
let buffer = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUse::INDIRECT)
.map_err(|err| RenderCommandError::Buffer(buffer_id, err))
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
@ -567,7 +567,7 @@ impl RenderBundle {
/// The only failure condition is if some of the used buffers are destroyed.
pub(crate) unsafe fn execute<A: HalApi>(
&self,
cmd_buf: &mut B::CommandBuffer,
cmd_buf: &mut A::CommandBuffer,
pipeline_layout_guard: &Storage<
crate::binding_model::PipelineLayout<A>,
id::PipelineLayoutId,
@ -576,12 +576,10 @@ impl RenderBundle {
pipeline_guard: &Storage<crate::pipeline::RenderPipeline<A>, id::RenderPipelineId>,
buffer_guard: &Storage<crate::resource::Buffer<A>, id::BufferId>,
) -> Result<(), ExecutionError> {
use hal::command::CommandBuffer as _;
let mut offsets = self.base.dynamic_offsets.as_slice();
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
if let Some(ref label) = self.base.label {
cmd_buf.begin_debug_marker(label, 0);
cmd_buf.begin_debug_marker(label);
}
for command in self.base.commands.iter() {
@ -592,17 +590,17 @@ impl RenderBundle {
bind_group_id,
} => {
let bind_group = bind_group_guard.get(bind_group_id).unwrap();
cmd_buf.bind_graphics_descriptor_sets(
cmd_buf.set_bind_group(
&pipeline_layout_guard[pipeline_layout_id.unwrap()].raw,
index as usize,
iter::once(bind_group.raw.raw()),
offsets.iter().take(num_dynamic_offsets as usize).cloned(),
index as u32,
&bind_group.raw,
&offsets[..num_dynamic_offsets as usize],
);
offsets = &offsets[num_dynamic_offsets as usize..];
}
RenderCommand::SetPipeline(pipeline_id) => {
let pipeline = pipeline_guard.get(pipeline_id).unwrap();
cmd_buf.bind_graphics_pipeline(&pipeline.raw);
cmd_buf.set_render_pipeline(&pipeline.raw);
pipeline_layout_id = Some(pipeline.layout_id.value);
}
@ -612,19 +610,18 @@ impl RenderBundle {
offset,
size,
} => {
let index_type = conv::map_index_format(index_format);
let &(ref buffer, _) = buffer_guard
let buffer = buffer_guard
.get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
let range = hal::buffer::SubRange {
let bb = hal::BufferBinding {
buffer,
offset,
size: size.map(|s| s.get()),
size,
};
cmd_buf.bind_index_buffer(buffer, range, index_type);
cmd_buf.set_index_buffer(bb, index_format);
}
RenderCommand::SetVertexBuffer {
slot,
@ -632,17 +629,18 @@ impl RenderBundle {
offset,
size,
} => {
let &(ref buffer, _) = buffer_guard
let buffer = buffer_guard
.get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
let range = hal::buffer::SubRange {
let bb = hal::BufferBinding {
buffer,
offset,
size: size.map(|s| s.get()),
size,
};
cmd_buf.bind_vertex_buffers(slot, iter::once((buffer, range)));
cmd_buf.set_vertex_buffer(slot, bb);
}
RenderCommand::SetPushConstant {
stages,
@ -659,20 +657,15 @@ impl RenderBundle {
let data_slice = &self.base.push_constant_data
[(values_offset as usize)..values_end_offset];
cmd_buf.push_graphics_constants(
&pipeline_layout.raw,
conv::map_shader_stage_flags(stages),
offset,
&data_slice,
)
cmd_buf.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice)
} else {
super::push_constant_clear(
offset,
size_bytes,
|clear_offset, clear_data| {
cmd_buf.push_graphics_constants(
cmd_buf.set_push_constants(
&pipeline_layout.raw,
conv::map_shader_stage_flags(stages),
stages,
clear_offset,
clear_data,
);
@ -686,10 +679,7 @@ impl RenderBundle {
first_vertex,
first_instance,
} => {
cmd_buf.draw(
first_vertex..first_vertex + vertex_count,
first_instance..first_instance + instance_count,
);
cmd_buf.draw(first_vertex, vertex_count, first_instance, instance_count);
}
RenderCommand::DrawIndexed {
index_count,
@ -699,9 +689,11 @@ impl RenderBundle {
first_instance,
} => {
cmd_buf.draw_indexed(
first_index..first_index + index_count,
first_index,
index_count,
base_vertex,
first_instance..first_instance + instance_count,
first_instance,
instance_count,
);
}
RenderCommand::MultiDrawIndirect {
@ -710,13 +702,13 @@ impl RenderBundle {
count: None,
indexed: false,
} => {
let &(ref buffer, _) = buffer_guard
let buffer = buffer_guard
.get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
cmd_buf.draw_indirect(buffer, offset, 1, 0);
cmd_buf.draw_indirect(buffer, offset, 1);
}
RenderCommand::MultiDrawIndirect {
buffer_id,
@ -724,13 +716,13 @@ impl RenderBundle {
count: None,
indexed: true,
} => {
let &(ref buffer, _) = buffer_guard
let buffer = buffer_guard
.get(buffer_id)
.unwrap()
.raw
.as_ref()
.ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
cmd_buf.draw_indexed_indirect(buffer, offset, 1, 0);
cmd_buf.draw_indexed_indirect(buffer, offset, 1);
}
RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => {
@ -943,8 +935,8 @@ struct VertexLimitState {
struct State {
trackers: TrackerSet,
index: IndexState,
vertex: ArrayVec<[VertexState; MAX_VERTEX_BUFFERS]>,
bind: ArrayVec<[BindState; MAX_BIND_GROUPS]>,
vertex: ArrayVec<[VertexState; hal::MAX_VERTEX_BUFFERS]>,
bind: ArrayVec<[BindState; hal::MAX_BIND_GROUPS]>,
push_constant_ranges: PushConstantState,
raw_dynamic_offsets: Vec<wgt::DynamicOffset>,
flat_dynamic_offsets: Vec<wgt::DynamicOffset>,
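
The draw calls above show the signature shift most clearly: gfx-hal took `Range<u32>` arguments, while the new hal takes explicit first/count pairs. A hedged sketch of the equivalence, written against a stand-in trait rather than the real one:

use std::ops::Range;

trait CommandBuffer {
    unsafe fn draw(
        &mut self,
        first_vertex: u32,
        vertex_count: u32,
        first_instance: u32,
        instance_count: u32,
    );
}

// The old range form `first..first + count` carries exactly the data the
// new (first, count) pairs expect:
unsafe fn draw_range<C: CommandBuffer>(cmd: &mut C, vertices: Range<u32>, instances: Range<u32>) {
    cmd.draw(
        vertices.start,
        vertices.end - vertices.start,
        instances.start,
        instances.end - instances.start,
    );
}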

View File

@ -8,16 +8,13 @@ use std::{num::NonZeroU32, ops::Range};
use crate::device::trace::Command as TraceCommand;
use crate::{
command::CommandBuffer,
conv,
device::all_buffer_stages,
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Token},
id::{BufferId, CommandEncoderId, TextureId},
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{BufferUse, TextureUse},
track::TextureSelector,
};
use hal::command::CommandBuffer as _;
use hal::CommandBuffer as _;
use thiserror::Error;
use wgt::{
BufferAddress, BufferSize, BufferUsage, ImageSubresourceRange, TextureAspect, TextureUsage,
@ -46,22 +43,22 @@ pub enum ClearError {
},
#[error("destination buffer/texture is missing the `COPY_DST` usage flag")]
MissingCopyDstUsageFlag(Option<BufferId>, Option<TextureId>),
#[error("texture lacks the aspects that were specified in the image subresource range. Texture has {texture_aspects:?}, specified was {subresource_range_aspects:?}")]
#[error("texture lacks the aspects that were specified in the image subresource range. Texture with format {texture_format:?}, specified was {subresource_range_aspects:?}")]
MissingTextureAspect {
texture_aspects: hal::FormatAspect,
texture_format: wgt::TextureFormat,
subresource_range_aspects: TextureAspect,
},
#[error("image subresource level range is outside of the texture's level range. texture range is {texture_level_range:?}, \
whereas subresource range specified start {subresource_base_mip_level} and count {subresource_mip_level_count:?}")]
InvalidTextureLevelRange {
texture_level_range: Range<hal::image::Level>,
texture_level_range: Range<u32>,
subresource_base_mip_level: u32,
subresource_mip_level_count: Option<NonZeroU32>,
},
#[error("image subresource layer range is outside of the texture's layer range. texture range is {texture_layer_range:?}, \
whereas subresource range specified start {subresource_base_array_layer} and count {subresource_array_layer_count:?}")]
InvalidTextureLayerRange {
texture_layer_range: Range<hal::image::Layer>,
texture_layer_range: Range<u32>,
subresource_base_array_layer: u32,
subresource_array_layer_count: Option<NonZeroU32>,
},
@ -96,9 +93,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, dst, (), BufferUse::COPY_DST)
.use_replace(&*buffer_guard, dst, (), hal::BufferUse::COPY_DST)
.map_err(ClearError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst_buffer
let dst_raw = dst_buffer
.raw
.as_ref()
.ok_or(ClearError::InvalidBuffer(dst))?;
@ -124,8 +121,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
let num_bytes_filled = size.map_or(dst_buffer.size - offset, |s| s.get());
if num_bytes_filled == 0 {
let end = match size {
Some(size) => offset + size.get(),
None => dst_buffer.size,
};
if offset == end {
log::trace!("Ignoring fill_buffer of size 0");
return Ok(());
}
@ -134,7 +134,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
cmd_buf.buffer_memory_init_actions.extend(
dst_buffer
.initialization_status
.check(offset..(offset + num_bytes_filled))
.check(offset..end)
.map(|range| MemoryInitTrackerAction {
id: dst,
range,
@ -143,24 +143,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
// actual hal barrier & operation
let dst_barrier = dst_pending
.map(|pending| pending.into_hal(dst_buffer))
.next();
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmd_buf_raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
dst_barrier.into_iter(),
);
cmd_buf_raw.fill_buffer(
dst_raw,
hal::buffer::SubRange {
offset,
size: size.map(|s| s.get()),
},
0,
);
cmd_buf_raw.transition_buffers(dst_barrier);
cmd_buf_raw.fill_buffer(dst_raw, offset..end, 0);
}
Ok(())
}
@ -198,24 +185,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|_| ClearError::InvalidTexture(dst))?;
// Check if subresource aspects are valid.
let aspects = match subresource_range.aspect {
wgt::TextureAspect::All => dst_texture.aspects,
wgt::TextureAspect::DepthOnly => hal::FormatAspect::DEPTH,
wgt::TextureAspect::StencilOnly => hal::FormatAspect::STENCIL,
};
if !dst_texture.aspects.contains(aspects) {
let requested_aspects = hal::FormatAspect::from(subresource_range.aspect);
let clear_aspects = hal::FormatAspect::from(dst_texture.desc.format) & requested_aspects;
if clear_aspects.is_empty() {
return Err(ClearError::MissingTextureAspect {
texture_aspects: dst_texture.aspects,
texture_format: dst_texture.desc.format,
subresource_range_aspects: subresource_range.aspect,
});
};
// Check if subresource level range is valid
let subresource_level_end = if let Some(count) = subresource_range.mip_level_count {
(subresource_range.base_mip_level + count.get()) as u8
} else {
dst_texture.full_range.levels.end
let subresource_level_end = match subresource_range.mip_level_count {
Some(count) => subresource_range.base_mip_level + count.get(),
None => dst_texture.full_range.levels.end,
};
if dst_texture.full_range.levels.start > subresource_range.base_mip_level as u8
if dst_texture.full_range.levels.start > subresource_range.base_mip_level
|| dst_texture.full_range.levels.end < subresource_level_end
{
return Err(ClearError::InvalidTextureLevelRange {
@ -225,12 +208,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
// Check if subresource layer range is valid
let subresource_layer_end = if let Some(count) = subresource_range.array_layer_count {
(subresource_range.base_array_layer + count.get()) as u16
} else {
dst_texture.full_range.layers.end
let subresource_layer_end = match subresource_range.array_layer_count {
Some(count) => subresource_range.base_array_layer + count.get(),
None => dst_texture.full_range.layers.end,
};
if dst_texture.full_range.layers.start > subresource_range.base_array_layer as u16
if dst_texture.full_range.layers.start > subresource_range.base_array_layer
|| dst_texture.full_range.layers.end < subresource_layer_end
{
return Err(ClearError::InvalidTextureLayerRange {
@ -248,31 +230,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*texture_guard,
dst,
TextureSelector {
levels: subresource_range.base_mip_level as u8..subresource_level_end,
layers: subresource_range.base_array_layer as u16..subresource_layer_end,
levels: subresource_range.base_mip_level..subresource_level_end,
layers: subresource_range.base_array_layer..subresource_layer_end,
},
TextureUse::COPY_DST,
hal::TextureUse::COPY_DST,
)
.map_err(ClearError::InvalidTexture)?;
let &(ref dst_raw, _) = dst_texture
let dst_raw = dst_texture
.raw
.as_ref()
.ok_or(ClearError::InvalidTexture(dst))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
if !dst_texture.desc.usage.contains(TextureUsage::COPY_DST) {
return Err(ClearError::MissingCopyDstUsageFlag(None, Some(dst)));
}
// actual hal barrier & operation
let dst_barrier = dst_pending
.map(|pending| pending.into_hal(dst_texture))
.next();
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_texture));
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmd_buf_raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
dst_barrier.into_iter(),
);
cmd_buf_raw.transition_textures(dst_barrier);
/*TODO: image clears
cmd_buf_raw.clear_image(
dst_raw,
hal::image::Layout::TransferDstOptimal,
@ -288,7 +265,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
layer_start: subresource_range.base_array_layer as u16,
layer_count: subresource_range.array_layer_count.map(|c| c.get() as u16),
}),
);
);*/
}
Ok(())
}
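
The aspect check above replaces a hand-written match plus `contains` with a bitflag intersection. A small sketch, assuming `hal::FormatAspect` behaves like a bitflags type with the `From` conversions the diff implies (stand-in definitions here):

bitflags::bitflags! {
    struct FormatAspect: u8 {
        const COLOR = 1 << 0;
        const DEPTH = 1 << 1;
        const STENCIL = 1 << 2;
    }
}

// A clear is valid only if the requested aspects intersect the aspects
// the texture's format actually has.
fn clearable_aspects(format: FormatAspect, requested: FormatAspect) -> Option<FormatAspect> {
    let clear = format & requested;
    if clear.is_empty() { None } else { Some(clear) }
}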

View File

@ -13,7 +13,7 @@ use crate::{
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{Buffer, Texture},
track::{StatefulTrackerSubset, TrackerSet, UsageConflict},
track::{StatefulTrackerSubset, TrackerSet, UsageConflict, UseExtendError},
validation::{check_buffer_usage, MissingBufferUsageError},
Label, DOWNLEVEL_ERROR_WARNING_MESSAGE,
};
@ -22,7 +22,6 @@ use hal::CommandBuffer as _;
use thiserror::Error;
use wgt::{BufferAddress, BufferUsage, ShaderStage};
use crate::track::UseExtendError;
use std::{fmt, mem, str};
#[doc(hidden)]
@ -293,7 +292,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if let Some(ref label) = base.label {
unsafe {
raw.begin_debug_marker(label, 0);
raw.begin_debug_marker(label);
}
}
@ -377,19 +376,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !entries.is_empty() {
let pipeline_layout =
&pipeline_layout_guard[pipeline_layout_id.unwrap()].raw;
let desc_sets = entries.iter().map(|e| {
bind_group_guard[e.group_id.as_ref().unwrap().value]
.raw
.raw()
});
let offsets = entries.iter().flat_map(|e| &e.dynamic_offsets).cloned();
unsafe {
raw.bind_compute_descriptor_sets(
pipeline_layout,
index as usize,
desc_sets,
offsets,
);
for (i, e) in entries.iter().enumerate() {
let raw_bg = &bind_group_guard[e.group_id.as_ref().unwrap().value].raw;
unsafe {
raw.set_bind_group(
pipeline_layout,
index as u32 + i as u32,
raw_bg,
&e.dynamic_offsets,
);
}
}
}
}
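
The rebinding loop above reflects an API shape change: the old backend bound a batch of descriptor sets in one call, while the new hal binds one group per call, so the group index is offset by the loop position. A sketch against a stand-in trait:

trait SetBindGroup {
    type PipelineLayout;
    type BindGroup;
    unsafe fn set_bind_group(
        &mut self,
        layout: &Self::PipelineLayout,
        index: u32,
        group: &Self::BindGroup,
        dynamic_offsets: &[u32],
    );
}

unsafe fn rebind<C: SetBindGroup>(
    raw: &mut C,
    layout: &C::PipelineLayout,
    start_index: usize,
    entries: &[(C::BindGroup, Vec<u32>)], // (group, its dynamic offsets), simplified
) {
    for (i, (group, offsets)) in entries.iter().enumerate() {
        raw.set_bind_group(layout, start_index as u32 + i as u32, group, offsets);
    }
}
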
@ -408,7 +404,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_pass_err(scope)?;
unsafe {
raw.bind_compute_pipeline(&pipeline.raw);
raw.set_compute_pipeline(&pipeline.raw);
}
// Rebind resources
@ -420,19 +416,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pipeline.layout_id.value,
);
if !entries.is_empty() {
let desc_sets = entries.iter().map(|e| {
bind_group_guard[e.group_id.as_ref().unwrap().value]
.raw
.raw()
});
let offsets = entries.iter().flat_map(|e| &e.dynamic_offsets).cloned();
unsafe {
raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
start_index,
desc_sets,
offsets,
);
for (i, e) in entries.iter().enumerate() {
let raw_bg =
&bind_group_guard[e.group_id.as_ref().unwrap().value].raw;
unsafe {
raw.set_bind_group(
&pipeline_layout.raw,
start_index as u32 + i as u32,
raw_bg,
&e.dynamic_offsets,
);
}
}
}
@ -447,8 +441,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
offset,
size_bytes,
|clear_offset, clear_data| unsafe {
raw.push_compute_constants(
raw.set_push_constants(
&pipeline_layout.raw,
wgt::ShaderStage::COMPUTE,
clear_offset,
clear_data,
);
@ -488,7 +483,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
)
.map_pass_err(scope)?;
unsafe { raw.push_compute_constants(&pipeline_layout.raw, offset, data_slice) }
unsafe {
raw.set_push_constants(
&pipeline_layout.raw,
wgt::ShaderStage::COMPUTE,
offset,
data_slice,
);
}
}
ComputeCommand::Dispatch(groups) => {
let scope = PassErrorScope::Dispatch {
@ -537,7 +539,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_pass_err(scope);
}
let &(ref buf_raw, _) = indirect_buffer
let buf_raw = indirect_buffer
.raw
.as_ref()
.ok_or(ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
@ -569,14 +571,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.dispatch_indirect(buf_raw, offset);
}
}
ComputeCommand::PushDebugGroup { color, len } => {
ComputeCommand::PushDebugGroup { color: _, len } => {
state.debug_scope_depth += 1;
let label =
str::from_utf8(&base.string_data[string_offset..string_offset + len])
.unwrap();
string_offset += len;
unsafe {
raw.begin_debug_marker(label, color);
raw.begin_debug_marker(label);
}
}
ComputeCommand::PopDebugGroup => {
@ -591,12 +593,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw.end_debug_marker();
}
}
ComputeCommand::InsertDebugMarker { color, len } => {
ComputeCommand::InsertDebugMarker { color: _, len } => {
let label =
str::from_utf8(&base.string_data[string_offset..string_offset + len])
.unwrap();
string_offset += len;
unsafe { raw.insert_debug_marker(label, color) }
unsafe { raw.insert_debug_marker(label) }
}
ComputeCommand::WriteTimestamp {
query_set_id,

View File

@ -7,7 +7,6 @@
use crate::{
binding_model::PushConstantUploadError,
hal::BufferUse,
id,
track::UseExtendError,
validation::{MissingBufferUsageError, MissingTextureUsageError},
@ -17,7 +16,7 @@ use wgt::{BufferAddress, BufferSize, Color};
use std::num::NonZeroU32;
use thiserror::Error;
pub type BufferError = UseExtendError<BufferUse>;
pub type BufferError = UseExtendError<hal::BufferUse>;
/// Error validating a draw call.
#[derive(Clone, Debug, Error, PartialEq)]

View File

@ -70,10 +70,11 @@ impl<A: HalApi> CommandBuffer<A> {
#[cfg(feature = "trace")] enable_tracing: bool,
#[cfg(debug_assertions)] label: &Label,
) -> Self {
use crate::LabelHelpers as _;
CommandBuffer {
raw: vec![raw],
status: CommandEncoderStatus::Recording,
recorded_thread_id: thread::current().id(),
device_id,
trackers: TrackerSet::new(A::VARIANT),
used_swap_chains: Default::default(),
@ -88,7 +89,7 @@ impl<A: HalApi> CommandBuffer<A> {
None
},
#[cfg(debug_assertions)]
label: label.to_string_or_default(),
label: label.borrow_or_default().to_string(),
}
}
@ -242,8 +243,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
cmd_buf.status = CommandEncoderStatus::Finished;
// stop tracking the swapchain image, if used
for sc_id in cmd_buf.used_swap_chains.iter() {
let view_id = swap_chain_guard[sc_id.value]
.acquired_view_id
let &(ref view_id, _) = swap_chain_guard[sc_id.value]
.acquired_texture
.as_ref()
.expect("Used swap chain frame has already presented");
cmd_buf.trackers.views.remove(view_id.value);
@ -272,7 +273,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmd_buf_raw.begin_debug_marker(label, 0);
cmd_buf_raw.begin_debug_marker(label);
}
Ok(())
}
@ -292,7 +293,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmd_buf_raw.insert_debug_marker(label, 0);
cmd_buf_raw.insert_debug_marker(label);
}
Ok(())
}

View File

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hal::command::CommandBuffer as _;
use hal::CommandBuffer as _;
#[cfg(feature = "trace")]
use crate::device::trace::Command as TraceCommand;
@ -10,7 +10,7 @@ use crate::{
command::{CommandBuffer, CommandEncoderError},
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id::{self, Id, TypedId},
resource::{BufferUse, QuerySet},
resource::QuerySet,
track::UseExtendError,
Epoch, FastHashMap, Index,
};
@ -48,7 +48,7 @@ impl<A: hal::Api> QueryResetMap<A> {
pub fn reset_queries(
self,
cmd_buf_raw: &mut B::CommandBuffer,
cmd_buf_raw: &mut A::CommandBuffer,
query_set_storage: &Storage<QuerySet<A>, id::QuerySetId>,
backend: wgt::Backend,
) -> Result<(), id::QuerySetId> {
@ -69,7 +69,7 @@ impl<A: hal::Api> QueryResetMap<A> {
// We've hit the end of a run, dispatch a reset
(Some(start), false) => {
run_start = None;
unsafe { cmd_buf_raw.reset_query_pool(&query_set.raw, start..idx as u32) };
unsafe { cmd_buf_raw.reset_queries(&query_set.raw, start..idx as u32) };
}
// We're starting a run
(None, true) => {
@ -167,7 +167,7 @@ impl<A: HalApi> QuerySet<A> {
query_type: SimplifiedQueryType,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
) -> Result<hal::query::Query<'_, B>, QueryUseError> {
) -> Result<&A::QuerySet, QueryUseError> {
// We need to defer our resets because we are in a renderpass, add the usage to the reset map.
if let Some(reset) = reset_state {
let used = reset.use_query_set(query_set_id, self, query_index);
@ -191,23 +191,18 @@ impl<A: HalApi> QuerySet<A> {
});
}
let hal_query = hal::query::Query::<A> {
pool: &self.raw,
id: query_index,
};
Ok(hal_query)
Ok(&self.raw)
}
pub(super) fn validate_and_write_timestamp(
&self,
cmd_buf_raw: &mut B::CommandBuffer,
cmd_buf_raw: &mut A::CommandBuffer,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let hal_query = self.validate_query(
let query_set = self.validate_query(
query_set_id,
SimplifiedQueryType::Timestamp,
query_index,
@ -217,9 +212,9 @@ impl<A: HalApi> QuerySet<A> {
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
cmd_buf_raw.reset_query_pool(&self.raw, query_index..(query_index + 1));
cmd_buf_raw.reset_queries(&self.raw, query_index..(query_index + 1));
}
cmd_buf_raw.write_timestamp(hal::pso::PipelineStage::BOTTOM_OF_PIPE, hal_query);
cmd_buf_raw.write_timestamp(query_set, query_index);
}
Ok(())
@ -227,14 +222,14 @@ impl<A: HalApi> QuerySet<A> {
pub(super) fn validate_and_begin_pipeline_statistics_query(
&self,
cmd_buf_raw: &mut B::CommandBuffer,
cmd_buf_raw: &mut A::CommandBuffer,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
active_query: &mut Option<(id::QuerySetId, u32)>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let hal_query = self.validate_query(
let query_set = self.validate_query(
query_set_id,
SimplifiedQueryType::PipelineStatistics,
query_index,
@ -251,9 +246,9 @@ impl<A: HalApi> QuerySet<A> {
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
cmd_buf_raw.reset_query_pool(&self.raw, query_index..(query_index + 1));
cmd_buf_raw.reset_queries(&self.raw, query_index..(query_index + 1));
}
cmd_buf_raw.begin_query(hal_query, hal::query::ControlFlags::empty());
cmd_buf_raw.begin_query(query_set, query_index);
}
Ok(())
@ -261,7 +256,7 @@ impl<A: HalApi> QuerySet<A> {
}
pub(super) fn end_pipeline_statistics_query<A: HalApi>(
cmd_buf_raw: &mut B::CommandBuffer,
cmd_buf_raw: &mut A::CommandBuffer,
storage: &Storage<QuerySet<A>, id::QuerySetId>,
active_query: &mut Option<(id::QuerySetId, u32)>,
) -> Result<(), QueryUseError> {
@ -269,12 +264,7 @@ pub(super) fn end_pipeline_statistics_query<A: HalApi>(
// We can unwrap here as the validity was validated when the active query was set
let query_set = storage.get(query_set_id).unwrap();
let hal_query = hal::query::Query::<A> {
pool: &query_set.raw,
id: query_index,
};
unsafe { cmd_buf_raw.end_query(hal_query) }
unsafe { cmd_buf_raw.end_query(&query_set.raw, query_index) };
Ok(())
} else {
@ -362,7 +352,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST)
.use_replace(&*buffer_guard, destination, (), hal::BufferUse::COPY_DST)
.map_err(QueryError::InvalidBuffer)?;
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
@ -380,9 +370,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.into());
}
let stride = query_set.elements * wgt::QUERY_SIZE;
let bytes_used = (stride * query_count) as BufferAddress;
let bytes_used = (wgt::QUERY_SIZE * query_count) as BufferAddress;
let buffer_start_offset = destination_offset;
let buffer_end_offset = buffer_start_offset + bytes_used;
@ -390,7 +378,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(ResolveError::BufferOverrun {
start_query,
end_query,
stride,
stride: wgt::QUERY_SIZE,
buffer_size: dst_buffer.size,
buffer_start_offset,
buffer_end_offset,
@ -399,18 +387,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
unsafe {
cmd_buf_raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
dst_barrier,
);
cmd_buf_raw.copy_query_pool_results(
cmd_buf_raw.transition_buffers(dst_barrier);
cmd_buf_raw.copy_query_results(
&query_set.raw,
start_query..end_query,
&dst_buffer.raw.as_ref().unwrap().0,
dst_buffer.raw.as_ref().unwrap(),
destination_offset,
stride,
hal::query::ResultFlags::WAIT | hal::query::ResultFlags::BITS_64,
);
}

File diff suppressed because it is too large

View File

@ -10,14 +10,15 @@ use crate::{
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id::{BufferId, CommandEncoderId, TextureId},
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{Texture, TextureDescriptor, TextureErrorDimension},
resource::{Texture, TextureErrorDimension},
track::TextureSelector,
};
use hal::CommandBuffer as _;
use thiserror::Error;
use wgt::{BufferAddress, BufferUsage, Extent3d, TextureUsage};
use std::{iter, num::NonZeroU32};
use std::iter;
pub type ImageCopyBuffer = wgt::ImageCopyBuffer<BufferId>;
pub type ImageCopyTexture = wgt::ImageCopyTexture<TextureId>;
@ -106,42 +107,42 @@ pub enum CopyError {
Transfer(#[from] TransferError),
}
pub(crate) fn extract_image_range<A: hal::Api>(
pub(crate) fn extract_texture_selector<A: hal::Api>(
copy_texture: &ImageCopyTexture,
copy_size: &Extent3d,
texture_guard: &Storage<Texture<A>, TextureId>,
) -> Result<(wgt::ImageSubresourceRange, wgt::TextureFormat), TransferError> {
) -> Result<(TextureSelector, hal::TextureCopyBase, wgt::TextureFormat), TransferError> {
let texture = texture_guard
.get(copy_texture.texture)
.ok_or(TransferError::InvalidTexture(copy_texture.texture))?;
.map_err(|_| TransferError::InvalidTexture(copy_texture.texture))?;
let format = texture.desc.format;
let copy_aspect =
hal::FormatAspect::from(format) & hal::FormatAspect::from(copy_texture.aspect);
if copy_aspect.is_empty() {
return Err(TransferError::MissingTextureAspect {
return Err(TransferError::InvalidTextureAspect {
format,
aspect: copy_texture.aspect,
});
}
let (base_array_layer, array_layer_count) = match texture.desc.dimension {
wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => (
copy_texture.origin.depth_or_array_layers,
NonZeroU32::new(
copy_texture.origin.depth_or_array_layers + copy_size.depth_or_array_layers,
),
),
wgt::TextureDimension::D3 => (0, None),
let layers = match texture.desc.dimension {
wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => {
copy_texture.origin.z..copy_texture.origin.z + copy_size.depth_or_array_layers
}
wgt::TextureDimension::D3 => 0..1,
};
let range = wgt::ImageSubresourceRange {
let selector = TextureSelector {
levels: copy_texture.mip_level..copy_texture.mip_level + 1,
layers,
};
let base = hal::TextureCopyBase {
origin: copy_texture.origin,
mip_level: copy_texture.mip_level,
aspect: copy_aspect,
base_mip_level: copy_texture.mip_level,
mip_level_count: NonZeroU32::new(1),
base_array_layer,
array_layer_count,
};
Ok((range, format))
Ok((selector, base, format))
}
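
The layer selection in `extract_texture_selector` encodes a WebGPU rule worth spelling out: for 1D and 2D textures the copy's `depth_or_array_layers` counts array layers starting at `origin.z`, while a 3D texture has exactly one layer and its depth belongs to the copy extent. A sketch with a stand-in dimension enum:

use std::ops::Range;

enum TextureDimension { D1, D2, D3 } // stand-in for wgt::TextureDimension

fn copy_layers(dim: TextureDimension, origin_z: u32, depth_or_array_layers: u32) -> Range<u32> {
    match dim {
        // 1D/2D: the copy's z component selects array layers.
        TextureDimension::D1 | TextureDimension::D2 => {
            origin_z..origin_z + depth_or_array_layers
        }
        // 3D: depth is spatial, so the selector covers the single layer.
        TextureDimension::D3 => 0..1,
    }
}
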
/// Function copied with some modifications from webgpu standard <https://gpuweb.github.io/gpuweb/#copy-between-buffer-texture>
@ -242,7 +243,7 @@ pub(crate) fn validate_linear_texture_data(
/// Returns the mip level extent.
pub(crate) fn validate_texture_copy_range(
texture_copy_view: &ImageCopyTexture,
desc: &TextureDescriptor,
desc: &wgt::TextureDescriptor<()>,
texture_side: CopySide,
copy_size: &Extent3d,
) -> Result<Extent3d, TransferError> {
@ -278,11 +279,11 @@ pub(crate) fn validate_texture_copy_range(
});
}
let z_copy_max = texture_copy_view.origin.z + copy_size.depth_or_array_layers;
if z_copy_max > extent.depth {
if z_copy_max > extent.depth_or_array_layers {
return Err(TransferError::TextureOverrun {
start_offset: texture_copy_view.origin.z,
end_offset: z_copy_max,
texture_size: extent.depth,
texture_size: extent.depth_or_array_layers,
dimension: TextureErrorDimension::Z,
side: texture_side,
});
@ -342,7 +343,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers
.use_replace(&*buffer_guard, source, (), hal::BufferUse::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?;
let &(ref src_raw, _) = src_buffer
let src_raw = src_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(source))?;
@ -359,7 +360,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers
.use_replace(&*buffer_guard, destination, (), hal::BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst_buffer
let dst_raw = dst_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(destination))?;
@ -431,12 +432,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let region = hal::BufferCopy {
src_offset: source_offset,
dst_offset: destination_offset,
size,
size: wgt::BufferSize::new(size).unwrap(),
};
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmd_buf_raw.transition_buffers(src_barrier.into_iter().chain(dst_barrier));
cmd_buf_raw.copy_buffer(src_raw, dst_raw, iter::once(region));
cmd_buf_raw.copy_buffer_to_buffer(src_raw, dst_raw, iter::once(region));
}
Ok(())
}
@ -471,14 +472,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (dst_range, _) = extract_image_range(destination, copy_size, &*texture_guard)?;
let (dst_range, dst_base, _) =
extract_texture_selector(destination, copy_size, &*texture_guard)?;
let (src_buffer, src_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, source.buffer, (), hal::BufferUse::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?;
let &(ref src_raw, _) = src_buffer
let src_raw = src_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(source.buffer))?;
@ -497,11 +499,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::TextureUse::COPY_DST,
)
.unwrap();
let &(ref dst_raw, _) = dst_texture
let dst_raw = dst_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
if !dst_texture.desc.usage.contains(TextureUsage::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
@ -511,14 +513,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let format_desc = dst_texture.desc.format.describe();
let max_image_extent = validate_texture_copy_range(
destination,
dst_texture.format,
dst_texture.kind,
&dst_texture.desc,
CopySide::Destination,
copy_size,
)?;
let required_buffer_bytes_in_copy = validate_linear_texture_data(
&source.layout,
dst_texture.format,
dst_texture.desc.format,
src_buffer.size,
CopySide::Source,
format_desc.block_size as BufferAddress,
@ -538,8 +539,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
let (block_width, _) = format_desc.block_dimensions;
if !conv::is_valid_copy_dst_texture_format(dst_texture.format) {
return Err(TransferError::CopyToForbiddenTextureFormat(dst_texture.format).into());
if !conv::is_valid_copy_dst_texture_format(dst_texture.desc.format) {
return Err(
TransferError::CopyToForbiddenTextureFormat(dst_texture.desc.format).into(),
);
}
// WebGPU uses the physical size of the texture for copies whereas vulkan uses
@ -547,9 +550,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let region = hal::BufferTextureCopy {
buffer_layout: &source.layout,
texture_mip_level: destination.mip_level,
texture_origin: destination.origin,
buffer_layout: source.layout,
texture_base: dst_base,
size: Extent3d {
width: copy_size.width.min(max_image_extent.width),
height: copy_size.height.min(max_image_extent.height),
@ -595,7 +597,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (src_range, _) = extract_image_range(source, copy_size, &*texture_guard)?;
let (src_range, src_base, _) =
extract_texture_selector(source, copy_size, &*texture_guard)?;
let (src_texture, src_pending) = cmd_buf
.trackers
@ -607,16 +610,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::TextureUse::COPY_SRC,
)
.unwrap();
let &(ref src_raw, _) = src_texture
let src_raw = src_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
if !src_texture.desc.usage.contains(TextureUsage::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_texture));
let (dst_buffer, dst_barriers) = cmd_buf
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(
@ -626,7 +629,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::BufferUse::COPY_DST,
)
.map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst_buffer
let dst_raw = dst_buffer
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(destination.buffer))?;
@ -635,19 +638,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(),
);
}
let dst_barrier = dst_barriers.map(|pending| pending.into_hal(dst_buffer));
let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_buffer));
let format_desc = src_texture.desc.format.describe();
let max_image_extent = validate_texture_copy_range(
source,
src_texture.format,
src_texture.kind,
CopySide::Source,
copy_size,
)?;
let max_image_extent =
validate_texture_copy_range(source, &src_texture.desc, CopySide::Source, copy_size)?;
let required_buffer_bytes_in_copy = validate_linear_texture_data(
&destination.layout,
src_texture.format,
src_texture.desc.format,
dst_buffer.size,
CopySide::Destination,
format_desc.block_size as BufferAddress,
@ -655,9 +653,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
true,
)?;
let (block_width, _) = src_texture.format.describe().block_dimensions;
if !conv::is_valid_copy_src_texture_format(src_texture.format) {
return Err(TransferError::CopyFromForbiddenTextureFormat(src_texture.format).into());
let (block_width, _) = format_desc.block_dimensions;
if !conv::is_valid_copy_src_texture_format(src_texture.desc.format) {
return Err(
TransferError::CopyFromForbiddenTextureFormat(src_texture.desc.format).into(),
);
}
cmd_buf.buffer_memory_init_actions.extend(
@ -679,9 +679,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let region = hal::BufferTextureCopy {
buffer_layout: &destination.layout,
texture_mip_level: source.mip_level,
texture_origin: source.origin,
buffer_layout: destination.layout,
texture_base: src_base,
size: Extent3d {
width: copy_size.width.min(max_image_extent.width),
height: copy_size.height.min(max_image_extent.height),
@ -733,9 +732,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (src_range, _) = extract_image_range(source, copy_size, &*texture_guard)?;
let (dst_range, _) = extract_image_range(destination, copy_size, &*texture_guard)?;
if src_range.aspects != dst_range.aspects {
let (src_range, src_base, _) =
extract_texture_selector(source, copy_size, &*texture_guard)?;
let (dst_range, dst_base, _) =
extract_texture_selector(destination, copy_size, &*texture_guard)?;
if src_base.aspect != dst_base.aspect {
return Err(TransferError::MismatchedAspects.into());
}
@ -749,11 +750,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::TextureUse::COPY_SRC,
)
.unwrap();
let &(ref src_raw, _) = src_texture
let src_raw = src_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(source.texture))?;
if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
if !src_texture.desc.usage.contains(TextureUsage::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
//TODO: try to avoid this collection. It's needed because both
@ -772,11 +773,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::TextureUse::COPY_DST,
)
.unwrap();
let &(ref dst_raw, _) = dst_texture
let dst_raw = dst_texture
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
if !dst_texture.desc.usage.contains(TextureUsage::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
@ -797,10 +798,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let region = hal::TextureCopy {
src_subresource: src_range,
src_origin: source.origin,
dst_subresource: dst_range,
dst_origin: destination.origin,
src_base,
dst_base,
size: Extent3d {
width: copy_size
.width

View File

@ -55,7 +55,7 @@ pub fn map_buffer_usage(usage: wgt::BufferUsage) -> hal::BufferUse {
usage.contains(wgt::BufferUsage::UNIFORM),
);
u.set(
hal::BufferUse::STORAGE,
hal::BufferUse::STORAGE_LOAD | hal::BufferUse::STORAGE_STORE,
usage.contains(wgt::BufferUsage::STORAGE),
);
u.set(

View File

@ -5,12 +5,7 @@
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
device::{
alloc,
descriptor::{DescriptorAllocator, DescriptorSet},
queue::TempResource,
DeviceError,
},
device::{queue::TempResource, DeviceError},
hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Token},
id, resource,
track::TrackerSet,
@ -18,13 +13,13 @@ use crate::{
};
use copyless::VecHelper as _;
use hal::device::Device as _;
use hal::Device as _;
use parking_lot::Mutex;
use thiserror::Error;
use std::sync::atomic::Ordering;
const CLEANUP_WAIT_MS: u64 = 5000;
const CLEANUP_WAIT_MS: u32 = 5000;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
@ -91,33 +86,31 @@ impl SuspectedResources {
/// A struct that keeps lists of resources that are no longer needed.
#[derive(Debug)]
struct NonReferencedResources<A: hal::Api> {
buffers: Vec<(B::Buffer, alloc::MemoryBlock<A>)>,
images: Vec<(B::Image, alloc::MemoryBlock<A>)>,
buffers: Vec<A::Buffer>,
textures: Vec<A::Texture>,
// Note: we keep the associated ID here in order to be able to check
// at any point what resources are used in a submission.
image_views: Vec<(id::Valid<id::TextureViewId>, B::ImageView)>,
samplers: Vec<B::Sampler>,
framebuffers: Vec<B::Framebuffer>,
desc_sets: Vec<DescriptorSet<A>>,
compute_pipes: Vec<B::ComputePipeline>,
graphics_pipes: Vec<B::GraphicsPipeline>,
descriptor_set_layouts: Vec<B::DescriptorSetLayout>,
pipeline_layouts: Vec<B::PipelineLayout>,
query_sets: Vec<B::QueryPool>,
texture_views: Vec<(id::Valid<id::TextureViewId>, A::TextureView)>,
samplers: Vec<A::Sampler>,
bind_groups: Vec<A::BindGroup>,
compute_pipes: Vec<A::ComputePipeline>,
render_pipes: Vec<A::RenderPipeline>,
bind_group_layouts: Vec<A::BindGroupLayout>,
pipeline_layouts: Vec<A::PipelineLayout>,
query_sets: Vec<A::QuerySet>,
}
impl<A: hal::Api> NonReferencedResources<A> {
fn new() -> Self {
Self {
buffers: Vec::new(),
images: Vec::new(),
image_views: Vec::new(),
textures: Vec::new(),
texture_views: Vec::new(),
samplers: Vec::new(),
framebuffers: Vec::new(),
desc_sets: Vec::new(),
bind_groups: Vec::new(),
compute_pipes: Vec::new(),
graphics_pipes: Vec::new(),
descriptor_set_layouts: Vec::new(),
render_pipes: Vec::new(),
bind_group_layouts: Vec::new(),
pipeline_layouts: Vec::new(),
query_sets: Vec::new(),
}
@ -125,76 +118,54 @@ impl<A: hal::Api> NonReferencedResources<A> {
fn extend(&mut self, other: Self) {
self.buffers.extend(other.buffers);
self.images.extend(other.images);
self.image_views.extend(other.image_views);
self.textures.extend(other.textures);
self.texture_views.extend(other.texture_views);
self.samplers.extend(other.samplers);
self.framebuffers.extend(other.framebuffers);
self.desc_sets.extend(other.desc_sets);
self.bind_groups.extend(other.bind_groups);
self.compute_pipes.extend(other.compute_pipes);
self.graphics_pipes.extend(other.graphics_pipes);
self.render_pipes.extend(other.render_pipes);
self.query_sets.extend(other.query_sets);
assert!(other.descriptor_set_layouts.is_empty());
assert!(other.bind_group_layouts.is_empty());
assert!(other.pipeline_layouts.is_empty());
}
unsafe fn clean(
&mut self,
device: &B::Device,
memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<A>>,
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<A>>,
) {
if !self.buffers.is_empty() || !self.images.is_empty() {
let mut allocator = memory_allocator_mutex.lock();
for (raw, memory) in self.buffers.drain(..) {
log::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
device.destroy_buffer(raw);
allocator.free(device, memory);
}
for (raw, memory) in self.images.drain(..) {
log::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
device.destroy_image(raw);
allocator.free(device, memory);
}
unsafe fn clean(&mut self, device: &A::Device) {
for raw in self.buffers.drain(..) {
device.destroy_buffer(raw);
}
for (_, raw) in self.image_views.drain(..) {
device.destroy_image_view(raw);
for raw in self.textures.drain(..) {
device.destroy_texture(raw);
}
for (_, raw) in self.texture_views.drain(..) {
device.destroy_texture_view(raw);
}
for raw in self.samplers.drain(..) {
device.destroy_sampler(raw);
}
for raw in self.framebuffers.drain(..) {
device.destroy_framebuffer(raw);
for raw in self.bind_groups.drain(..) {
device.destroy_bind_group(raw);
}
if !self.desc_sets.is_empty() {
descriptor_allocator_mutex
.lock()
.free(device, self.desc_sets.drain(..));
}
for raw in self.compute_pipes.drain(..) {
device.destroy_compute_pipeline(raw);
}
for raw in self.graphics_pipes.drain(..) {
device.destroy_graphics_pipeline(raw);
for raw in self.render_pipes.drain(..) {
device.destroy_render_pipeline(raw);
}
for raw in self.descriptor_set_layouts.drain(..) {
device.destroy_descriptor_set_layout(raw);
for raw in self.bind_group_layouts.drain(..) {
device.destroy_bind_group_layout(raw);
}
for raw in self.pipeline_layouts.drain(..) {
device.destroy_pipeline_layout(raw);
}
for raw in self.query_sets.drain(..) {
device.destroy_query_pool(raw);
device.destroy_query_set(raw);
}
}
}
#[derive(Debug)]
struct ActiveSubmission<A: hal::Api> {
index: SubmissionIndex,
fence: B::Fence,
fence: A::Fence,
last_resources: NonReferencedResources<A>,
mapped: Vec<id::Valid<id::BufferId>>,
}
@ -215,7 +186,6 @@ pub enum WaitIdleError {
/// and register the buffer with either a submission in flight, or straight into `ready_to_map` vector.
/// 3. When `ActiveSubmission` is retired, the mapped buffers associated with it are moved to `ready_to_map` vector.
/// 4. Finally, `handle_mapping` issues all the callbacks.
#[derive(Debug)]
pub(super) struct LifetimeTracker<A: hal::Api> {
/// Resources that the user has requested be mapped, but are still in use.
mapped: Vec<Stored<id::BufferId>>,
@ -252,15 +222,15 @@ impl<A: hal::Api> LifetimeTracker<A> {
pub fn track_submission(
&mut self,
index: SubmissionIndex,
fence: B::Fence,
fence: A::Fence,
new_suspects: &SuspectedResources,
temp_resources: impl Iterator<Item = (TempResource<A>, alloc::MemoryBlock<A>)>,
temp_resources: impl Iterator<Item = TempResource<A>>,
) {
let mut last_resources = NonReferencedResources::new();
for (res, memory) in temp_resources {
for res in temp_resources {
match res {
TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)),
TempResource::Image(raw) => last_resources.images.push((raw, memory)),
TempResource::Buffer(raw) => last_resources.buffers.push(raw),
TempResource::Texture(raw) => last_resources.textures.push(raw),
}
}
@ -288,20 +258,23 @@ impl<A: hal::Api> LifetimeTracker<A> {
self.mapped.push(Stored { value, ref_count });
}
fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
fn wait_idle(&self, device: &A::Device) -> Result<(), WaitIdleError> {
if !self.active.is_empty() {
log::debug!("Waiting for IDLE...");
let status = unsafe {
device
.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
CLEANUP_WAIT_MS * 1_000_000,
)
.map_err(DeviceError::from)?
};
let mut status = true;
//TODO: change this to wait for the last fence value only
for a in self.active.iter() {
status &= unsafe {
device
.wait(
&a.fence,
a.index as u64, //TODO: check this
CLEANUP_WAIT_MS,
)
.map_err(DeviceError::from)?
};
}
log::debug!("...Done");
if !status {
// We timed out while waiting for the fences
return Err(WaitIdleError::StuckGpu);
@ -313,10 +286,11 @@ impl<A: hal::Api> LifetimeTracker<A> {
/// Returns the last submission index that is done.
pub fn triage_submissions(
&mut self,
device: &B::Device,
device: &A::Device,
force_wait: bool,
) -> Result<SubmissionIndex, WaitIdleError> {
profiling::scope!("triage_submissions");
/* TODO: better sync
if force_wait {
self.wait_idle(device)?;
}
@ -325,7 +299,7 @@ impl<A: hal::Api> LifetimeTracker<A> {
let done_count = self
.active
.iter()
.position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) })
.position(|a| unsafe { device.get_fence_value(&a.fence).unwrap() })
.unwrap_or_else(|| self.active.len());
let last_done = match done_count.checked_sub(1) {
Some(i) => self.active[i].index,
@ -339,29 +313,22 @@ impl<A: hal::Api> LifetimeTracker<A> {
unsafe {
device.destroy_fence(a.fence);
}
}
}*/
let last_done = 0;
Ok(last_done)
}
pub fn cleanup(
&mut self,
device: &B::Device,
memory_allocator_mutex: &Mutex<alloc::MemoryAllocator<A>>,
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<A>>,
) {
pub fn cleanup(&mut self, device: &A::Device) {
profiling::scope!("cleanup");
unsafe {
self.free_resources
.clean(device, memory_allocator_mutex, descriptor_allocator_mutex);
descriptor_allocator_mutex.lock().cleanup(device);
self.free_resources.clean(device);
}
}
pub fn schedule_resource_destruction(
&mut self,
temp_resource: TempResource<A>,
memory: alloc::MemoryBlock<A>,
last_submit_index: SubmissionIndex,
) {
let resources = self
@ -370,8 +337,8 @@ impl<A: hal::Api> LifetimeTracker<A> {
.find(|a| a.index == last_submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources);
match temp_resource {
TempResource::Buffer(raw) => resources.buffers.push((raw, memory)),
TempResource::Image(raw) => resources.images.push((raw, memory)),
TempResource::Buffer(raw) => resources.buffers.push(raw),
TempResource::Texture(raw) => resources.textures.push(raw),
}
}
}
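
Every destruction path in this file funnels through the same rule seen above: a resource retired at submission index N is parked on the matching in-flight submission if one is still active, and otherwise goes straight to the free list. A reduced sketch:

struct ActiveSubmission<R> {
    index: u64,
    last_resources: Vec<R>,
}

fn park<R>(
    active: &mut [ActiveSubmission<R>],
    free_resources: &mut Vec<R>,
    last_submit_index: u64,
    resource: R,
) {
    active
        .iter_mut()
        .find(|a| a.index == last_submit_index)
        .map_or(free_resources, |a| &mut a.last_resources)
        .push(resource);
}
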
@ -423,7 +390,7 @@ impl<A: HalApi> LifetimeTracker<A> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.desc_sets
.bind_groups
.push(res.raw);
}
}
@ -442,12 +409,11 @@ impl<A: HalApi> LifetimeTracker<A> {
}
if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
let raw = match res.inner {
resource::TextureViewInner::Native { raw, source_id } => {
match res.source {
resource::TextureViewSource::Native(source_id) => {
self.suspected_resources.textures.push(source_id.value);
raw
}
resource::TextureViewInner::SwapChain { .. } => unreachable!(),
resource::TextureViewSource::SwapChain { .. } => unreachable!(),
};
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -455,8 +421,8 @@ impl<A: HalApi> LifetimeTracker<A> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.image_views
.push((id, raw));
.texture_views
.push((id, res.raw));
}
}
}
@ -479,7 +445,7 @@ impl<A: HalApi> LifetimeTracker<A> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.images
.textures
.extend(res.raw);
}
}
@ -524,15 +490,8 @@ impl<A: HalApi> LifetimeTracker<A> {
if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
if let resource::BufferMapState::Init {
stage_buffer,
stage_memory,
..
} = res.map_state
{
self.free_resources
.buffers
.push((stage_buffer, stage_memory));
if let resource::BufferMapState::Init { stage_buffer, .. } = res.map_state {
self.free_resources.buffers.push(stage_buffer);
}
self.active
.iter_mut()
@ -586,7 +545,7 @@ impl<A: HalApi> LifetimeTracker<A> {
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.graphics_pipes
.render_pipes
.push(res.raw);
}
}
@ -632,7 +591,7 @@ impl<A: HalApi> LifetimeTracker<A> {
t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
}
if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
self.free_resources.descriptor_set_layouts.push(lay.raw);
self.free_resources.bind_group_layouts.push(lay.raw);
}
}
}
@ -693,7 +652,7 @@ impl<A: HalApi> LifetimeTracker<A> {
pub(super) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
&mut self,
hub: &Hub<A, G>,
raw: &B::Device,
raw: &A::Device,
trackers: &Mutex<TrackerSet>,
token: &mut Token<super::Device<A>>,
) -> Vec<super::BufferMapPendingCallback> {
@ -740,10 +699,7 @@ impl<A: HalApi> LifetimeTracker<A> {
Ok(ptr) => {
buffer.map_state = resource::BufferMapState::Active {
ptr,
sub_range: hal::buffer::SubRange {
offset: mapping.range.start,
size: Some(size),
},
range: mapping.range.start..mapping.range.start + size,
host,
};
resource::BufferMapAsyncStatus::Success

File diff suppressed because it is too large

View File

@ -6,7 +6,7 @@
use crate::device::trace::Action;
use crate::{
command::{
extract_image_range, validate_linear_texture_data, validate_texture_copy_range,
extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range,
CommandBuffer, CopySide, ImageCopyTexture, TransferError,
},
conv,
@ -26,6 +26,21 @@ use thiserror::Error;
struct StagingData<A: hal::Api> {
buffer: A::Buffer,
cmdbuf: A::CommandBuffer,
is_coherent: bool,
}
impl<A: hal::Api> StagingData<A> {
unsafe fn write(
&self,
device: &A::Device,
offset: wgt::BufferAddress,
data: &[u8],
) -> Result<(), hal::DeviceError> {
let ptr = device.map_buffer(&self.buffer, offset..offset + data.len() as u64)?;
ptr::copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), data.len());
device.unmap_buffer(&self.buffer)?;
Ok(())
}
}
#[derive(Debug)]
@ -64,7 +79,7 @@ impl<A: hal::Api> PendingWrites<A> {
device.destroy_buffer(buffer);
},
TempResource::Texture(texture) => unsafe {
device.destroy_image(texture);
device.destroy_texture(texture);
},
}
}
@ -91,11 +106,11 @@ impl<A: hal::Api> PendingWrites<A> {
fn create_cmd_buf(device: &A::Device) -> A::CommandBuffer {
unsafe {
let mut cmd_buf = device.create_command_buffer(&hal::CommandBufferDescriptor {
label: Some("_PendingWrites"),
});
cmd_buf.begin();
cmd_buf
device
.create_command_buffer(&hal::CommandBufferDescriptor {
label: Some("_PendingWrites"),
})
.unwrap()
}
}
@ -157,9 +172,13 @@ impl<A: hal::Api> super::Device<A> {
let cmdbuf = match self.pending_writes.command_buffer.take() {
Some(cmdbuf) => cmdbuf,
None => PendingWrites::create_cmd_buf(&self.raw),
None => PendingWrites::<A>::create_cmd_buf(&self.raw),
};
Ok(StagingData { buffer, cmdbuf })
Ok(StagingData {
buffer,
cmdbuf,
is_coherent: true, //TODO
})
}
fn initialize_buffer_memory(
@ -171,7 +190,7 @@ impl<A: hal::Api> super::Device<A> {
.dst_buffers
.extend(required_buffer_inits.map.keys());
let cmd_buf = self.pending_writes.borrow_cmd_buf(&self.cmd_allocator);
let cmd_buf = self.pending_writes.borrow_cmd_buf(&self.raw);
let mut trackers = self.trackers.lock();
for (buffer_id, mut ranges) in required_buffer_inits.map.drain() {
@ -193,7 +212,7 @@ impl<A: hal::Api> super::Device<A> {
hal::BufferUse::COPY_DST,
);
let buffer = buffer_guard.get(buffer_id).unwrap();
let &(ref buffer_raw, _) = buffer
let raw_buf = buffer
.raw
.as_ref()
.ok_or(QueueSubmitError::DestroyedBuffer(buffer_id))?;
@ -202,11 +221,11 @@ impl<A: hal::Api> super::Device<A> {
}
for range in ranges {
assert!(range.start % 4 == 0, "Buffer {:?} has an uninitialized range with a start not aligned to 4 (start was {})", buffer, range.start);
assert!(range.end % 4 == 0, "Buffer {:?} has an uninitialized range with an end not aligned to 4 (end was {})", buffer, range.end);
assert!(range.start % 4 == 0, "Buffer {:?} has an uninitialized range with a start not aligned to 4 (start was {})", raw_buf, range.start);
assert!(range.end % 4 == 0, "Buffer {:?} has an uninitialized range with an end not aligned to 4 (end was {})", raw_buf, range.end);
unsafe {
cmd_buf.fill_buffer(buffer_raw, range, 0);
cmd_buf.fill_buffer(raw_buf, range, 0);
}
}
}
@ -282,14 +301,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let mut stage = device.prepare_stage(data_size)?;
stage.memory.write_bytes(&device.raw, 0, data)?;
unsafe { stage.write(&device.raw, 0, data) }.map_err(DeviceError::from)?;
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), hal::BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst
let dst_raw = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
@ -314,11 +333,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.into());
}
let region = hal::BufferCopy {
let region = wgt::BufferSize::new(data.len() as u64).map(|size| hal::BufferCopy {
src_offset: 0,
dst_offset: buffer_offset,
size: data.len() as _,
};
size,
});
let barriers = iter::once(hal::BufferBarrier {
buffer: &stage.buffer,
usage: hal::BufferUse::MAP_WRITE..hal::BufferUse::COPY_SRC,
@ -328,7 +347,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
stage.cmdbuf.transition_buffers(barriers);
stage
.cmdbuf
.copy_buffer_to_buffer(&stage.buffer, dst_raw, iter::once(region));
.copy_buffer_to_buffer(&stage.buffer, dst_raw, region.into_iter());
}
device.pending_writes.consume(stage);
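
Illustration: `wgt::BufferSize` is a `NonZeroU64` alias, so the `map` above yields `None` for an empty write, and `region.into_iter()` then records no copy at all. A self-contained sketch of that zero-size guard (the local `BufferCopy` mirrors the hal struct and is an assumption):

use std::num::NonZeroU64;

#[allow(dead_code)]
struct BufferCopy {
    src_offset: u64,
    dst_offset: u64,
    size: NonZeroU64,
}

fn main() {
    let data: &[u8] = &[];
    let region = NonZeroU64::new(data.len() as u64).map(|size| BufferCopy {
        src_offset: 0,
        dst_offset: 0,
        size,
    });
    // An Option iterates zero or one times, so empty writes skip the GPU copy.
    assert_eq!(region.into_iter().count(), 0);
}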
@ -382,8 +401,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let (texture_guard, _) = hub.textures.read(&mut token);
let (image_range, texture_format) =
extract_image_range(destination, size, &*texture_guard)?;
let (selector, texture_base, texture_format) =
extract_texture_selector(destination, size, &*texture_guard)?;
let format_desc = texture_format.describe();
validate_linear_texture_data(
data_layout,
@ -414,11 +433,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let block_rows_per_image = texel_rows_per_image / block_height;
let bytes_per_row_alignment = get_lowest_common_denom(
device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
format_desc.block_size,
device.alignments.buffer_copy_pitch.get() as u32,
format_desc.block_size as u32,
);
let stage_bytes_per_row = align_to(
format_desc.block_size * width_blocks,
format_desc.block_size as u32 * width_blocks,
bytes_per_row_alignment,
);
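
Illustration: the staging row pitch rounds the texel row size up to the least common multiple of the copy-pitch alignment and the block size. A self-contained sketch, with assumed semantics for the crate's `align_to` and `get_lowest_common_denom` helpers:

fn gcd(a: u32, b: u32) -> u32 {
    if b == 0 { a } else { gcd(b, a % b) }
}

fn lcm(a: u32, b: u32) -> u32 {
    a / gcd(a, b) * b
}

fn align_to(value: u32, alignment: u32) -> u32 {
    ((value + alignment - 1) / alignment) * alignment
}

fn main() {
    let buffer_copy_pitch = 256; // e.g. a common optimal pitch alignment
    let block_size = 4; // e.g. Rgba8Unorm: 4 bytes per block
    let width_blocks = 100;
    let alignment = lcm(buffer_copy_pitch, block_size); // 256
    let stage_bytes_per_row = align_to(block_size * width_blocks, alignment);
    assert_eq!(stage_bytes_per_row, 512); // 400 data bytes padded to 512 per row
}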
@ -433,39 +452,34 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.use_replace(
&*texture_guard,
destination.texture,
image_range,
selector,
hal::TextureUse::COPY_DST,
)
.unwrap();
let &(ref dst_raw, _) = dst
let dst_raw = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
if !dst.desc.usage.contains(wgt::TextureUsage::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
let max_image_extent = validate_texture_copy_range(
destination,
dst.format,
dst.kind,
CopySide::Destination,
size,
)?;
let max_image_extent =
validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?;
dst.life_guard.use_at(device.active_submission_index + 1);
let bytes_per_row = if let Some(bytes_per_row) = data_layout.bytes_per_row {
bytes_per_row.get()
} else {
width_blocks * format_desc.block_size
width_blocks * format_desc.block_size as u32
};
let ptr = stage.memory.map(&device.raw, 0, stage_size)?;
let ptr = unsafe { device.raw.map_buffer(&stage.buffer, 0..stage_size) }
.map_err(DeviceError::from)?;
unsafe {
profiling::scope!("copy");
//TODO: https://github.com/zakarumych/gpu-alloc/issues/13
if stage_bytes_per_row == bytes_per_row {
// Fast path if the data is already aligned optimally.
ptr::copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), stage_size as usize);
@ -487,9 +501,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
}
stage.memory.unmap(&device.raw);
if !stage.memory.is_coherent() {
stage.memory.flush_range(&device.raw, 0, None)?;
device
.raw
.unmap_buffer(&stage.buffer)
.map_err(DeviceError::from)?;
if !stage.is_coherent {
device
.raw
.flush_mapped_ranges(&stage.buffer, iter::once(0..stage_size));
}
// WebGPU uses the physical size of the texture for copies whereas vulkan uses
@ -499,11 +518,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let region = hal::BufferTextureCopy {
buffer_layout: wgt::ImageDataLayout {
offset: 0,
bytes_per_row: stage_bytes_per_row,
rows_per_image: texel_rows_per_image,
bytes_per_row: NonZeroU32::new(stage_bytes_per_row),
rows_per_image: NonZeroU32::new(texel_rows_per_image),
},
texture_mip_level: destination.mip_level,
texture_origin: destination.origin,
texture_base,
size: wgt::Extent3d {
width: size.width.min(max_image_extent.width),
height: size.height.min(max_image_extent.height),
@ -520,12 +538,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
stage
.cmdbuf
.transition_textures(transition.map(|pending| pending.into_hal(dst)));
stage.cmdbuf.copy_buffer_to_image(
&stage.buffer,
dst_raw,
hal::TextureUse::COPY_DST,
iter::once(region),
);
stage
.cmdbuf
.copy_buffer_to_texture(&stage.buffer, dst_raw, iter::once(region));
}
device.pending_writes.consume(stage);
@ -607,7 +622,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
for sc_id in cmdbuf.used_swap_chains.drain(..) {
let sc = &mut swap_chain_guard[sc_id.value];
if sc.acquired_view_id.is_none() {
if sc.acquired_texture.is_none() {
return Err(QueueSubmitError::SwapChainOutputDropped);
}
if sc.active_submission_index != submit_index {
@ -621,13 +636,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// update submission IDs
for id in cmdbuf.trackers.buffers.used() {
let buffer = &mut buffer_guard[id];
if buffer.raw.is_none() {
return Err(QueueSubmitError::DestroyedBuffer(id.0));
}
let raw_buf = match buffer.raw {
Some(ref raw) => raw,
None => {
return Err(QueueSubmitError::DestroyedBuffer(id.0));
}
};
if !buffer.life_guard.use_at(submit_index) {
if let BufferMapState::Active { .. } = buffer.map_state {
log::warn!("Dropped buffer has a pending mapping.");
unsafe { device.raw.unmap_buffer(buffer)? };
unsafe { device.raw.unmap_buffer(raw_buf) }
.map_err(DeviceError::from)?;
}
device.temp_suspected.buffers.push(id);
} else {
@ -677,18 +696,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
// execute resource transitions
let mut transit =
device
.raw
.create_command_buffer(&hal::CommandBufferDescriptor {
label: Some("_Transit"),
});
unsafe {
// the last buffer was open, closing now
cmdbuf.raw.last_mut().unwrap().end();
transit.begin();
cmdbuf.raw.last_mut().unwrap().finish();
}
// execute resource transitions
let mut transit = device
.raw
.create_command_buffer(&hal::CommandBufferDescriptor {
label: Some("_Transit"),
})
.map_err(DeviceError::from)?;
log::trace!("Stitching command buffer {:?} before submission", cmb_id);
trackers.merge_extend_stateless(&cmdbuf.trackers);
CommandBuffer::insert_barriers(
@ -700,7 +718,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*texture_guard,
);
unsafe {
transit.end();
transit.finish();
}
cmdbuf.raw.insert(0, transit);
}
@ -714,42 +732,28 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
// now prepare the GPU submission
let mut fence = device
.raw
.create_fence(false)
.or(Err(DeviceError::OutOfMemory))?;
let signal_semaphores = signal_swapchain_semaphores
.into_iter()
.map(|sc_id| &swap_chain_guard[sc_id].semaphore);
let mut fence = device.raw.create_fence().map_err(DeviceError::from)?;
//Note: we could technically avoid the heap Vec here
let mut command_buffers = Vec::new();
command_buffers.extend(pending_write_command_buffer.as_ref());
command_buffers.extend(pending_write_command_buffer);
for &cmd_buf_id in command_buffer_ids.iter() {
match command_buffer_guard.get(cmd_buf_id) {
Ok(cmd_buf) if cmd_buf.is_finished() => {
command_buffers.extend(cmd_buf.raw.iter());
command_buffers.extend(cmd_buf.raw.drain(..));
}
_ => {}
}
}
let fence_value = 1; //TODO
unsafe {
device.queue_group.queues[0].submit(
command_buffers.into_iter(),
iter::empty(),
signal_semaphores,
Some(&mut fence),
);
device
.queue
.submit(command_buffers.into_iter(), Some((&mut fence, fence_value)));
}
fence
};
if let Some(comb_raw) = pending_write_command_buffer {
device
.cmd_allocator
.after_submit_internal(comb_raw, submit_index);
}
let callbacks = match device.maintain(&hub, false, &mut token) {
Ok(callbacks) => callbacks,
Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
@ -764,15 +768,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device.pending_writes.temp_resources.drain(..),
);
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) {
device
.cmd_allocator
.after_submit(cmd_buf, &device.raw, submit_index);
}
}
callbacks
};
@ -791,7 +786,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
match device_guard.get(queue_id) {
Ok(device) => Ok(device.queue_group.queues[0].timestamp_period()),
Ok(_device) => Ok(1.0), //TODO?
Err(_) => Err(InvalidQueue),
}
}

View File

@ -6,14 +6,10 @@ use crate::{
binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
command::{CommandBuffer, RenderBundle},
device::Device,
id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
DeviceId, PipelineLayoutId, RenderBundleId, RenderPipelineId, SamplerId, ShaderModuleId,
SurfaceId, SwapChainId, TextureId, TextureViewId, TypedId, Valid,
},
id,
instance::{Adapter, Instance, Surface},
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
resource::{Buffer, Sampler, Texture, TextureView},
resource::{Buffer, QuerySet, Sampler, Texture, TextureView},
swap_chain::SwapChain,
Epoch, Index,
};
@ -21,8 +17,6 @@ use crate::{
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use wgt::Backend;
use crate::id::QuerySetId;
use crate::resource::QuerySet;
#[cfg(debug_assertions)]
use std::cell::Cell;
use std::{fmt::Debug, marker::PhantomData, ops};
@ -51,7 +45,7 @@ impl IdentityManager {
}
}
pub fn alloc<I: TypedId>(&mut self, backend: Backend) -> I {
pub fn alloc<I: id::TypedId>(&mut self, backend: Backend) -> I {
match self.free.pop() {
Some(index) => I::zip(index, self.epochs[index as usize], backend),
None => {
@ -63,7 +57,7 @@ impl IdentityManager {
}
}
pub fn free<I: TypedId + Debug>(&mut self, id: I) {
pub fn free<I: id::TypedId + Debug>(&mut self, id: I) {
let (index, epoch, _backend) = id.unzip();
// avoid doing this check in release
if cfg!(debug_assertions) {
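
Illustration: `IdentityManager` hands out `(index, epoch, backend)` ids; freeing bumps the stored epoch before recycling the index, so stale ids remain detectable. A simplified, self-contained model of the scheme:

struct Ids {
    epochs: Vec<u32>,
    free: Vec<u32>,
}

impl Ids {
    fn alloc(&mut self) -> (u32, u32) {
        match self.free.pop() {
            Some(index) => (index, self.epochs[index as usize]),
            None => {
                self.epochs.push(1);
                (self.epochs.len() as u32 - 1, 1)
            }
        }
    }
    fn free(&mut self, index: u32) {
        self.epochs[index as usize] += 1; // invalidates outstanding copies of the id
        self.free.push(index);
    }
}

fn main() {
    let mut ids = Ids { epochs: Vec::new(), free: Vec::new() };
    let (i0, e0) = ids.alloc();
    ids.free(i0);
    let (i1, e1) = ids.alloc();
    assert_eq!(i0, i1); // the slot is reused...
    assert!(e1 > e0); // ...but the epoch tells old ids from new ones
}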
@ -87,26 +81,26 @@ enum Element<T> {
pub(crate) struct InvalidId;
#[derive(Debug)]
pub struct Storage<T, I: TypedId> {
pub struct Storage<T, I: id::TypedId> {
map: Vec<Element<T>>,
kind: &'static str,
_phantom: PhantomData<I>,
}
impl<T, I: TypedId> ops::Index<Valid<I>> for Storage<T, I> {
impl<T, I: id::TypedId> ops::Index<id::Valid<I>> for Storage<T, I> {
type Output = T;
fn index(&self, id: Valid<I>) -> &T {
fn index(&self, id: id::Valid<I>) -> &T {
self.get(id.0).unwrap()
}
}
impl<T, I: TypedId> ops::IndexMut<Valid<I>> for Storage<T, I> {
fn index_mut(&mut self, id: Valid<I>) -> &mut T {
impl<T, I: id::TypedId> ops::IndexMut<id::Valid<I>> for Storage<T, I> {
fn index_mut(&mut self, id: id::Valid<I>) -> &mut T {
self.get_mut(id.0).unwrap()
}
}
impl<T, I: TypedId> Storage<T, I> {
impl<T, I: id::TypedId> Storage<T, I> {
pub(crate) fn contains(&self, id: I) -> bool {
let (index, epoch, _) = id.unzip();
match self.map[index as usize] {
@ -347,7 +341,7 @@ pub trait IdentityHandler<I>: Debug {
fn free(&self, id: I);
}
impl<I: TypedId + Debug> IdentityHandler<I> for Mutex<IdentityManager> {
impl<I: id::TypedId + Debug> IdentityHandler<I> for Mutex<IdentityManager> {
type Input = PhantomData<I>;
fn process(&self, _id: Self::Input, backend: Backend) -> I {
self.lock().alloc(backend)
@ -365,7 +359,7 @@ pub trait IdentityHandlerFactory<I> {
#[derive(Debug)]
pub struct IdentityManagerFactory;
impl<I: TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
impl<I: id::TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
type Filter = Mutex<IdentityManager>;
fn spawn(&self, min_index: Index) -> Self::Filter {
Mutex::new(IdentityManager::from_index(min_index))
@ -373,23 +367,23 @@ impl<I: TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
}
pub trait GlobalIdentityHandlerFactory:
IdentityHandlerFactory<AdapterId>
+ IdentityHandlerFactory<DeviceId>
+ IdentityHandlerFactory<SwapChainId>
+ IdentityHandlerFactory<PipelineLayoutId>
+ IdentityHandlerFactory<ShaderModuleId>
+ IdentityHandlerFactory<BindGroupLayoutId>
+ IdentityHandlerFactory<BindGroupId>
+ IdentityHandlerFactory<CommandBufferId>
+ IdentityHandlerFactory<RenderBundleId>
+ IdentityHandlerFactory<RenderPipelineId>
+ IdentityHandlerFactory<ComputePipelineId>
+ IdentityHandlerFactory<QuerySetId>
+ IdentityHandlerFactory<BufferId>
+ IdentityHandlerFactory<TextureId>
+ IdentityHandlerFactory<TextureViewId>
+ IdentityHandlerFactory<SamplerId>
+ IdentityHandlerFactory<SurfaceId>
IdentityHandlerFactory<id::AdapterId>
+ IdentityHandlerFactory<id::DeviceId>
+ IdentityHandlerFactory<id::SwapChainId>
+ IdentityHandlerFactory<id::PipelineLayoutId>
+ IdentityHandlerFactory<id::ShaderModuleId>
+ IdentityHandlerFactory<id::BindGroupLayoutId>
+ IdentityHandlerFactory<id::BindGroupId>
+ IdentityHandlerFactory<id::CommandBufferId>
+ IdentityHandlerFactory<id::RenderBundleId>
+ IdentityHandlerFactory<id::RenderPipelineId>
+ IdentityHandlerFactory<id::ComputePipelineId>
+ IdentityHandlerFactory<id::QuerySetId>
+ IdentityHandlerFactory<id::BufferId>
+ IdentityHandlerFactory<id::TextureId>
+ IdentityHandlerFactory<id::TextureViewId>
+ IdentityHandlerFactory<id::SamplerId>
+ IdentityHandlerFactory<id::SurfaceId>
{
}
@ -409,13 +403,13 @@ pub trait Resource {
}
#[derive(Debug)]
pub struct Registry<T: Resource, I: TypedId, F: IdentityHandlerFactory<I>> {
pub struct Registry<T: Resource, I: id::TypedId, F: IdentityHandlerFactory<I>> {
identity: F::Filter,
data: RwLock<Storage<T, I>>,
backend: Backend,
}
impl<T: Resource, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
impl<T: Resource, I: id::TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
fn new(backend: Backend, factory: &F) -> Self {
Self {
identity: factory.spawn(0),
@ -442,12 +436,12 @@ impl<T: Resource, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
}
#[must_use]
pub(crate) struct FutureId<'a, I: TypedId, T> {
pub(crate) struct FutureId<'a, I: id::TypedId, T> {
id: I,
data: &'a RwLock<Storage<T, I>>,
}
impl<I: TypedId + Copy, T> FutureId<'_, I, T> {
impl<I: id::TypedId + Copy, T> FutureId<'_, I, T> {
#[cfg(feature = "trace")]
pub fn id(&self) -> I {
self.id
@ -457,9 +451,9 @@ impl<I: TypedId + Copy, T> FutureId<'_, I, T> {
self.id
}
pub fn assign<'a, A: Access<T>>(self, value: T, _: &'a mut Token<A>) -> Valid<I> {
pub fn assign<'a, A: Access<T>>(self, value: T, _: &'a mut Token<A>) -> id::Valid<I> {
self.data.write().insert(self.id, value);
Valid(self.id)
id::Valid(self.id)
}
pub fn assign_error<'a, A: Access<T>>(self, label: &str, _: &'a mut Token<A>) -> I {
@ -468,7 +462,7 @@ impl<I: TypedId + Copy, T> FutureId<'_, I, T> {
}
}
impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
impl<T: Resource, I: id::TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
pub(crate) fn prepare(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
@ -535,24 +529,23 @@ impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I
}
}
#[derive(Debug)]
pub struct Hub<A: hal::Api, F: GlobalIdentityHandlerFactory> {
pub adapters: Registry<Adapter<A>, AdapterId, F>,
pub devices: Registry<Device<A>, DeviceId, F>,
pub swap_chains: Registry<SwapChain<A>, SwapChainId, F>,
pub pipeline_layouts: Registry<PipelineLayout<A>, PipelineLayoutId, F>,
pub shader_modules: Registry<ShaderModule<A>, ShaderModuleId, F>,
pub bind_group_layouts: Registry<BindGroupLayout<A>, BindGroupLayoutId, F>,
pub bind_groups: Registry<BindGroup<A>, BindGroupId, F>,
pub command_buffers: Registry<CommandBuffer<A>, CommandBufferId, F>,
pub render_bundles: Registry<RenderBundle, RenderBundleId, F>,
pub render_pipelines: Registry<RenderPipeline<A>, RenderPipelineId, F>,
pub compute_pipelines: Registry<ComputePipeline<A>, ComputePipelineId, F>,
pub query_sets: Registry<QuerySet<A>, QuerySetId, F>,
pub buffers: Registry<Buffer<A>, BufferId, F>,
pub textures: Registry<Texture<A>, TextureId, F>,
pub texture_views: Registry<TextureView<A>, TextureViewId, F>,
pub samplers: Registry<Sampler<A>, SamplerId, F>,
pub adapters: Registry<Adapter<A>, id::AdapterId, F>,
pub devices: Registry<Device<A>, id::DeviceId, F>,
pub swap_chains: Registry<SwapChain<A>, id::SwapChainId, F>,
pub pipeline_layouts: Registry<PipelineLayout<A>, id::PipelineLayoutId, F>,
pub shader_modules: Registry<ShaderModule<A>, id::ShaderModuleId, F>,
pub bind_group_layouts: Registry<BindGroupLayout<A>, id::BindGroupLayoutId, F>,
pub bind_groups: Registry<BindGroup<A>, id::BindGroupId, F>,
pub command_buffers: Registry<CommandBuffer<A>, id::CommandBufferId, F>,
pub render_bundles: Registry<RenderBundle, id::RenderBundleId, F>,
pub render_pipelines: Registry<RenderPipeline<A>, id::RenderPipelineId, F>,
pub compute_pipelines: Registry<ComputePipeline<A>, id::ComputePipelineId, F>,
pub query_sets: Registry<QuerySet<A>, id::QuerySetId, F>,
pub buffers: Registry<Buffer<A>, id::BufferId, F>,
pub textures: Registry<Texture<A>, id::TextureId, F>,
pub texture_views: Registry<TextureView<A>, id::TextureViewId, F>,
pub samplers: Registry<Sampler<A>, id::SamplerId, F>,
}
impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
@ -582,9 +575,9 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
//TODO: instead of having a hacky `with_adapters` parameter,
// we should have `clear_device(device_id)` that specifically destroys
// everything related to a logical device.
fn clear(&self, surface_guard: &mut Storage<Surface, SurfaceId>, with_adapters: bool) {
use crate::resource::TextureViewInner;
use hal::Device as _;
fn clear(&self, surface_guard: &mut Storage<Surface, id::SurfaceId>, with_adapters: bool) {
use crate::resource::TextureViewSource;
use hal::{Device as _, Surface as _};
let mut devices = self.devices.data.write();
for element in devices.map.iter_mut() {
@ -606,14 +599,14 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
let textures = self.textures.data.read();
for element in self.texture_views.data.write().map.drain(..) {
if let Element::Occupied(texture_view, _) = element {
match texture_view.inner {
TextureViewInner::Native { raw, source_id } => {
match texture_view.source {
TextureViewSource::Native(source_id) => {
let device = &devices[textures[source_id.value].device_id.value];
unsafe {
device.raw.destroy_image_view(raw);
device.raw.destroy_texture_view(texture_view.raw);
}
}
TextureViewInner::SwapChain { .. } => {} //TODO
TextureViewSource::SwapChain(_) => {} //TODO
}
}
}
@ -633,15 +626,15 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
for element in self.command_buffers.data.write().map.drain(..) {
if let Element::Occupied(command_buffer, _) = element {
let device = &devices[command_buffer.device_id.value];
device
.cmd_allocator
.after_submit(command_buffer, &device.raw, 0);
for raw in command_buffer.raw {
device.raw.destroy_command_buffer(raw);
}
}
}
for element in self.bind_groups.data.write().map.drain(..) {
if let Element::Occupied(bind_group, _) = element {
let device = &devices[bind_group.device_id.value];
device.destroy_bind_group(bind_group);
device.raw.destroy_bind_group(bind_group.raw);
}
}
@ -657,7 +650,7 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
if let Element::Occupied(bgl, _) = element {
let device = &devices[bgl.device_id.value];
unsafe {
device.raw.destroy_descriptor_set_layout(bgl.raw);
device.raw.destroy_bind_group_layout(bgl.raw);
}
}
}
@ -681,7 +674,7 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
if let Element::Occupied(pipeline, _) = element {
let device = &devices[pipeline.device_id.value];
unsafe {
device.raw.destroy_graphics_pipeline(pipeline.raw);
device.raw.destroy_render_pipeline(pipeline.raw);
}
}
}
@ -689,16 +682,13 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
for (index, element) in self.swap_chains.data.write().map.drain(..).enumerate() {
if let Element::Occupied(swap_chain, epoch) = element {
let device = &devices[swap_chain.device_id.value];
unsafe {
device.raw.destroy_semaphore(swap_chain.semaphore);
}
let suf_id = TypedId::zip(index as Index, epoch, A::VARIANT);
let suf_id = id::TypedId::zip(index as Index, epoch, A::VARIANT);
//TODO: hold the surface alive by the swapchain
if surface_guard.contains(suf_id) {
let surface = surface_guard.get_mut(suf_id).unwrap();
let suf = A::get_surface_mut(surface);
unsafe {
suf.unconfigure_swapchain(&device.raw);
suf.unconfigure(&device.raw);
}
}
}
@ -708,7 +698,7 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
if let Element::Occupied(query_set, _) = element {
let device = &devices[query_set.device_id.value];
unsafe {
device.raw.destroy_query_pool(query_set.raw);
device.raw.destroy_query_set(query_set.raw);
}
}
}
@ -727,16 +717,18 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
#[derive(Debug)]
pub struct Hubs<F: GlobalIdentityHandlerFactory> {
/*
#[cfg(vulkan)]
vulkan: Hub<backend::Vulkan, F>,
#[cfg(metal)]
metal: Hub<backend::Metal, F>,
#[cfg(dx12)]
dx12: Hub<backend::Dx12, F>,
#[cfg(dx11)]
dx11: Hub<backend::Dx11, F>,
#[cfg(gl)]
gl: Hub<backend::Gl, F>,*/}
#[cfg(vulkan)]
vulkan: Hub<backend::Vulkan, F>,
#[cfg(metal)]
metal: Hub<backend::Metal, F>,
#[cfg(dx12)]
dx12: Hub<backend::Dx12, F>,
#[cfg(dx11)]
dx11: Hub<backend::Dx11, F>,
#[cfg(gl)]
gl: Hub<backend::Gl, F>,*/
marker: PhantomData<F>,
}
impl<F: GlobalIdentityHandlerFactory> Hubs<F> {
fn new(factory: &F) -> Self {
@ -753,6 +745,7 @@ impl<F: GlobalIdentityHandlerFactory> Hubs<F> {
#[cfg(gl)]
gl: Hub::new(factory),
*/
marker: PhantomData,
}
}
}
@ -760,7 +753,7 @@ impl<F: GlobalIdentityHandlerFactory> Hubs<F> {
#[derive(Debug)]
pub struct Global<G: GlobalIdentityHandlerFactory> {
pub instance: Instance,
pub surfaces: Registry<Surface, SurfaceId, G>,
pub surfaces: Registry<Surface, id::SurfaceId, G>,
hubs: Hubs<G>,
}

View File

@ -113,7 +113,6 @@ impl crate::hub::Resource for Surface {
}
}
#[derive(Debug)]
pub struct Adapter<A: hal::Api> {
pub(crate) raw: hal::ExposedAdapter<A>,
life_guard: LifeGuard,
@ -141,11 +140,16 @@ impl<A: HalApi> Adapter<A> {
wgt::TextureFormat::Rgba8Unorm,
];
let formats = A::get_surface(surface).supported_formats(&self.raw.adapter);
let caps = self
.raw
.adapter
.surface_capabilities(A::get_surface_mut(surface))
.ok_or(GetSwapChainPreferredFormatError::UnsupportedQueueFamily)?;
preferred_formats
.iter()
.cloned()
.find(|preferred| formats.contains(preferred))
.find(|preferred| caps.formats.contains(preferred))
.ok_or(GetSwapChainPreferredFormatError::NotFound)
}
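
Illustration: preferred-format selection walks a fixed preference list and returns the first entry the surface reports as supported. A self-contained sketch, with strings standing in for `wgt::TextureFormat` and an assumed preference order:

fn main() {
    // The supported set would come from adapter.surface_capabilities(..).formats.
    let preferred = ["Bgra8UnormSrgb", "Rgba8UnormSrgb", "Bgra8Unorm", "Rgba8Unorm"];
    let supported = ["Rgba8Unorm", "Rgba8UnormSrgb"];
    let chosen = preferred.iter().copied().find(|f| supported.contains(f));
    assert_eq!(chosen, Some("Rgba8UnormSrgb")); // first preferred format that is supported
}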
@ -204,7 +208,8 @@ impl<A: HalApi> Adapter<A> {
));
}
if !self.downlevel.is_webgpu_compliant() {
let caps = &self.raw.capabilities;
if !caps.downlevel.is_webgpu_compliant() {
log::warn!("{}", DOWNLEVEL_WARNING_MESSAGE);
}
@ -218,15 +223,14 @@ impl<A: HalApi> Adapter<A> {
}
let gpu = unsafe { self.raw.adapter.open(desc.features) }.map_err(|err| match err {
hal::Error::DeviceLost => RequestDeviceError::DeviceLost,
hal::Error::OutOfMemory => RequestDeviceError::OutOfMemory,
hal::DeviceError::Lost => RequestDeviceError::DeviceLost,
hal::DeviceError::OutOfMemory => RequestDeviceError::OutOfMemory,
})?;
if let Some(_) = desc.label {
//TODO
}
let caps = &self.raw.capabilities;
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % caps.alignments.storage_buffer_offset,
@ -237,7 +241,7 @@ impl<A: HalApi> Adapter<A> {
BIND_BUFFER_ALIGNMENT % caps.alignments.uniform_buffer_offset,
"Adapter uniform buffer offset alignment not compatible with WGPU"
);
if self.raw.limits < desc.limits {
if caps.limits < desc.limits {
return Err(RequestDeviceError::LimitsExceeded);
}
@ -375,11 +379,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
profiling::scope!("create_surface_metal", "Instance");
let surface = Surface {
/*
metal: self.instance.metal.as_ref().map(|inst| {
// we don't want to link to metal-rs for this
#[allow(clippy::transmute_ptr_to_ref)]
inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
}),
}),*/
};
let mut token = Token::root();
@ -630,7 +635,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.features)
.map(|adapter| adapter.raw.features)
.map_err(|_| InvalidAdapter)
}
@ -643,7 +648,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.limits.clone())
.map(|adapter| adapter.raw.capabilities.limits.clone())
.map_err(|_| InvalidAdapter)
}
@ -656,7 +661,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.raw.downlevel)
.map(|adapter| adapter.raw.capabilities.downlevel)
.map_err(|_| InvalidAdapter)
}

View File

@ -78,18 +78,15 @@ pub type RawString = *const c_char;
pub type Label<'a> = Option<Cow<'a, str>>;
trait LabelHelpers<'a> {
fn to_string_or_default(&'a self) -> String;
fn borrow_option(&'a self) -> Option<&'a str>;
fn borrow_or_default(&'a self) -> &'a str;
}
impl<'a> LabelHelpers<'a> for Label<'a> {
fn borrow_or_default(&'a self) -> &'a str {
self.as_ref().map(|cow| cow.as_ref()).unwrap_or("")
fn borrow_option(&'a self) -> Option<&'a str> {
self.as_ref().map(|cow| cow.as_ref())
}
fn to_string_or_default(&'a self) -> String {
self.as_ref()
.map(|cow| cow.as_ref())
.unwrap_or("")
.to_string()
fn borrow_or_default(&'a self) -> &'a str {
self.borrow_option().unwrap_or_default()
}
}
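
Illustration: the two helpers compose, with `borrow_option` exposing the label as `Option<&str>` and `borrow_or_default` collapsing `None` to the empty string. A self-contained sketch of the same behavior on `Option<Cow<str>>`:

use std::borrow::Cow;

fn borrow_option<'a>(label: &'a Option<Cow<'a, str>>) -> Option<&'a str> {
    label.as_ref().map(|cow| cow.as_ref())
}

fn main() {
    let labeled: Option<Cow<str>> = Some(Cow::Borrowed("my buffer"));
    let unlabeled: Option<Cow<str>> = None;
    assert_eq!(borrow_option(&labeled), Some("my buffer"));
    // borrow_or_default is just borrow_option(..).unwrap_or_default():
    assert_eq!(borrow_option(&unlabeled).unwrap_or_default(), "");
}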

View File

@ -15,7 +15,7 @@ use thiserror::Error;
pub enum ShaderModuleSource<'a> {
SpirV(Cow<'a, [u32]>),
Wgsl(Cow<'a, str>),
Naga(&'a naga::Module),
Naga(naga::Module),
}
#[derive(Clone, Debug)]
@ -23,15 +23,13 @@ pub enum ShaderModuleSource<'a> {
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ShaderModuleDescriptor<'a> {
pub label: Label<'a>,
#[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
pub flags: wgt::ShaderFlags,
}
#[derive(Debug)]
pub struct ShaderModule<A: hal::Api> {
pub(crate) raw: A::ShaderModule,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) interface: Option<validation::Interface>,
pub(crate) interface: validation::Interface,
#[cfg(debug_assertions)]
pub(crate) label: String,
}
@ -53,7 +51,7 @@ impl<A: hal::Api> Resource for ShaderModule<A> {
#[derive(Clone, Debug, Error)]
pub enum CreateShaderModuleError {
#[error("Failed to parse WGSL")]
#[error("Failed to parse a shader")]
Parsing,
#[error("Failed to generate the backend-specific code")]
Generation,

View File

@ -242,15 +242,9 @@ pub struct TextureViewDescriptor<'a> {
}
#[derive(Debug)]
pub(crate) enum TextureViewInner<A: hal::Api> {
Native {
raw: A::TextureView,
source_id: Stored<TextureId>,
},
SwapChain {
raw: A::SurfaceTexture,
source_id: Stored<SwapChainId>,
},
pub(crate) enum TextureViewSource {
Native(Stored<TextureId>),
SwapChain(Stored<SwapChainId>),
}
#[derive(Debug)]
@ -260,9 +254,16 @@ pub(crate) struct HalTextureViewDescriptor {
pub range: wgt::ImageSubresourceRange,
}
impl HalTextureViewDescriptor {
pub fn aspects(&self) -> hal::FormatAspect {
hal::FormatAspect::from(self.format) & hal::FormatAspect::from(self.range.aspect)
}
}
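
Illustration: a view's effective aspects are the intersection of what the texture format provides and what the view's range requests. A self-contained sketch, with `u32` bit sets standing in for `hal::FormatAspect`:

const DEPTH: u32 = 1 << 0;
const STENCIL: u32 = 1 << 1;

fn main() {
    let format_aspects = DEPTH | STENCIL; // e.g. a Depth24PlusStencil8 texture
    let range_aspects = DEPTH; // e.g. a DepthOnly view request
    let view_aspects = format_aspects & range_aspects;
    assert_eq!(view_aspects, DEPTH); // the view exposes only the depth plane
}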
#[derive(Debug)]
pub struct TextureView<A: hal::Api> {
pub(crate) inner: TextureViewInner<A>,
pub(crate) raw: A::TextureView,
pub(crate) source: TextureViewSource,
//TODO: store device_id for quick access?
pub(crate) desc: HalTextureViewDescriptor,
pub(crate) format_features: wgt::TextureFormatFeatures,
@ -280,30 +281,35 @@ pub enum CreateTextureViewError {
InvalidTexture,
#[error("not enough memory left")]
OutOfMemory,
#[error("Invalid texture view dimension `{view:?}` with texture of dimension `{image:?}`")]
#[error("Invalid texture view dimension `{view:?}` with texture of dimension `{texture:?}`")]
InvalidTextureViewDimension {
view: wgt::TextureViewDimension,
image: wgt::TextureDimension,
texture: wgt::TextureDimension,
},
#[error("Invalid texture depth `{depth}` for texture view of dimension `Cubemap`. Cubemap views must use images of size 6.")]
InvalidCubemapTextureDepth { depth: u16 },
InvalidCubemapTextureDepth { depth: u32 },
#[error("Invalid texture depth `{depth}` for texture view of dimension `CubemapArray`. Cubemap views must use images with sizes which are a multiple of 6.")]
InvalidCubemapArrayTextureDepth { depth: u16 },
InvalidCubemapArrayTextureDepth { depth: u32 },
#[error(
"TextureView mip level count + base mip level {requested} must be <= Texture mip level count {total}"
)]
TooManyMipLevels { requested: u32, total: u8 },
TooManyMipLevels { requested: u32, total: u32 },
#[error("TextureView array layer count + base array layer {requested} must be <= Texture depth/array layer count {total}")]
TooManyArrayLayers { requested: u32, total: u16 },
TooManyArrayLayers { requested: u32, total: u32 },
#[error("Requested array layer count {requested} is not valid for the target view dimension {dim:?}")]
InvalidArrayLayerCount {
requested: u32,
dim: wgt::TextureViewDimension,
},
#[error("Aspect {requested:?} is not in the source texture ({total:?})")]
#[error("Aspect {requested_aspect:?} is not in the source texture format {texture_format:?}")]
InvalidAspect {
requested: hal::FormatAspect,
total: hal::FormatAspect,
texture_format: wgt::TextureFormat,
requested_aspect: wgt::TextureAspect,
},
#[error("Unable to view texture {texture:?} as {view:?}")]
FormatReinterpretation {
texture: wgt::TextureFormat,
view: wgt::TextureFormat,
},
}
@ -425,10 +431,7 @@ pub struct QuerySet<A: hal::Api> {
pub(crate) raw: A::QuerySet,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
/// Amount of queries in the query set.
pub(crate) desc: wgt::QuerySetDescriptor,
/// Amount of numbers in each query (i.e. a pipeline statistics query for two attributes will have this number be two)
pub(crate) elements: u32,
}
impl<A: hal::Api> Resource for QuerySet<A> {

View File

@ -35,7 +35,6 @@
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
conv,
device::DeviceError,
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token},
id::{DeviceId, SwapChainId, TextureViewId, Valid},
@ -44,23 +43,23 @@ use crate::{
LifeGuard, Stored, SubmissionIndex,
};
use hal::{Queue as _, Surface as _};
use hal::{Device as _, Queue as _, Surface as _};
use std::{borrow::Borrow, marker::PhantomData};
use thiserror::Error;
use wgt::{SwapChainDescriptor, SwapChainStatus};
use wgt::SwapChainStatus as Status;
const FRAME_TIMEOUT_MS: u64 = 1000;
const FRAME_TIMEOUT_MS: u32 = 1000;
pub const DESIRED_NUM_FRAMES: u32 = 3;
#[derive(Debug)]
pub struct SwapChain<A: hal::Api> {
pub(crate) life_guard: LifeGuard,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) desc: SwapChainDescriptor,
pub(crate) num_frames: hal::window::SwapImageIndex,
pub(crate) semaphore: B::Semaphore,
pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
pub(crate) desc: wgt::SwapChainDescriptor,
pub(crate) num_frames: u32,
pub(crate) acquired_texture: Option<(Stored<TextureViewId>, A::SurfaceTexture)>,
pub(crate) active_submission_index: SubmissionIndex,
pub(crate) framebuffer_attachment: hal::image::FramebufferAttachment,
pub(crate) marker: PhantomData<A>,
}
impl<A: hal::Api> crate::hub::Resource for SwapChain<A> {
@ -99,37 +98,17 @@ pub enum CreateSwapChainError {
UnsupportedQueueFamily,
#[error("requested format {requested:?} is not in list of supported formats: {available:?}")]
UnsupportedFormat {
requested: hal::format::Format,
available: Vec<hal::format::Format>,
requested: wgt::TextureFormat,
available: Vec<wgt::TextureFormat>,
},
}
pub(crate) fn swap_chain_descriptor_to_hal(
desc: &SwapChainDescriptor,
num_frames: u32,
private_features: PrivateFeatures,
) -> hal::window::SwapchainConfig {
let mut config = hal::window::SwapchainConfig::new(
desc.width,
desc.height,
conv::map_texture_format(desc.format, private_features),
num_frames,
);
//TODO: check for supported
config.image_usage = conv::map_texture_usage(desc.usage, hal::FormatAspect::COLOR);
config.composite_alpha_mode = hal::window::CompositeAlphaMode::OPAQUE;
config.present_mode = match desc.present_mode {
wgt::PresentMode::Immediate => hal::window::PresentMode::IMMEDIATE,
wgt::PresentMode::Mailbox => hal::window::PresentMode::MAILBOX,
wgt::PresentMode::Fifo => hal::window::PresentMode::FIFO,
};
config
#[error("requested usage is not supported")]
UnsupportedUsage,
}
#[repr(C)]
#[derive(Debug)]
pub struct SwapChainOutput {
pub status: SwapChainStatus,
pub status: Status,
pub view_id: Option<TextureViewId>,
}
@ -155,7 +134,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
#[allow(unused_variables)]
let device = &device_guard[sc.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
@ -165,51 +143,64 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
let suf = B::get_surface_mut(surface);
let (image, status) = match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
Ok((surface_image, None)) => (Some(surface_image), SwapChainStatus::Good),
Ok((surface_image, Some(_))) => (Some(surface_image), SwapChainStatus::Suboptimal),
let suf = A::get_surface_mut(surface);
let (texture, status) = match unsafe { suf.acquire_texture(FRAME_TIMEOUT_MS) } {
Ok(Some(ast)) => {
let status = if ast.suboptimal {
Status::Suboptimal
} else {
Status::Good
};
(Some(ast.texture), status)
}
Ok(None) => (None, Status::Timeout),
Err(err) => (
None,
match err {
hal::window::AcquireError::OutOfMemory(_) => {
return Err(DeviceError::OutOfMemory.into())
}
hal::window::AcquireError::NotReady { .. } => SwapChainStatus::Timeout,
hal::window::AcquireError::OutOfDate(_) => SwapChainStatus::Outdated,
hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => {
return Err(DeviceError::Lost.into())
hal::SurfaceError::Lost => Status::Lost,
hal::SurfaceError::Device(err) => {
return Err(DeviceError::from(err).into());
}
hal::SurfaceError::Outdated => Status::Outdated,
},
),
};
let view_id = match image {
Some(image) => {
let hal_desc = hal::TextureViewDescriptor {
label: Some("_Frame"),
format: sc.desc.format,
dimension: wgt::TextureViewDimension::D2,
range: wgt::ImageSubresourceRange::default(),
};
let view_id = match texture {
Some(suf_texture) => {
let raw = device
.raw
.create_texture_view(suf_texture.borrow(), &hal_desc)
.map_err(DeviceError::from)?;
let view = resource::TextureView {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: Valid(swap_chain_id),
ref_count: sc.life_guard.add_ref(),
},
raw,
source: resource::TextureViewSource::SwapChain(Stored {
value: Valid(swap_chain_id),
ref_count: sc.life_guard.add_ref(),
}),
desc: resource::HalTextureViewDescriptor {
format: sc.desc.format,
dimension: wgt::TextureViewDimension::D2,
range: wgt::ImageSubresourceRange::default(),
},
aspects: hal::FormatAspect::COLOR,
format: sc.desc.format,
format_features: wgt::TextureFormatFeatures {
allowed_usages: wgt::TextureUsage::RENDER_ATTACHMENT,
flags: wgt::TextureFormatFeatureFlags::empty(),
filterable: false,
},
dimension: wgt::TextureViewDimension::D2,
extent: wgt::Extent3d {
width: sc.desc.width,
height: sc.desc.height,
depth_or_array_layers: 1,
},
samples: 1,
framebuffer_attachment: sc.framebuffer_attachment.clone(),
sampled_internal_use: hal::TextureUse::empty(),
selector: TextureSelector {
layers: 0..1,
@ -221,14 +212,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = view.life_guard.add_ref();
let id = fid.assign(view, &mut token);
if sc.acquired_view_id.is_some() {
if sc.acquired_texture.is_some() {
return Err(SwapChainError::AlreadyAcquired);
}
sc.acquired_view_id = Some(Stored {
value: id,
ref_count,
});
sc.acquired_texture = Some((
Stored {
value: id,
ref_count,
},
suf_texture,
));
Some(id.0)
}
@ -241,7 +235,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn swap_chain_present<A: HalApi>(
&self,
swap_chain_id: SwapChainId,
) -> Result<SwapChainStatus, SwapChainError> {
) -> Result<Status, SwapChainError> {
profiling::scope!("present", "SwapChain");
let hub = A::hub(self);
@ -263,44 +257,33 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
trace.lock().add(Action::PresentSwapChain(swap_chain_id));
}
let view = {
let view_id = sc
.acquired_view_id
let suf_texture = {
let (view_id, suf_texture) = sc
.acquired_texture
.take()
.ok_or(SwapChainError::AlreadyAcquired)?;
let (view_maybe, _) = hub.texture_views.unregister(view_id.value.0, &mut token);
view_maybe.ok_or(SwapChainError::Invalid)?
};
if view.life_guard.ref_count.unwrap().load() != 1 {
return Err(SwapChainError::StillReferenced);
}
let image = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain { image, .. } => image,
let view = view_maybe.ok_or(SwapChainError::Invalid)?;
if view.life_guard.ref_count.unwrap().load() != 1 {
return Err(SwapChainError::StillReferenced);
}
suf_texture
};
let sem = if sc.active_submission_index > device.last_completed_submission_index() {
Some(&mut sc.semaphore)
} else {
None
let result = unsafe {
device
.queue
.present(A::get_surface_mut(surface), suf_texture)
};
let queue = &mut device.queue_group.queues[0];
let result = unsafe { queue.present(B::get_surface_mut(surface), image, sem) };
log::debug!("Presented. End of Frame");
match result {
Ok(None) => Ok(SwapChainStatus::Good),
Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal),
Ok(()) => Ok(Status::Good),
Err(err) => match err {
hal::window::PresentError::OutOfMemory(_) => {
Err(SwapChainError::Device(DeviceError::OutOfMemory))
}
hal::window::PresentError::OutOfDate(_) => Ok(SwapChainStatus::Outdated),
hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost),
hal::window::PresentError::DeviceLost(_) => {
Err(SwapChainError::Device(DeviceError::Lost))
}
hal::SurfaceError::Lost => Ok(Status::Lost),
hal::SurfaceError::Device(err) => Err(SwapChainError::from(DeviceError::from(err))),
hal::SurfaceError::Outdated => Ok(Status::Outdated),
},
}
}

View File

@ -12,7 +12,9 @@ use crate::{
resource, Epoch, FastHashMap, Index, RefCount,
};
use std::{collections::hash_map::Entry, fmt, marker::PhantomData, ops, vec::Drain};
use std::{
collections::hash_map::Entry, fmt, marker::PhantomData, num::NonZeroU32, ops, vec::Drain,
};
use thiserror::Error;
pub(crate) use buffer::BufferState;
@ -131,7 +133,7 @@ impl PendingTransition<BufferState> {
buf: &'a resource::Buffer<A>,
) -> hal::BufferBarrier<'a, A> {
log::trace!("\tbuffer -> {:?}", self);
let &(ref buffer, _) = buf.raw.as_ref().expect("Buffer is destroyed");
let buffer = buf.raw.as_ref().expect("Buffer is destroyed");
hal::BufferBarrier {
buffer,
usage: self.usage,
@ -155,10 +157,20 @@ impl PendingTransition<TextureState> {
tex: &'a resource::Texture<A>,
) -> hal::TextureBarrier<'a, A> {
log::trace!("\ttexture -> {:?}", self);
let &(ref texture, _) = tex.raw.as_ref().expect("Texture is destroyed");
let texture = tex.raw.as_ref().expect("Texture is destroyed");
hal::TextureBarrier {
texture,
subresource: self.selector,
range: wgt::ImageSubresourceRange {
aspect: wgt::TextureAspect::All,
base_mip_level: self.selector.levels.start,
mip_level_count: NonZeroU32::new(
self.selector.levels.end - self.selector.levels.start,
),
base_array_layer: self.selector.layers.start,
array_layer_count: NonZeroU32::new(
self.selector.layers.end - self.selector.layers.start,
),
},
usage: self.usage,
}
}
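
Illustration: the tracker's half-open `selector` ranges are rewritten into WebGPU's base-plus-count form, where a zero count becomes `None` via `NonZeroU32::new`. A self-contained sketch (the local struct mirrors `wgt::ImageSubresourceRange` minus the aspect field):

use std::{num::NonZeroU32, ops::Range};

#[derive(Debug, PartialEq)]
struct SubresourceRange {
    base_mip_level: u32,
    mip_level_count: Option<NonZeroU32>,
    base_array_layer: u32,
    array_layer_count: Option<NonZeroU32>,
}

fn to_range(levels: Range<u32>, layers: Range<u32>) -> SubresourceRange {
    SubresourceRange {
        base_mip_level: levels.start,
        mip_level_count: NonZeroU32::new(levels.end - levels.start),
        base_array_layer: layers.start,
        array_layer_count: NonZeroU32::new(layers.end - layers.start),
    }
}

fn main() {
    let range = to_range(1..3, 0..6);
    assert_eq!(range.base_mip_level, 1);
    assert_eq!(range.mip_level_count, NonZeroU32::new(2)); // levels 1 and 2
    assert_eq!(range.base_array_layer, 0);
    assert_eq!(range.array_layer_count, NonZeroU32::new(6)); // all six layers
}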
@ -168,8 +180,8 @@ impl From<PendingTransition<TextureState>> for UsageConflict {
fn from(e: PendingTransition<TextureState>) -> Self {
Self::Texture {
id: e.id.0,
mip_levels: e.selector.levels.start as u32..e.selector.levels.end as u32,
array_layers: e.selector.layers.start as u32..e.selector.layers.end as u32,
mip_levels: e.selector.levels.start..e.selector.levels.end,
array_layers: e.selector.layers.start..e.selector.layers.end,
combined_use: e.usage.end,
}
}

View File

@ -3,28 +3,26 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{range::RangedStates, PendingTransition, ResourceState, Unit};
use crate::{
device::MAX_MIP_LEVELS,
id::{TextureId, Valid},
};
use crate::id::{TextureId, Valid};
use hal::TextureUse;
use arrayvec::ArrayVec;
use std::{iter, ops::Range};
type PlaneStates = RangedStates<hal::ArrayLayer, Unit<TextureUse>>;
type PlaneStates = RangedStates<u32, Unit<TextureUse>>;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TextureSelector {
//TODO: rename to `mip_levels` and `array_layers` for consistency
//pub aspects: hal::FormatAspect,
pub levels: Range<hal::MipLevel>,
pub layers: Range<hal::ArrayLayer>,
pub levels: Range<u32>,
pub layers: Range<u32>,
}
#[derive(Clone, Debug, Default, PartialEq)]
pub(crate) struct TextureState {
mips: ArrayVec<[PlaneStates; MAX_MIP_LEVELS as usize]>,
mips: ArrayVec<[PlaneStates; hal::MAX_MIP_LEVELS as usize]>,
/// True if we have the information about all the subresources here
full: bool,
}
@ -43,7 +41,7 @@ impl PendingTransition<TextureState> {
}
impl TextureState {
pub fn new(mip_level_count: hal::MipLevel, array_layer_count: hal::ArrayLayer) -> Self {
pub fn new(mip_level_count: u32, array_layer_count: u32) -> Self {
Self {
mips: iter::repeat_with(|| {
PlaneStates::from_range(0..array_layer_count, Unit::new(TextureUse::UNINITIALIZED))
@ -103,7 +101,7 @@ impl ResourceState for TextureState {
.iter_mut()
.enumerate()
{
let level = selector.levels.start + mip_id as hal::MipLevel;
let level = selector.levels.start + mip_id as u32;
let layers = mip.isolate(&selector.layers, Unit::new(usage));
for &mut (ref range, ref mut unit) in layers {
if unit.last == usage && TextureUse::ORDERED.contains(usage) {
@ -153,7 +151,7 @@ impl ResourceState for TextureState {
.iter_mut()
.enumerate()
{
let level = selector.levels.start + mip_id as hal::MipLevel;
let level = selector.levels.start + mip_id as u32;
let layers = mip.isolate(&selector.layers, Unit::new(usage));
for &mut (ref range, ref mut unit) in layers {
match unit.first {
@ -192,7 +190,7 @@ impl ResourceState for TextureState {
}
for (mip_id, (mip_self, mip_other)) in self.mips.iter_mut().zip(&other.mips).enumerate() {
let level = mip_id as hal::MipLevel;
let level = mip_id as u32;
temp.extend(mip_self.merge(mip_other, 0));
mip_self.clear();
@ -374,7 +372,7 @@ mod test {
2..3,
Unit {
first: Some(TextureUse::COPY_SRC),
last: TextureUse::ATTACHMENT_WRITE,
last: TextureUse::COLOR_TARGET,
},
),
]);
@ -415,7 +413,7 @@ mod test {
ts1.mips[0].query(&(2..3), |&v| v),
Some(Ok(Unit {
first: Some(TextureUse::SAMPLED),
last: TextureUse::ATTACHMENT_WRITE,
last: TextureUse::COLOR_TARGET,
})),
"wrong final layer 2 state"
);
@ -424,7 +422,7 @@ mod test {
ts2.mips[0] = PlaneStates::from_slice(&[(
2..3,
Unit {
first: Some(TextureUse::ATTACHMENT_WRITE),
first: Some(TextureUse::COLOR_TARGET),
last: TextureUse::COPY_SRC,
},
)]);

View File

@ -1,5 +1,7 @@
#![allow(unused_variables)]
use std::ops::Range;
#[derive(Clone)]
pub struct Api;
pub struct Context;
@ -17,15 +19,14 @@ impl crate::Api for Api {
type Device = Context;
type CommandBuffer = Encoder;
type RenderPass = Encoder;
type ComputePass = Encoder;
type Buffer = Resource;
type QuerySet = Resource;
type Texture = Resource;
type SurfaceTexture = Resource;
type TextureView = Resource;
type Sampler = Resource;
type QuerySet = Resource;
type Fence = Resource;
type BindGroupLayout = Resource;
type BindGroup = Resource;
@ -55,9 +56,10 @@ impl crate::Surface<Api> for Context {
unsafe fn acquire_texture(
&mut self,
timeout_ms: u32,
) -> Result<(Resource, Option<crate::Suboptimal>), crate::SurfaceError> {
Ok((Resource, None))
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
Ok(None)
}
unsafe fn discard_texture(&mut self, texture: Resource) {}
}
impl crate::Adapter<Api> for Context {
@ -77,7 +79,19 @@ impl crate::Adapter<Api> for Context {
}
impl crate::Queue<Api> for Context {
unsafe fn submit<I>(&mut self, command_buffers: I) {}
unsafe fn submit<I>(
&mut self,
command_buffers: I,
signal_fence: Option<(&Resource, crate::FenceValue)>,
) {
}
unsafe fn present(
&mut self,
surface: &mut Context,
texture: Resource,
) -> Result<(), crate::SurfaceError> {
Ok(())
}
}
impl crate::Device<Api> for Context {
@ -92,7 +106,9 @@ impl crate::Device<Api> for Context {
) -> DeviceResult<std::ptr::NonNull<u8>> {
Err(crate::DeviceError::Lost)
}
unsafe fn unmap_buffer(&self, buffer: &Resource) {}
unsafe fn unmap_buffer(&self, buffer: &Resource) -> DeviceResult<()> {
Ok(())
}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}
unsafe fn invalidate_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}
@ -165,11 +181,35 @@ impl crate::Device<Api> for Context {
Ok(Resource)
}
unsafe fn destroy_compute_pipeline(&self, pipeline: Resource) {}
unsafe fn create_query_set(&self, desc: &wgt::QuerySetDescriptor) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_query_set(&self, set: Resource) {}
unsafe fn create_fence(&self) -> DeviceResult<Resource> {
Ok(Resource)
}
unsafe fn destroy_fence(&self, fence: Resource) {}
unsafe fn get_fence_value(&self, fence: &Resource) -> DeviceResult<crate::FenceValue> {
Ok(0)
}
unsafe fn wait(
&self,
fence: &Resource,
value: crate::FenceValue,
timeout_ms: u32,
) -> DeviceResult<bool> {
Ok(true)
}
unsafe fn start_capture(&self) -> bool {
false
}
unsafe fn stop_capture(&self) {}
}
impl crate::CommandBuffer<Api> for Encoder {
unsafe fn begin(&mut self) {}
unsafe fn end(&mut self) {}
unsafe fn finish(&mut self) {}
unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
where
@ -213,20 +253,46 @@ impl crate::CommandBuffer<Api> for Encoder {
) {
}
unsafe fn begin_render_pass(&mut self) -> Encoder {
Encoder
unsafe fn begin_query(&mut self, set: &Resource, index: u32) {}
unsafe fn end_query(&mut self, set: &Resource, index: u32) {}
unsafe fn write_timestamp(&mut self, set: &Resource, index: u32) {}
unsafe fn reset_queries(&mut self, set: &Resource, range: Range<u32>) {}
unsafe fn copy_query_results(
&mut self,
set: &Resource,
range: Range<u32>,
buffer: &Resource,
offset: wgt::BufferAddress,
) {
}
unsafe fn end_render_pass(&mut self, pass: Encoder) {}
unsafe fn begin_compute_pass(&mut self) -> Encoder {
Encoder
unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<Api>) {}
unsafe fn end_render_pass(&mut self) {}
unsafe fn begin_compute_pass(&mut self) {}
unsafe fn end_compute_pass(&mut self) {}
unsafe fn set_bind_group(
&mut self,
layout: &Resource,
index: u32,
group: &Resource,
dynamic_offsets: &[u32],
) {
}
unsafe fn set_push_constants(
&mut self,
layout: &Resource,
stages: wgt::ShaderStage,
offset: u32,
data: &[u32],
) {
}
unsafe fn end_compute_pass(&mut self, pass: Encoder) {}
}
impl crate::RenderPass<Api> for Encoder {
unsafe fn set_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn insert_debug_marker(&mut self, label: &str) {}
unsafe fn begin_debug_marker(&mut self, group_label: &str) {}
unsafe fn end_debug_marker(&mut self) {}
unsafe fn set_bind_group(&mut self, layout: &Resource, index: u32, group: &Resource) {}
unsafe fn set_render_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn set_index_buffer<'a>(
&mut self,
@ -236,10 +302,10 @@ impl crate::RenderPass<Api> for Encoder {
}
unsafe fn set_vertex_buffer<'a>(&mut self, index: u32, binding: crate::BufferBinding<'a, Api>) {
}
unsafe fn set_viewport(&mut self, rect: &crate::Rect, depth_range: std::ops::Range<f32>) {}
unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect) {}
unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {}
unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {}
unsafe fn set_stencil_reference(&mut self, value: u32) {}
unsafe fn set_blend_constants(&mut self, color: wgt::Color) {}
unsafe fn set_blend_constants(&mut self, color: &wgt::Color) {}
unsafe fn draw(
&mut self,
@ -272,12 +338,26 @@ impl crate::RenderPass<Api> for Encoder {
draw_count: u32,
) {
}
}
unsafe fn draw_indirect_count(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
count_buffer: &Resource,
count_offset: wgt::BufferAddress,
max_count: u32,
) {
}
unsafe fn draw_indexed_indirect_count(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
count_buffer: &Resource,
count_offset: wgt::BufferAddress,
max_count: u32,
) {
}
impl crate::ComputePass<Api> for Encoder {
unsafe fn set_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn set_bind_group(&mut self, layout: &Resource, index: u32, group: &Resource) {}
unsafe fn set_compute_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn dispatch(&mut self, count: [u32; 3]) {}
unsafe fn dispatch_indirect(&mut self, buffer: &Resource, offset: wgt::BufferAddress) {}

View File

@ -11,6 +11,8 @@
* - Mapping is persistent, with explicit synchronization.
* - Resource transitions are explicit.
* - All layouts are explicit. The binding model relies on layout compatibility.
*
* General design direction: follow the behavior shared by 2 of the 3 major target APIs.
*/
#![allow(
@ -50,11 +52,13 @@ use thiserror::Error;
pub const MAX_ANISOTROPY: u8 = 16;
pub const MAX_BIND_GROUPS: usize = 8;
pub const MAX_VERTEX_BUFFERS: usize = 16;
pub const MAX_COLOR_TARGETS: usize = 4;
pub const MAX_MIP_LEVELS: u32 = 16;
pub type Label<'a> = Option<&'a str>;
pub type MemoryRange = Range<wgt::BufferAddress>;
pub type MipLevel = u8;
pub type ArrayLayer = u16;
pub type FenceValue = u64;
#[derive(Clone, Debug, PartialEq, Error)]
pub enum DeviceError {
@ -84,35 +88,29 @@ pub enum PipelineError {
pub enum SurfaceError {
#[error("surface is lost")]
Lost,
#[error("surface is outdated, needs to be re-created")]
Outdated,
#[error(transparent)]
Device(#[from] DeviceError),
#[error("other reason: {0}")]
Other(&'static str),
}
/// Marker value returned if the presentation configuration no longer matches
/// the surface properties exactly, but can still be used to present
/// to the surface successfully.
#[derive(Debug)]
pub struct Suboptimal;
pub trait Api: Clone + Sized {
type Instance: Instance<Self>;
type Surface: Surface<Self>;
type Adapter: Adapter<Self>;
type Device: Device<Self>;
type Queue: Queue<Self>;
type CommandBuffer: CommandBuffer<Self>;
type RenderPass: RenderPass<Self>;
type ComputePass: ComputePass<Self>;
type Buffer: fmt::Debug + Send + Sync + 'static;
type QuerySet: fmt::Debug + Send + Sync;
type Texture: fmt::Debug + Send + Sync + 'static;
type SurfaceTexture: fmt::Debug + Send + Sync + Borrow<Self::Texture>;
type TextureView: fmt::Debug + Send + Sync;
type Sampler: fmt::Debug + Send + Sync;
type QuerySet: fmt::Debug + Send + Sync;
type Fence: fmt::Debug + Send + Sync;
type BindGroupLayout;
type BindGroup: fmt::Debug + Send + Sync;
@ -135,10 +133,12 @@ pub trait Surface<A: Api> {
unsafe fn unconfigure(&mut self, device: &A::Device);
/// Returns `None` on timing out.
unsafe fn acquire_texture(
&mut self,
timeout_ms: u32,
) -> Result<(A::SurfaceTexture, Option<Suboptimal>), SurfaceError>;
) -> Result<Option<AcquiredSurfaceTexture<A>>, SurfaceError>;
unsafe fn discard_texture(&mut self, texture: A::SurfaceTexture);
}
pub trait Adapter<A: Api> {
@ -168,7 +168,7 @@ pub trait Device<A: Api> {
buffer: &A::Buffer,
range: MemoryRange,
) -> Result<NonNull<u8>, DeviceError>;
unsafe fn unmap_buffer(&self, buffer: &A::Buffer);
unsafe fn unmap_buffer(&self, buffer: &A::Buffer) -> Result<(), DeviceError>;
unsafe fn flush_mapped_ranges<I: Iterator<Item = MemoryRange>>(
&self,
buffer: &A::Buffer,
@ -233,17 +233,43 @@ pub trait Device<A: Api> {
desc: &ComputePipelineDescriptor<A>,
) -> Result<A::ComputePipeline, PipelineError>;
unsafe fn destroy_compute_pipeline(&self, pipeline: A::ComputePipeline);
unsafe fn create_query_set(
&self,
desc: &wgt::QuerySetDescriptor,
) -> Result<A::QuerySet, DeviceError>;
unsafe fn destroy_query_set(&self, set: A::QuerySet);
unsafe fn create_fence(&self) -> Result<A::Fence, DeviceError>;
unsafe fn destroy_fence(&self, fence: A::Fence);
unsafe fn get_fence_value(&self, fence: &A::Fence) -> Result<FenceValue, DeviceError>;
unsafe fn wait(
&self,
fence: &A::Fence,
value: FenceValue,
timeout_ms: u32,
) -> Result<bool, DeviceError>;
unsafe fn start_capture(&self) -> bool;
unsafe fn stop_capture(&self);
}
pub trait Queue<A: Api> {
unsafe fn submit<I: Iterator<Item = A::CommandBuffer>>(&mut self, command_buffers: I);
unsafe fn submit<I: Iterator<Item = A::CommandBuffer>>(
&mut self,
command_buffers: I,
signal_fence: Option<(&A::Fence, FenceValue)>,
);
unsafe fn present(
&mut self,
surface: &mut A::Surface,
texture: A::SurfaceTexture,
) -> Result<(), SurfaceError>;
}
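
Illustration: a sketch of how these signatures compose into a submit-and-wait flow, generic over any backend. This is not crate code, just the intended interplay of `create_fence`, `submit`, and `wait` from the traits in this module, with minimal error handling:

unsafe fn submit_and_block<A: Api>(
    device: &A::Device,
    queue: &mut A::Queue,
    command_buffers: impl Iterator<Item = A::CommandBuffer>,
) -> Result<(), DeviceError> {
    let fence = device.create_fence()?;
    let signal_value: FenceValue = 1;
    queue.submit(command_buffers, Some((&fence, signal_value)));
    // Poll wait() until the GPU signals the fence; the timeout value is illustrative.
    while !device.wait(&fence, signal_value, 1000)? {}
    device.destroy_fence(fence);
    Ok(())
}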
pub trait SwapChain<A: Api> {}
pub trait CommandBuffer<A: Api> {
unsafe fn begin(&mut self);
unsafe fn end(&mut self);
unsafe fn finish(&mut self);
unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
where
@ -253,6 +279,8 @@ pub trait CommandBuffer<A: Api> {
where
T: Iterator<Item = TextureBarrier<'a, A>>;
// copy operations
unsafe fn fill_buffer(&mut self, buffer: &A::Buffer, range: MemoryRange, value: u8);
unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &A::Buffer, dst: &A::Buffer, regions: T)
@ -283,14 +311,7 @@ pub trait CommandBuffer<A: Api> {
) where
T: Iterator<Item = BufferTextureCopy>;
unsafe fn begin_render_pass(&mut self) -> A::RenderPass;
unsafe fn end_render_pass(&mut self, pass: A::RenderPass);
unsafe fn begin_compute_pass(&mut self) -> A::ComputePass;
unsafe fn end_compute_pass(&mut self, pass: A::ComputePass);
}
pub trait RenderPass<A: Api> {
unsafe fn set_pipeline(&mut self, pipeline: &A::RenderPipeline);
// pass common
/// Sets the bind group at `index` to `group`, assuming the layout
/// of all the preceding groups to be taken from `layout`.
@ -299,18 +320,53 @@ pub trait RenderPass<A: Api> {
layout: &A::PipelineLayout,
index: u32,
group: &A::BindGroup,
dynamic_offsets: &[u32],
);
unsafe fn set_push_constants(
&mut self,
layout: &A::PipelineLayout,
stages: wgt::ShaderStage,
offset: u32,
data: &[u32],
);
unsafe fn insert_debug_marker(&mut self, label: &str);
unsafe fn begin_debug_marker(&mut self, group_label: &str);
unsafe fn end_debug_marker(&mut self);
// queries
unsafe fn begin_query(&mut self, set: &A::QuerySet, index: u32);
unsafe fn end_query(&mut self, set: &A::QuerySet, index: u32);
unsafe fn write_timestamp(&mut self, set: &A::QuerySet, index: u32);
unsafe fn reset_queries(&mut self, set: &A::QuerySet, range: Range<u32>);
unsafe fn copy_query_results(
&mut self,
set: &A::QuerySet,
range: Range<u32>,
buffer: &A::Buffer,
offset: wgt::BufferAddress,
);
// render passes
// Begins a render pass, clears all active bindings.
unsafe fn begin_render_pass(&mut self, desc: &RenderPassDescriptor<A>);
unsafe fn end_render_pass(&mut self);
unsafe fn set_render_pipeline(&mut self, pipeline: &A::RenderPipeline);
unsafe fn set_index_buffer<'a>(
&mut self,
binding: BufferBinding<'a, A>,
format: wgt::IndexFormat,
);
unsafe fn set_vertex_buffer<'a>(&mut self, index: u32, binding: BufferBinding<'a, A>);
unsafe fn set_viewport(&mut self, rect: &Rect, depth_range: Range<f32>);
unsafe fn set_scissor_rect(&mut self, rect: &Rect);
unsafe fn set_viewport(&mut self, rect: &Rect<f32>, depth_range: Range<f32>);
unsafe fn set_scissor_rect(&mut self, rect: &Rect<u32>);
unsafe fn set_stencil_reference(&mut self, value: u32);
unsafe fn set_blend_constants(&mut self, color: wgt::Color);
unsafe fn set_blend_constants(&mut self, color: &wgt::Color);
unsafe fn draw(
&mut self,
@ -339,19 +395,30 @@ pub trait RenderPass<A: Api> {
offset: wgt::BufferAddress,
draw_count: u32,
);
}
pub trait ComputePass<A: Api> {
unsafe fn set_pipeline(&mut self, pipeline: &A::ComputePipeline);
/// Sets the bind group at `index` to `group`, assuming the layout
/// of all the preceding groups to be taken from `layout`.
unsafe fn set_bind_group(
unsafe fn draw_indirect_count(
&mut self,
layout: &A::PipelineLayout,
index: u32,
group: &A::BindGroup,
buffer: &A::Buffer,
offset: wgt::BufferAddress,
count_buffer: &A::Buffer,
count_offset: wgt::BufferAddress,
max_count: u32,
);
unsafe fn draw_indexed_indirect_count(
&mut self,
buffer: &A::Buffer,
offset: wgt::BufferAddress,
count_buffer: &A::Buffer,
count_offset: wgt::BufferAddress,
max_count: u32,
);
// compute passes
// Begins a compute pass, clears all active bindings.
unsafe fn begin_compute_pass(&mut self);
unsafe fn end_compute_pass(&mut self);
unsafe fn set_compute_pipeline(&mut self, pipeline: &A::ComputePipeline);
unsafe fn dispatch(&mut self, count: [u32; 3]);
unsafe fn dispatch_indirect(&mut self, buffer: &A::Buffer, offset: wgt::BufferAddress);
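A sketch of a complete compute recording using only the signatures visible in this hunk; the resource names are hypothetical:
fn record_dispatch<A: Api>(
    enc: &mut impl CommandBuffer<A>,
    pipeline: &A::ComputePipeline,
    layout: &A::PipelineLayout,
    group: &A::BindGroup,
) {
    unsafe {
        enc.begin_compute_pass();
        enc.set_compute_pipeline(pipeline);
        // No dynamic offsets in this sketch.
        enc.set_bind_group(layout, 0, group, &[]);
        enc.dispatch([16, 16, 1]);
        enc.end_compute_pass();
    }
}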
@ -423,6 +490,13 @@ bitflags!(
}
);
bitflags!(
pub struct AttachmentOp: u8 {
const LOAD = 1;
const STORE = 2;
}
);
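A sketch of how these bits combine; the mapping onto wgt's `LoadOp`/`StoreOp` pairs is an assumption:
// Load the previous contents and keep the results (e.g. blending on top).
let load_and_store = AttachmentOp::LOAD | AttachmentOp::STORE;
// Clear on begin and discard on end: a transient attachment sets neither bit.
let clear_and_discard = AttachmentOp::empty();
assert!(load_and_store.contains(AttachmentOp::STORE));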
bitflags::bitflags! {
/// Similar to `wgt::BufferUsage` but for internal use.
pub struct BufferUse: u32 {
@ -472,7 +546,7 @@ bitflags::bitflags! {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct Alignments {
/// The alignment of the start of the buffer used as a GPU copy source.
pub buffer_copy_offset: wgt::BufferSize,
@ -483,7 +557,7 @@ pub struct Alignments {
pub uniform_buffer_offset: wgt::BufferSize,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct Capabilities {
pub limits: wgt::Limits,
pub alignments: Alignments,
@ -505,7 +579,7 @@ pub struct SurfaceCapabilities {
/// List of supported texture formats.
///
/// Must contain at least one entry.
pub texture_formats: Vec<wgt::TextureFormat>,
pub formats: Vec<wgt::TextureFormat>,
/// Range for the swap chain sizes.
///
@ -524,12 +598,12 @@ pub struct SurfaceCapabilities {
/// Supported texture usage flags.
///
/// Must have at least `TextureUse::COLOR_TARGET`.
pub texture_uses: TextureUse,
pub usage: TextureUse,
/// List of supported V-sync modes.
///
/// Must be at least one.
pub vsync_modes: Vec<VsyncMode>,
pub present_modes: Vec<wgt::PresentMode>,
/// List of supported alpha composition modes.
///
@ -537,6 +611,15 @@ pub struct SurfaceCapabilities {
pub composite_alpha_modes: Vec<CompositeAlphaMode>,
}
#[derive(Debug)]
pub struct AcquiredSurfaceTexture<A: Api> {
pub texture: A::SurfaceTexture,
/// The presentation configuration no longer matches
/// the surface properties exactly, but can still be used to present
/// to the surface successfully.
pub suboptimal: bool,
}
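A sketch of one way a caller might act on `suboptimal`; the surrounding names are hypothetical:
fn plan_present<A: Api>(acquired: &AcquiredSurfaceTexture<A>, reconfigure: &mut bool) {
    // A suboptimal acquisition is still presentable; schedule a surface
    // reconfiguration for the next frame instead of dropping this one.
    if acquired.suboptimal {
        *reconfigure = true;
    }
}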
#[derive(Debug)]
pub struct OpenDevice<A: Api> {
pub device: A::Device,
@ -721,24 +804,11 @@ pub struct RenderPipelineDescriptor<'a, A: Api> {
/// The multi-sampling properties of the pipeline.
pub multisample: wgt::MultisampleState,
/// The fragment stage for this pipeline.
pub fragment_stage: ProgrammableStage<'a, A>,
pub fragment_stage: Option<ProgrammableStage<'a, A>>,
/// The effect of draw calls on the color aspect of the output target.
pub color_targets: Cow<'a, [wgt::ColorTargetState]>,
}
/// Specifies the mode regulating how a surface presents frames.
#[derive(Debug, Clone)]
pub enum VsyncMode {
/// Don't ever wait for v-sync.
Immediate,
/// Wait for v-sync, overwrite the last rendered frame.
Mailbox,
/// Present frames in the same order they are rendered.
Fifo,
/// Don't wait for the next v-sync if we just missed it.
Relaxed,
}
/// Specifies how the alpha channel of the textures should be handled during
/// compositing.
#[derive(Debug, Clone)]
@ -766,7 +836,7 @@ pub struct SurfaceConfiguration {
/// `SurfaceCapabilities::swap_chain_size` range.
pub swap_chain_size: u32,
/// Vertical synchronization mode.
pub vsync_mode: VsyncMode,
pub present_mode: wgt::PresentMode,
/// Alpha composition mode.
pub composite_alpha_mode: CompositeAlphaMode,
/// Format of the surface textures.
@ -779,11 +849,11 @@ pub struct SurfaceConfiguration {
}
#[derive(Debug, Clone)]
pub struct Rect {
pub x: f32,
pub y: f32,
pub w: f32,
pub h: f32,
pub struct Rect<T> {
pub x: T,
pub y: T,
pub w: T,
pub h: T,
}
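Making the scalar generic lets viewports stay fractional while scissor rects stay integral, matching the `set_viewport`/`set_scissor_rect` change above; values here are illustrative:
let viewport: Rect<f32> = Rect { x: 0.0, y: 0.0, w: 1280.0, h: 720.0 };
let scissor: Rect<u32> = Rect { x: 0, y: 0, w: 1280, h: 720 };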
#[derive(Debug, Clone)]
@ -795,7 +865,7 @@ pub struct BufferBarrier<'a, A: Api> {
#[derive(Debug, Clone)]
pub struct TextureBarrier<'a, A: Api> {
pub texture: &'a A::Texture,
pub subresource: wgt::ImageSubresourceRange,
pub range: wgt::ImageSubresourceRange,
pub usage: Range<TextureUse>,
}
@ -806,23 +876,85 @@ pub struct BufferCopy {
pub size: wgt::BufferSize,
}
#[derive(Clone, Debug)]
pub struct TextureCopyBase {
pub origin: wgt::Origin3d,
pub mip_level: u32,
pub aspect: FormatAspect,
}
#[derive(Clone, Debug)]
pub struct TextureCopy {
pub src_subresource: wgt::ImageSubresourceRange,
pub src_origin: wgt::Origin3d,
pub dst_subresource: wgt::ImageSubresourceRange,
pub dst_origin: wgt::Origin3d,
pub src_base: TextureCopyBase,
pub dst_base: TextureCopyBase,
pub size: wgt::Extent3d,
}
#[derive(Clone, Debug)]
pub struct BufferTextureCopy {
pub buffer_layout: wgt::ImageDataLayout,
pub texture_mip_level: u32,
pub texture_origin: wgt::Origin3d,
pub texture_base: TextureCopyBase,
pub size: wgt::Extent3d,
}
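A sketch of describing a 256x256 RGBA8 upload with these types; `FormatAspect::COLOR` and the exact `wgt::ImageDataLayout` field set are assumptions:
use std::num::NonZeroU32;

let upload = BufferTextureCopy {
    buffer_layout: wgt::ImageDataLayout {
        offset: 0,
        bytes_per_row: NonZeroU32::new(256 * 4), // 256 texels * 4 bytes each
        rows_per_image: None,                    // single image in the buffer
    },
    texture_base: TextureCopyBase {
        origin: wgt::Origin3d::ZERO,
        mip_level: 0,
        aspect: FormatAspect::COLOR, // flag name assumed
    },
    size: wgt::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
};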
#[derive(Debug)]
pub struct Attachment<'a, A: Api> {
pub view: &'a A::TextureView,
/// Contains either a single mutating usage as a target, or a valid combination
/// of read-only usages.
pub usage: TextureUse,
/// Defines the boundary usages for the attachment:
/// the render pass is expected to find the attachment in the
/// `boundary_usage.start` usage when it begins, and to leave it
/// in the `boundary_usage.end` usage when it ends.
pub boundary_usage: Range<TextureUse>,
}
// A derived `Clone` would needlessly require `A: Clone`, hence the manual impl.
impl<A: Api> Clone for Attachment<'_, A> {
fn clone(&self) -> Self {
Self {
view: self.view,
usage: self.usage,
boundary_usage: self.boundary_usage.clone(),
}
}
}
#[derive(Debug)]
pub struct ColorAttachment<'a, A: Api> {
pub target: Attachment<'a, A>,
pub resolve_target: Option<Attachment<'a, A>>,
pub ops: AttachmentOp,
pub clear_value: wgt::Color,
}
// A derived `Clone` would needlessly require `A: Clone`, hence the manual impl.
impl<A: Api> Clone for ColorAttachment<'_, A> {
fn clone(&self) -> Self {
Self {
target: self.target.clone(),
resolve_target: self.resolve_target.clone(),
ops: self.ops,
clear_value: self.clear_value,
}
}
}
#[derive(Clone, Debug)]
pub struct DepthStencilAttachment<'a, A: Api> {
pub target: Attachment<'a, A>,
pub depth_ops: AttachmentOp,
pub stencil_ops: AttachmentOp,
pub clear_value: (f32, u32),
}
#[derive(Clone, Debug)]
pub struct RenderPassDescriptor<'a, A: Api> {
pub label: Label<'a>,
pub color_attachments: Cow<'a, [ColorAttachment<'a, A>]>,
pub depth_stencil_attachment: Option<DepthStencilAttachment<'a, A>>,
}
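A sketch of a single color attachment pass built from these types, assuming `Label<'a>` is `Option<&'a str>` and that no usage transition is needed around the pass:
fn main_pass<'a, A: Api>(view: &'a A::TextureView) -> RenderPassDescriptor<'a, A> {
    RenderPassDescriptor {
        label: Some("main pass"),
        color_attachments: Cow::Owned(vec![ColorAttachment {
            target: Attachment {
                view,
                usage: TextureUse::COLOR_TARGET,
                // Same usage on both ends: no transition in this sketch.
                boundary_usage: TextureUse::COLOR_TARGET..TextureUse::COLOR_TARGET,
            },
            resolve_target: None,
            ops: AttachmentOp::STORE, // clear on load, keep the result
            clear_value: wgt::Color::BLACK,
        }]),
        depth_stencil_attachment: None,
    }
}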
#[test]
fn test_default_limits() {
let limits = wgt::Limits::default();

View File

@ -796,26 +796,6 @@ bitflags::bitflags! {
}
}
bitflags::bitflags! {
/// Flags controlling the shader processing.
///
/// Note: These flags are internal tweaks; they don't affect the API.
#[repr(transparent)]
#[derive(Default)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ShaderFlags: u32 {
/// If enabled, `wgpu` will parse the shader with `Naga`
/// and validate it both internally and with regards to
/// the given pipeline interface.
const VALIDATION = 1;
/// If enabled, `wgpu` will attempt to operate on `Naga`'s internal
/// representation of the shader module for both validation and translation
/// into the backend shader language, on backends where `gfx-hal` supports this.
const EXPERIMENTAL_TRANSLATION = 2;
}
}
/// Dimensions of a particular texture view.
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
@ -1061,6 +1041,16 @@ impl Default for PrimitiveTopology {
}
}
impl PrimitiveTopology {
/// Returns true for strip topologies.
pub fn is_strip(&self) -> bool {
match *self {
Self::PointList | Self::LineList | Self::TriangleList => false,
Self::LineStrip | Self::TriangleStrip => true,
}
}
}
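Presumably this backs decisions such as whether a strip index format applies; in the doctest style used elsewhere in this file:
assert!(wgpu::PrimitiveTopology::TriangleStrip.is_strip());
assert!(!wgpu::PrimitiveTopology::PointList.is_strip());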
/// Winding order which classifies the "front" face.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@ -2496,11 +2486,11 @@ impl Extent3d {
/// assert_eq!(wgpu::Extent3d { width: 60, height: 60, depth_or_array_layers: 1 }.max_mips(), 6);
/// assert_eq!(wgpu::Extent3d { width: 240, height: 1, depth_or_array_layers: 1 }.max_mips(), 8);
/// ```
pub fn max_mips(&self) -> u8 {
pub fn max_mips(&self) -> u32 {
let max_dim = self.width.max(self.height.max(self.depth_or_array_layers));
let max_levels = 32 - max_dim.leading_zeros();
max_levels as u8
max_levels
}
}
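The expression `32 - leading_zeros` computes `floor(log2(max_dim)) + 1`, the length of a full mip chain down to 1x1; for example:
// 240 has 24 leading zeros as a u32 (its highest set bit is bit 7),
// so max_mips() = 32 - 24 = 8, matching the doctest above.
assert_eq!(32 - 240u32.leading_zeros(), 8);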
@ -2583,6 +2573,14 @@ impl<L> TextureDescriptor<L> {
},
})
}
/// Returns the number of array layers.
pub fn array_layer_count(&self) -> u32 {
match self.dimension {
TextureDimension::D1 | TextureDimension::D2 => self.size.depth_or_array_layers,
TextureDimension::D3 => 1,
}
}
}
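A sketch using the re-exported types, with the descriptor field set assumed for this revision:
let desc = wgpu::TextureDescriptor {
    label: None,
    size: wgpu::Extent3d { width: 512, height: 512, depth_or_array_layers: 6 },
    mip_level_count: 1,
    sample_count: 1,
    dimension: wgpu::TextureDimension::D2,
    format: wgpu::TextureFormat::Rgba8Unorm,
    usage: wgpu::TextureUsage::SAMPLED,
};
// For D1/D2 the third extent dimension counts layers (a cube map here);
// a D3 volume of the same extent would report 1.
assert_eq!(desc.array_layer_count(), 6);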
/// Kind of data the texture holds.