Mirror of https://github.com/gfx-rs/wgpu.git, synced 2025-12-08 21:26:17 +00:00
Switch Binding Arrays on Metal to Argument Buffers (#6751)
This commit is contained in:
parent fabcba8f9a
commit a8a91737b2

.github/workflows/ci.yml (vendored)
@@ -48,7 +48,7 @@ env:
CARGO_INCREMENTAL: false
CARGO_TERM_COLOR: always
WGPU_DX12_COMPILER: dxc
RUST_LOG: info
RUST_LOG: debug
RUST_BACKTRACE: full
PKG_CONFIG_ALLOW_CROSS: 1 # allow android to work
RUSTFLAGS: -D warnings
Cargo.lock (generated)

@@ -2099,8 +2099,7 @@ dependencies = [
[[package]]
name = "metal"
version = "0.30.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3572083504c43e14aec05447f8a3d57cce0f66d7a3c1b9058572eca4d70ab9"
source = "git+https://github.com/gfx-rs/metal-rs.git?rev=ef768ff9d7#ef768ff9d742ae6a0f4e83ddc8031264e7d460c4"
dependencies = [
"bitflags 2.6.0",
"block",
@@ -142,9 +142,9 @@ wgpu-types = { version = "23.0.0", path = "./wgpu-types" }
winit = { version = "0.29", features = ["android-native-activity"] }

# Metal dependencies
metal = { version = "0.30.0", git = "https://github.com/gfx-rs/metal-rs.git", rev = "ef768ff9d7" }
block = "0.1"
core-graphics-types = "0.1"
metal = { version = "0.30.0" }
objc = "0.2.5"

# Vulkan dependencies
@@ -62,6 +62,15 @@ impl BindGroupState {
fn run_bench(ctx: &mut Criterion) {
let state = Lazy::new(BindGroupState::new);

if !state
.device_state
.device
.features()
.contains(wgpu::Features::TEXTURE_BINDING_ARRAY)
{
return;
}

let mut group = ctx.benchmark_group("Bind Group Creation");

for count in [5, 50, 500, 5_000, 50_000] {
@@ -341,4 +341,5 @@ pub const RESERVED: &[&str] = &[
"DefaultConstructible",
super::writer::FREXP_FUNCTION,
super::writer::MODF_FUNCTION,
super::writer::ARGUMENT_BUFFER_WRAPPER_STRUCT,
];
@@ -59,8 +59,6 @@ pub struct BindTarget {
pub buffer: Option<Slot>,
pub texture: Option<Slot>,
pub sampler: Option<BindSamplerTarget>,
/// If the binding is an unsized binding array, this overrides the size.
pub binding_array_size: Option<u32>,
pub mutable: bool,
}
@@ -36,6 +36,14 @@ const RAY_QUERY_FUN_MAP_INTERSECTION: &str = "_map_intersection_type";
pub(crate) const ATOMIC_COMP_EXCH_FUNCTION: &str = "naga_atomic_compare_exchange_weak_explicit";
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
/// For some reason, Metal does not let you have `metal::texture<..>*` as a buffer argument.
/// However, if you put that texture inside a struct, everything is totally fine. This
/// baffles me to no end.
///
/// As such, we wrap all argument buffers in a struct that has a single generic `<T>` field.
/// This allows `NagaArgumentBufferWrapper<metal::texture<..>>*` to work. The astute among
/// you have noticed that this should be exactly the same to the compiler, and you're correct.
pub(crate) const ARGUMENT_BUFFER_WRAPPER_STRUCT: &str = "NagaArgumentBufferWrapper";

/// Write the Metal name for a Naga numeric type: scalar, vector, or matrix.
///
@@ -275,24 +283,17 @@ impl Display for TypeContext<'_> {
crate::TypeInner::RayQuery => {
write!(out, "{RAY_QUERY_TYPE}")
}
crate::TypeInner::BindingArray { base, size } => {
crate::TypeInner::BindingArray { base, .. } => {
let base_tyname = Self {
handle: base,
first_time: false,
..*self
};

if let Some(&super::ResolvedBinding::Resource(super::BindTarget {
binding_array_size: Some(override_size),
..
})) = self.binding
{
write!(out, "{NAMESPACE}::array<{base_tyname}, {override_size}>")
} else if let crate::ArraySize::Constant(size) = size {
write!(out, "{NAMESPACE}::array<{base_tyname}, {size}>")
} else {
unreachable!("metal requires all arrays be constant sized");
}
write!(
out,
"constant {ARGUMENT_BUFFER_WRAPPER_STRUCT}<{base_tyname}>*"
)
}
}
}
@@ -2552,6 +2553,8 @@ impl<W: Write> Writer<W> {
} => true,
_ => false,
};
let accessing_wrapped_binding_array =
matches!(*base_ty, crate::TypeInner::BindingArray { .. });

self.put_access_chain(base, policy, context)?;
if accessing_wrapped_array {
@@ -2588,6 +2591,10 @@ impl<W: Write> Writer<W> {

write!(self.out, "]")?;

if accessing_wrapped_binding_array {
write!(self.out, ".{WRAPPED_ARRAY_FIELD}")?;
}

Ok(())
}
@@ -3701,7 +3708,18 @@ impl<W: Write> Writer<W> {
}

fn write_type_defs(&mut self, module: &crate::Module) -> BackendResult {
let mut generated_argument_buffer_wrapper = false;
for (handle, ty) in module.types.iter() {
if let crate::TypeInner::BindingArray { .. } = ty.inner {
if !generated_argument_buffer_wrapper {
writeln!(self.out, "template <typename T>")?;
writeln!(self.out, "struct {ARGUMENT_BUFFER_WRAPPER_STRUCT} {{")?;
writeln!(self.out, "{}T {WRAPPED_ARRAY_FIELD};", back::INDENT)?;
writeln!(self.out, "}};")?;
generated_argument_buffer_wrapper = true;
}
}

if !ty.needs_alias() {
continue;
}
@@ -5132,13 +5150,10 @@ template <typename A>
let target = options.get_resource_binding_target(ep, br);
let good = match target {
Some(target) => {
let binding_ty = match module.types[var.ty].inner {
crate::TypeInner::BindingArray { base, .. } => {
&module.types[base].inner
}
ref ty => ty,
};
match *binding_ty {
// We intentionally don't dereference binding_arrays here,
// so that binding arrays fall to the buffer location.

match module.types[var.ty].inner {
crate::TypeInner::Image { .. } => target.texture.is_some(),
crate::TypeInner::Sampler { .. } => {
target.sampler.is_some()
@@ -19,11 +19,11 @@
restrict_indexing: true
),
msl: (
lang_version: (2, 0),
lang_version: (3, 0),
per_entry_point_map: {
"main": (
resources: {
(group: 0, binding: 0): (texture: Some(0), binding_array_size: Some(10), mutable: false),
(group: 0, binding: 0): (buffer: Some(0), binding_array_size: Some(10), mutable: false),
},
sizes_buffer: None,
)
@@ -1,4 +1,4 @@
// language: metal2.0
// language: metal3.0
#include <metal_stdlib>
#include <simd/simd.h>
@@ -13,6 +13,10 @@ struct DefaultConstructible {
struct UniformIndex {
uint index;
};
template <typename T>
struct NagaArgumentBufferWrapper {
T inner;
};
struct FragmentIn {
uint index;
};
@@ -25,14 +29,14 @@ struct main_Output {
};
fragment main_Output main_(
main_Input varyings [[stage_in]]
, metal::array<metal::texture2d<float, metal::access::sample>, 10> texture_array_unbounded [[texture(0)]]
, metal::array<metal::texture2d<float, metal::access::sample>, 5> texture_array_bounded [[user(fake0)]]
, metal::array<metal::texture2d_array<float, metal::access::sample>, 5> texture_array_2darray [[user(fake0)]]
, metal::array<metal::texture2d_ms<float, metal::access::read>, 5> texture_array_multisampled [[user(fake0)]]
, metal::array<metal::depth2d<float, metal::access::sample>, 5> texture_array_depth [[user(fake0)]]
, metal::array<metal::texture2d<float, metal::access::write>, 5> texture_array_storage [[user(fake0)]]
, metal::array<metal::sampler, 5> samp [[user(fake0)]]
, metal::array<metal::sampler, 5> samp_comp [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::texture2d<float, metal::access::sample>>* texture_array_unbounded [[buffer(0)]]
, constant NagaArgumentBufferWrapper<metal::texture2d<float, metal::access::sample>>* texture_array_bounded [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::texture2d_array<float, metal::access::sample>>* texture_array_2darray [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::texture2d_ms<float, metal::access::read>>* texture_array_multisampled [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::depth2d<float, metal::access::sample>>* texture_array_depth [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::texture2d<float, metal::access::write>>* texture_array_storage [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::sampler>* samp [[user(fake0)]]
, constant NagaArgumentBufferWrapper<metal::sampler>* samp_comp [[user(fake0)]]
, constant UniformIndex& uni [[user(fake0)]]
) {
const FragmentIn fragment_in = { varyings.index };
@@ -45,116 +49,116 @@ fragment main_Output main_(
metal::float2 uv = metal::float2(0.0);
metal::int2 pix = metal::int2(0);
metal::uint2 _e22 = u2_;
u2_ = _e22 + metal::uint2(texture_array_unbounded[0].get_width(), texture_array_unbounded[0].get_height());
u2_ = _e22 + metal::uint2(texture_array_unbounded[0].inner.get_width(), texture_array_unbounded[0].inner.get_height());
metal::uint2 _e27 = u2_;
u2_ = _e27 + metal::uint2(texture_array_unbounded[uniform_index].get_width(), texture_array_unbounded[uniform_index].get_height());
u2_ = _e27 + metal::uint2(texture_array_unbounded[uniform_index].inner.get_width(), texture_array_unbounded[uniform_index].inner.get_height());
metal::uint2 _e32 = u2_;
u2_ = _e32 + metal::uint2(texture_array_unbounded[non_uniform_index].get_width(), texture_array_unbounded[non_uniform_index].get_height());
metal::float4 _e38 = texture_array_bounded[0].gather(samp[0], uv);
u2_ = _e32 + metal::uint2(texture_array_unbounded[non_uniform_index].inner.get_width(), texture_array_unbounded[non_uniform_index].inner.get_height());
metal::float4 _e38 = texture_array_bounded[0].inner.gather(samp[0].inner, uv);
metal::float4 _e39 = v4_;
v4_ = _e39 + _e38;
metal::float4 _e45 = texture_array_bounded[uniform_index].gather(samp[uniform_index], uv);
metal::float4 _e45 = texture_array_bounded[uniform_index].inner.gather(samp[uniform_index].inner, uv);
metal::float4 _e46 = v4_;
v4_ = _e46 + _e45;
metal::float4 _e52 = texture_array_bounded[non_uniform_index].gather(samp[non_uniform_index], uv);
metal::float4 _e52 = texture_array_bounded[non_uniform_index].inner.gather(samp[non_uniform_index].inner, uv);
metal::float4 _e53 = v4_;
v4_ = _e53 + _e52;
metal::float4 _e60 = texture_array_depth[0].gather_compare(samp_comp[0], uv, 0.0);
metal::float4 _e60 = texture_array_depth[0].inner.gather_compare(samp_comp[0].inner, uv, 0.0);
metal::float4 _e61 = v4_;
v4_ = _e61 + _e60;
metal::float4 _e68 = texture_array_depth[uniform_index].gather_compare(samp_comp[uniform_index], uv, 0.0);
metal::float4 _e68 = texture_array_depth[uniform_index].inner.gather_compare(samp_comp[uniform_index].inner, uv, 0.0);
metal::float4 _e69 = v4_;
v4_ = _e69 + _e68;
metal::float4 _e76 = texture_array_depth[non_uniform_index].gather_compare(samp_comp[non_uniform_index], uv, 0.0);
metal::float4 _e76 = texture_array_depth[non_uniform_index].inner.gather_compare(samp_comp[non_uniform_index].inner, uv, 0.0);
metal::float4 _e77 = v4_;
v4_ = _e77 + _e76;
metal::float4 _e82 = (uint(0) < texture_array_unbounded[0].get_num_mip_levels() && metal::all(metal::uint2(pix) < metal::uint2(texture_array_unbounded[0].get_width(0), texture_array_unbounded[0].get_height(0))) ? texture_array_unbounded[0].read(metal::uint2(pix), 0): DefaultConstructible());
metal::float4 _e82 = (uint(0) < texture_array_unbounded[0].inner.get_num_mip_levels() && metal::all(metal::uint2(pix) < metal::uint2(texture_array_unbounded[0].inner.get_width(0), texture_array_unbounded[0].inner.get_height(0))) ? texture_array_unbounded[0].inner.read(metal::uint2(pix), 0): DefaultConstructible());
metal::float4 _e83 = v4_;
v4_ = _e83 + _e82;
metal::float4 _e88 = (uint(0) < texture_array_unbounded[uniform_index].get_num_mip_levels() && metal::all(metal::uint2(pix) < metal::uint2(texture_array_unbounded[uniform_index].get_width(0), texture_array_unbounded[uniform_index].get_height(0))) ? texture_array_unbounded[uniform_index].read(metal::uint2(pix), 0): DefaultConstructible());
metal::float4 _e88 = (uint(0) < texture_array_unbounded[uniform_index].inner.get_num_mip_levels() && metal::all(metal::uint2(pix) < metal::uint2(texture_array_unbounded[uniform_index].inner.get_width(0), texture_array_unbounded[uniform_index].inner.get_height(0))) ? texture_array_unbounded[uniform_index].inner.read(metal::uint2(pix), 0): DefaultConstructible());
metal::float4 _e89 = v4_;
v4_ = _e89 + _e88;
metal::float4 _e94 = (uint(0) < texture_array_unbounded[non_uniform_index].get_num_mip_levels() && metal::all(metal::uint2(pix) < metal::uint2(texture_array_unbounded[non_uniform_index].get_width(0), texture_array_unbounded[non_uniform_index].get_height(0))) ? texture_array_unbounded[non_uniform_index].read(metal::uint2(pix), 0): DefaultConstructible());
metal::float4 _e94 = (uint(0) < texture_array_unbounded[non_uniform_index].inner.get_num_mip_levels() && metal::all(metal::uint2(pix) < metal::uint2(texture_array_unbounded[non_uniform_index].inner.get_width(0), texture_array_unbounded[non_uniform_index].inner.get_height(0))) ? texture_array_unbounded[non_uniform_index].inner.read(metal::uint2(pix), 0): DefaultConstructible());
metal::float4 _e95 = v4_;
v4_ = _e95 + _e94;
uint _e100 = u1_;
u1_ = _e100 + texture_array_2darray[0].get_array_size();
u1_ = _e100 + texture_array_2darray[0].inner.get_array_size();
uint _e105 = u1_;
u1_ = _e105 + texture_array_2darray[uniform_index].get_array_size();
u1_ = _e105 + texture_array_2darray[uniform_index].inner.get_array_size();
uint _e110 = u1_;
u1_ = _e110 + texture_array_2darray[non_uniform_index].get_array_size();
u1_ = _e110 + texture_array_2darray[non_uniform_index].inner.get_array_size();
uint _e115 = u1_;
u1_ = _e115 + texture_array_bounded[0].get_num_mip_levels();
u1_ = _e115 + texture_array_bounded[0].inner.get_num_mip_levels();
uint _e120 = u1_;
u1_ = _e120 + texture_array_bounded[uniform_index].get_num_mip_levels();
u1_ = _e120 + texture_array_bounded[uniform_index].inner.get_num_mip_levels();
uint _e125 = u1_;
u1_ = _e125 + texture_array_bounded[non_uniform_index].get_num_mip_levels();
u1_ = _e125 + texture_array_bounded[non_uniform_index].inner.get_num_mip_levels();
uint _e130 = u1_;
u1_ = _e130 + texture_array_multisampled[0].get_num_samples();
u1_ = _e130 + texture_array_multisampled[0].inner.get_num_samples();
uint _e135 = u1_;
u1_ = _e135 + texture_array_multisampled[uniform_index].get_num_samples();
u1_ = _e135 + texture_array_multisampled[uniform_index].inner.get_num_samples();
uint _e140 = u1_;
u1_ = _e140 + texture_array_multisampled[non_uniform_index].get_num_samples();
metal::float4 _e146 = texture_array_bounded[0].sample(samp[0], uv);
u1_ = _e140 + texture_array_multisampled[non_uniform_index].inner.get_num_samples();
metal::float4 _e146 = texture_array_bounded[0].inner.sample(samp[0].inner, uv);
metal::float4 _e147 = v4_;
v4_ = _e147 + _e146;
metal::float4 _e153 = texture_array_bounded[uniform_index].sample(samp[uniform_index], uv);
metal::float4 _e153 = texture_array_bounded[uniform_index].inner.sample(samp[uniform_index].inner, uv);
metal::float4 _e154 = v4_;
v4_ = _e154 + _e153;
metal::float4 _e160 = texture_array_bounded[non_uniform_index].sample(samp[non_uniform_index], uv);
metal::float4 _e160 = texture_array_bounded[non_uniform_index].inner.sample(samp[non_uniform_index].inner, uv);
metal::float4 _e161 = v4_;
v4_ = _e161 + _e160;
metal::float4 _e168 = texture_array_bounded[0].sample(samp[0], uv, metal::bias(0.0));
metal::float4 _e168 = texture_array_bounded[0].inner.sample(samp[0].inner, uv, metal::bias(0.0));
metal::float4 _e169 = v4_;
v4_ = _e169 + _e168;
metal::float4 _e176 = texture_array_bounded[uniform_index].sample(samp[uniform_index], uv, metal::bias(0.0));
metal::float4 _e176 = texture_array_bounded[uniform_index].inner.sample(samp[uniform_index].inner, uv, metal::bias(0.0));
metal::float4 _e177 = v4_;
v4_ = _e177 + _e176;
metal::float4 _e184 = texture_array_bounded[non_uniform_index].sample(samp[non_uniform_index], uv, metal::bias(0.0));
metal::float4 _e184 = texture_array_bounded[non_uniform_index].inner.sample(samp[non_uniform_index].inner, uv, metal::bias(0.0));
metal::float4 _e185 = v4_;
v4_ = _e185 + _e184;
float _e192 = texture_array_depth[0].sample_compare(samp_comp[0], uv, 0.0);
float _e192 = texture_array_depth[0].inner.sample_compare(samp_comp[0].inner, uv, 0.0);
float _e193 = v1_;
v1_ = _e193 + _e192;
float _e200 = texture_array_depth[uniform_index].sample_compare(samp_comp[uniform_index], uv, 0.0);
float _e200 = texture_array_depth[uniform_index].inner.sample_compare(samp_comp[uniform_index].inner, uv, 0.0);
float _e201 = v1_;
v1_ = _e201 + _e200;
float _e208 = texture_array_depth[non_uniform_index].sample_compare(samp_comp[non_uniform_index], uv, 0.0);
float _e208 = texture_array_depth[non_uniform_index].inner.sample_compare(samp_comp[non_uniform_index].inner, uv, 0.0);
float _e209 = v1_;
v1_ = _e209 + _e208;
float _e216 = texture_array_depth[0].sample_compare(samp_comp[0], uv, 0.0);
float _e216 = texture_array_depth[0].inner.sample_compare(samp_comp[0].inner, uv, 0.0);
float _e217 = v1_;
v1_ = _e217 + _e216;
float _e224 = texture_array_depth[uniform_index].sample_compare(samp_comp[uniform_index], uv, 0.0);
float _e224 = texture_array_depth[uniform_index].inner.sample_compare(samp_comp[uniform_index].inner, uv, 0.0);
float _e225 = v1_;
v1_ = _e225 + _e224;
float _e232 = texture_array_depth[non_uniform_index].sample_compare(samp_comp[non_uniform_index], uv, 0.0);
float _e232 = texture_array_depth[non_uniform_index].inner.sample_compare(samp_comp[non_uniform_index].inner, uv, 0.0);
float _e233 = v1_;
v1_ = _e233 + _e232;
metal::float4 _e239 = texture_array_bounded[0].sample(samp[0], uv, metal::gradient2d(uv, uv));
metal::float4 _e239 = texture_array_bounded[0].inner.sample(samp[0].inner, uv, metal::gradient2d(uv, uv));
metal::float4 _e240 = v4_;
v4_ = _e240 + _e239;
metal::float4 _e246 = texture_array_bounded[uniform_index].sample(samp[uniform_index], uv, metal::gradient2d(uv, uv));
metal::float4 _e246 = texture_array_bounded[uniform_index].inner.sample(samp[uniform_index].inner, uv, metal::gradient2d(uv, uv));
metal::float4 _e247 = v4_;
v4_ = _e247 + _e246;
metal::float4 _e253 = texture_array_bounded[non_uniform_index].sample(samp[non_uniform_index], uv, metal::gradient2d(uv, uv));
metal::float4 _e253 = texture_array_bounded[non_uniform_index].inner.sample(samp[non_uniform_index].inner, uv, metal::gradient2d(uv, uv));
metal::float4 _e254 = v4_;
v4_ = _e254 + _e253;
metal::float4 _e261 = texture_array_bounded[0].sample(samp[0], uv, metal::level(0.0));
metal::float4 _e261 = texture_array_bounded[0].inner.sample(samp[0].inner, uv, metal::level(0.0));
metal::float4 _e262 = v4_;
v4_ = _e262 + _e261;
metal::float4 _e269 = texture_array_bounded[uniform_index].sample(samp[uniform_index], uv, metal::level(0.0));
metal::float4 _e269 = texture_array_bounded[uniform_index].inner.sample(samp[uniform_index].inner, uv, metal::level(0.0));
metal::float4 _e270 = v4_;
v4_ = _e270 + _e269;
metal::float4 _e277 = texture_array_bounded[non_uniform_index].sample(samp[non_uniform_index], uv, metal::level(0.0));
metal::float4 _e277 = texture_array_bounded[non_uniform_index].inner.sample(samp[non_uniform_index].inner, uv, metal::level(0.0));
metal::float4 _e278 = v4_;
v4_ = _e278 + _e277;
metal::float4 _e282 = v4_;
texture_array_storage[0].write(_e282, metal::uint2(pix));
texture_array_storage[0].inner.write(_e282, metal::uint2(pix));
metal::float4 _e285 = v4_;
texture_array_storage[uniform_index].write(_e285, metal::uint2(pix));
texture_array_storage[uniform_index].inner.write(_e285, metal::uint2(pix));
metal::float4 _e288 = v4_;
texture_array_storage[non_uniform_index].write(_e288, metal::uint2(pix));
texture_array_storage[non_uniform_index].inner.write(_e288, metal::uint2(pix));
metal::uint2 _e289 = u2_;
uint _e290 = u1_;
metal::float2 v2_ = static_cast<metal::float2>(_e289 + metal::uint2(_e290));
@@ -377,12 +377,6 @@ const RESOURCE_HEAP_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::macOS_GPUFamily1_v3,
];

const ARGUMENT_BUFFER_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v4,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::macOS_GPUFamily1_v3,
];

const MUTABLE_COMPARISON_SAMPLER_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::macOS_GPUFamily1_v1,
@@ -610,7 +604,7 @@ impl super::PrivateCapabilities {
},
msaa_apple7: family_check && device.supports_family(MTLGPUFamily::Apple7),
resource_heaps: Self::supports_any(device, RESOURCE_HEAP_SUPPORT),
argument_buffers: Self::supports_any(device, ARGUMENT_BUFFER_SUPPORT),
argument_buffers: device.argument_buffers_support(),
shared_textures: !os_is_mac,
mutable_comparison_samplers: Self::supports_any(
device,
@@ -905,18 +899,12 @@ impl super::PrivateCapabilities {
features.set(
F::TEXTURE_BINDING_ARRAY
| F::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING
| F::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
self.msl_version >= MTLLanguageVersion::V2_0 && self.supports_arrays_of_textures,
| F::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING
| F::PARTIALLY_BOUND_BINDING_ARRAY,
self.msl_version >= MTLLanguageVersion::V3_0
&& self.supports_arrays_of_textures
&& self.argument_buffers as u64 >= metal::MTLArgumentBuffersTier::Tier2 as u64,
);
//// XXX: this is technically not true, as read-only storage images can be used in arrays
//// on precisely the same conditions that sampled textures can. But texel fetch from a
//// sampled texture is a thing; should we bother introducing another feature flag?
if self.msl_version >= MTLLanguageVersion::V2_2
&& self.supports_arrays_of_textures
&& self.supports_arrays_of_textures_write
{
features.insert(F::STORAGE_RESOURCE_BINDING_ARRAY);
}
features.set(
F::SHADER_INT64,
self.int64 && self.msl_version >= MTLLanguageVersion::V2_3,
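Note, added for context and not part of the diff: after this hunk, Metal only advertises TEXTURE_BINDING_ARRAY together with the newly exposed PARTIALLY_BOUND_BINDING_ARRAY when the device supports MSL 3.0 and Tier 2 argument buffers. A minimal, hypothetical sketch of how an application would opt into these features through the wgpu API (the helper name and label are made up):

    // Hypothetical helper: request the binding-array features this hunk exposes.
    async fn request_bindless_device(
        adapter: &wgpu::Adapter,
    ) -> Result<(wgpu::Device, wgpu::Queue), wgpu::RequestDeviceError> {
        let needed = wgpu::Features::TEXTURE_BINDING_ARRAY
            | wgpu::Features::PARTIALLY_BOUND_BINDING_ARRAY;
        // On Metal these are only reported for MSL >= 3.0 with Tier 2 argument buffers.
        assert!(adapter.features().contains(needed));
        adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    label: Some("bindless device"),
                    required_features: needed,
                    ..Default::default()
                },
                None,
            )
            .await
    }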
@@ -750,6 +750,11 @@ impl crate::CommandEncoder for super::CommandEncoder {
Some(res.as_native()),
);
}

// Call useResource on all textures and buffers used indirectly so they are alive
for (resource, use_info) in group.resources_to_use.iter() {
encoder.use_resource_at(resource.as_native(), use_info.uses, use_info.stages);
}
}

if let Some(ref encoder) = self.state.compute {
@@ -807,6 +812,14 @@ impl crate::CommandEncoder for super::CommandEncoder {
Some(res.as_native()),
);
}

// Call useResource on all textures and buffers used indirectly so they are alive
for (resource, use_info) in group.resources_to_use.iter() {
if !use_info.visible_in_compute {
continue;
}
encoder.use_resource(resource.as_native(), use_info.uses);
}
}
}
@@ -331,3 +331,31 @@ pub fn get_blit_option(
metal::MTLBlitOption::None
}
}

pub fn map_render_stages(stage: wgt::ShaderStages) -> metal::MTLRenderStages {
let mut raw_stages = metal::MTLRenderStages::empty();

if stage.contains(wgt::ShaderStages::VERTEX) {
raw_stages |= metal::MTLRenderStages::Vertex;
}
if stage.contains(wgt::ShaderStages::FRAGMENT) {
raw_stages |= metal::MTLRenderStages::Fragment;
}

raw_stages
}

pub fn map_resource_usage(ty: &wgt::BindingType) -> metal::MTLResourceUsage {
match ty {
wgt::BindingType::Texture { .. } => metal::MTLResourceUsage::Sample,
wgt::BindingType::StorageTexture { access, .. } => match access {
wgt::StorageTextureAccess::WriteOnly => metal::MTLResourceUsage::Write,
wgt::StorageTextureAccess::ReadOnly => metal::MTLResourceUsage::Read,
wgt::StorageTextureAccess::ReadWrite => {
metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write
}
},
wgt::BindingType::Sampler(..) => metal::MTLResourceUsage::empty(),
_ => unreachable!(),
}
}
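To make the intent of the two new helpers concrete, here is a small illustrative sketch (not from the PR) of how a layout entry's visibility and binding type turn into the arguments that later feed useResource; the entry values are made up:

    // Illustrative only: a sampled texture visible to vertex and fragment stages.
    fn example_use_resource_args() -> (metal::MTLRenderStages, metal::MTLResourceUsage) {
        let visibility = wgt::ShaderStages::VERTEX | wgt::ShaderStages::FRAGMENT;
        let ty = wgt::BindingType::Texture {
            sample_type: wgt::TextureSampleType::Float { filterable: true },
            view_dimension: wgt::TextureViewDimension::D2,
            multisampled: false,
        };
        // stages == Vertex | Fragment, uses == Sample; both end up in UseResourceInfo.
        (map_render_stages(visibility), map_resource_usage(&ty))
    }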
@@ -1,7 +1,6 @@
use parking_lot::Mutex;
use std::{
num::NonZeroU32,
ptr,
ptr::NonNull,
sync::{atomic, Arc},
thread, time,
};
@@ -10,6 +9,8 @@ use super::conv;
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

use metal::foreign_types::ForeignType;

type DeviceResult<T> = Result<T, crate::DeviceError>;

struct CompiledShader {
@@ -384,7 +385,7 @@ impl crate::Device for super::Device {
let ptr = buffer.raw.contents().cast::<u8>();
assert!(!ptr.is_null());
Ok(crate::BufferMapping {
ptr: ptr::NonNull::new(unsafe { ptr.offset(range.start as isize) }).unwrap(),
ptr: NonNull::new(unsafe { ptr.offset(range.start as isize) }).unwrap(),
is_coherent: true,
})
}
@@ -580,6 +581,9 @@ impl crate::Device for super::Device {
if let Some(label) = desc.label {
descriptor.set_label(label);
}
if self.features.contains(wgt::Features::TEXTURE_BINDING_ARRAY) {
descriptor.set_support_argument_buffers(true);
}
let raw = self.shared.device.lock().new_sampler(&descriptor);

self.counters.samplers.add(1);
@@ -698,36 +702,41 @@ impl crate::Device for super::Device {
}

let mut target = naga::back::msl::BindTarget::default();
let count = entry.count.map_or(1, NonZeroU32::get);
target.binding_array_size = entry.count.map(NonZeroU32::get);
match entry.ty {
wgt::BindingType::Buffer { ty, .. } => {
target.buffer = Some(info.counters.buffers as _);
info.counters.buffers += count;
if let wgt::BufferBindingType::Storage { read_only } = ty {
target.mutable = !read_only;
// Bindless path
if let Some(_) = entry.count {
target.buffer = Some(info.counters.buffers as _);
info.counters.buffers += 1;
} else {
match entry.ty {
wgt::BindingType::Buffer { ty, .. } => {
target.buffer = Some(info.counters.buffers as _);
info.counters.buffers += 1;
if let wgt::BufferBindingType::Storage { read_only } = ty {
target.mutable = !read_only;
}
}
wgt::BindingType::Sampler { .. } => {
target.sampler =
Some(naga::back::msl::BindSamplerTarget::Resource(
info.counters.samplers as _,
));
info.counters.samplers += 1;
}
wgt::BindingType::Texture { .. } => {
target.texture = Some(info.counters.textures as _);
info.counters.textures += 1;
}
wgt::BindingType::StorageTexture { access, .. } => {
target.texture = Some(info.counters.textures as _);
info.counters.textures += 1;
target.mutable = match access {
wgt::StorageTextureAccess::ReadOnly => false,
wgt::StorageTextureAccess::WriteOnly => true,
wgt::StorageTextureAccess::ReadWrite => true,
};
}
wgt::BindingType::AccelerationStructure => unimplemented!(),
}
wgt::BindingType::Sampler { .. } => {
target.sampler = Some(naga::back::msl::BindSamplerTarget::Resource(
info.counters.samplers as _,
));
info.counters.samplers += count;
}
wgt::BindingType::Texture { .. } => {
target.texture = Some(info.counters.textures as _);
info.counters.textures += count;
}
wgt::BindingType::StorageTexture { access, .. } => {
target.texture = Some(info.counters.textures as _);
info.counters.textures += count;
target.mutable = match access {
wgt::StorageTextureAccess::ReadOnly => false,
wgt::StorageTextureAccess::WriteOnly => true,
wgt::StorageTextureAccess::ReadWrite => true,
};
}
wgt::BindingType::AccelerationStructure => unimplemented!(),
}

let br = naga::ResourceBinding {
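The upshot of the bindless branch above is that an entire binding array now maps to a single buffer slot on the naga side, with the array length carried in binding_array_size, which is exactly what the updated .ron test configuration earlier in this diff shows. A rough sketch of the resulting target (slot numbers are illustrative):

    // Sketch: BindTarget produced for a 10-element texture binding array.
    fn example_binding_array_target() -> naga::back::msl::BindTarget {
        naga::back::msl::BindTarget {
            buffer: Some(0),              // the argument buffer occupies one buffer slot
            binding_array_size: Some(10), // from `entry.count`
            mutable: false,
            ..Default::default()          // texture/sampler slots stay unused
        }
    }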
@@ -805,90 +814,162 @@ impl crate::Device for super::Device {
super::AccelerationStructure,
>,
) -> DeviceResult<super::BindGroup> {
let mut bg = super::BindGroup::default();
for (&stage, counter) in super::NAGA_STAGES.iter().zip(bg.counters.iter_mut()) {
let stage_bit = map_naga_stage(stage);
let mut dynamic_offsets_count = 0u32;
let layout_and_entry_iter = desc.entries.iter().map(|entry| {
let layout = desc
.layout
.entries
.iter()
.find(|layout_entry| layout_entry.binding == entry.binding)
.expect("internal error: no layout entry found with binding slot");
(entry, layout)
});
for (entry, layout) in layout_and_entry_iter {
let size = layout.count.map_or(1, |c| c.get());
if let wgt::BindingType::Buffer {
has_dynamic_offset: true,
..
} = layout.ty
{
dynamic_offsets_count += size;
}
if !layout.visibility.contains(stage_bit) {
continue;
}
match layout.ty {
wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
..
} => {
let start = entry.resource_index as usize;
let end = start + size as usize;
bg.buffers
.extend(desc.buffers[start..end].iter().map(|source| {
// Given the restrictions on `BufferBinding::offset`,
// this should never be `None`.
let remaining_size =
wgt::BufferSize::new(source.buffer.size - source.offset);
let binding_size = match ty {
wgt::BufferBindingType::Storage { .. } => {
source.size.or(remaining_size)
}
_ => None,
};
super::BufferResource {
ptr: source.buffer.as_raw(),
offset: source.offset,
dynamic_index: if has_dynamic_offset {
Some(dynamic_offsets_count - 1)
} else {
None
},
binding_size,
binding_location: layout.binding,
}
}));
counter.buffers += 1;
}
wgt::BindingType::Sampler { .. } => {
let start = entry.resource_index as usize;
let end = start + size as usize;
bg.samplers
.extend(desc.samplers[start..end].iter().map(|samp| samp.as_raw()));
counter.samplers += size;
}
wgt::BindingType::Texture { .. } | wgt::BindingType::StorageTexture { .. } => {
let start = entry.resource_index as usize;
let end = start + size as usize;
bg.textures.extend(
desc.textures[start..end]
.iter()
.map(|tex| tex.view.as_raw()),
objc::rc::autoreleasepool(|| {
let mut bg = super::BindGroup::default();
for (&stage, counter) in super::NAGA_STAGES.iter().zip(bg.counters.iter_mut()) {
let stage_bit = map_naga_stage(stage);
let mut dynamic_offsets_count = 0u32;
let layout_and_entry_iter = desc.entries.iter().map(|entry| {
let layout = desc
.layout
.entries
.iter()
.find(|layout_entry| layout_entry.binding == entry.binding)
.expect("internal error: no layout entry found with binding slot");
(entry, layout)
});
for (entry, layout) in layout_and_entry_iter {
// Bindless path
if layout.count.is_some() {
let count = entry.count;

let stages = conv::map_render_stages(layout.visibility);
let uses = conv::map_resource_usage(&layout.ty);

// Create argument buffer for this array
let buffer = self.shared.device.lock().new_buffer(
8 * count as u64,
metal::MTLResourceOptions::HazardTrackingModeUntracked
| metal::MTLResourceOptions::StorageModeShared,
);
counter.textures += size;

let contents: &mut [metal::MTLResourceID] = unsafe {
std::slice::from_raw_parts_mut(buffer.contents().cast(), count as usize)
};

match layout.ty {
wgt::BindingType::Texture { .. }
| wgt::BindingType::StorageTexture { .. } => {
let start = entry.resource_index as usize;
let end = start + count as usize;
let textures = &desc.textures[start..end];

for (idx, tex) in textures.iter().enumerate() {
contents[idx] = tex.view.raw.gpu_resource_id();

let use_info = bg
.resources_to_use
.entry(tex.view.as_raw().cast())
.or_default();
use_info.stages |= stages;
use_info.uses |= uses;
use_info.visible_in_compute |=
layout.visibility.contains(wgt::ShaderStages::COMPUTE);
}
}
wgt::BindingType::Sampler { .. } => {
let start = entry.resource_index as usize;
let end = start + count as usize;
let samplers = &desc.samplers[start..end];

for (idx, &sampler) in samplers.iter().enumerate() {
contents[idx] = sampler.raw.gpu_resource_id();
// Samplers aren't resources like buffers and textures, so don't
// need to be passed to useResource
}
}
_ => {
unimplemented!();
}
}

bg.buffers.push(super::BufferResource {
ptr: unsafe { NonNull::new_unchecked(buffer.as_ptr()) },
offset: 0,
dynamic_index: None,
binding_size: None,
binding_location: layout.binding,
});
counter.buffers += 1;

bg.argument_buffers.push(buffer)
}
// Bindfull path
else {
if let wgt::BindingType::Buffer {
has_dynamic_offset: true,
..
} = layout.ty
{
dynamic_offsets_count += 1;
}
if !layout.visibility.contains(stage_bit) {
continue;
}
match layout.ty {
wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
..
} => {
let start = entry.resource_index as usize;
let end = start + 1;
bg.buffers
.extend(desc.buffers[start..end].iter().map(|source| {
// Given the restrictions on `BufferBinding::offset`,
// this should never be `None`.
let remaining_size = wgt::BufferSize::new(
source.buffer.size - source.offset,
);
let binding_size = match ty {
wgt::BufferBindingType::Storage { .. } => {
source.size.or(remaining_size)
}
_ => None,
};
super::BufferResource {
ptr: source.buffer.as_raw(),
offset: source.offset,
dynamic_index: if has_dynamic_offset {
Some(dynamic_offsets_count - 1)
} else {
None
},
binding_size,
binding_location: layout.binding,
}
}));
counter.buffers += 1;
}
wgt::BindingType::Sampler { .. } => {
let start = entry.resource_index as usize;
let end = start + 1;
bg.samplers.extend(
desc.samplers[start..end].iter().map(|samp| samp.as_raw()),
);
counter.samplers += 1;
}
wgt::BindingType::Texture { .. }
| wgt::BindingType::StorageTexture { .. } => {
let start = entry.resource_index as usize;
let end = start + 1;
bg.textures.extend(
desc.textures[start..end]
.iter()
.map(|tex| tex.view.as_raw()),
);
counter.textures += 1;
}
wgt::BindingType::AccelerationStructure => unimplemented!(),
}
}
wgt::BindingType::AccelerationStructure => unimplemented!(),
}
}
}

self.counters.bind_groups.add(1);
self.counters.bind_groups.add(1);

Ok(bg)
Ok(bg)
})
}

unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
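Condensed sketch (not the actual wgpu-hal code; hazard-tracking options omitted) of what the bindless branch above does: a Tier 2 argument buffer is simply `count` 64-bit MTLResourceID handles written into a shared buffer, and because that buffer is pushed onto bg.buffers it is later bound through the ordinary buffer path, matching the `constant NagaArgumentBufferWrapper<T>*` parameter naga now emits:

    // Sketch: encode texture handles into a Tier 2 argument buffer.
    fn encode_texture_argument_buffer(
        device: &metal::Device,
        views: &[&metal::TextureRef],
    ) -> metal::Buffer {
        // One 64-bit MTLResourceID per array element (hence `8 * count` above).
        let buffer = device.new_buffer(
            (views.len() * std::mem::size_of::<metal::MTLResourceID>()) as u64,
            metal::MTLResourceOptions::StorageModeShared,
        );
        let contents: &mut [metal::MTLResourceID] = unsafe {
            std::slice::from_raw_parts_mut(buffer.contents().cast(), views.len())
        };
        for (slot, view) in views.iter().enumerate() {
            contents[slot] = view.gpu_resource_id();
        }
        // The shader sees `constant NagaArgumentBufferWrapper<T>*`, so this buffer is
        // bound like any other buffer (e.g. setFragmentBuffer) when the bind group is set.
        buffer
    }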
@@ -26,6 +26,7 @@ mod surface;
mod time;

use std::{
collections::HashMap,
fmt, iter, ops,
ptr::NonNull,
sync::{atomic, Arc},
@@ -199,7 +200,7 @@ struct PrivateCapabilities {
msaa_apple3: bool,
msaa_apple7: bool,
resource_heaps: bool,
argument_buffers: bool,
argument_buffers: metal::MTLArgumentBuffersTier,
shared_textures: bool,
mutable_comparison_samplers: bool,
sampler_clamp_to_border: bool,
@@ -651,10 +652,23 @@ trait AsNative {
fn as_native(&self) -> &Self::Native;
}

type ResourcePtr = NonNull<metal::MTLResource>;
type BufferPtr = NonNull<metal::MTLBuffer>;
type TexturePtr = NonNull<metal::MTLTexture>;
type SamplerPtr = NonNull<metal::MTLSamplerState>;

impl AsNative for ResourcePtr {
type Native = metal::ResourceRef;
#[inline]
fn from(native: &Self::Native) -> Self {
unsafe { NonNull::new_unchecked(native.as_ptr()) }
}
#[inline]
fn as_native(&self) -> &Self::Native {
unsafe { Self::Native::from_ptr(self.as_ptr()) }
}
}

impl AsNative for BufferPtr {
type Native = metal::BufferRef;
#[inline]
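The new ResourcePtr alias and its AsNative impl exist so that resources_to_use can key on a type-erased MTLResource pointer and hand a metal::ResourceRef back to useResource. A tiny sketch of that round trip (hypothetical function, mirroring the `.cast()` and `.as_native()` calls used in this PR):

    // Sketch: type-erase a texture pointer for the map key, then borrow it back.
    fn erase_and_restore(texture: TexturePtr) {
        // NonNull::cast reinterprets the pointee type without changing the address.
        let as_resource: ResourcePtr = texture.cast();
        // `as_native` yields the &metal::ResourceRef passed to use_resource / use_resource_at.
        let _resource_ref: &metal::ResourceRef = as_resource.as_native();
    }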
@@ -710,12 +724,32 @@ struct BufferResource {
binding_location: u32,
}

#[derive(Debug)]
struct UseResourceInfo {
uses: metal::MTLResourceUsage,
stages: metal::MTLRenderStages,
visible_in_compute: bool,
}

impl Default for UseResourceInfo {
fn default() -> Self {
Self {
uses: metal::MTLResourceUsage::empty(),
stages: metal::MTLRenderStages::empty(),
visible_in_compute: false,
}
}
}

#[derive(Debug, Default)]
pub struct BindGroup {
counters: MultiStageResourceCounters,
buffers: Vec<BufferResource>,
samplers: Vec<SamplerPtr>,
textures: Vec<TexturePtr>,

argument_buffers: Vec<metal::Buffer>,
resources_to_use: HashMap<ResourcePtr, UseResourceInfo>,
}

impl crate::DynBindGroup for BindGroup {}