Skip test_multithreaded_compute on MoltenVK. (#4096)

Co-authored-by: Connor Fitzgerald <connorwadefitzgerald@gmail.com>
Jim Blandy 2023-09-03 23:54:39 -04:00 committed by GitHub
parent 332cd0325d
commit 54a7f0eac9
21 changed files with 449 additions and 279 deletions

View File

@ -284,6 +284,7 @@ jobs:
done
- uses: actions/upload-artifact@v3
if: always() # We want artifacts even if the tests fail.
with:
name: comparison-images
path: |

View File

@ -114,6 +114,10 @@ By @Valaphee in [#3402](https://github.com/gfx-rs/wgpu/pull/3402)
- Ensure that limit requests and reporting is done correctly. By @OptimisticPeach in [#4107](https://github.com/gfx-rs/wgpu/pull/4107)
#### Testing
- Skip `test_multithreaded_compute` on MoltenVK. By @jimblandy in [#4096](https://github.com/gfx-rs/wgpu/pull/4096).
### Documentation
- Add an overview of `RenderPass` and how render state works. By @kpreid in [#4055](https://github.com/gfx-rs/wgpu/pull/4055)

View File

@ -345,7 +345,7 @@ fn boids() {
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
// Lots of validation errors, maybe related to https://github.com/gfx-rs/wgpu/issues/3160
.molten_vk_failure(),
.expect_fail(wgpu_test::FailureCase::molten_vk()),
comparisons: &[wgpu_test::ComparisonType::Mean(0.005)],
});
}

View File

@ -625,7 +625,7 @@ pub fn test<E: Example>(mut params: FrameworkRefTest) {
wgpu_test::image::compare_image_output(
env!("CARGO_MANIFEST_DIR").to_string() + "/../../" + params.image_path,
ctx.adapter_info.backend,
&ctx.adapter_info,
params.width,
params.height,
&bytes,

View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use super::*;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{initialize_test, FailureCase, TestParameters};
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
@ -13,7 +13,7 @@ fn test_compute_1() {
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.specific_failure(None, None, Some("V3D"), true),
.skip(FailureCase::adapter("V3D")),
|ctx| {
let input = &[1, 2, 3, 4];
@ -35,7 +35,7 @@ fn test_compute_2() {
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.specific_failure(None, None, Some("V3D"), true),
.skip(FailureCase::adapter("V3D")),
|ctx| {
let input = &[5, 23, 10, 9];
@ -57,7 +57,7 @@ fn test_compute_overflow() {
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.specific_failure(None, None, Some("V3D"), true),
.skip(FailureCase::adapter("V3D")),
|ctx| {
let input = &[77031, 837799, 8400511, 63728127];
pollster::block_on(assert_execute_gpu(
@ -78,16 +78,15 @@ fn test_multithreaded_compute() {
.downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
.limits(wgpu::Limits::downlevel_defaults())
.features(wgpu::Features::TIMESTAMP_QUERY)
.specific_failure(None, None, Some("V3D"), true)
.skip(FailureCase::adapter("V3D"))
// https://github.com/gfx-rs/wgpu/issues/3944
.specific_failure(
Some(wgpu::Backends::VULKAN),
None,
Some("swiftshader"),
true,
)
.skip(FailureCase::backend_adapter(
wgpu::Backends::VULKAN,
"swiftshader",
))
// https://github.com/gfx-rs/wgpu/issues/3250
.specific_failure(Some(wgpu::Backends::GL), None, Some("llvmpipe"), true),
.skip(FailureCase::backend_adapter(wgpu::Backends::GL, "llvmpipe"))
.skip(FailureCase::molten_vk()),
|ctx| {
use std::{sync::mpsc, thread, time::Duration};
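For readers reconstructing the net effect of this hunk: the old specific_failure(..., true) calls become explicit skip(FailureCase::...) calls, and a new MoltenVK skip is appended. A sketch of the resulting builder chain, assembled from the lines above rather than copied verbatim from the source:

TestParameters::default()
    .downlevel_flags(wgpu::DownlevelFlags::COMPUTE_SHADERS)
    .limits(wgpu::Limits::downlevel_defaults())
    .features(wgpu::Features::TIMESTAMP_QUERY)
    .skip(FailureCase::adapter("V3D"))
    // https://github.com/gfx-rs/wgpu/issues/3944
    .skip(FailureCase::backend_adapter(wgpu::Backends::VULKAN, "swiftshader"))
    // https://github.com/gfx-rs/wgpu/issues/3250
    .skip(FailureCase::backend_adapter(wgpu::Backends::GL, "llvmpipe"))
    // New in this commit: skip under MoltenVK on macOS.
    .skip(FailureCase::molten_vk())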

View File

@ -521,7 +521,7 @@ fn mipmap() {
height: 768,
optional_features: wgpu::Features::default(),
base_test_parameters: wgpu_test::TestParameters::default()
.backend_failure(wgpu::Backends::GL),
.expect_fail(wgpu_test::FailureCase::backend(wgpu::Backends::GL)),
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}
@ -535,7 +535,7 @@ fn mipmap_query() {
height: 768,
optional_features: QUERY_FEATURES,
base_test_parameters: wgpu_test::TestParameters::default()
.backend_failure(wgpu::Backends::GL),
.expect_fail(wgpu_test::FailureCase::backend(wgpu::Backends::GL)),
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}

View File

@ -12,6 +12,9 @@ use std::{borrow::Cow, iter};
use bytemuck::{Pod, Zeroable};
use wgpu::util::DeviceExt;
#[cfg(test)]
use wgpu_test::FailureCase;
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Vertex {
@ -326,7 +329,11 @@ fn msaa_line() {
optional_features: wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
base_test_parameters: wgpu_test::TestParameters::default()
// AMD seems to render nothing on DX12 https://github.com/gfx-rs/wgpu/issues/3838
.specific_failure(Some(wgpu::Backends::DX12), Some(0x1002), None, false),
.expect_fail(FailureCase {
backends: Some(wgpu::Backends::DX12),
vendor: Some(0x1002),
..FailureCase::default()
}),
// There's a lot of natural variance so we check the weighted median too to differentiate
// real failures from variance.
comparisons: &[

View File

@ -857,9 +857,15 @@ fn shadow() {
base_test_parameters: wgpu_test::TestParameters::default()
.downlevel_flags(wgpu::DownlevelFlags::COMPARISON_SAMPLERS)
// rpi4 on VK doesn't work: https://gitlab.freedesktop.org/mesa/mesa/-/issues/3916
.specific_failure(Some(wgpu::Backends::VULKAN), None, Some("V3D"), false)
.expect_fail(wgpu_test::FailureCase::backend_adapter(
wgpu::Backends::VULKAN,
"V3D",
))
// llvmpipe versions in CI are flaky: https://github.com/gfx-rs/wgpu/issues/2594
.specific_failure(Some(wgpu::Backends::VULKAN), None, Some("llvmpipe"), true),
.skip(wgpu_test::FailureCase::backend_adapter(
wgpu::Backends::VULKAN,
"llvmpipe",
)),
comparisons: &[wgpu_test::ComparisonType::Mean(0.02)],
});
}

View File

@ -475,11 +475,8 @@ fn skybox() {
width: 1024,
height: 768,
optional_features: wgpu::Features::default(),
base_test_parameters: wgpu_test::TestParameters::default().specific_failure(
Some(wgpu::Backends::GL),
None,
Some("ANGLE"),
false,
base_test_parameters: wgpu_test::TestParameters::default().expect_fail(
wgpu_test::FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"),
),
comparisons: &[wgpu_test::ComparisonType::Mean(0.015)],
});

View File

@ -150,7 +150,7 @@ impl ComparisonType {
pub fn compare_image_output(
path: impl AsRef<Path> + AsRef<OsStr>,
backend: Backend,
adapter_info: &wgt::AdapterInfo,
width: u32,
height: u32,
test_with_alpha: &[u8],
@ -205,17 +205,18 @@ pub fn compare_image_output(
}
let file_stem = reference_path.file_stem().unwrap().to_string_lossy();
let renderer = format!(
"{}-{}-{}",
adapter_info.backend.to_str(),
sanitize_for_path(&adapter_info.name),
sanitize_for_path(&adapter_info.driver)
);
// Determine the paths to write out the various intermediate files
let actual_path = Path::new(&path).with_file_name(
OsString::from_str(&format!("{}-{}-actual.png", file_stem, backend.to_str(),)).unwrap(),
OsString::from_str(&format!("{}-{}-actual.png", file_stem, renderer)).unwrap(),
);
let difference_path = Path::new(&path).with_file_name(
OsString::from_str(&format!(
"{}-{}-difference.png",
file_stem,
backend.to_str(),
))
.unwrap(),
OsString::from_str(&format!("{}-{}-difference.png", file_stem, renderer,)).unwrap(),
);
// Convert the error values to a false color representation
@ -246,10 +247,16 @@ pub fn compare_image_output(
#[cfg(target_arch = "wasm32")]
{
let _ = (path, backend, width, height, test_with_alpha, checks);
let _ = (path, adapter_info, width, height, test_with_alpha, checks);
}
}
fn sanitize_for_path(s: &str) -> String {
s.chars()
.map(|ch| if ch.is_ascii_alphanumeric() { ch } else { '_' })
.collect()
}
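To illustrate the new artifact naming (the adapter and driver strings below are made up, not taken from any real CI run): sanitize_for_path maps every non-alphanumeric character to an underscore, so comparison images are now keyed by backend, adapter name, and driver rather than by backend alone.

// Illustrative only; assumes Backend::to_str() yields "vulkan" for the Vulkan backend.
assert_eq!(sanitize_for_path("NVIDIA GeForce RTX 3080"), "NVIDIA_GeForce_RTX_3080");
assert_eq!(sanitize_for_path("Mesa 23.1 (LLVM 15.0.7)"), "Mesa_23_1__LLVM_15_0_7_");
// A hypothetical actual-image path would then end in something like:
//   cube-vulkan-NVIDIA_GeForce_RTX_3080-NVIDIA_535_104_05-actual.png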
fn copy_via_compute(
device: &Device,
encoder: &mut CommandEncoder,

View File

@ -53,11 +53,195 @@ fn lowest_downlevel_properties() -> DownlevelCapabilities {
}
}
/// Conditions under which a test should fail or be skipped.
///
/// By passing a `FailureCase` to [`TestParameters::expect_fail`], you can
/// mark a test as expected to fail under the indicated conditions. By
/// passing it to [`TestParameters::skip`], you can request that the
/// test be skipped altogether.
///
/// If a field is `None`, then that field does not restrict matches. For
/// example:
///
/// ```
/// # use wgpu_test::FailureCase;
/// FailureCase {
/// backends: Some(wgpu::Backends::DX11 | wgpu::Backends::DX12),
/// vendor: None,
/// adapter: Some("RTX"),
/// driver: None,
/// }
/// # ;
/// ```
///
/// This applies to all cards with `"RTX"` in their name on either
/// Direct3D backend, no matter the vendor ID or driver name.
///
/// The strings given here need only appear as a substring in the
/// corresponding [`AdapterInfo`] fields. The comparison is
/// case-insensitive.
///
/// The default value of `FailureCase` applies to any test case. That
/// is, there are no criteria to constrain the match.
///
/// [`AdapterInfo`]: wgt::AdapterInfo
#[derive(Default)]
pub struct FailureCase {
backends: Option<wgpu::Backends>,
vendor: Option<u32>,
adapter: Option<String>,
skip: bool,
/// Backends expected to fail, or `None` for any backend.
///
/// If this is `None`, or if the test is using one of the backends
/// in `backends`, then this `FailureCase` applies.
pub backends: Option<wgpu::Backends>,
/// Vendor expected to fail, or `None` for any vendor.
///
/// If `Some`, this must match [`AdapterInfo::vendor`], which is
/// usually the PCI vendor ID. Otherwise, this `FailureCase`
/// applies regardless of vendor.
///
/// [`AdapterInfo::vendor`]: wgt::AdapterInfo::vendor
pub vendor: Option<u32>,
/// Name of adapter expected to fail, or `None` for any adapter name.
///
/// If this is `Some(s)` and `s` is a substring of
/// [`AdapterInfo::name`], then this `FailureCase` applies. If
/// this is `None`, the adapter name isn't considered.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub adapter: Option<&'static str>,
/// Name of driver expected to fail, or `None` for any driver name.
///
/// If this is `Some(s)` and `s` is a substring of
/// [`AdapterInfo::driver`], then this `FailureCase` applies. If
/// this is `None`, the driver name isn't considered.
///
/// [`AdapterInfo::driver`]: wgt::AdapterInfo::driver
pub driver: Option<&'static str>,
}
impl FailureCase {
/// This case applies to all tests.
pub fn always() -> Self {
FailureCase::default()
}
/// This case applies to no tests.
pub fn never() -> Self {
FailureCase {
backends: Some(wgpu::Backends::empty()),
..FailureCase::default()
}
}
/// Tests running on any of the given backends.
pub fn backend(backends: wgpu::Backends) -> Self {
FailureCase {
backends: Some(backends),
..FailureCase::default()
}
}
/// Tests running on `adapter`.
///
/// For this case to apply, the `adapter` string must appear as a substring
/// of the adapter's [`AdapterInfo::name`]. The comparison is
/// case-insensitive.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub fn adapter(adapter: &'static str) -> Self {
FailureCase {
adapter: Some(adapter),
..FailureCase::default()
}
}
/// Tests running on `backend` and `adapter`.
///
/// For this case to apply, the test must be using an adapter for one of the
/// given `backends`, and the `adapter` string must appear as a substring of
/// the adapter's [`AdapterInfo::name`]. The string comparison is
/// case-insensitive.
///
/// [`AdapterInfo::name`]: wgt::AdapterInfo::name
pub fn backend_adapter(backends: wgpu::Backends, adapter: &'static str) -> Self {
FailureCase {
backends: Some(backends),
adapter: Some(adapter),
..FailureCase::default()
}
}
/// Tests running under WebGL.
///
/// Because of wasm's limited ability to recover from errors, we
/// usually need to skip the test altogether if it's not
/// supported, so this case should usually be used with
/// [`TestParameters::skip`].
pub fn webgl2() -> Self {
#[cfg(target_arch = "wasm32")]
let case = FailureCase::backend(wgpu::Backends::GL);
#[cfg(not(target_arch = "wasm32"))]
let case = FailureCase::never();
case
}
/// Tests running on the MoltenVK Vulkan driver on macOS.
pub fn molten_vk() -> Self {
FailureCase {
backends: Some(wgpu::Backends::VULKAN),
driver: Some("MoltenVK"),
..FailureCase::default()
}
}
/// Test whether `self` applies to `info`.
///
/// If it does, return a `FailureReasons` whose set bits indicate
/// why. If it doesn't, return `None`.
///
/// The caller is responsible for converting the string-valued
/// fields of `info` to lower case, to ensure case-insensitive
/// matching.
fn applies_to(&self, info: &wgt::AdapterInfo) -> Option<FailureReasons> {
let mut reasons = FailureReasons::empty();
if let Some(backends) = self.backends {
if !backends.contains(wgpu::Backends::from(info.backend)) {
return None;
}
reasons.set(FailureReasons::BACKEND, true);
}
if let Some(vendor) = self.vendor {
if vendor != info.vendor {
return None;
}
reasons.set(FailureReasons::VENDOR, true);
}
if let Some(adapter) = self.adapter {
let adapter = adapter.to_lowercase();
if !info.name.contains(&adapter) {
return None;
}
reasons.set(FailureReasons::ADAPTER, true);
}
if let Some(driver) = self.driver {
let driver = driver.to_lowercase();
if !info.driver.contains(&driver) {
return None;
}
reasons.set(FailureReasons::DRIVER, true);
}
// If we got this far but no specific reasons were triggered, then this
// must be a wildcard.
if reasons.is_empty() {
Some(FailureReasons::ALWAYS)
} else {
Some(reasons)
}
}
}
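A brief, hypothetical walk-through of applies_to, assuming the caller has already lower-cased the adapter info and the adapter reports backend Vulkan with name "llvmpipe (llvm 15.0.7)":

// FailureCase::always()        -> Some(FailureReasons::ALWAYS)  (no criteria, wildcard match)
// FailureCase::never()         -> None                          (empty backend set matches nothing)
// FailureCase::adapter("V3D")  -> None                          ("v3d" is not a substring of the name)
// FailureCase::backend_adapter(wgpu::Backends::VULKAN, "llvmpipe")
//                              -> Some(FailureReasons::BACKEND | FailureReasons::ADAPTER)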
// This information determines if a test should run.
@ -65,7 +249,11 @@ pub struct TestParameters {
pub required_features: Features,
pub required_downlevel_properties: DownlevelCapabilities,
pub required_limits: Limits,
// Backends where test should fail.
/// Conditions under which this test should be skipped.
pub skips: Vec<FailureCase>,
/// Conditions under which this test should be run, but is expected to fail.
pub failures: Vec<FailureCase>,
}
@ -75,6 +263,7 @@ impl Default for TestParameters {
required_features: Features::empty(),
required_downlevel_properties: lowest_downlevel_properties(),
required_limits: Limits::downlevel_webgl2_defaults(),
skips: Vec::new(),
failures: Vec::new(),
}
}
@ -86,7 +275,8 @@ bitflags::bitflags! {
const BACKEND = 1 << 0;
const VENDOR = 1 << 1;
const ADAPTER = 1 << 2;
const ALWAYS = 1 << 3;
const DRIVER = 1 << 3;
const ALWAYS = 1 << 4;
}
}
@ -115,87 +305,17 @@ impl TestParameters {
self
}
/// Mark the test as always failing, equivalent to specific_failure(None, None, None, false)
pub fn failure(mut self) -> Self {
self.failures.push(FailureCase {
backends: None,
vendor: None,
adapter: None,
skip: false,
});
/// Mark the test as expected to fail under the conditions given by `when`, but still run it.
pub fn expect_fail(mut self, when: FailureCase) -> Self {
self.failures.push(when);
self
}
/// Mark the test as always failing and needing to be skipped, equivalent to specific_failure(None, None, None, true)
pub fn skip(mut self) -> Self {
self.failures.push(FailureCase {
backends: None,
vendor: None,
adapter: None,
skip: true,
});
/// Mark the test as skipped under the conditions given by `when`.
pub fn skip(mut self, when: FailureCase) -> Self {
self.skips.push(when);
self
}
/// Mark the test as always failing on a specific backend, equivalent to specific_failure(Some(backends), None, None, false)
pub fn backend_failure(mut self, backends: wgpu::Backends) -> Self {
self.failures.push(FailureCase {
backends: Some(backends),
vendor: None,
adapter: None,
skip: false,
});
self
}
/// Mark the test as always failing on WebGL. Because of wasm's limited ability to recover from errors, we need to
/// skip the test wholesale if it's not supported.
pub fn webgl2_failure(mut self) -> Self {
let _ = &mut self;
#[cfg(target_arch = "wasm32")]
self.failures.push(FailureCase {
backends: Some(wgpu::Backends::GL),
vendor: None,
adapter: None,
skip: true,
});
self
}
/// Determines if a test should fail under a particular set of conditions. If any of these are None, that means that it will match anything in that field.
///
/// ex.
/// `specific_failure(Some(wgpu::Backends::DX11 | wgpu::Backends::DX12), None, Some("RTX"), false)`
/// means that this test will fail on all cards with RTX in their name on either D3D backend, no matter the vendor ID.
///
/// If skip is set to true, the test won't be run at all, in order to avoid segfaults.
pub fn specific_failure(
mut self,
backends: Option<Backends>,
vendor: Option<u32>,
device: Option<&'static str>,
skip: bool,
) -> Self {
self.failures.push(FailureCase {
backends,
vendor,
adapter: device.as_ref().map(AsRef::as_ref).map(str::to_lowercase),
skip,
});
self
}
/// Mark the test as failing on Vulkan on macOS only
pub fn molten_vk_failure(self) -> Self {
#[cfg(any(target_os = "macos", target_os = "ios"))]
{
self.specific_failure(Some(wgpu::Backends::VULKAN), None, None, false)
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
{
self
}
}
}
pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(TestingContext)) {
@ -210,7 +330,15 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te
let (adapter, _surface_guard) = initialize_adapter();
let adapter_info = adapter.get_info();
let adapter_lowercase_name = adapter_info.name.to_lowercase();
// Produce a lower-case version of the adapter info, for comparison against
// `parameters.skips` and `parameters.failures`.
let adapter_lowercase_info = wgt::AdapterInfo {
name: adapter_info.name.to_lowercase(),
driver: adapter_info.driver.to_lowercase(),
..adapter_info.clone()
};
let adapter_features = adapter.features();
let adapter_limits = adapter.limits();
let adapter_downlevel_capabilities = adapter.get_downlevel_capabilities();
@ -254,7 +382,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te
let context = TestingContext {
adapter,
adapter_info: adapter_info.clone(),
adapter_info,
adapter_downlevel_capabilities,
device,
device_features: parameters.required_features,
@ -262,52 +390,26 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te
queue,
};
let expected_failure_reason = parameters.failures.iter().find_map(|failure| {
let always =
failure.backends.is_none() && failure.vendor.is_none() && failure.adapter.is_none();
let expect_failure_backend = failure
.backends
.map(|f| f.contains(wgpu::Backends::from(adapter_info.backend)));
let expect_failure_vendor = failure.vendor.map(|v| v == adapter_info.vendor);
let expect_failure_adapter = failure
.adapter
.as_deref()
.map(|f| adapter_lowercase_name.contains(f));
if expect_failure_backend.unwrap_or(true)
&& expect_failure_vendor.unwrap_or(true)
&& expect_failure_adapter.unwrap_or(true)
{
if always {
Some((FailureReasons::ALWAYS, failure.skip))
} else {
let mut reason = FailureReasons::empty();
reason.set(
FailureReasons::BACKEND,
expect_failure_backend.unwrap_or(false),
);
reason.set(
FailureReasons::VENDOR,
expect_failure_vendor.unwrap_or(false),
);
reason.set(
FailureReasons::ADAPTER,
expect_failure_adapter.unwrap_or(false),
);
Some((reason, failure.skip))
}
} else {
None
}
});
if let Some((reason, true)) = expected_failure_reason {
log::info!("EXPECTED TEST FAILURE SKIPPED: {:?}", reason);
// Check if we should skip the test altogether.
if let Some(skip_reason) = parameters
.skips
.iter()
.find_map(|case| case.applies_to(&adapter_lowercase_info))
{
log::info!("EXPECTED TEST FAILURE SKIPPED: {:?}", skip_reason);
return;
}
// Determine if we expect this test to fail, and if so, why.
let expected_failure_reason = parameters
.failures
.iter()
.find_map(|case| case.applies_to(&adapter_lowercase_info));
// Run the test, and catch panics (possibly due to failed assertions).
let panicked = catch_unwind(AssertUnwindSafe(|| test_function(context))).is_err();
// Check whether any validation errors were reported during the test run.
cfg_if::cfg_if!(
if #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] {
let canary_set = wgpu::hal::VALIDATION_CANARY.get_and_reset();
@ -316,32 +418,34 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te
}
);
let failed = panicked || canary_set;
// Summarize reasons for actual failure, if any.
let failure_cause = match (panicked, canary_set) {
(true, true) => "PANIC AND VALIDATION ERROR",
(true, false) => "PANIC",
(false, true) => "VALIDATION ERROR",
(false, false) => "",
(true, true) => Some("PANIC AND VALIDATION ERROR"),
(true, false) => Some("PANIC"),
(false, true) => Some("VALIDATION ERROR"),
(false, false) => None,
};
let expect_failure = expected_failure_reason.is_some();
if failed == expect_failure {
// We got the conditions we expected
if let Some((expected_reason, _)) = expected_failure_reason {
// Print out reason for the failure
// Compare actual results against expectations.
match (failure_cause, expected_failure_reason) {
// The test passed, as expected.
(None, None) => {}
// The test failed unexpectedly.
(Some(cause), None) => {
panic!("UNEXPECTED TEST FAILURE DUE TO {cause}")
}
// The test passed unexpectedly.
(None, Some(reason)) => {
panic!("UNEXPECTED TEST PASS: {reason:?}");
}
// The test failed, as expected.
(Some(cause), Some(reason_expected)) => {
log::info!(
"GOT EXPECTED TEST FAILURE DUE TO {}: {:?}",
failure_cause,
expected_reason
"EXPECTED FAILURE DUE TO {} (expected because of {:?})",
cause,
reason_expected
);
}
} else if let Some((reason, _)) = expected_failure_reason {
// We expected to fail, but things passed
panic!("UNEXPECTED TEST PASS: {reason:?}");
} else {
panic!("UNEXPECTED TEST FAILURE DUE TO {failure_cause}")
}
}
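Taken together, the old ad-hoc helpers map onto the new API roughly as follows; this is a summary of the substitutions made throughout this commit, not an exhaustive or authoritative table:

// .failure()                                            -> .expect_fail(FailureCase::always())
// .skip()                                               -> .skip(FailureCase::always())
// .backend_failure(wgpu::Backends::GL)                  -> .expect_fail(FailureCase::backend(wgpu::Backends::GL))
// .webgl2_failure()                                     -> .skip(FailureCase::webgl2())
// .molten_vk_failure()                                  -> .expect_fail(FailureCase::molten_vk())
// .specific_failure(Some(b), None, Some("name"), false) -> .expect_fail(FailureCase::backend_adapter(b, "name"))
// .specific_failure(Some(b), None, Some("name"), true)  -> .skip(FailureCase::backend_adapter(b, "name"))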

View File

@ -1,5 +1,7 @@
use wasm_bindgen_test::*;
use wgpu_test::{image::ReadbackBuffers, initialize_test, TestParameters, TestingContext};
use wgpu_test::{
image::ReadbackBuffers, initialize_test, FailureCase, TestParameters, TestingContext,
};
static TEXTURE_FORMATS_UNCOMPRESSED_GLES_COMPAT: &[wgpu::TextureFormat] = &[
wgpu::TextureFormat::R8Unorm,
@ -328,7 +330,7 @@ fn clear_texture_tests(ctx: &TestingContext, formats: &[wgpu::TextureFormat]) {
fn clear_texture_uncompressed_gles_compat() {
initialize_test(
TestParameters::default()
.webgl2_failure()
.skip(FailureCase::webgl2())
.features(wgpu::Features::CLEAR_TEXTURE),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_UNCOMPRESSED_GLES_COMPAT);
@ -341,8 +343,8 @@ fn clear_texture_uncompressed_gles_compat() {
fn clear_texture_uncompressed() {
initialize_test(
TestParameters::default()
.webgl2_failure()
.backend_failure(wgpu::Backends::GL)
.skip(FailureCase::webgl2())
.expect_fail(FailureCase::backend(wgpu::Backends::GL))
.features(wgpu::Features::CLEAR_TEXTURE),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_UNCOMPRESSED);
@ -355,7 +357,7 @@ fn clear_texture_uncompressed() {
fn clear_texture_depth() {
initialize_test(
TestParameters::default()
.webgl2_failure()
.skip(FailureCase::webgl2())
.downlevel_flags(
wgpu::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES
| wgpu::DownlevelFlags::COMPUTE_SHADERS,
@ -385,8 +387,10 @@ fn clear_texture_bc() {
initialize_test(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::TEXTURE_COMPRESSION_BC)
.specific_failure(Some(wgpu::Backends::GL), None, Some("ANGLE"), false) // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.backend_failure(wgpu::Backends::GL), // compressed texture copy to buffer not yet implemented
// https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"))
// compressed texture copy to buffer not yet implemented
.expect_fail(FailureCase::backend(wgpu::Backends::GL)),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_BC);
},
@ -402,8 +406,10 @@ fn clear_texture_astc() {
max_texture_dimension_2d: wgpu::COPY_BYTES_PER_ROW_ALIGNMENT * 12,
..wgpu::Limits::downlevel_defaults()
})
.specific_failure(Some(wgpu::Backends::GL), None, Some("ANGLE"), false) // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.backend_failure(wgpu::Backends::GL), // compressed texture copy to buffer not yet implemented
// https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"))
// compressed texture copy to buffer not yet implemented
.expect_fail(FailureCase::backend(wgpu::Backends::GL)),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_ASTC);
},
@ -415,8 +421,10 @@ fn clear_texture_etc2() {
initialize_test(
TestParameters::default()
.features(wgpu::Features::CLEAR_TEXTURE | wgpu::Features::TEXTURE_COMPRESSION_ETC2)
.specific_failure(Some(wgpu::Backends::GL), None, Some("ANGLE"), false) // https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.backend_failure(wgpu::Backends::GL), // compressed texture copy to buffer not yet implemented
// https://bugs.chromium.org/p/angleproject/issues/detail?id=7056
.expect_fail(FailureCase::backend_adapter(wgpu::Backends::GL, "ANGLE"))
// compressed texture copy to buffer not yet implemented
.expect_fail(FailureCase::backend(wgpu::Backends::GL)),
|ctx| {
clear_texture_tests(&ctx, TEXTURE_FORMATS_ETC2);
},

View File

@ -1,6 +1,6 @@
use wasm_bindgen_test::*;
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{initialize_test, FailureCase, TestParameters};
#[test]
#[wasm_bindgen_test]
@ -13,26 +13,30 @@ fn device_initialization() {
#[test]
#[ignore]
fn device_mismatch() {
initialize_test(TestParameters::default().failure(), |ctx| {
// Create a bind group using a layout from another device. This should be a validation
// error but currently crashes.
let (device2, _) =
pollster::block_on(ctx.adapter.request_device(&Default::default(), None)).unwrap();
initialize_test(
// https://github.com/gfx-rs/wgpu/issues/3927
TestParameters::default().expect_fail(FailureCase::always()),
|ctx| {
// Create a bind group using a layout from another device. This should be a validation
// error but currently crashes.
let (device2, _) =
pollster::block_on(ctx.adapter.request_device(&Default::default(), None)).unwrap();
{
let bind_group_layout =
device2.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
{
let bind_group_layout =
device2.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: &[],
});
let _bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &bind_group_layout,
entries: &[],
});
}
let _bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &bind_group_layout,
entries: &[],
});
}
ctx.device.poll(wgpu::Maintain::Poll);
});
ctx.device.poll(wgpu::Maintain::Poll);
},
);
}

View File

@ -1,6 +1,6 @@
use wasm_bindgen_test::*;
use wgpu::RenderPassDescriptor;
use wgpu_test::{fail, initialize_test, TestParameters};
use wgpu_test::{fail, initialize_test, FailureCase, TestParameters};
#[test]
#[wasm_bindgen_test]
@ -22,7 +22,8 @@ fn drop_encoder_after_error() {
// #543: COMMAND_ALLOCATOR_CANNOT_RESET]
//
// For now, we mark the test as failing on DX12.
let parameters = TestParameters::default().backend_failure(wgpu::Backends::DX12);
let parameters =
TestParameters::default().expect_fail(FailureCase::backend(wgpu::Backends::DX12));
initialize_test(parameters, |ctx| {
let mut encoder = ctx
.device

View File

@ -7,7 +7,7 @@ use wgpu::{
};
use wasm_bindgen_test::*;
use wgpu_test::{initialize_test, TestParameters, TestingContext};
use wgpu_test::{initialize_test, FailureCase, TestParameters, TestingContext};
fn generate_dummy_work(ctx: &TestingContext) -> CommandBuffer {
let buffer = ctx.device.create_buffer(&BufferDescriptor {
@ -56,60 +56,75 @@ fn generate_dummy_work(ctx: &TestingContext) -> CommandBuffer {
#[test]
#[wasm_bindgen_test]
fn wait() {
initialize_test(TestParameters::default().skip(), |ctx| {
let cmd_buf = generate_dummy_work(&ctx);
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
ctx.queue.submit(Some(cmd_buf));
ctx.device.poll(Maintain::Wait);
})
ctx.queue.submit(Some(cmd_buf));
ctx.device.poll(Maintain::Wait);
},
)
}
#[test]
#[wasm_bindgen_test]
fn double_wait() {
initialize_test(TestParameters::default().skip(), |ctx| {
let cmd_buf = generate_dummy_work(&ctx);
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
ctx.queue.submit(Some(cmd_buf));
ctx.device.poll(Maintain::Wait);
ctx.device.poll(Maintain::Wait);
})
ctx.queue.submit(Some(cmd_buf));
ctx.device.poll(Maintain::Wait);
ctx.device.poll(Maintain::Wait);
},
)
}
#[test]
#[wasm_bindgen_test]
fn wait_on_submission() {
initialize_test(TestParameters::default().skip(), |ctx| {
let cmd_buf = generate_dummy_work(&ctx);
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
let index = ctx.queue.submit(Some(cmd_buf));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index));
})
let index = ctx.queue.submit(Some(cmd_buf));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index));
},
)
}
#[test]
#[wasm_bindgen_test]
fn double_wait_on_submission() {
initialize_test(TestParameters::default().skip(), |ctx| {
let cmd_buf = generate_dummy_work(&ctx);
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf = generate_dummy_work(&ctx);
let index = ctx.queue.submit(Some(cmd_buf));
ctx.device
.poll(Maintain::WaitForSubmissionIndex(index.clone()));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index));
})
let index = ctx.queue.submit(Some(cmd_buf));
ctx.device
.poll(Maintain::WaitForSubmissionIndex(index.clone()));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index));
},
)
}
#[test]
#[wasm_bindgen_test]
fn wait_out_of_order() {
initialize_test(TestParameters::default().skip(), |ctx| {
let cmd_buf1 = generate_dummy_work(&ctx);
let cmd_buf2 = generate_dummy_work(&ctx);
initialize_test(
TestParameters::default().skip(FailureCase::always()),
|ctx| {
let cmd_buf1 = generate_dummy_work(&ctx);
let cmd_buf2 = generate_dummy_work(&ctx);
let index1 = ctx.queue.submit(Some(cmd_buf1));
let index2 = ctx.queue.submit(Some(cmd_buf2));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index2));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index1));
})
let index1 = ctx.queue.submit(Some(cmd_buf1));
let index2 = ctx.queue.submit(Some(cmd_buf2));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index2));
ctx.device.poll(Maintain::WaitForSubmissionIndex(index1));
},
)
}

View File

@ -4,7 +4,7 @@ use wasm_bindgen_test::*;
use wgpu::{Backends, DownlevelFlags, Features, Limits};
use crate::shader::{shader_input_output_test, InputStorageType, ShaderTest, MAX_BUFFER_SIZE};
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{initialize_test, FailureCase, TestParameters};
fn create_struct_layout_tests(storage_type: InputStorageType) -> Vec<ShaderTest> {
let input_values: Vec<_> = (0..(MAX_BUFFER_SIZE as u32 / 4)).collect();
@ -182,7 +182,7 @@ fn uniform_input() {
TestParameters::default()
.downlevel_flags(DownlevelFlags::COMPUTE_SHADERS)
// Validation errors thrown by the SPIR-V validator https://github.com/gfx-rs/naga/issues/2034
.specific_failure(Some(wgpu::Backends::VULKAN), None, None, false)
.expect_fail(FailureCase::backend(wgpu::Backends::VULKAN))
.limits(Limits::downlevel_defaults()),
|ctx| {
shader_input_output_test(
@ -222,7 +222,7 @@ fn push_constant_input() {
max_push_constant_size: MAX_BUFFER_SIZE as u32,
..Limits::downlevel_defaults()
})
.backend_failure(Backends::GL),
.expect_fail(FailureCase::backend(Backends::GL)),
|ctx| {
shader_input_output_test(
ctx,

View File

@ -8,7 +8,7 @@ use wgpu::{
ShaderStages,
};
use wgpu_test::{initialize_test, TestParameters, TestingContext};
use wgpu_test::{initialize_test, FailureCase, TestParameters, TestingContext};
#[test]
fn zero_init_workgroup_mem() {
@ -18,13 +18,16 @@ fn zero_init_workgroup_mem() {
.limits(Limits::downlevel_defaults())
// remove both of these once we get to https://github.com/gfx-rs/wgpu/issues/3193 or
// https://github.com/gfx-rs/wgpu/issues/3160
.specific_failure(
Some(Backends::DX12),
Some(5140),
Some("Microsoft Basic Render Driver"),
true,
)
.specific_failure(Some(Backends::VULKAN), None, Some("swiftshader"), true),
.skip(FailureCase {
backends: Some(Backends::DX12),
vendor: Some(5140),
adapter: Some("Microsoft Basic Render Driver"),
..FailureCase::default()
})
.skip(FailureCase::backend_adapter(
Backends::VULKAN,
"swiftshader",
)),
zero_init_workgroup_mem_impl,
);
}

View File

@ -1,12 +1,17 @@
use wgpu::{util::DeviceExt, DownlevelFlags, Limits, TextureFormat};
use wgpu_test::{image::calc_difference, initialize_test, TestParameters, TestingContext};
use wgpu_test::{
image::calc_difference, initialize_test, FailureCase, TestParameters, TestingContext,
};
#[test]
fn reinterpret_srgb_ness() {
let parameters = TestParameters::default()
.downlevel_flags(DownlevelFlags::VIEW_FORMATS)
.limits(Limits::downlevel_defaults())
.specific_failure(Some(wgpu::Backends::GL), None, None, true);
.skip(FailureCase {
backends: Some(wgpu::Backends::GL),
..FailureCase::default()
});
initialize_test(parameters, |ctx| {
let unorm_data: [[u8; 4]; 4] = [
[180, 0, 0, 255],

View File

@ -3,7 +3,7 @@ use std::num::NonZeroU64;
use wasm_bindgen_test::*;
use wgpu::util::DeviceExt;
use wgpu_test::{initialize_test, TestParameters, TestingContext};
use wgpu_test::{initialize_test, FailureCase, TestParameters, TestingContext};
fn pulling_common(
ctx: TestingContext,
@ -150,7 +150,7 @@ fn draw_vertex_offset() {
initialize_test(
TestParameters::default()
.test_features_limits()
.backend_failure(wgpu::Backends::DX11),
.expect_fail(FailureCase::backend(wgpu::Backends::DX11)),
|ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..3, 0..1);
@ -176,7 +176,7 @@ fn draw_instanced_offset() {
initialize_test(
TestParameters::default()
.test_features_limits()
.backend_failure(wgpu::Backends::DX11),
.expect_fail(FailureCase::backend(wgpu::Backends::DX11)),
|ctx| {
pulling_common(ctx, &[0, 1, 2, 3, 4, 5], |cmb| {
cmb.draw(0..3, 0..1);

View File

@ -1,6 +1,6 @@
//! Tests for texture copy
use wgpu_test::{initialize_test, TestParameters};
use wgpu_test::{initialize_test, FailureCase, TestParameters};
use wasm_bindgen_test::*;
@ -8,7 +8,8 @@ use wasm_bindgen_test::*;
#[wasm_bindgen_test]
fn write_texture_subset_2d() {
let size = 256;
let parameters = TestParameters::default().backend_failure(wgpu::Backends::DX12);
let parameters =
TestParameters::default().expect_fail(FailureCase::backend(wgpu::Backends::DX12));
initialize_test(parameters, |ctx| {
let tex = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: None,

View File

@ -1,38 +1,46 @@
use wasm_bindgen_test::*;
use wgpu::*;
use wgpu_test::{image::ReadbackBuffers, initialize_test, TestParameters, TestingContext};
use wgpu_test::{
image::ReadbackBuffers, initialize_test, FailureCase, TestParameters, TestingContext,
};
// Checks if discarding a color target resets its init state, causing a zero read of this texture when copied in after submit of the encoder.
#[test]
#[wasm_bindgen_test]
fn discarding_color_target_resets_texture_init_state_check_visible_on_copy_after_submit() {
initialize_test(TestParameters::default().webgl2_failure(), |mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb);
case.create_command_encoder();
case.discard();
case.submit_command_encoder();
initialize_test(
TestParameters::default().skip(FailureCase::webgl2()),
|mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb);
case.create_command_encoder();
case.discard();
case.submit_command_encoder();
case.create_command_encoder();
case.copy_texture_to_buffer();
case.submit_command_encoder();
case.create_command_encoder();
case.copy_texture_to_buffer();
case.submit_command_encoder();
case.assert_buffers_are_zero();
});
case.assert_buffers_are_zero();
},
);
}
// Checks if discarding a color target resets its init state, causing a zero read of this texture when copied in the same encoder to a buffer.
#[test]
#[wasm_bindgen_test]
fn discarding_color_target_resets_texture_init_state_check_visible_on_copy_in_same_encoder() {
initialize_test(TestParameters::default().webgl2_failure(), |mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb);
case.create_command_encoder();
case.discard();
case.copy_texture_to_buffer();
case.submit_command_encoder();
initialize_test(
TestParameters::default().skip(FailureCase::webgl2()),
|mut ctx| {
let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb);
case.create_command_encoder();
case.discard();
case.copy_texture_to_buffer();
case.submit_command_encoder();
case.assert_buffers_are_zero();
});
case.assert_buffers_are_zero();
},
);
}
#[test]