Use a test to generate the .gpuconfig file to reduce recompilations (#8660)

Connor Fitzgerald 2025-12-07 11:46:47 -05:00 committed by GitHub
parent 889055fb70
commit fd671d45a8
4 changed files with 73 additions and 45 deletions

.github/workflows/ci.yml

@@ -37,7 +37,7 @@ env:
   RUSTFLAGS: -D warnings
   RUSTDOCFLAGS: -D warnings
   WASM_BINDGEN_TEST_TIMEOUT: 300 # 5 minutes
-  CACHE_SUFFIX: d # cache busting
+  CACHE_SUFFIX: e # cache busting
   WGPU_CI: true

 # Every time a PR is pushed to, cancel any previous jobs. This
@@ -588,16 +588,6 @@ jobs:
           # Delete snapshots so we can ensure there aren't any excess output files.
           rm -r naga/tests/out

-      - name: Run `wgpu-info`
-        shell: bash
-        run: |
-          echo "$PATH"
-
-          export RUST_LOG=trace
-
-          # This needs to match the command in xtask/tests.rs
-          cargo --locked llvm-cov --no-cfg-coverage --no-report run --bin wgpu-info -- -vv
-
       - name: Run tests
         shell: bash
         run: |
@@ -610,6 +600,7 @@ jobs:
         run: git add . && git diff --exit-code HEAD naga/tests/out

       - uses: actions/upload-artifact@v5
        name: Upload comparison images
+        if: always() # We want artifacts even if the tests fail.
         with:
           name: comparison-images-${{ matrix.os }}
@@ -617,6 +608,15 @@ jobs:
             **/*-actual.png
             **/*-difference.png

+      # Print GPU configuration so we can see what features were available during the test.
+      - name: Print GPU configurations
+        if: always() # We want this information even if the tests fail.
+        shell: bash
+        run: |
+          set -e
+          cat .gpuconfig
+
       - name: Generate coverage report
         id: coverage
         shell: bash

wgpu-info/src/main.rs

@@ -1,16 +1,13 @@
-#[cfg(not(target_arch = "wasm32"))]
+#![cfg_attr(target_arch = "wasm32", no_main)]
+#![cfg(not(target_arch = "wasm32"))]
+
 mod cli;
-#[cfg(not(target_arch = "wasm32"))]
 mod human;
-#[cfg(not(target_arch = "wasm32"))]
 mod report;
-#[cfg(not(target_arch = "wasm32"))]
+#[cfg(test)]
+mod tests;
 mod texture;

 fn main() -> anyhow::Result<()> {
-    #[cfg(not(target_arch = "wasm32"))]
-    {
-        cli::main()?;
-    }
-    Ok(())
+    cli::main()
 }
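The two new crate-level attributes replace the per-item guards: `#![cfg(not(target_arch = "wasm32"))]` strips every item in the crate when compiling for wasm32, and `#![cfg_attr(target_arch = "wasm32", no_main)]` tells the compiler not to expect a `main` symbol on that target. A minimal sketch of the same pattern, with a hypothetical module standing in for the real ones:

    #![cfg_attr(target_arch = "wasm32", no_main)]
    #![cfg(not(target_arch = "wasm32"))]

    // Everything below is compiled only on native targets, so no per-item
    // cfg attributes are needed.
    mod native_only {
        pub fn run() {
            println!("running on a native target");
        }
    }

    fn main() {
        native_only::run();
    }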

wgpu-info/src/tests.rs (new file, +25)

@@ -0,0 +1,25 @@
+use std::{fs::File, io::BufWriter};
+
+const ENV_VAR_SAVE: &str = "WGPU_INFO_SAVE_GPUCONFIG_REPORT";
+
+// We use a test to generate the .gpuconfig file instead of using the cli directly,
+// as `cargo run --bin wgpu-info` would build a different set of dependencies, causing
+// incremental changes to need to rebuild the wgpu stack twice: once for the tests
+// and once for the cli binary.
+//
+// Needs to be kept in sync with the test invocation in xtask/src/test.rs.
+#[test]
+fn generate_gpuconfig_report() {
+    let report = crate::report::GpuReport::generate();
+
+    // If we don't get the env var, just test that we can generate the report, but don't
+    // save it, to avoid a race condition when other tests are reading the file.
+    if std::env::var(ENV_VAR_SAVE).is_err() {
+        println!("Set {ENV_VAR_SAVE} to generate a .gpuconfig report using this test");
+        return;
+    }
+
+    let file = File::create(concat!(env!("CARGO_MANIFEST_DIR"), "/../.gpuconfig")).unwrap();
+    let buf = BufWriter::new(file);
+    report.into_json(buf).unwrap();
+}
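To produce the report locally without going through xtask, the test can be invoked directly; roughly (assuming cargo-nextest is installed): `WGPU_INFO_SAVE_GPUCONFIG_REPORT=1 cargo nextest run -p wgpu-info generate_gpuconfig_report`. The `concat!(env!("CARGO_MANIFEST_DIR"), "/../.gpuconfig")` path writes the file to the workspace root, which is where xtask and the CI `cat .gpuconfig` step expect to find it.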

xtask/src/test.rs

@@ -31,10 +31,17 @@ pub fn run_tests(
     let mut cargo_args = flatten_args(args, passthrough_args);

     // Re-add profile flags that were consumed during argument parsing
-    if is_release {
-        cargo_args.insert(0, OsString::from("--release"));
+    #[expect(clippy::manual_map)] // This is much clearer than using map()
+    let profile_arg = if is_release {
+        Some(OsString::from("--release"))
     } else if let Some(ref p) = custom_profile {
-        cargo_args.insert(0, OsString::from(format!("--cargo-profile={p}")));
+        Some(OsString::from(format!("--cargo-profile={p}")))
+    } else {
+        None
+    };
+
+    if let Some(ref profile_arg) = profile_arg {
+        cargo_args.insert(0, profile_arg.clone());
     }

     // Retries handled by cargo nextest natively
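The `expect(clippy::manual_map)` is there because clippy would otherwise suggest collapsing the `else if let` arm into a `map()` call. For contrast, a sketch of the map-based form the lint nudges toward, with the same inputs assumed (the function wrapper here is hypothetical):

    use std::ffi::OsString;

    // Equivalent logic using map(); correct, but the fallback arm is easier
    // to miss when scanning than the explicit if / else if / else chain above.
    fn profile_arg(is_release: bool, custom_profile: Option<&String>) -> Option<OsString> {
        if is_release {
            Some(OsString::from("--release"))
        } else {
            custom_profile.map(|p| OsString::from(format!("--cargo-profile={p}")))
        }
    }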
@@ -50,37 +57,36 @@ pub fn run_tests(
         install_warp::install_warp(&shell, &target_dir)?;
     }

-    // These needs to match the command in "run wgpu-info" in `.github/workflows/ci.yml`
-    let llvm_cov_flags: &[_] = if llvm_cov {
-        &["llvm-cov", "--no-cfg-coverage", "--no-report"]
-    } else {
-        &[]
-    };
-
-    let llvm_cov_nextest_flags: &[_] = if llvm_cov {
+    let test_suite_run_flags: &[_] = if llvm_cov {
         &["llvm-cov", "--no-cfg-coverage", "--no-report", "nextest"]
+    } else if list {
+        &["nextest", "list"]
     } else {
         &["nextest", "run"]
     };

     log::info!("Generating .gpuconfig file based on gpus on the system");
+    // We use a test to generate the .gpuconfig file instead of using the cli directly,
+    // as `cargo run --bin wgpu-info` would build a different set of dependencies, causing
+    // incremental changes to need to rebuild the wgpu stack twice: once for the tests
+    // and once for the cli binary.
+    //
+    // Needs to be kept in sync with the test in wgpu-info/src/tests.rs.
     shell
         .cmd("cargo")
-        .args(llvm_cov_flags)
-        .args([
-            "run",
-            "--bin",
-            "wgpu-info",
-            "--",
-            "--json",
-            "-o",
-            ".gpuconfig",
-        ])
+        .args(test_suite_run_flags)
+        // Use the same build configuration as the main tests, so that we only build once.
+        .args(["--benches", "--tests", "--all-features"])
+        // Use the same cargo profile as the main tests.
+        .args(profile_arg)
+        // We need to tell nextest to filter by binary too, so it doesn't try to enumerate
+        // tests on any of the gpu-enabled test binaries, as that will fail due to
+        // old or missing .gpuconfig files.
+        .args(["-E", "binary(wgpu-info)", "generate_gpuconfig_report"])
+        // Turn on the env var for saving the .gpuconfig files
+        .env("WGPU_INFO_SAVE_GPUCONFIG_REPORT", "1")
         .quiet()
         .run()
-        .context("Failed to run wgpu-info to generate .gpuconfig")?;
+        .context("Failed to run tests to generate .gpuconfig")?;

     let gpu_count = shell
         .read_file(".gpuconfig")
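The tail of this hunk (cut off above) turns the freshly written report back into a GPU count. A hedged sketch of one way to do that, treating the report as untyped JSON; serde_json is an assumed dependency here, the "devices" field name is hypothetical, and the real xtask code may parse it differently:

    use anyhow::Context;

    // Hypothetical: count adapters by looking for a top-level array field
    // in the .gpuconfig JSON report.
    fn count_gpus(gpuconfig: &str) -> anyhow::Result<usize> {
        let report: serde_json::Value =
            serde_json::from_str(gpuconfig).context("invalid .gpuconfig JSON")?;
        Ok(report
            .get("devices")
            .and_then(|v| v.as_array())
            .map_or(0, |a| a.len()))
    }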
@@ -99,7 +105,7 @@ pub fn run_tests(
     log::info!("Listing tests");
     shell
         .cmd("cargo")
-        .args(llvm_cov_nextest_flags)
+        .args(["nextest", "list"])
         .args(["-v", "--benches", "--tests", "--all-features"])
         .args(cargo_args)
         .run()
@@ -110,7 +116,7 @@ pub fn run_tests(
     shell
         .cmd("cargo")
-        .args(llvm_cov_nextest_flags)
+        .args(test_suite_run_flags)
         .args(["--benches", "--tests", "--all-features"])
         .args(cargo_args)
         .quiet()
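Put together, the generation step now assembles to roughly `cargo nextest run --benches --tests --all-features -E 'binary(wgpu-info)' generate_gpuconfig_report` (plus any profile flag) with `WGPU_INFO_SAVE_GPUCONFIG_REPORT=1` in the environment, or the same thing behind `cargo llvm-cov --no-cfg-coverage --no-report nextest` for coverage runs. Because those are the same flags used by the listing and main test invocations above, the wgpu stack is compiled once and reused across all three steps, which is the recompilation saving the commit title refers to.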