mirror of
https://github.com/maplibre/maplibre-rs.git
synced 2025-12-08 19:05:57 +00:00
Add Render Graph from Bevy (#93)
* Add render graph from bevy with graph runner
* Add surface
* Continue to refactor renderer
* Make ScheduleMethod object safe in order to be able to have a dyn object in MapContext
* Cleanup, add some more documentation and simplify
* Support resizing
* Fix late init
* Update apple docs
* Give bevy attribution
* Pass github token
* Improve some comments
This commit is contained in:
parent c563d640b0
commit 916af61abc
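The central API change can be illustrated with a short sketch of the reworked builder flow, mirroring the `android_main()` hunk further down. This is a hedged sketch, not demo code: the builder entry point and the trailing run step are abbreviated, and the Vulkan backend override is only an example.

```rust
use maplibre::platform::http_client::ReqwestHttpClient;
use maplibre::platform::schedule_method::TokioScheduleMethod;
use maplibre::render::settings::{Backends, WgpuSettings};
use maplibre::MapBuilder;
use maplibre_winit::winit::WinitMapWindowConfig;

async fn build_map_sketch() {
    let _map = MapBuilder::new()
        .with_map_window_config(WinitMapWindowConfig::new("example".to_string()))
        .with_http_client(ReqwestHttpClient::new(None))
        .with_schedule_method(TokioScheduleMethod::new())
        // New in this commit: wgpu settings are injected through the builder
        // instead of being hard-coded in the initializer.
        .with_wgpu_settings(WgpuSettings {
            backends: Some(Backends::VULKAN), // example override, as on Android
            ..WgpuSettings::default()
        })
        .build()
        .initialize()
        .await;
}
```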
6 .github/workflows/on_main_push.yml vendored
@ -63,7 +63,7 @@ jobs:
destination: docs
key: ${{ secrets.SSH_KEY_MAXAMMANN_ORG }}
build-ios:
runs-on: macOS-11
runs-on: macos-11
steps:
- uses: actions/checkout@v2
- uses: ./.github/actions/apple
@ -73,4 +73,6 @@ jobs:
runs-on: macos-11
steps:
- uses: actions/checkout@v2
- uses: ./.github/actions/demo/macos
- uses: ./.github/actions/demo/macos
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
6 .github/workflows/on_pull_request.yml vendored
@ -43,7 +43,7 @@ jobs:
- uses: actions/checkout@v2
- uses: ./.github/actions/docs
build-ios:
runs-on: macOS-11
runs-on: macos-11
steps:
- uses: actions/checkout@v2
- uses: ./.github/actions/apple
@ -53,4 +53,6 @@ jobs:
runs-on: macos-11
steps:
- uses: actions/checkout@v2
- uses: ./.github/actions/demo/macos
- uses: ./.github/actions/demo/macos
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -1,12 +1,13 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Run demo (debug+enable-tracing)" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="run -p maplibre-demo --features enable-tracing" />
<option name="command" value="run -p maplibre-demo --features trace" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="false" />
<option name="emulateTerminal" value="false" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
@ -1,12 +1,13 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Run demo (release+enable-tracing)" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="run -p maplibre-demo --release --features enable-tracing" />
<option name="command" value="run -p maplibre-demo --release --features trace" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="false" />
<option name="emulateTerminal" value="false" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
@ -4,6 +4,7 @@ use log::Level;
use maplibre::platform::http_client::ReqwestHttpClient;
use maplibre::platform::run_multithreaded;
use maplibre::platform::schedule_method::TokioScheduleMethod;
use maplibre::render::settings::{Backends, WgpuSettings};
use maplibre::MapBuilder;
use maplibre_winit::winit::{WinitEventLoop, WinitMapWindow, WinitMapWindowConfig, WinitWindow};
use std::ffi::CString;
@ -20,6 +21,10 @@ pub fn android_main() {
.with_map_window_config(WinitMapWindowConfig::new("maplibre android".to_string()))
.with_http_client(ReqwestHttpClient::new(None))
.with_schedule_method(TokioScheduleMethod::new())
.with_wgpu_settings(WgpuSettings {
backends: Some(Backends::VULKAN),
..WgpuSettings::default()
})
.build()
.initialize()
.await
@ -52,8 +52,6 @@ environment variable, as the others seem unreliable. Note that this can include
|
||||
setting `ONLY_ACTIVE_ARCH` is set to `YES`.
|
||||
|
||||
```bash
|
||||
. "$HOME/.cargo/env"
|
||||
|
||||
arch="unknown"
|
||||
vendor="apple"
|
||||
os_type="unknown"
|
||||
@ -85,7 +83,10 @@ then
|
||||
elif [[ $SDK_NAME == *"iphonesimulator"* ]]
|
||||
then
|
||||
os_type="ios"
|
||||
environment_type="sim"
|
||||
if [[ $ARCHS == "arm64" ]]
|
||||
then
|
||||
environment_type="sim"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@ -96,10 +97,15 @@ then
|
||||
triplet="$triplet-$environment_type"
|
||||
fi
|
||||
|
||||
echo "$mode"
|
||||
echo "$triplet"
|
||||
echo "Mode: $mode"
|
||||
echo "Triplet: $triplet"
|
||||
echo "Shell: $SHELL"
|
||||
|
||||
env -i zsh -c "cargo build -p maplibre-apple $mode --target $triplet --lib"
|
||||
cmd="export HOME=$HOME && . $HOME/.cargo/env && cargo build -p apple $mode --target $triplet --lib"
|
||||
|
||||
echo "Command: $cmd"
|
||||
|
||||
env -i /bin/bash -c "$cmd"
|
||||
```
|
||||
|
||||
### Build Settings
|
||||
|
||||
@ -190,7 +190,7 @@ module.exports = (env) => ({
|
||||
// command. Default arguments are `--verbose`.
|
||||
//args: '--log-level warn',
|
||||
// Default arguments are `--typescript --target browser --mode normal`.
|
||||
extraArgs: ` --target web -- . -Z build-std=std,panic_abort ${env.webgl ? '--features web-webgl' : ''} ${env.tracing ? '--features enable-tracing' : ''}`,
|
||||
extraArgs: ` --target web -- . -Z build-std=std,panic_abort ${env.webgl ? '--features web-webgl' : ''} ${env.tracing ? '--features trace' : ''}`,
|
||||
|
||||
// Optional array of absolute paths to directories, changes to which
|
||||
// will trigger the build.
|
||||
|
||||
@ -10,7 +10,7 @@ readme = "../README.md"
|
||||
|
||||
[features]
|
||||
web-webgl = ["maplibre/web-webgl"]
|
||||
enable-tracing = ["maplibre/enable-tracing", "tracing-subscriber", "tracing-tracy", "tracy-client"]
|
||||
trace = ["maplibre/trace", "tracing-subscriber", "tracing-tracy", "tracy-client"]
|
||||
|
||||
[dependencies]
|
||||
env_logger = "0.9"
|
||||
|
||||
@ -4,7 +4,7 @@ use maplibre::platform::schedule_method::TokioScheduleMethod;
|
||||
use maplibre::MapBuilder;
|
||||
use maplibre_winit::winit::{WinitEventLoop, WinitMapWindow, WinitMapWindowConfig, WinitWindow};
|
||||
|
||||
#[cfg(feature = "enable-tracing")]
|
||||
#[cfg(feature = "trace")]
|
||||
fn enable_tracing() {
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::Registry;
|
||||
@ -30,7 +30,7 @@ fn run_in_window() {
|
||||
fn main() {
|
||||
env_logger::init_from_env(env_logger::Env::default().default_filter_or("info"));
|
||||
|
||||
#[cfg(feature = "enable-tracing")]
|
||||
#[cfg(feature = "trace")]
|
||||
enable_tracing();
|
||||
|
||||
run_in_window()
|
||||
|
||||
@ -12,7 +12,7 @@ use crate::input::query_handler::QueryHandler;
|
||||
use crate::input::shift_handler::ShiftHandler;
|
||||
use crate::input::tilt_handler::TiltHandler;
|
||||
use crate::input::zoom_handler::ZoomHandler;
|
||||
use maplibre::map_state::ViewState;
|
||||
use maplibre::context::ViewState;
|
||||
|
||||
mod pan_handler;
|
||||
mod pinch_handler;
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use super::UpdateState;
|
||||
|
||||
use maplibre::map_state::ViewState;
|
||||
use maplibre::context::ViewState;
|
||||
use maplibre::render::camera::Camera;
|
||||
|
||||
use cgmath::{EuclideanSpace, Point3, Vector2, Vector3, Zero};
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use super::UpdateState;
|
||||
|
||||
use maplibre::map_state::ViewState;
|
||||
use maplibre::context::ViewState;
|
||||
use std::time::Duration;
|
||||
|
||||
pub struct PinchHandler {}
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use cgmath::Vector2;
|
||||
|
||||
use crate::input::UpdateState;
|
||||
use maplibre::map_state::ViewState;
|
||||
use maplibre::context::ViewState;
|
||||
use std::time::Duration;
|
||||
use winit::event::{ElementState, MouseButton};
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use super::UpdateState;
|
||||
|
||||
use cgmath::{Vector3, Zero};
|
||||
use maplibre::map_state::ViewState;
|
||||
use maplibre::context::ViewState;
|
||||
use std::time::Duration;
|
||||
|
||||
pub struct ShiftHandler {
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use super::UpdateState;
|
||||
|
||||
use maplibre::map_state::ViewState;
|
||||
use maplibre::context::ViewState;
|
||||
|
||||
use cgmath::{Deg, Rad, Zero};
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use super::UpdateState;
|
||||
|
||||
use maplibre::context::ViewState;
|
||||
use maplibre::coords::Zoom;
|
||||
use maplibre::map_state::ViewState;
|
||||
|
||||
use cgmath::{Vector2, Vector3};
|
||||
|
||||
|
||||
@ -2,11 +2,12 @@ use instant::Instant;
|
||||
use maplibre::error::Error;
|
||||
use maplibre::io::scheduler::ScheduleMethod;
|
||||
use maplibre::io::source_client::HTTPClient;
|
||||
use std::borrow::BorrowMut;
|
||||
use winit::event::{ElementState, KeyboardInput, VirtualKeyCode, WindowEvent};
|
||||
use winit::event_loop::ControlFlow;
|
||||
|
||||
use crate::input::{InputController, UpdateState};
|
||||
use maplibre::map_state::MapState;
|
||||
use maplibre::map_schedule::MapSchedule;
|
||||
use maplibre::window::{MapWindow, MapWindowConfig, Runnable};
|
||||
use winit::event::Event;
|
||||
|
||||
@ -74,7 +75,7 @@ where
|
||||
SM: ScheduleMethod,
|
||||
HC: HTTPClient,
|
||||
{
|
||||
fn run(mut self, mut map_state: MapState<MWC, SM, HC>, max_frames: Option<u64>) {
|
||||
fn run(mut self, mut map_state: MapSchedule<MWC, SM, HC>, max_frames: Option<u64>) {
|
||||
let mut last_render_time = Instant::now();
|
||||
let mut current_frame: u64 = 0;
|
||||
|
||||
@ -90,7 +91,7 @@ where
|
||||
|
||||
let state = task::block_in_place(|| {
|
||||
Handle::current().block_on(async {
|
||||
map_state.reinitialize().await;
|
||||
map_state.late_init().await;
|
||||
})
|
||||
});
|
||||
return;
|
||||
@ -135,7 +136,9 @@ where
|
||||
let dt = now - last_render_time;
|
||||
last_render_time = now;
|
||||
|
||||
input_controller.update_state(map_state.view_state_mut(), dt);
|
||||
{
|
||||
input_controller.update_state(map_state.view_state_mut(), dt);
|
||||
}
|
||||
|
||||
match map_state.update_and_redraw() {
|
||||
Ok(_) => {}
|
||||
@ -161,10 +164,7 @@ where
|
||||
map_state.suspend();
|
||||
}
|
||||
Event::Resumed => {
|
||||
map_state.recreate_surface(&self);
|
||||
let size = self.size();
|
||||
map_state.resize(size.width(), size.height());// FIXME: Resumed is also called when the app launches for the first time. Instead of first using a "fake" inner_size() in State::new we should initialize with a proper size from the beginning
|
||||
map_state.resume();
|
||||
map_state.resume(&self);
|
||||
}
|
||||
Event::MainEventsCleared => {
|
||||
// RedrawRequested will only trigger once, unless we manually
|
||||
|
||||
@ -11,7 +11,7 @@ readme = "../README.md"
|
||||
[features]
|
||||
web-webgl = ["wgpu/webgl"]
|
||||
# Enable tracing using tracy on desktop/mobile and the chrome profiler on web
|
||||
enable-tracing = [ "tracing-subscriber", "tracing-tracy", "tracy-client"]
|
||||
trace = [ "tracing-subscriber", "tracing-tracy", "tracy-client"]
|
||||
no-thread-safe-futures = []
|
||||
|
||||
|
||||
@ -68,5 +68,9 @@ serde_json = "1.0"
|
||||
csscolorparser = { version = "0.5", features = ["serde", "cint"]}
|
||||
cint = "0.2"
|
||||
|
||||
thiserror = "1"
|
||||
downcast-rs = "1.2"
|
||||
smallvec = "1.8"
|
||||
|
||||
[build-dependencies]
|
||||
maplibre-build-tools = { path = "../maplibre-build-tools", version = "0.1.0" }
|
||||
|
||||
70 maplibre/src/context.rs Normal file
@ -0,0 +1,70 @@
|
||||
use crate::coords::{Zoom, TILE_SIZE};
|
||||
use crate::io::shared_thread_state::SharedThreadState;
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::TessellateMessage;
|
||||
use crate::render::camera::{Camera, Perspective, ViewProjection};
|
||||
use crate::util::ChangeObserver;
|
||||
use crate::{Renderer, ScheduleMethod, Style, WindowSize};
|
||||
use std::sync::mpsc;
|
||||
|
||||
/// Stores the camera configuration.
|
||||
pub struct ViewState {
|
||||
pub zoom: ChangeObserver<Zoom>,
|
||||
pub camera: ChangeObserver<Camera>,
|
||||
pub perspective: Perspective,
|
||||
}
|
||||
|
||||
impl ViewState {
|
||||
pub fn new(window_size: &WindowSize) -> Self {
|
||||
let camera = Camera::new(
|
||||
(TILE_SIZE / 2.0, TILE_SIZE / 2.0, 150.0),
|
||||
cgmath::Deg(-90.0),
|
||||
cgmath::Deg(0.0),
|
||||
window_size.width(),
|
||||
window_size.height(),
|
||||
);
|
||||
|
||||
let perspective = Perspective::new(
|
||||
window_size.width(),
|
||||
window_size.height(),
|
||||
cgmath::Deg(110.0),
|
||||
100.0,
|
||||
2000.0,
|
||||
);
|
||||
|
||||
Self {
|
||||
zoom: ChangeObserver::default(),
|
||||
camera: ChangeObserver::new(camera),
|
||||
perspective,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn view_projection(&self) -> ViewProjection {
|
||||
self.camera.calc_view_proj(&self.perspective)
|
||||
}
|
||||
|
||||
pub fn visible_level(&self) -> u8 {
|
||||
self.zoom.level()
|
||||
}
|
||||
|
||||
pub fn zoom(&self) -> Zoom {
|
||||
*self.zoom
|
||||
}
|
||||
|
||||
pub fn update_zoom(&mut self, new_zoom: Zoom) {
|
||||
*self.zoom = new_zoom;
|
||||
log::info!("zoom: {}", new_zoom);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MapContext {
|
||||
pub view_state: ViewState,
|
||||
pub style: Style,
|
||||
|
||||
pub tile_cache: TileCache,
|
||||
pub renderer: Renderer,
|
||||
pub scheduler: Box<dyn ScheduleMethod>,
|
||||
|
||||
pub message_receiver: mpsc::Receiver<TessellateMessage>,
|
||||
pub shared_thread_state: SharedThreadState,
|
||||
}
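A hedged sketch of how the camera state exposed here can be queried; the helper is illustrative, but the paths and signatures follow this commit.

```rust
use maplibre::context::ViewState;
use maplibre::render::camera::ViewProjection;
use maplibre::WindowSize;

// Build the default camera for a window and derive its view projection.
fn initial_view_projection(window_size: &WindowSize) -> ViewProjection {
    let view_state = ViewState::new(window_size);
    view_state.view_projection()
}
```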
|
||||
@ -1,15 +1,11 @@
|
||||
//! Provides utilities related to coordinates.
|
||||
|
||||
use std::fmt;
|
||||
use std::fmt::Formatter;
|
||||
|
||||
use cgmath::num_traits::Pow;
|
||||
use cgmath::{AbsDiffEq, Matrix4, Point3, Vector3};
|
||||
|
||||
use crate::style::source::TileAddressingScheme;
|
||||
|
||||
use crate::util::math::{div_floor, Aabb2};
|
||||
use crate::util::SignificantlyDifferent;
|
||||
use cgmath::num_traits::Pow;
|
||||
use cgmath::{AbsDiffEq, Matrix4, Point3, Vector3};
|
||||
use std::fmt;
|
||||
|
||||
pub const EXTENT_UINT: u32 = 4096;
|
||||
pub const EXTENT_SINT: i32 = EXTENT_UINT as i32;
|
||||
@ -51,7 +47,7 @@ impl Quadkey {
|
||||
}
|
||||
|
||||
impl fmt::Debug for Quadkey {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let len = self.0[0] as usize;
|
||||
for part in &self.0[0..len] {
|
||||
write!(f, "{:?}", part)?;
|
||||
@ -78,7 +74,7 @@ impl Default for Zoom {
|
||||
}
|
||||
|
||||
impl fmt::Display for Zoom {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", (self.0 * 100.0).round() / 100.0)
|
||||
}
|
||||
}
|
||||
@ -505,7 +501,7 @@ impl fmt::Display for TileCoords {
|
||||
}
|
||||
|
||||
impl fmt::Display for WorldTileCoords {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"WT(x={x},y={y},z={z})",
|
||||
@ -516,7 +512,7 @@ impl fmt::Display for WorldTileCoords {
|
||||
}
|
||||
}
|
||||
impl fmt::Display for WorldCoords {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "W(x={x},y={y})", x = self.x, y = self.y,)
|
||||
}
|
||||
}
|
||||
|
||||
@ -4,7 +4,6 @@ use lyon::tessellation::TessellationError;
|
||||
use std::fmt;
|
||||
use std::fmt::Formatter;
|
||||
use std::sync::mpsc::SendError;
|
||||
use wgpu::SurfaceError;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum RenderError {
|
||||
@ -23,7 +22,7 @@ impl RenderError {
|
||||
pub fn should_exit(&self) -> bool {
|
||||
match self {
|
||||
RenderError::Surface(e) => match e {
|
||||
SurfaceError::OutOfMemory => true,
|
||||
wgpu::SurfaceError::OutOfMemory => true,
|
||||
_ => false,
|
||||
},
|
||||
}
|
||||
@ -39,8 +38,8 @@ pub enum Error {
|
||||
Render(RenderError),
|
||||
}
|
||||
|
||||
impl From<SurfaceError> for Error {
|
||||
fn from(e: SurfaceError) -> Self {
|
||||
impl From<wgpu::SurfaceError> for Error {
|
||||
fn from(e: wgpu::SurfaceError) -> Self {
|
||||
Error::Render(RenderError::Surface(e))
|
||||
}
|
||||
}
|
||||
|
||||
@ -2,9 +2,9 @@
|
||||
|
||||
use crate::coords::WorldTileCoords;
|
||||
|
||||
use crate::render::ShaderVertex;
|
||||
use crate::tessellation::{IndexDataType, OverAlignedVertexBuffer};
|
||||
|
||||
use crate::render::ShaderVertex;
|
||||
use geozero::mvt::tile;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt;
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
//! Scheduling.
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
use crate::error::Error;
|
||||
|
||||
@ -25,25 +26,30 @@ where
|
||||
pub fn schedule_method(&self) -> &SM {
|
||||
&self.schedule_method
|
||||
}
|
||||
|
||||
pub fn take(self) -> SM {
|
||||
self.schedule_method
|
||||
}
|
||||
}
|
||||
|
||||
/// Can schedule a task from a future factory and a shared state.
|
||||
// Should be object safe in order to be able to have a dyn object in MapContext
|
||||
pub trait ScheduleMethod: 'static {
|
||||
#[cfg(not(feature = "no-thread-safe-futures"))]
|
||||
fn schedule<T>(
|
||||
fn schedule(
|
||||
&self,
|
||||
shared_thread_state: SharedThreadState,
|
||||
future_factory: impl (FnOnce(SharedThreadState) -> T) + Send + 'static,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: Future<Output = ()> + Send + 'static;
|
||||
future_factory: Box<
|
||||
(dyn (FnOnce(SharedThreadState) -> Pin<Box<dyn Future<Output = ()> + Send>>) + Send),
|
||||
>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
#[cfg(feature = "no-thread-safe-futures")]
|
||||
fn schedule<T>(
|
||||
fn schedule(
|
||||
&self,
|
||||
shared_thread_state: SharedThreadState,
|
||||
future_factory: impl (FnOnce(SharedThreadState) -> T) + Send + 'static,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: Future<Output = ()> + 'static;
|
||||
future_factory: Box<
|
||||
(dyn (FnOnce(SharedThreadState) -> Pin<Box<dyn Future<Output = ()>>>) + Send),
|
||||
>,
|
||||
) -> Result<(), Error>;
|
||||
}
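Since `schedule` no longer takes a generic future type, the trait is object safe and can be driven through `dyn ScheduleMethod`, which is how `MapContext` stores it. A minimal, hedged sketch of a caller against the thread-safe variant above; the helper names and exact import paths are assumptions.

```rust
use std::future::Future;
use std::pin::Pin;

use maplibre::error::Error;
use maplibre::io::scheduler::ScheduleMethod;
use maplibre::io::shared_thread_state::SharedThreadState;

fn spawn_work(scheduler: &dyn ScheduleMethod, state: SharedThreadState) -> Result<(), Error> {
    // A plain fn keeps the coercion into the boxed closure type straightforward.
    fn factory(state: SharedThreadState) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        Box::pin(async move {
            // Real callers fetch and process a tile here.
            let _ = state;
        })
    }
    scheduler.schedule(state, Box::new(factory))
}
```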
|
||||
|
||||
@ -18,25 +18,31 @@
|
||||
|
||||
use crate::io::scheduler::{ScheduleMethod, Scheduler};
|
||||
use crate::io::source_client::HTTPClient;
|
||||
use crate::map_state::MapState;
|
||||
use crate::render::render_state::RenderState;
|
||||
use crate::map_schedule::MapSchedule;
|
||||
use crate::render::settings::{RendererSettings, WgpuSettings};
|
||||
use crate::render::{RenderState, Renderer};
|
||||
use crate::style::Style;
|
||||
use crate::window::{MapWindow, MapWindowConfig, Runnable, WindowSize};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
pub mod context;
|
||||
pub mod coords;
|
||||
pub mod error;
|
||||
pub mod io;
|
||||
// Exposed because of input handlers in maplibre-winit
|
||||
pub mod map_schedule;
|
||||
pub mod platform;
|
||||
// Exposed because of camera
|
||||
pub mod render;
|
||||
pub mod style;
|
||||
pub mod window;
|
||||
// Exposed because of doc-strings
|
||||
pub mod schedule;
|
||||
|
||||
// Used for benchmarking
|
||||
pub mod benchmarking;
|
||||
|
||||
// Internal modules
|
||||
pub mod map_state;
|
||||
pub mod render;
|
||||
pub(crate) mod stages;
|
||||
pub(crate) mod tessellation;
|
||||
pub(crate) mod tilejson;
|
||||
pub(crate) mod util;
|
||||
@ -48,7 +54,7 @@ where
|
||||
SM: ScheduleMethod,
|
||||
HC: HTTPClient,
|
||||
{
|
||||
map_state: MapState<W::MapWindowConfig, SM, HC>,
|
||||
map_state: MapSchedule<W::MapWindowConfig, SM, HC>,
|
||||
window: W,
|
||||
}
|
||||
|
||||
@ -96,6 +102,8 @@ where
|
||||
http_client: HC,
|
||||
style: Style,
|
||||
|
||||
wgpu_settings: WgpuSettings,
|
||||
renderer_settings: RendererSettings,
|
||||
map_window_config: MWC,
|
||||
}
|
||||
|
||||
@ -108,32 +116,29 @@ where
|
||||
/// Initializes the whole rendering pipeline for the given configuration.
|
||||
/// Returns the initialized map, ready to be run.
|
||||
pub async fn initialize(self) -> Map<MWC::MapWindow, SM, HC> {
|
||||
let instance = wgpu::Instance::new(wgpu::Backends::all());
|
||||
//let instance = wgpu::Instance::new(wgpu::Backends::GL);
|
||||
//let instance = wgpu::Instance::new(wgpu::Backends::VULKAN);
|
||||
|
||||
let window = MWC::MapWindow::create(&self.map_window_config);
|
||||
let window_size = window.size();
|
||||
|
||||
let surface = unsafe { instance.create_surface(window.inner()) };
|
||||
let surface_config = wgpu::SurfaceConfiguration {
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
format: crate::platform::COLOR_TEXTURE_FORMAT,
|
||||
width: window_size.width(),
|
||||
height: window_size.height(),
|
||||
// present_mode: wgpu::PresentMode::Mailbox,
|
||||
present_mode: wgpu::PresentMode::Fifo, // VSync
|
||||
};
|
||||
|
||||
let render_state = RenderState::initialize(instance, surface, surface_config).await;
|
||||
#[cfg(target_os = "android")]
|
||||
let renderer = None;
|
||||
#[cfg(not(target_os = "android"))]
|
||||
let renderer = Renderer::initialize(
|
||||
&window,
|
||||
self.wgpu_settings.clone(),
|
||||
self.renderer_settings.clone(),
|
||||
)
|
||||
.await
|
||||
.ok();
|
||||
Map {
|
||||
map_state: MapState::new(
|
||||
map_state: MapSchedule::new(
|
||||
self.map_window_config,
|
||||
window_size,
|
||||
render_state,
|
||||
renderer,
|
||||
self.scheduler,
|
||||
self.http_client,
|
||||
self.style,
|
||||
self.wgpu_settings,
|
||||
self.renderer_settings,
|
||||
),
|
||||
window,
|
||||
}
|
||||
@ -150,6 +155,8 @@ where
|
||||
style: Option<Style>,
|
||||
|
||||
map_window_config: Option<MWC>,
|
||||
wgpu_settings: Option<WgpuSettings>,
|
||||
renderer_settings: Option<RendererSettings>,
|
||||
}
|
||||
|
||||
impl<MWC, SM, HC> MapBuilder<MWC, SM, HC>
|
||||
@ -165,6 +172,8 @@ where
|
||||
http_client: None,
|
||||
style: None,
|
||||
map_window_config: None,
|
||||
wgpu_settings: None,
|
||||
renderer_settings: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -173,6 +182,16 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_renderer_settings(mut self, renderer_settings: RendererSettings) -> Self {
|
||||
self.renderer_settings = Some(renderer_settings);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_wgpu_settings(mut self, wgpu_settings: WgpuSettings) -> Self {
|
||||
self.wgpu_settings = Some(wgpu_settings);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_schedule_method(mut self, schedule_method: SM) -> Self {
|
||||
self.schedule_method = Some(schedule_method);
|
||||
self
|
||||
@ -204,6 +223,8 @@ where
|
||||
scheduler,
|
||||
http_client: self.http_client.unwrap(),
|
||||
style,
|
||||
wgpu_settings: self.wgpu_settings.unwrap_or_default(),
|
||||
renderer_settings: self.renderer_settings.unwrap_or_default(),
|
||||
map_window_config: self.map_window_config.unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
236 maplibre/src/map_schedule.rs Normal file
@ -0,0 +1,236 @@
|
||||
//! Stores the state of the map such as `[crate::coords::Zoom]`, `[crate::camera::Camera]`, `[crate::style::Style]`, `[crate::io::tile_cache::TileCache]` and more.
|
||||
|
||||
use crate::context::{MapContext, ViewState};
|
||||
use crate::error::Error;
|
||||
use crate::io::geometry_index::GeometryIndex;
|
||||
use crate::io::scheduler::Scheduler;
|
||||
use crate::io::shared_thread_state::SharedThreadState;
|
||||
use crate::io::source_client::{HTTPClient, HttpSourceClient, SourceClient};
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::tile_request_state::TileRequestState;
|
||||
use crate::io::TessellateMessage;
|
||||
use crate::render::register_render_stages;
|
||||
use crate::schedule::{Schedule, Stage};
|
||||
use crate::stages::register_stages;
|
||||
use crate::style::Style;
|
||||
use crate::{
|
||||
MapWindow, MapWindowConfig, Renderer, RendererSettings, ScheduleMethod, WgpuSettings,
|
||||
WindowSize,
|
||||
};
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::sync::{mpsc, Arc, Mutex};
|
||||
|
||||
pub struct PrematureMapContext {
|
||||
pub view_state: ViewState,
|
||||
pub style: Style,
|
||||
|
||||
pub tile_cache: TileCache,
|
||||
pub scheduler: Box<dyn ScheduleMethod>,
|
||||
|
||||
pub message_receiver: mpsc::Receiver<TessellateMessage>,
|
||||
pub shared_thread_state: SharedThreadState,
|
||||
|
||||
wgpu_settings: WgpuSettings,
|
||||
renderer_settings: RendererSettings,
|
||||
}
|
||||
|
||||
pub enum EventuallyMapContext {
|
||||
Full(MapContext),
|
||||
Premature(PrematureMapContext),
|
||||
Empty,
|
||||
}
|
||||
|
||||
impl EventuallyMapContext {
|
||||
pub fn make_full(&mut self, renderer: Renderer) {
|
||||
let context = mem::replace(self, EventuallyMapContext::Empty);
|
||||
|
||||
match context {
|
||||
EventuallyMapContext::Full(_) => {}
|
||||
EventuallyMapContext::Premature(PrematureMapContext {
|
||||
view_state,
|
||||
style,
|
||||
tile_cache,
|
||||
scheduler,
|
||||
message_receiver,
|
||||
shared_thread_state,
|
||||
wgpu_settings,
|
||||
renderer_settings,
|
||||
}) => {
|
||||
mem::replace(
|
||||
self,
|
||||
EventuallyMapContext::Full(MapContext {
|
||||
view_state,
|
||||
style,
|
||||
tile_cache,
|
||||
renderer,
|
||||
scheduler,
|
||||
message_receiver,
|
||||
shared_thread_state,
|
||||
}),
|
||||
);
|
||||
}
|
||||
EventuallyMapContext::Empty => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the state of the map, dispatches tile fetching and caching, tessellation and drawing.
|
||||
pub struct MapSchedule<MWC, SM, HC>
|
||||
where
|
||||
MWC: MapWindowConfig,
|
||||
SM: ScheduleMethod,
|
||||
HC: HTTPClient,
|
||||
{
|
||||
map_window_config: MWC,
|
||||
|
||||
map_context: EventuallyMapContext,
|
||||
|
||||
schedule: Schedule,
|
||||
|
||||
phantom_sm: PhantomData<SM>,
|
||||
phantom_hc: PhantomData<HC>,
|
||||
|
||||
suspended: bool,
|
||||
}
|
||||
|
||||
impl<MWC, SM, HC> MapSchedule<MWC, SM, HC>
|
||||
where
|
||||
MWC: MapWindowConfig,
|
||||
SM: ScheduleMethod,
|
||||
HC: HTTPClient,
|
||||
{
|
||||
pub fn new(
|
||||
map_window_config: MWC,
|
||||
window_size: WindowSize,
|
||||
renderer: Option<Renderer>,
|
||||
scheduler: Scheduler<SM>,
|
||||
http_client: HC,
|
||||
style: Style,
|
||||
wgpu_settings: WgpuSettings,
|
||||
renderer_settings: RendererSettings,
|
||||
) -> Self {
|
||||
let view_state = ViewState::new(&window_size);
|
||||
let tile_cache = TileCache::new();
|
||||
|
||||
let mut schedule = Schedule::default();
|
||||
let client: SourceClient<HC> = SourceClient::Http(HttpSourceClient::new(http_client));
|
||||
register_stages(&mut schedule, client);
|
||||
register_render_stages(&mut schedule);
|
||||
|
||||
let (message_sender, message_receiver) = mpsc::channel();
|
||||
|
||||
let scheduler = Box::new(scheduler.take());
|
||||
let shared_thread_state = SharedThreadState {
|
||||
tile_request_state: Arc::new(Mutex::new(TileRequestState::new())),
|
||||
message_sender,
|
||||
geometry_index: Arc::new(Mutex::new(GeometryIndex::new())),
|
||||
};
|
||||
Self {
|
||||
map_window_config,
|
||||
map_context: match renderer {
|
||||
None => EventuallyMapContext::Premature(PrematureMapContext {
|
||||
view_state,
|
||||
style,
|
||||
tile_cache,
|
||||
scheduler,
|
||||
shared_thread_state,
|
||||
wgpu_settings,
|
||||
message_receiver,
|
||||
renderer_settings,
|
||||
}),
|
||||
Some(renderer) => EventuallyMapContext::Full(MapContext {
|
||||
view_state,
|
||||
style,
|
||||
tile_cache,
|
||||
renderer,
|
||||
scheduler,
|
||||
shared_thread_state,
|
||||
message_receiver,
|
||||
}),
|
||||
},
|
||||
schedule,
|
||||
phantom_sm: Default::default(),
|
||||
phantom_hc: Default::default(),
|
||||
suspended: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(name = "update_and_redraw", skip_all)]
|
||||
pub fn update_and_redraw(&mut self) -> Result<(), Error> {
|
||||
if self.suspended {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let EventuallyMapContext::Full(map_context) = &mut self.map_context {
|
||||
self.schedule.run(map_context)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, width: u32, height: u32) {
|
||||
if let EventuallyMapContext::Full(map_context) = &mut self.map_context {
|
||||
let view_state = &mut map_context.view_state;
|
||||
view_state.perspective.resize(width, height);
|
||||
view_state.camera.resize(width, height);
|
||||
|
||||
map_context.renderer.resize(width, height)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn suspend(&mut self) {
|
||||
self.suspended = true;
|
||||
}
|
||||
|
||||
pub fn resume<MW>(&mut self, window: &MW)
|
||||
where
|
||||
MW: MapWindow,
|
||||
{
|
||||
if let EventuallyMapContext::Full(map_context) = &mut self.map_context {
|
||||
let mut renderer = &mut map_context.renderer;
|
||||
renderer.surface.recreate(window, &renderer.instance);
|
||||
self.suspended = false;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_initialized(&self) -> bool {
|
||||
match &self.map_context {
|
||||
EventuallyMapContext::Full(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn late_init(&mut self) -> bool {
|
||||
match &self.map_context {
|
||||
EventuallyMapContext::Full(_) => false,
|
||||
EventuallyMapContext::Premature(PrematureMapContext {
|
||||
view_state,
|
||||
style,
|
||||
tile_cache,
|
||||
scheduler,
|
||||
message_receiver,
|
||||
shared_thread_state,
|
||||
wgpu_settings,
|
||||
renderer_settings,
|
||||
}) => {
|
||||
let window = MWC::MapWindow::create(&self.map_window_config);
|
||||
let renderer =
|
||||
Renderer::initialize(&window, wgpu_settings.clone(), renderer_settings.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
&self.map_context.make_full(renderer);
|
||||
true
|
||||
}
|
||||
EventuallyMapContext::Empty => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn view_state_mut(&mut self) -> &mut ViewState {
|
||||
match &mut self.map_context {
|
||||
EventuallyMapContext::Full(MapContext { view_state, .. }) => view_state,
|
||||
EventuallyMapContext::Premature(PrematureMapContext { view_state, .. }) => view_state,
|
||||
_ => panic!("should not happen"),
|
||||
}
|
||||
}
|
||||
}
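For orientation, a hedged sketch of the late-initialization flow this type enables, mirroring how the winit event loop above drives it: when no renderer could be created up front (for example on Android), the context stays premature until a window exists. Generic bounds follow the definition above; the helper itself and the import paths are illustrative.

```rust
use maplibre::io::source_client::HTTPClient;
use maplibre::map_schedule::MapSchedule;
use maplibre::{MapWindowConfig, ScheduleMethod};

async fn ensure_initialized<MWC, SM, HC>(schedule: &mut MapSchedule<MWC, SM, HC>)
where
    MWC: MapWindowConfig,
    SM: ScheduleMethod,
    HC: HTTPClient,
{
    if !schedule.is_initialized() {
        // Creates the window and the renderer, then upgrades the premature
        // context into a full MapContext.
        schedule.late_init().await;
    }
}
```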
|
||||
@ -1,372 +0,0 @@
|
||||
//! Stores the state of the map such as `[crate::coords::Zoom]`, `[crate::camera::Camera]`, `[crate::style::Style]`, `[crate::io::tile_cache::TileCache]` and more.
|
||||
|
||||
use crate::coords::{ViewRegion, WorldTileCoords, Zoom, TILE_SIZE};
|
||||
use crate::error::Error;
|
||||
use crate::io::geometry_index::GeometryIndex;
|
||||
use crate::io::scheduler::Scheduler;
|
||||
use crate::io::shared_thread_state::SharedThreadState;
|
||||
use crate::io::source_client::{HTTPClient, HttpSourceClient, SourceClient};
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::tile_request_state::TileRequestState;
|
||||
use crate::io::{TessellateMessage, TileRequest, TileTessellateMessage};
|
||||
use crate::render::camera;
|
||||
use crate::render::camera::{Camera, Perspective, ViewProjection};
|
||||
use crate::render::render_state::RenderState;
|
||||
use crate::style::Style;
|
||||
use crate::util::ChangeObserver;
|
||||
use crate::{MapWindow, MapWindowConfig, ScheduleMethod, WindowSize};
|
||||
use std::collections::HashSet;
|
||||
use std::sync::{mpsc, Arc, Mutex};
|
||||
|
||||
/// Stores the camera configuration.
|
||||
pub struct ViewState {
|
||||
zoom: ChangeObserver<Zoom>,
|
||||
pub camera: ChangeObserver<Camera>,
|
||||
pub perspective: Perspective,
|
||||
}
|
||||
|
||||
impl ViewState {
|
||||
pub fn view_projection(&self) -> ViewProjection {
|
||||
self.camera.calc_view_proj(&self.perspective)
|
||||
}
|
||||
|
||||
pub fn visible_level(&self) -> u8 {
|
||||
self.zoom.level()
|
||||
}
|
||||
|
||||
pub fn zoom(&self) -> Zoom {
|
||||
*self.zoom
|
||||
}
|
||||
|
||||
pub fn update_zoom(&mut self, new_zoom: Zoom) {
|
||||
*self.zoom = new_zoom;
|
||||
log::info!("zoom: {}", new_zoom);
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the state of the map, dispatches tile fetching and caching, tessellation and drawing.
|
||||
///
|
||||
/// FIXME: MapState may not follow the Single-responsibility principle, as it not only stores
|
||||
/// the state of the map but also the rendering, caching, etc.
|
||||
pub struct MapState<MWC, SM, HC>
|
||||
where
|
||||
MWC: MapWindowConfig,
|
||||
SM: ScheduleMethod,
|
||||
HC: HTTPClient,
|
||||
{
|
||||
map_window_config: MWC,
|
||||
|
||||
view_state: ViewState,
|
||||
|
||||
render_state: Option<RenderState>,
|
||||
scheduler: Scheduler<SM>,
|
||||
message_receiver: mpsc::Receiver<TessellateMessage>,
|
||||
shared_thread_state: SharedThreadState,
|
||||
tile_cache: TileCache,
|
||||
|
||||
source_client: SourceClient<HC>,
|
||||
|
||||
style: Style,
|
||||
|
||||
try_failed: bool,
|
||||
}
|
||||
|
||||
impl<MWC, SM, HC> MapState<MWC, SM, HC>
|
||||
where
|
||||
MWC: MapWindowConfig,
|
||||
SM: ScheduleMethod,
|
||||
HC: HTTPClient,
|
||||
{
|
||||
pub fn new(
|
||||
map_window_config: MWC,
|
||||
window_size: WindowSize,
|
||||
render_state: Option<RenderState>,
|
||||
scheduler: Scheduler<SM>,
|
||||
http_client: HC,
|
||||
style: Style,
|
||||
) -> Self {
|
||||
let camera = camera::Camera::new(
|
||||
(TILE_SIZE / 2.0, TILE_SIZE / 2.0, 150.0),
|
||||
cgmath::Deg(-90.0),
|
||||
cgmath::Deg(0.0),
|
||||
window_size.width(),
|
||||
window_size.height(),
|
||||
);
|
||||
|
||||
let perspective = camera::Perspective::new(
|
||||
window_size.width(),
|
||||
window_size.height(),
|
||||
cgmath::Deg(110.0),
|
||||
100.0,
|
||||
2000.0,
|
||||
);
|
||||
|
||||
let (message_sender, message_receiver) = mpsc::channel();
|
||||
|
||||
Self {
|
||||
map_window_config,
|
||||
view_state: ViewState {
|
||||
zoom: ChangeObserver::default(),
|
||||
camera: ChangeObserver::new(camera),
|
||||
perspective,
|
||||
},
|
||||
|
||||
render_state,
|
||||
scheduler,
|
||||
|
||||
tile_cache: TileCache::new(),
|
||||
message_receiver,
|
||||
shared_thread_state: SharedThreadState {
|
||||
tile_request_state: Arc::new(Mutex::new(TileRequestState::new())),
|
||||
message_sender,
|
||||
geometry_index: Arc::new(Mutex::new(GeometryIndex::new())),
|
||||
},
|
||||
|
||||
style,
|
||||
|
||||
try_failed: false,
|
||||
source_client: SourceClient::Http(HttpSourceClient::new(http_client)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_and_redraw(&mut self) -> Result<(), Error> {
|
||||
// Get data from other threads
|
||||
self.try_populate_cache();
|
||||
|
||||
// Update buffers
|
||||
self.prepare_render();
|
||||
|
||||
// Render buffers
|
||||
self.render_state_mut().render()?;
|
||||
|
||||
#[cfg(all(feature = "enable-tracing", not(target_arch = "wasm32")))]
|
||||
tracy_client::finish_continuous_frame!();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
fn try_populate_cache(&mut self) {
|
||||
if let Ok(result) = self.message_receiver.try_recv() {
|
||||
match result {
|
||||
TessellateMessage::Layer(layer_result) => {
|
||||
tracing::trace!(
|
||||
"Layer {} at {} reached main thread",
|
||||
layer_result.layer_name(),
|
||||
layer_result.get_coords()
|
||||
);
|
||||
self.tile_cache.put_tessellated_layer(layer_result);
|
||||
}
|
||||
TessellateMessage::Tile(TileTessellateMessage { request_id, coords }) => loop {
|
||||
if let Ok(mut tile_request_state) =
|
||||
self.shared_thread_state.tile_request_state.try_lock()
|
||||
{
|
||||
tile_request_state.finish_tile_request(request_id);
|
||||
tracing::trace!("Tile at {} finished loading", coords);
|
||||
break;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request tiles which are currently in view.
|
||||
#[tracing::instrument(skip_all)]
|
||||
fn request_tiles_in_view(&mut self, view_region: &ViewRegion) -> bool {
|
||||
let mut try_failed = false;
|
||||
let source_layers: HashSet<String> = self
|
||||
.style
|
||||
.layers
|
||||
.iter()
|
||||
.filter_map(|layer| layer.source_layer.clone())
|
||||
.collect();
|
||||
|
||||
for coords in view_region.iter() {
|
||||
if coords.build_quad_key().is_some() {
|
||||
// TODO: Make tesselation depend on style?
|
||||
try_failed = self.try_request_tile(&coords, &source_layers).unwrap();
|
||||
}
|
||||
}
|
||||
try_failed
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
fn prepare_render(&mut self) {
|
||||
let render_setup_span = tracing::span!(tracing::Level::TRACE, "setup view region");
|
||||
let _guard = render_setup_span.enter();
|
||||
|
||||
let visible_level = self.view_state.visible_level();
|
||||
|
||||
let view_proj = self.view_state.view_projection();
|
||||
|
||||
let view_region = self
|
||||
.view_state
|
||||
.camera
|
||||
.view_region_bounding_box(&view_proj.invert())
|
||||
.map(|bounding_box| {
|
||||
ViewRegion::new(bounding_box, 0, *self.view_state.zoom, visible_level)
|
||||
});
|
||||
|
||||
drop(_guard);
|
||||
|
||||
if let Some(view_region) = &view_region {
|
||||
self.render_state
|
||||
.as_mut()
|
||||
.expect("render state not yet initialized. Call reinitialize().")
|
||||
.upload_tile_geometry(view_region, &self.style, &self.tile_cache);
|
||||
|
||||
let zoom = self.view_state.zoom();
|
||||
self.render_state_mut()
|
||||
.update_tile_view_pattern(view_region, &view_proj, zoom);
|
||||
|
||||
self.render_state_mut().update_metadata();
|
||||
}
|
||||
|
||||
// TODO: Could we draw inspiration from StagingBelt (https://docs.rs/wgpu/latest/wgpu/util/struct.StagingBelt.html)?
|
||||
// TODO: What is StagingBelt for?
|
||||
|
||||
if self.view_state.camera.did_change(0.05)
|
||||
|| self.view_state.zoom.did_change(0.05)
|
||||
|| self.try_failed
|
||||
{
|
||||
if let Some(view_region) = &view_region {
|
||||
// FIXME: We also need to request tiles from layers above if we are over the maximum zoom level
|
||||
self.try_failed = self.request_tiles_in_view(view_region);
|
||||
}
|
||||
|
||||
self.render_state()
|
||||
.update_globals(&view_proj, &self.view_state.camera);
|
||||
}
|
||||
|
||||
self.view_state.camera.update_reference();
|
||||
self.view_state.zoom.update_reference();
|
||||
}
|
||||
|
||||
fn try_request_tile(
|
||||
&mut self,
|
||||
coords: &WorldTileCoords,
|
||||
layers: &HashSet<String>,
|
||||
) -> Result<bool, Error> {
|
||||
if !self.tile_cache.is_layers_missing(coords, layers) {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
if let Ok(mut tile_request_state) = self.shared_thread_state.tile_request_state.try_lock() {
|
||||
if let Some(request_id) = tile_request_state.start_tile_request(TileRequest {
|
||||
coords: *coords,
|
||||
layers: layers.clone(),
|
||||
}) {
|
||||
tracing::info!("new tile request: {}", &coords);
|
||||
|
||||
// The following snippet can be added instead of the next code block to demonstrate
|
||||
// an understanable approach of fetching
|
||||
/*#[cfg(target_arch = "wasm32")]
|
||||
if let Some(tile_coords) = coords.into_tile(TileAddressingScheme::TMS) {
|
||||
crate::platform::legacy_webworker_fetcher::request_tile(
|
||||
request_id,
|
||||
tile_coords,
|
||||
);
|
||||
}*/
|
||||
|
||||
let client = self.source_client.clone();
|
||||
let coords = *coords;
|
||||
|
||||
self.scheduler
|
||||
.schedule_method()
|
||||
.schedule(
|
||||
self.shared_thread_state.clone(),
|
||||
move |state: SharedThreadState| async move {
|
||||
match client.fetch(&coords).await {
|
||||
Ok(data) => state
|
||||
.process_tile(request_id, data.into_boxed_slice())
|
||||
.unwrap(),
|
||||
Err(e) => {
|
||||
log::error!("{:?}", &e);
|
||||
state.tile_unavailable(&coords, request_id).unwrap()
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
Ok(false)
|
||||
} else {
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, width: u32, height: u32) {
|
||||
self.view_state.perspective.resize(width, height);
|
||||
self.view_state.camera.resize(width, height);
|
||||
|
||||
self.render_state_mut().resize(width, height)
|
||||
}
|
||||
|
||||
pub fn scheduler(&self) -> &Scheduler<SM> {
|
||||
&self.scheduler
|
||||
}
|
||||
|
||||
pub fn suspend(&mut self) {
|
||||
self.render_state_mut().suspend();
|
||||
}
|
||||
|
||||
pub fn resume(&mut self) {
|
||||
self.render_state_mut().resume();
|
||||
}
|
||||
|
||||
pub fn render_state(&self) -> &RenderState {
|
||||
self.render_state
|
||||
.as_ref()
|
||||
.expect("render state not yet initialized. Call reinitialize().")
|
||||
}
|
||||
|
||||
pub fn render_state_mut(&mut self) -> &'_ mut RenderState {
|
||||
self.render_state.as_mut().unwrap()
|
||||
}
|
||||
|
||||
pub fn view_state(&self) -> &ViewState {
|
||||
&self.view_state
|
||||
}
|
||||
|
||||
pub fn view_state_mut(&mut self) -> &mut ViewState {
|
||||
&mut self.view_state
|
||||
}
|
||||
|
||||
pub fn recreate_surface(&mut self, window: &MWC::MapWindow) {
|
||||
self.render_state
|
||||
.as_mut()
|
||||
.expect("render state not yet initialized. Call reinitialize().")
|
||||
.recreate_surface(window);
|
||||
}
|
||||
|
||||
pub fn is_initialized(&self) -> bool {
|
||||
self.render_state.is_some()
|
||||
}
|
||||
|
||||
pub async fn reinitialize(&mut self) {
|
||||
if self.render_state.is_none() {
|
||||
let instance = wgpu::Instance::new(wgpu::Backends::all());
|
||||
//let instance = wgpu::Instance::new(wgpu::Backends::GL);
|
||||
//let instance = wgpu::Instance::new(wgpu::Backends::VULKAN);
|
||||
|
||||
let window = MWC::MapWindow::create(&self.map_window_config);
|
||||
let window_size = window.size();
|
||||
|
||||
let surface = unsafe { instance.create_surface(window.inner()) };
|
||||
let surface_config = wgpu::SurfaceConfiguration {
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
format: crate::platform::COLOR_TEXTURE_FORMAT,
|
||||
width: window_size.width(),
|
||||
height: window_size.height(),
|
||||
// present_mode: wgpu::PresentMode::Mailbox,
|
||||
present_mode: wgpu::PresentMode::Fifo, // VSync
|
||||
};
|
||||
let _window_size = window.size();
|
||||
let render_state = RenderState::initialize(instance, surface, surface_config)
|
||||
.await
|
||||
.unwrap();
|
||||
self.render_state = Some(render_state)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -11,7 +11,7 @@ pub fn run_multithreaded<F: Future>(future: F) -> F::Output {
|
||||
.enable_io()
|
||||
.enable_time()
|
||||
.on_thread_start(|| {
|
||||
#[cfg(feature = "enable-tracing")]
|
||||
#[cfg(feature = "trace")]
|
||||
tracy_client::set_thread_name("tokio-runtime-worker");
|
||||
})
|
||||
.build()
|
||||
|
||||
@ -2,6 +2,7 @@ use crate::error::Error;
|
||||
use crate::io::shared_thread_state::SharedThreadState;
|
||||
use crate::ScheduleMethod;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
/// Multi-threading with Tokio.
|
||||
pub struct TokioScheduleMethod;
|
||||
@ -13,15 +14,16 @@ impl TokioScheduleMethod {
|
||||
}
|
||||
|
||||
impl ScheduleMethod for TokioScheduleMethod {
|
||||
fn schedule<T>(
|
||||
fn schedule(
|
||||
&self,
|
||||
shared_thread_state: SharedThreadState,
|
||||
future_factory: impl FnOnce(SharedThreadState) -> T + Send + 'static,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
tokio::task::spawn(future_factory(shared_thread_state));
|
||||
future_factory: Box<
|
||||
(dyn (FnOnce(SharedThreadState) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>>)
|
||||
+ Send
|
||||
+ 'static),
|
||||
>,
|
||||
) -> Result<(), Error> {
|
||||
tokio::task::spawn((future_factory)(shared_thread_state));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
//! Main camera
|
||||
|
||||
use cgmath::prelude::*;
|
||||
use cgmath::{AbsDiffEq, Matrix4, Point2, Point3, Vector2, Vector3, Vector4};
|
||||
|
||||
@ -5,7 +7,7 @@ use crate::util::math::{bounds_from_points, Aabb2, Aabb3, Plane};
|
||||
use crate::util::SignificantlyDifferent;
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f64> = cgmath::Matrix4::new(
|
||||
pub const OPENGL_TO_WGPU_MATRIX: Matrix4<f64> = Matrix4::new(
|
||||
1.0, 0.0, 0.0, 0.0,
|
||||
0.0, 1.0, 0.0, 0.0,
|
||||
0.0, 0.0, 0.5, 0.0,
|
||||
@ -13,7 +15,7 @@ pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f64> = cgmath::Matrix4::new(
|
||||
);
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const FLIP_Y: cgmath::Matrix4<f64> = cgmath::Matrix4::new(
|
||||
pub const FLIP_Y: Matrix4<f64> = Matrix4::new(
|
||||
1.0, 0.0, 0.0, 0.0,
|
||||
0.0, -1.0, 0.0, 0.0,
|
||||
0.0, 0.0, 1.0, 0.0,
|
||||
@ -65,7 +67,7 @@ impl ModelViewProjection {
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Camera {
|
||||
pub position: cgmath::Point3<f64>,
|
||||
pub position: Point3<f64>,
|
||||
pub yaw: cgmath::Rad<f64>,
|
||||
pub pitch: cgmath::Rad<f64>,
|
||||
|
||||
@ -84,11 +86,7 @@ impl SignificantlyDifferent for Camera {
|
||||
}
|
||||
|
||||
impl Camera {
|
||||
pub fn new<
|
||||
V: Into<cgmath::Point3<f64>>,
|
||||
Y: Into<cgmath::Rad<f64>>,
|
||||
P: Into<cgmath::Rad<f64>>,
|
||||
>(
|
||||
pub fn new<V: Into<Point3<f64>>, Y: Into<cgmath::Rad<f64>>, P: Into<cgmath::Rad<f64>>>(
|
||||
position: V,
|
||||
yaw: Y,
|
||||
pitch: P,
|
||||
@ -109,11 +107,11 @@ impl Camera {
|
||||
self.height = height as f64;
|
||||
}
|
||||
|
||||
fn calc_matrix(&self) -> cgmath::Matrix4<f64> {
|
||||
cgmath::Matrix4::look_to_rh(
|
||||
fn calc_matrix(&self) -> Matrix4<f64> {
|
||||
Matrix4::look_to_rh(
|
||||
self.position,
|
||||
cgmath::Vector3::new(self.yaw.cos(), self.pitch.sin(), self.yaw.sin()).normalize(),
|
||||
cgmath::Vector3::unit_y(),
|
||||
Vector3::new(self.yaw.cos(), self.pitch.sin(), self.yaw.sin()).normalize(),
|
||||
Vector3::unit_y(),
|
||||
)
|
||||
}
|
||||
|
||||
@ -354,7 +352,7 @@ pub struct Perspective {
|
||||
znear: f64,
|
||||
zfar: f64,
|
||||
|
||||
current_projection: cgmath::Matrix4<f64>,
|
||||
current_projection: Matrix4<f64>,
|
||||
}
|
||||
|
||||
impl Perspective {
|
||||
@ -383,12 +381,7 @@ impl Perspective {
|
||||
);
|
||||
}
|
||||
|
||||
fn calc_matrix(
|
||||
aspect: f64,
|
||||
fovy: cgmath::Rad<f64>,
|
||||
znear: f64,
|
||||
zfar: f64,
|
||||
) -> cgmath::Matrix4<f64> {
|
||||
fn calc_matrix(aspect: f64, fovy: cgmath::Rad<f64>, znear: f64, zfar: f64) -> Matrix4<f64> {
|
||||
OPENGL_TO_WGPU_MATRIX * cgmath::perspective(fovy, aspect, znear, zfar)
|
||||
}
|
||||
}
|
||||
|
||||
238 maplibre/src/render/graph/context.rs Normal file
@ -0,0 +1,238 @@
|
||||
use super::{NodeState, RenderGraph, SlotInfos, SlotLabel, SlotType, SlotValue};
|
||||
use crate::render::resource::TextureView;
|
||||
use std::borrow::Cow;
|
||||
use thiserror::Error;
|
||||
|
||||
/// A command that signals the graph runner to run the sub graph corresponding to the `name`
|
||||
/// with the specified `inputs` next.
|
||||
pub struct RunSubGraph {
|
||||
pub name: Cow<'static, str>,
|
||||
pub inputs: Vec<SlotValue>,
|
||||
}
|
||||
|
||||
/// The context with all graph information required to run a [`Node`](super::Node).
|
||||
/// This context is created for each node by the `RenderGraphRunner`.
|
||||
///
|
||||
/// The slot input can be read from here and the outputs must be written back to the context for
|
||||
/// passing them onto the next node.
|
||||
///
|
||||
/// Sub graphs can be queued for running by adding a [`RunSubGraph`] command to the context.
|
||||
/// After the node has finished running the graph runner is responsible for executing the sub graphs.
|
||||
pub struct RenderGraphContext<'a> {
|
||||
graph: &'a RenderGraph,
|
||||
node: &'a NodeState,
|
||||
inputs: &'a [SlotValue],
|
||||
outputs: &'a mut [Option<SlotValue>],
|
||||
run_sub_graphs: Vec<RunSubGraph>,
|
||||
}
|
||||
|
||||
impl<'a> RenderGraphContext<'a> {
|
||||
/// Creates a new render graph context for the `node`.
|
||||
pub fn new(
|
||||
graph: &'a RenderGraph,
|
||||
node: &'a NodeState,
|
||||
inputs: &'a [SlotValue],
|
||||
outputs: &'a mut [Option<SlotValue>],
|
||||
) -> Self {
|
||||
Self {
|
||||
graph,
|
||||
node,
|
||||
inputs,
|
||||
outputs,
|
||||
run_sub_graphs: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the input slot values for the node.
|
||||
#[inline]
|
||||
pub fn inputs(&self) -> &[SlotValue] {
|
||||
self.inputs
|
||||
}
|
||||
|
||||
/// Returns the [`SlotInfos`] of the inputs.
|
||||
pub fn input_info(&self) -> &SlotInfos {
|
||||
&self.node.input_slots
|
||||
}
|
||||
|
||||
/// Returns the [`SlotInfos`] of the outputs.
|
||||
pub fn output_info(&self) -> &SlotInfos {
|
||||
&self.node.output_slots
|
||||
}
|
||||
|
||||
/// Retrieves the input slot value referenced by the `label`.
|
||||
pub fn get_input(&self, label: impl Into<SlotLabel>) -> Result<&SlotValue, InputSlotError> {
|
||||
let label = label.into();
|
||||
let index = self
|
||||
.input_info()
|
||||
.get_slot_index(label.clone())
|
||||
.ok_or(InputSlotError::InvalidSlot(label))?;
|
||||
Ok(&self.inputs[index])
|
||||
}
|
||||
|
||||
/// Retrieves the input slot value referenced by the `label` as a [`TextureView`].
|
||||
pub fn get_input_texture(
|
||||
&self,
|
||||
label: impl Into<SlotLabel>,
|
||||
) -> Result<&TextureView, InputSlotError> {
|
||||
let label = label.into();
|
||||
match self.get_input(label.clone())? {
|
||||
SlotValue::TextureView(value) => Ok(value),
|
||||
value => Err(InputSlotError::MismatchedSlotType {
|
||||
label,
|
||||
actual: value.slot_type(),
|
||||
expected: SlotType::TextureView,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the input slot value referenced by the `label` as a [`Sampler`].
|
||||
pub fn get_input_sampler(
|
||||
&self,
|
||||
label: impl Into<SlotLabel>,
|
||||
) -> Result<&wgpu::Sampler, InputSlotError> {
|
||||
let label = label.into();
|
||||
match self.get_input(label.clone())? {
|
||||
SlotValue::Sampler(value) => Ok(value),
|
||||
value => Err(InputSlotError::MismatchedSlotType {
|
||||
label,
|
||||
actual: value.slot_type(),
|
||||
expected: SlotType::Sampler,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the input slot value referenced by the `label` as a [`Buffer`].
|
||||
pub fn get_input_buffer(
|
||||
&self,
|
||||
label: impl Into<SlotLabel>,
|
||||
) -> Result<&wgpu::Buffer, InputSlotError> {
|
||||
let label = label.into();
|
||||
match self.get_input(label.clone())? {
|
||||
SlotValue::Buffer(value) => Ok(value),
|
||||
value => Err(InputSlotError::MismatchedSlotType {
|
||||
label,
|
||||
actual: value.slot_type(),
|
||||
expected: SlotType::Buffer,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the output slot value referenced by the `label`.
|
||||
pub fn set_output(
|
||||
&mut self,
|
||||
label: impl Into<SlotLabel>,
|
||||
value: impl Into<SlotValue>,
|
||||
) -> Result<(), OutputSlotError> {
|
||||
let label = label.into();
|
||||
let value = value.into();
|
||||
let slot_index = self
|
||||
.output_info()
|
||||
.get_slot_index(label.clone())
|
||||
.ok_or_else(|| OutputSlotError::InvalidSlot(label.clone()))?;
|
||||
let slot = self
|
||||
.output_info()
|
||||
.get_slot(slot_index)
|
||||
.expect("slot is valid");
|
||||
if value.slot_type() != slot.slot_type {
|
||||
return Err(OutputSlotError::MismatchedSlotType {
|
||||
label,
|
||||
actual: slot.slot_type,
|
||||
expected: value.slot_type(),
|
||||
});
|
||||
}
|
||||
self.outputs[slot_index] = Some(value);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Queues up a sub graph for execution after the node has finished running.
|
||||
pub fn run_sub_graph(
|
||||
&mut self,
|
||||
name: impl Into<Cow<'static, str>>,
|
||||
inputs: Vec<SlotValue>,
|
||||
) -> Result<(), RunSubGraphError> {
|
||||
let name = name.into();
|
||||
let sub_graph = self
|
||||
.graph
|
||||
.get_sub_graph(&name)
|
||||
.ok_or_else(|| RunSubGraphError::MissingSubGraph(name.clone()))?;
|
||||
if let Some(input_node) = sub_graph.input_node() {
|
||||
for (i, input_slot) in input_node.input_slots.iter().enumerate() {
|
||||
if let Some(input_value) = inputs.get(i) {
|
||||
if input_slot.slot_type != input_value.slot_type() {
|
||||
return Err(RunSubGraphError::MismatchedInputSlotType {
|
||||
graph_name: name,
|
||||
slot_index: i,
|
||||
actual: input_value.slot_type(),
|
||||
expected: input_slot.slot_type,
|
||||
label: input_slot.name.clone().into(),
|
||||
});
|
||||
}
|
||||
} else {
|
||||
return Err(RunSubGraphError::MissingInput {
|
||||
slot_index: i,
|
||||
slot_name: input_slot.name.clone(),
|
||||
graph_name: name,
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if !inputs.is_empty() {
|
||||
return Err(RunSubGraphError::SubGraphHasNoInputs(name));
|
||||
}
|
||||
|
||||
self.run_sub_graphs.push(RunSubGraph { name, inputs });
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Finishes the context for this [`Node`](super::Node) by
|
||||
/// returning the sub graphs to run next.
|
||||
pub fn finish(self) -> Vec<RunSubGraph> {
|
||||
self.run_sub_graphs
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug, Eq, PartialEq)]
|
||||
pub enum RunSubGraphError {
|
||||
#[error("attempted to run sub-graph `{0}`, but it does not exist")]
|
||||
MissingSubGraph(Cow<'static, str>),
|
||||
#[error("attempted to pass inputs to sub-graph `{0}`, which has no input slots")]
|
||||
SubGraphHasNoInputs(Cow<'static, str>),
|
||||
#[error("sub graph (name: `{graph_name:?}`) could not be run because slot `{slot_name}` at index {slot_index} has no value")]
|
||||
MissingInput {
|
||||
slot_index: usize,
|
||||
slot_name: Cow<'static, str>,
|
||||
graph_name: Cow<'static, str>,
|
||||
},
|
||||
#[error("attempted to use the wrong type for input slot")]
|
||||
MismatchedInputSlotType {
|
||||
graph_name: Cow<'static, str>,
|
||||
slot_index: usize,
|
||||
label: SlotLabel,
|
||||
expected: SlotType,
|
||||
actual: SlotType,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Error, Debug, Eq, PartialEq)]
|
||||
pub enum OutputSlotError {
|
||||
#[error("output slot `{0:?}` does not exist")]
|
||||
InvalidSlot(SlotLabel),
|
||||
#[error("attempted to output a value of type `{actual}` to output slot `{label:?}`, which has type `{expected}`")]
|
||||
MismatchedSlotType {
|
||||
label: SlotLabel,
|
||||
expected: SlotType,
|
||||
actual: SlotType,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Error, Debug, Eq, PartialEq)]
|
||||
pub enum InputSlotError {
|
||||
#[error("input slot `{0:?}` does not exist")]
|
||||
InvalidSlot(SlotLabel),
|
||||
#[error("attempted to retrieve a value of type `{actual}` from input slot `{label:?}`, which has type `{expected}`")]
|
||||
MismatchedSlotType {
|
||||
label: SlotLabel,
|
||||
expected: SlotType,
|
||||
actual: SlotType,
|
||||
},
|
||||
}
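To make the flow concrete, here is a hedged sketch of a node reading one of its inputs during `run`. The node and the slot name are invented, and the string-to-`SlotLabel` conversion is assumed to work as in the Bevy original this module is ported from.

```rust
use maplibre::render::graph::{Node, NodeRunError, RenderContext, RenderGraphContext};
use maplibre::render::RenderState;

struct DebugNode;

impl Node for DebugNode {
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        _render_context: &mut RenderContext,
        _state: &RenderState,
    ) -> Result<(), NodeRunError> {
        // Read a texture view handed in by an upstream node via a slot edge.
        if let Ok(texture_view) = graph.get_input_texture("color_target") {
            // A real node would encode render passes against `texture_view` here.
            let _ = texture_view;
        }
        Ok(())
    }
}
```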
|
||||
56 maplibre/src/render/graph/edge.rs Normal file
@ -0,0 +1,56 @@
|
||||
use super::NodeId;
|
||||
|
||||
/// An edge, which connects two [`Nodes`](super::Node) in
|
||||
/// a [`RenderGraph`](crate::render_graph::RenderGraph).
|
||||
///
|
||||
/// They are used to describe the ordering (which node has to run first)
|
||||
/// and may be of two kinds: [`NodeEdge`](Self::NodeEdge) and [`SlotEdge`](Self::SlotEdge).
|
||||
///
|
||||
/// Edges are added via the `render_graph::add_node_edge(output_node, input_node)` and the
|
||||
/// `render_graph::add_slot_edge(output_node, output_slot, input_node, input_slot)` methods.
|
||||
///
|
||||
/// The former simply states that the `output_node` has to be run before the `input_node`,
|
||||
/// while the later connects an output slot of the `output_node`
|
||||
/// with an input slot of the `input_node` to pass additional data along.
|
||||
/// For more information see [`SlotType`](super::SlotType).
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum Edge {
|
||||
/// An edge describing the ordering of both nodes (`output_node` before `input_node`)
|
||||
/// and connecting the output slot at the `output_index` of the output_node
|
||||
/// with the slot at the `input_index` of the `input_node`.
|
||||
SlotEdge {
|
||||
input_node: NodeId,
|
||||
input_index: usize,
|
||||
output_node: NodeId,
|
||||
output_index: usize,
|
||||
},
|
||||
/// An edge describing the ordering of both nodes (`output_node` before `input_node`).
|
||||
NodeEdge {
|
||||
input_node: NodeId,
|
||||
output_node: NodeId,
|
||||
},
|
||||
}
|
||||
|
||||
impl Edge {
|
||||
/// Returns the id of the `input_node`.
|
||||
pub fn get_input_node(&self) -> NodeId {
|
||||
match self {
|
||||
Edge::SlotEdge { input_node, .. } => *input_node,
|
||||
Edge::NodeEdge { input_node, .. } => *input_node,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the id of the `output_node`.
|
||||
pub fn get_output_node(&self) -> NodeId {
|
||||
match self {
|
||||
Edge::SlotEdge { output_node, .. } => *output_node,
|
||||
Edge::NodeEdge { output_node, .. } => *output_node,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq)]
|
||||
pub enum EdgeExistence {
|
||||
Exists,
|
||||
DoesNotExist,
|
||||
}
|
||||
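To make the difference between the two edge kinds concrete, here is a hypothetical wiring sketch (the node and slot names are illustrative; `producer` is assumed to declare an output slot named "color_texture" and `consumer` a matching input slot of the same `SlotType`):

use maplibre::render::graph::{Node, RenderGraph};

fn wire(producer: impl Node, consumer: impl Node) -> RenderGraph {
    let mut graph = RenderGraph::default();
    graph.add_node("producer", producer);
    graph.add_node("consumer", consumer);

    // A NodeEdge only fixes the execution order of the two nodes.
    graph.add_node_edge("producer", "consumer").unwrap();
    // A SlotEdge additionally passes the "color_texture" output value into the
    // matching input slot of `consumer`.
    graph
        .add_slot_edge("producer", "color_texture", "consumer", "color_texture")
        .unwrap();
    graph
}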
748
maplibre/src/render/graph/graph.rs
Normal file
@ -0,0 +1,748 @@
|
||||
use super::{
|
||||
Edge, Node, NodeId, NodeLabel, NodeRunError, NodeState, RenderGraphContext, RenderGraphError,
|
||||
SlotInfo, SlotLabel,
|
||||
};
|
||||
use crate::render::graph::RenderContext;
|
||||
use crate::render::RenderState;
|
||||
use std::collections::HashMap;
|
||||
use std::{borrow::Cow, fmt::Debug};
|
||||
|
||||
use super::EdgeExistence;
|
||||
|
||||
/// The render graph configures the modular, parallel and re-usable render logic.
|
||||
/// It is a retained and stateless (nodes themselves may have their own internal state) structure,
|
||||
/// which cannot be modified while it is executed by the graph runner.
|
||||
///
|
||||
/// The `RenderGraphRunner` is responsible for executing the entire graph each frame.
|
||||
///
|
||||
/// It consists of three main components: [`Nodes`](Node), [`Edges`](Edge)
|
||||
/// and [`Slots`](super::SlotType).
|
||||
///
|
||||
/// Nodes are responsible for generating draw calls and operating on input and output slots.
|
||||
/// Edges specify the order of execution for nodes and connect input and output slots together.
|
||||
/// Slots describe the render resources created or used by the nodes.
|
||||
///
|
||||
/// Additionally a render graph can contain multiple sub graphs, which are run by the
|
||||
/// corresponding nodes. Every render graph can have its own optional input node.
|
||||
///
|
||||
/// ## Example
|
||||
/// Here is a simple render graph example with two nodes connected by a node edge.
|
||||
/// ```
|
||||
/// #
|
||||
/// # use maplibre::render::graph::{Node, NodeRunError, RenderContext, RenderGraph, RenderGraphContext};
|
||||
/// # use maplibre::render::{RenderState};
|
||||
/// # struct MyNode;
|
||||
/// #
|
||||
/// # impl Node for MyNode {
|
||||
/// # fn run(&self, graph: &mut RenderGraphContext, render_context: &mut RenderContext, state: &RenderState) -> Result<(), NodeRunError> {
|
||||
/// # unimplemented!()
|
||||
/// # }
|
||||
/// # }
|
||||
/// #
|
||||
/// let mut graph = RenderGraph::default();
|
||||
/// graph.add_node("input_node", MyNode);
|
||||
/// graph.add_node("output_node", MyNode);
|
||||
/// graph.add_node_edge("output_node", "input_node").unwrap();
|
||||
/// ```
|
||||
#[derive(Default)]
|
||||
pub struct RenderGraph {
|
||||
nodes: HashMap<NodeId, NodeState>,
|
||||
node_names: HashMap<Cow<'static, str>, NodeId>,
|
||||
sub_graphs: HashMap<Cow<'static, str>, RenderGraph>,
|
||||
input_node: Option<NodeId>,
|
||||
|
||||
current_id: usize,
|
||||
}
|
||||
|
||||
impl RenderGraph {
|
||||
/// The name of the [`GraphInputNode`] of this graph. Used to connect other nodes to it.
|
||||
pub const INPUT_NODE_NAME: &'static str = "GraphInputNode";
|
||||
|
||||
/// Updates all nodes and sub graphs of the render graph. Should be called before executing it.
|
||||
pub fn update(&mut self, state: &mut RenderState) {
|
||||
for node in self.nodes.values_mut() {
|
||||
node.node.update(state);
|
||||
}
|
||||
|
||||
for sub_graph in self.sub_graphs.values_mut() {
|
||||
sub_graph.update(state);
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a [`GraphInputNode`] with the specified slots if not already present.
|
||||
pub fn set_input(&mut self, inputs: Vec<SlotInfo>) -> NodeId {
|
||||
assert!(self.input_node.is_none(), "Graph already has an input node");
|
||||
|
||||
let id = self.add_node("GraphInputNode", GraphInputNode { inputs });
|
||||
self.input_node = Some(id);
|
||||
id
|
||||
}
|
||||
|
||||
/// Returns the [`NodeState`] of the input node of this graph.
|
||||
#[inline]
|
||||
pub fn input_node(&self) -> Option<&NodeState> {
|
||||
self.input_node.and_then(|id| self.get_node_state(id).ok())
|
||||
}
|
||||
|
||||
/// Adds the `node` with the `name` to the graph.
|
||||
/// If the name is already present, it is replaced instead.
|
||||
pub fn add_node<T>(&mut self, name: impl Into<Cow<'static, str>>, node: T) -> NodeId
|
||||
where
|
||||
T: Node,
|
||||
{
|
||||
let id = NodeId::new(self.current_id);
|
||||
self.current_id += 1;
|
||||
let name = name.into();
|
||||
let mut node_state = NodeState::new(id, node);
|
||||
node_state.name = Some(name.clone());
|
||||
self.nodes.insert(id, node_state);
|
||||
self.node_names.insert(name, id);
|
||||
id
|
||||
}
|
||||
|
||||
/// Removes the `node` with the `name` from the graph.
|
||||
/// If the name does not exist, nothing happens.
|
||||
pub fn remove_node(
|
||||
&mut self,
|
||||
name: impl Into<Cow<'static, str>>,
|
||||
) -> Result<(), RenderGraphError> {
|
||||
let name = name.into();
|
||||
if let Some(id) = self.node_names.remove(&name) {
|
||||
if let Some(node_state) = self.nodes.remove(&id) {
|
||||
// Remove all edges from other nodes to this one. Note that as we're removing this
|
||||
// node, we don't need to remove its input edges
|
||||
for input_edge in node_state.edges.input_edges().iter() {
|
||||
match input_edge {
|
||||
Edge::SlotEdge {
|
||||
output_node,
|
||||
output_index: _,
|
||||
input_node: _,
|
||||
input_index: _,
|
||||
} => {
|
||||
if let Ok(output_node) = self.get_node_state_mut(*output_node) {
|
||||
output_node.edges.remove_output_edge(input_edge.clone())?;
|
||||
}
|
||||
}
|
||||
Edge::NodeEdge {
|
||||
input_node: _,
|
||||
output_node,
|
||||
} => {
|
||||
if let Ok(output_node) = self.get_node_state_mut(*output_node) {
|
||||
output_node.edges.remove_output_edge(input_edge.clone())?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Remove all edges from this node to other nodes. Note that as we're removing this
|
||||
// node, we don't need to remove its output edges
|
||||
for output_edge in node_state.edges.output_edges().iter() {
|
||||
match output_edge {
|
||||
Edge::SlotEdge {
|
||||
output_node: _,
|
||||
output_index: _,
|
||||
input_node,
|
||||
input_index: _,
|
||||
} => {
|
||||
if let Ok(input_node) = self.get_node_state_mut(*input_node) {
|
||||
input_node.edges.remove_input_edge(output_edge.clone())?;
|
||||
}
|
||||
}
|
||||
Edge::NodeEdge {
|
||||
output_node: _,
|
||||
input_node,
|
||||
} => {
|
||||
if let Ok(input_node) = self.get_node_state_mut(*input_node) {
|
||||
input_node.edges.remove_input_edge(output_edge.clone())?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retrieves the [`NodeState`] referenced by the `label`.
|
||||
pub fn get_node_state(
|
||||
&self,
|
||||
label: impl Into<NodeLabel>,
|
||||
) -> Result<&NodeState, RenderGraphError> {
|
||||
let label = label.into();
|
||||
let node_id = self.get_node_id(&label)?;
|
||||
self.nodes
|
||||
.get(&node_id)
|
||||
.ok_or(RenderGraphError::InvalidNode(label))
|
||||
}
|
||||
|
||||
/// Retrieves the [`NodeState`] referenced by the `label` mutably.
|
||||
pub fn get_node_state_mut(
|
||||
&mut self,
|
||||
label: impl Into<NodeLabel>,
|
||||
) -> Result<&mut NodeState, RenderGraphError> {
|
||||
let label = label.into();
|
||||
let node_id = self.get_node_id(&label)?;
|
||||
self.nodes
|
||||
.get_mut(&node_id)
|
||||
.ok_or(RenderGraphError::InvalidNode(label))
|
||||
}
|
||||
|
||||
/// Retrieves the [`NodeId`] referenced by the `label`.
|
||||
pub fn get_node_id(&self, label: impl Into<NodeLabel>) -> Result<NodeId, RenderGraphError> {
|
||||
let label = label.into();
|
||||
match label {
|
||||
NodeLabel::Id(id) => Ok(id),
|
||||
NodeLabel::Name(ref name) => self
|
||||
.node_names
|
||||
.get(name)
|
||||
.cloned()
|
||||
.ok_or(RenderGraphError::InvalidNode(label)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the [`Node`] referenced by the `label`.
|
||||
pub fn get_node<T>(&self, label: impl Into<NodeLabel>) -> Result<&T, RenderGraphError>
|
||||
where
|
||||
T: Node,
|
||||
{
|
||||
self.get_node_state(label).and_then(|n| n.node())
|
||||
}
|
||||
|
||||
/// Retrieves the [`Node`] referenced by the `label` mutably.
|
||||
pub fn get_node_mut<T>(
|
||||
&mut self,
|
||||
label: impl Into<NodeLabel>,
|
||||
) -> Result<&mut T, RenderGraphError>
|
||||
where
|
||||
T: Node,
|
||||
{
|
||||
self.get_node_state_mut(label).and_then(|n| n.node_mut())
|
||||
}
|
||||
|
||||
/// Adds the [`Edge::SlotEdge`] to the graph. This guarantees that the `output_node`
|
||||
/// is run before the `input_node` and also connects the `output_slot` to the `input_slot`.
|
||||
pub fn add_slot_edge(
|
||||
&mut self,
|
||||
output_node: impl Into<NodeLabel>,
|
||||
output_slot: impl Into<SlotLabel>,
|
||||
input_node: impl Into<NodeLabel>,
|
||||
input_slot: impl Into<SlotLabel>,
|
||||
) -> Result<(), RenderGraphError> {
|
||||
let output_slot = output_slot.into();
|
||||
let input_slot = input_slot.into();
|
||||
let output_node_id = self.get_node_id(output_node)?;
|
||||
let input_node_id = self.get_node_id(input_node)?;
|
||||
|
||||
let output_index = self
|
||||
.get_node_state(output_node_id)?
|
||||
.output_slots
|
||||
.get_slot_index(output_slot.clone())
|
||||
.ok_or(RenderGraphError::InvalidOutputNodeSlot(output_slot))?;
|
||||
let input_index = self
|
||||
.get_node_state(input_node_id)?
|
||||
.input_slots
|
||||
.get_slot_index(input_slot.clone())
|
||||
.ok_or(RenderGraphError::InvalidInputNodeSlot(input_slot))?;
|
||||
|
||||
let edge = Edge::SlotEdge {
|
||||
output_node: output_node_id,
|
||||
output_index,
|
||||
input_node: input_node_id,
|
||||
input_index,
|
||||
};
|
||||
|
||||
self.validate_edge(&edge, EdgeExistence::DoesNotExist)?;
|
||||
|
||||
{
|
||||
let output_node = self.get_node_state_mut(output_node_id)?;
|
||||
output_node.edges.add_output_edge(edge.clone())?;
|
||||
}
|
||||
let input_node = self.get_node_state_mut(input_node_id)?;
|
||||
input_node.edges.add_input_edge(edge)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes the [`Edge::SlotEdge`] from the graph. If any of the nodes or slots do not
|
||||
/// exist, an error is returned.
|
||||
pub fn remove_slot_edge(
|
||||
&mut self,
|
||||
output_node: impl Into<NodeLabel>,
|
||||
output_slot: impl Into<SlotLabel>,
|
||||
input_node: impl Into<NodeLabel>,
|
||||
input_slot: impl Into<SlotLabel>,
|
||||
) -> Result<(), RenderGraphError> {
|
||||
let output_slot = output_slot.into();
|
||||
let input_slot = input_slot.into();
|
||||
let output_node_id = self.get_node_id(output_node)?;
|
||||
let input_node_id = self.get_node_id(input_node)?;
|
||||
|
||||
let output_index = self
|
||||
.get_node_state(output_node_id)?
|
||||
.output_slots
|
||||
.get_slot_index(output_slot.clone())
|
||||
.ok_or(RenderGraphError::InvalidOutputNodeSlot(output_slot))?;
|
||||
let input_index = self
|
||||
.get_node_state(input_node_id)?
|
||||
.input_slots
|
||||
.get_slot_index(input_slot.clone())
|
||||
.ok_or(RenderGraphError::InvalidInputNodeSlot(input_slot))?;
|
||||
|
||||
let edge = Edge::SlotEdge {
|
||||
output_node: output_node_id,
|
||||
output_index,
|
||||
input_node: input_node_id,
|
||||
input_index,
|
||||
};
|
||||
|
||||
self.validate_edge(&edge, EdgeExistence::Exists)?;
|
||||
|
||||
{
|
||||
let output_node = self.get_node_state_mut(output_node_id)?;
|
||||
output_node.edges.remove_output_edge(edge.clone())?;
|
||||
}
|
||||
let input_node = self.get_node_state_mut(input_node_id)?;
|
||||
input_node.edges.remove_input_edge(edge)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Adds the [`Edge::NodeEdge`] to the graph. This guarantees that the `output_node`
|
||||
/// is run before the `input_node`.
|
||||
pub fn add_node_edge(
|
||||
&mut self,
|
||||
output_node: impl Into<NodeLabel>,
|
||||
input_node: impl Into<NodeLabel>,
|
||||
) -> Result<(), RenderGraphError> {
|
||||
let output_node_id = self.get_node_id(output_node)?;
|
||||
let input_node_id = self.get_node_id(input_node)?;
|
||||
|
||||
let edge = Edge::NodeEdge {
|
||||
output_node: output_node_id,
|
||||
input_node: input_node_id,
|
||||
};
|
||||
|
||||
self.validate_edge(&edge, EdgeExistence::DoesNotExist)?;
|
||||
|
||||
{
|
||||
let output_node = self.get_node_state_mut(output_node_id)?;
|
||||
output_node.edges.add_output_edge(edge.clone())?;
|
||||
}
|
||||
let input_node = self.get_node_state_mut(input_node_id)?;
|
||||
input_node.edges.add_input_edge(edge)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes the [`Edge::NodeEdge`] from the graph. If either node does not exist, an error
|
||||
/// is returned.
|
||||
pub fn remove_node_edge(
|
||||
&mut self,
|
||||
output_node: impl Into<NodeLabel>,
|
||||
input_node: impl Into<NodeLabel>,
|
||||
) -> Result<(), RenderGraphError> {
|
||||
let output_node_id = self.get_node_id(output_node)?;
|
||||
let input_node_id = self.get_node_id(input_node)?;
|
||||
|
||||
let edge = Edge::NodeEdge {
|
||||
output_node: output_node_id,
|
||||
input_node: input_node_id,
|
||||
};
|
||||
|
||||
self.validate_edge(&edge, EdgeExistence::Exists)?;
|
||||
|
||||
{
|
||||
let output_node = self.get_node_state_mut(output_node_id)?;
|
||||
output_node.edges.remove_output_edge(edge.clone())?;
|
||||
}
|
||||
let input_node = self.get_node_state_mut(input_node_id)?;
|
||||
input_node.edges.remove_input_edge(edge)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verifies that the edge existence is as expected and
|
||||
/// checks that slot edges are connected correctly.
|
||||
pub fn validate_edge(
|
||||
&mut self,
|
||||
edge: &Edge,
|
||||
should_exist: EdgeExistence,
|
||||
) -> Result<(), RenderGraphError> {
|
||||
if should_exist == EdgeExistence::Exists && !self.has_edge(edge) {
|
||||
return Err(RenderGraphError::EdgeDoesNotExist(edge.clone()));
|
||||
} else if should_exist == EdgeExistence::DoesNotExist && self.has_edge(edge) {
|
||||
return Err(RenderGraphError::EdgeAlreadyExists(edge.clone()));
|
||||
}
|
||||
|
||||
match *edge {
|
||||
Edge::SlotEdge {
|
||||
output_node,
|
||||
output_index,
|
||||
input_node,
|
||||
input_index,
|
||||
} => {
|
||||
let output_node_state = self.get_node_state(output_node)?;
|
||||
let input_node_state = self.get_node_state(input_node)?;
|
||||
|
||||
let output_slot = output_node_state
|
||||
.output_slots
|
||||
.get_slot(output_index)
|
||||
.ok_or(RenderGraphError::InvalidOutputNodeSlot(SlotLabel::Index(
|
||||
output_index,
|
||||
)))?;
|
||||
let input_slot = input_node_state.input_slots.get_slot(input_index).ok_or(
|
||||
RenderGraphError::InvalidInputNodeSlot(SlotLabel::Index(input_index)),
|
||||
)?;
|
||||
|
||||
if let Some(Edge::SlotEdge {
|
||||
output_node: current_output_node,
|
||||
..
|
||||
}) = input_node_state.edges.input_edges().iter().find(|e| {
|
||||
if let Edge::SlotEdge {
|
||||
input_index: current_input_index,
|
||||
..
|
||||
} = e
|
||||
{
|
||||
input_index == *current_input_index
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}) {
|
||||
if should_exist == EdgeExistence::DoesNotExist {
|
||||
return Err(RenderGraphError::NodeInputSlotAlreadyOccupied {
|
||||
node: input_node,
|
||||
input_slot: input_index,
|
||||
occupied_by_node: *current_output_node,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if output_slot.slot_type != input_slot.slot_type {
|
||||
return Err(RenderGraphError::MismatchedNodeSlots {
|
||||
output_node,
|
||||
output_slot: output_index,
|
||||
input_node,
|
||||
input_slot: input_index,
|
||||
});
|
||||
}
|
||||
}
|
||||
Edge::NodeEdge { .. } => { /* nothing to validate here */ }
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks whether the `edge` already exists in the graph.
|
||||
pub fn has_edge(&self, edge: &Edge) -> bool {
|
||||
let output_node_state = self.get_node_state(edge.get_output_node());
|
||||
let input_node_state = self.get_node_state(edge.get_input_node());
|
||||
if let Ok(output_node_state) = output_node_state {
|
||||
if output_node_state.edges.output_edges().contains(edge) {
|
||||
if let Ok(input_node_state) = input_node_state {
|
||||
if input_node_state.edges.input_edges().contains(edge) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns an iterator over the [`NodeStates`](NodeState).
|
||||
pub fn iter_nodes(&self) -> impl Iterator<Item = &NodeState> {
|
||||
self.nodes.values()
|
||||
}
|
||||
|
||||
/// Returns an iterator over the [`NodeStates`](NodeState), that allows modifying each value.
|
||||
pub fn iter_nodes_mut(&mut self) -> impl Iterator<Item = &mut NodeState> {
|
||||
self.nodes.values_mut()
|
||||
}
|
||||
|
||||
/// Returns an iterator over the sub graphs.
|
||||
pub fn iter_sub_graphs(&self) -> impl Iterator<Item = (&str, &RenderGraph)> {
|
||||
self.sub_graphs
|
||||
.iter()
|
||||
.map(|(name, graph)| (name.as_ref(), graph))
|
||||
}
|
||||
|
||||
/// Returns an iterator over the sub graphs, that allows modifying each value.
|
||||
pub fn iter_sub_graphs_mut(&mut self) -> impl Iterator<Item = (&str, &mut RenderGraph)> {
|
||||
self.sub_graphs
|
||||
.iter_mut()
|
||||
.map(|(name, graph)| (name.as_ref(), graph))
|
||||
}
|
||||
|
||||
/// Returns an iterator over a tuple of the input edges and the corresponding output nodes
|
||||
/// for the node referenced by the label.
|
||||
pub fn iter_node_inputs(
|
||||
&self,
|
||||
label: impl Into<NodeLabel>,
|
||||
) -> Result<impl Iterator<Item = (&Edge, &NodeState)>, RenderGraphError> {
|
||||
let node = self.get_node_state(label)?;
|
||||
Ok(node
|
||||
.edges
|
||||
.input_edges()
|
||||
.iter()
|
||||
.map(|edge| (edge, edge.get_output_node()))
|
||||
.map(move |(edge, output_node_id)| {
|
||||
(edge, self.get_node_state(output_node_id).unwrap())
|
||||
}))
|
||||
}
|
||||
|
||||
/// Returns an iterator over a tuple of the output edges and the corresponding input nodes
|
||||
/// for the node referenced by the label.
|
||||
pub fn iter_node_outputs(
|
||||
&self,
|
||||
label: impl Into<NodeLabel>,
|
||||
) -> Result<impl Iterator<Item = (&Edge, &NodeState)>, RenderGraphError> {
|
||||
let node = self.get_node_state(label)?;
|
||||
Ok(node
|
||||
.edges
|
||||
.output_edges()
|
||||
.iter()
|
||||
.map(|edge| (edge, edge.get_input_node()))
|
||||
.map(move |(edge, input_node_id)| (edge, self.get_node_state(input_node_id).unwrap())))
|
||||
}
|
||||
|
||||
/// Adds the `sub_graph` with the `name` to the graph.
|
||||
/// If the name is already present, it is replaced instead.
|
||||
pub fn add_sub_graph(&mut self, name: impl Into<Cow<'static, str>>, sub_graph: RenderGraph) {
|
||||
self.sub_graphs.insert(name.into(), sub_graph);
|
||||
}
|
||||
|
||||
/// Removes the `sub_graph` with the `name` from the graph.
|
||||
/// If the name does not exist then nothing happens.
|
||||
pub fn remove_sub_graph(&mut self, name: impl Into<Cow<'static, str>>) {
|
||||
self.sub_graphs.remove(&name.into());
|
||||
}
|
||||
|
||||
/// Retrieves the sub graph corresponding to the `name`.
|
||||
pub fn get_sub_graph(&self, name: impl AsRef<str>) -> Option<&RenderGraph> {
|
||||
self.sub_graphs.get(name.as_ref())
|
||||
}
|
||||
|
||||
/// Retrieves the sub graph corresponding to the `name` mutably.
|
||||
pub fn get_sub_graph_mut(&mut self, name: impl AsRef<str>) -> Option<&mut RenderGraph> {
|
||||
self.sub_graphs.get_mut(name.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for RenderGraph {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
for node in self.iter_nodes() {
|
||||
writeln!(f, "{:?}", node.id)?;
|
||||
writeln!(f, " in: {:?}", node.input_slots)?;
|
||||
writeln!(f, " out: {:?}", node.output_slots)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Node`] which acts as an entry point for a [`RenderGraph`] with custom inputs.
|
||||
/// It has the same input and output slots and simply copies them over when run.
|
||||
pub struct GraphInputNode {
|
||||
inputs: Vec<SlotInfo>,
|
||||
}
|
||||
|
||||
impl Node for GraphInputNode {
|
||||
fn input(&self) -> Vec<SlotInfo> {
|
||||
self.inputs.clone()
|
||||
}
|
||||
|
||||
fn output(&self) -> Vec<SlotInfo> {
|
||||
self.inputs.clone()
|
||||
}
|
||||
|
||||
fn run(
|
||||
&self,
|
||||
graph: &mut RenderGraphContext,
|
||||
_render_context: &mut RenderContext,
|
||||
_state: &RenderState,
|
||||
) -> Result<(), NodeRunError> {
|
||||
for i in 0..graph.inputs().len() {
|
||||
let input = graph.inputs()[i].clone();
|
||||
graph.set_output(i, input)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
Edge, Node, NodeId, NodeRunError, RenderGraph, RenderGraphContext, RenderGraphError,
|
||||
SlotInfo,
|
||||
};
|
||||
use crate::render::graph::SlotType;
|
||||
use crate::render::renderer::RenderContext;
|
||||
use crate::render::World;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestNode {
|
||||
inputs: Vec<SlotInfo>,
|
||||
outputs: Vec<SlotInfo>,
|
||||
}
|
||||
|
||||
impl TestNode {
|
||||
pub fn new(inputs: usize, outputs: usize) -> Self {
|
||||
TestNode {
|
||||
inputs: (0..inputs)
|
||||
.map(|i| SlotInfo::new(format!("in_{}", i), SlotType::TextureView))
|
||||
.collect(),
|
||||
outputs: (0..outputs)
|
||||
.map(|i| SlotInfo::new(format!("out_{}", i), SlotType::TextureView))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for TestNode {
|
||||
fn input(&self) -> Vec<SlotInfo> {
|
||||
self.inputs.clone()
|
||||
}
|
||||
|
||||
fn output(&self) -> Vec<SlotInfo> {
|
||||
self.outputs.clone()
|
||||
}
|
||||
|
||||
fn run(
|
||||
&self,
|
||||
_: &mut RenderGraphContext,
|
||||
_: &mut RenderContext,
|
||||
_: &World,
|
||||
) -> Result<(), NodeRunError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_graph_edges() {
|
||||
let mut graph = RenderGraph::default();
|
||||
let a_id = graph.add_node("A", TestNode::new(0, 1));
|
||||
let b_id = graph.add_node("B", TestNode::new(0, 1));
|
||||
let c_id = graph.add_node("C", TestNode::new(1, 1));
|
||||
let d_id = graph.add_node("D", TestNode::new(1, 0));
|
||||
|
||||
graph.add_slot_edge("A", "out_0", "C", "in_0").unwrap();
|
||||
graph.add_node_edge("B", "C").unwrap();
|
||||
graph.add_slot_edge("C", 0, "D", 0).unwrap();
|
||||
|
||||
fn input_nodes(name: &'static str, graph: &RenderGraph) -> HashSet<NodeId> {
|
||||
graph
|
||||
.iter_node_inputs(name)
|
||||
.unwrap()
|
||||
.map(|(_edge, node)| node.id)
|
||||
.collect::<HashSet<NodeId>>()
|
||||
}
|
||||
|
||||
fn output_nodes(name: &'static str, graph: &RenderGraph) -> HashSet<NodeId> {
|
||||
graph
|
||||
.iter_node_outputs(name)
|
||||
.unwrap()
|
||||
.map(|(_edge, node)| node.id)
|
||||
.collect::<HashSet<NodeId>>()
|
||||
}
|
||||
|
||||
assert!(input_nodes("A", &graph).is_empty(), "A has no inputs");
|
||||
assert!(
|
||||
output_nodes("A", &graph) == HashSet::from_iter(vec![c_id]),
|
||||
"A outputs to C"
|
||||
);
|
||||
|
||||
assert!(input_nodes("B", &graph).is_empty(), "B has no inputs");
|
||||
assert!(
|
||||
output_nodes("B", &graph) == HashSet::from_iter(vec![c_id]),
|
||||
"B outputs to C"
|
||||
);
|
||||
|
||||
assert!(
|
||||
input_nodes("C", &graph) == HashSet::from_iter(vec![a_id, b_id]),
|
||||
"A and B input to C"
|
||||
);
|
||||
assert!(
|
||||
output_nodes("C", &graph) == HashSet::from_iter(vec![d_id]),
|
||||
"C outputs to D"
|
||||
);
|
||||
|
||||
assert!(
|
||||
input_nodes("D", &graph) == HashSet::from_iter(vec![c_id]),
|
||||
"C inputs to D"
|
||||
);
|
||||
assert!(output_nodes("D", &graph).is_empty(), "D has no outputs");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_node_typed() {
|
||||
struct MyNode {
|
||||
value: usize,
|
||||
}
|
||||
|
||||
impl Node for MyNode {
|
||||
fn run(
|
||||
&self,
|
||||
_: &mut RenderGraphContext,
|
||||
_: &mut RenderContext,
|
||||
_: &World,
|
||||
) -> Result<(), NodeRunError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
let mut graph = RenderGraph::default();
|
||||
|
||||
graph.add_node("A", MyNode { value: 42 });
|
||||
|
||||
let node: &MyNode = graph.get_node("A").unwrap();
|
||||
assert_eq!(node.value, 42, "node value matches");
|
||||
|
||||
let result: Result<&TestNode, RenderGraphError> = graph.get_node("A");
|
||||
assert_eq!(
|
||||
result.unwrap_err(),
|
||||
RenderGraphError::WrongNodeType,
|
||||
"expect a wrong node type error"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_slot_already_occupied() {
|
||||
let mut graph = RenderGraph::default();
|
||||
|
||||
graph.add_node("A", TestNode::new(0, 1));
|
||||
graph.add_node("B", TestNode::new(0, 1));
|
||||
graph.add_node("C", TestNode::new(1, 1));
|
||||
|
||||
graph.add_slot_edge("A", 0, "C", 0).unwrap();
|
||||
assert_eq!(
|
||||
graph.add_slot_edge("B", 0, "C", 0),
|
||||
Err(RenderGraphError::NodeInputSlotAlreadyOccupied {
|
||||
node: graph.get_node_id("C").unwrap(),
|
||||
input_slot: 0,
|
||||
occupied_by_node: graph.get_node_id("A").unwrap(),
|
||||
}),
|
||||
"Adding to a slot that is already occupied should return an error"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_edge_already_exists() {
|
||||
let mut graph = RenderGraph::default();
|
||||
|
||||
graph.add_node("A", TestNode::new(0, 1));
|
||||
graph.add_node("B", TestNode::new(1, 0));
|
||||
|
||||
graph.add_slot_edge("A", 0, "B", 0).unwrap();
|
||||
assert_eq!(
|
||||
graph.add_slot_edge("A", 0, "B", 0),
|
||||
Err(RenderGraphError::EdgeAlreadyExists(Edge::SlotEdge {
|
||||
output_node: graph.get_node_id("A").unwrap(),
|
||||
output_index: 0,
|
||||
input_node: graph.get_node_id("B").unwrap(),
|
||||
input_index: 0,
|
||||
})),
|
||||
"Adding to a duplicate edge should return an error"
|
||||
);
|
||||
}
|
||||
}
|
||||
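Sub graphs are only touched on in the documentation above, so the following hypothetical sketch shows the intended registration pattern: a sub graph is stored under a name with `add_sub_graph`, and a driver node queues it from its `run` method. The names "shadow_graph", "clear" and "driver" are illustrative, not part of this commit.

use maplibre::render::graph::{
    EmptyNode, Node, NodeRunError, RenderContext, RenderGraph, RenderGraphContext,
};
use maplibre::render::RenderState;

struct ShadowDriverNode;

impl Node for ShadowDriverNode {
    fn run(
        &self,
        graph: &mut RenderGraphContext,
        _render_context: &mut RenderContext,
        _state: &RenderState,
    ) -> Result<(), NodeRunError> {
        // Queue the sub graph; the graph runner executes it right after this node.
        graph.run_sub_graph("shadow_graph", vec![])?;
        Ok(())
    }
}

fn build() -> RenderGraph {
    // The sub graph has no input node here; `set_input` would add one.
    let mut shadow_graph = RenderGraph::default();
    shadow_graph.add_node("clear", EmptyNode);

    let mut graph = RenderGraph::default();
    graph.add_sub_graph("shadow_graph", shadow_graph);
    graph.add_node("driver", ShadowDriverNode);
    graph
}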
46
maplibre/src/render/graph/mod.rs
Normal file
@ -0,0 +1,46 @@
|
||||
mod context;
|
||||
mod edge;
|
||||
mod graph;
|
||||
mod node;
|
||||
mod node_slot;
|
||||
|
||||
pub use context::*;
|
||||
pub use edge::*;
|
||||
pub use graph::*;
|
||||
pub use node::*;
|
||||
pub use node_slot::*;
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Debug, Eq, PartialEq)]
|
||||
pub enum RenderGraphError {
|
||||
#[error("node does not exist")]
|
||||
InvalidNode(NodeLabel),
|
||||
#[error("output node slot does not exist")]
|
||||
InvalidOutputNodeSlot(SlotLabel),
|
||||
#[error("input node slot does not exist")]
|
||||
InvalidInputNodeSlot(SlotLabel),
|
||||
#[error("node does not match the given type")]
|
||||
WrongNodeType,
|
||||
#[error("attempted to connect a node output slot to an incompatible input node slot")]
|
||||
MismatchedNodeSlots {
|
||||
output_node: NodeId,
|
||||
output_slot: usize,
|
||||
input_node: NodeId,
|
||||
input_slot: usize,
|
||||
},
|
||||
#[error("attempted to add an edge that already exists")]
|
||||
EdgeAlreadyExists(Edge),
|
||||
#[error("attempted to remove an edge that does not exist")]
|
||||
EdgeDoesNotExist(Edge),
|
||||
#[error("node has an unconnected input slot")]
|
||||
UnconnectedNodeInputSlot { node: NodeId, input_slot: usize },
|
||||
#[error("node has an unconnected output slot")]
|
||||
UnconnectedNodeOutputSlot { node: NodeId, output_slot: usize },
|
||||
#[error("node input slot already occupied")]
|
||||
NodeInputSlotAlreadyOccupied {
|
||||
node: NodeId,
|
||||
input_slot: usize,
|
||||
occupied_by_node: NodeId,
|
||||
},
|
||||
}
|
||||
334
maplibre/src/render/graph/node.rs
Normal file
@ -0,0 +1,334 @@
|
||||
use super::{
|
||||
Edge, InputSlotError, OutputSlotError, RenderGraphContext, RenderGraphError, RunSubGraphError,
|
||||
SlotInfo, SlotInfos,
|
||||
};
|
||||
use crate::render::RenderState;
|
||||
use downcast_rs::{impl_downcast, Downcast};
|
||||
use std::{borrow::Cow, fmt::Debug};
|
||||
use thiserror::Error;
|
||||
|
||||
/// The context with all information required to interact with the GPU.
|
||||
///
|
||||
/// The [`Device`] is used to create render resources and the
|
||||
/// [`CommandEncoder`] is used to record a series of GPU operations.
|
||||
pub struct RenderContext<'d> {
|
||||
pub device: &'d wgpu::Device,
|
||||
pub command_encoder: wgpu::CommandEncoder,
|
||||
}
|
||||
|
||||
/// A [`Node`] identifier.
|
||||
/// Ids are assigned sequentially by the [`RenderGraph`](super::RenderGraph) when a node is added.
|
||||
///
|
||||
/// This id is used to reference the node internally (edges, etc).
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
pub struct NodeId(usize);
|
||||
|
||||
impl NodeId {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new(id: usize) -> Self {
|
||||
NodeId(id)
|
||||
}
|
||||
}
|
||||
|
||||
/// A render node that can be added to a [`RenderGraph`](super::RenderGraph).
|
||||
///
|
||||
/// Nodes are the fundamental part of the graph and are used to extend its functionality by
|
||||
/// generating draw calls and/or running subgraphs.
|
||||
/// They are added via the `render_graph::add_node(my_node)` method.
|
||||
///
|
||||
/// To determine their position in the graph and ensure that all required dependencies (inputs)
|
||||
/// are already executed, [`Edges`](Edge) are used.
|
||||
///
|
||||
/// A node can produce outputs used as dependencies by other nodes.
|
||||
/// Those inputs and outputs are called slots and are the default way of passing render data
|
||||
/// inside the graph. For more information see [`SlotType`](super::SlotType).
|
||||
pub trait Node: Downcast + Send + Sync + 'static {
|
||||
/// Specifies the required input slots for this node.
|
||||
/// They will then be available during the run method inside the [`RenderGraphContext`].
|
||||
fn input(&self) -> Vec<SlotInfo> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
/// Specifies the produced output slots for this node.
|
||||
/// They can then be passed on inside the [`RenderGraphContext`] during the run method.
|
||||
fn output(&self) -> Vec<SlotInfo> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
/// Updates internal node state using the current [`RenderState`] prior to the run method.
|
||||
fn update(&mut self, _state: &mut RenderState) {}
|
||||
|
||||
/// Runs the graph node logic, issues draw calls, updates the output slots and
|
||||
/// optionally queues up subgraphs for execution. The graph data, input and output values are
|
||||
/// passed via the [`RenderGraphContext`].
|
||||
fn run(
|
||||
&self,
|
||||
graph: &mut RenderGraphContext,
|
||||
render_context: &mut RenderContext,
|
||||
state: &RenderState,
|
||||
) -> Result<(), NodeRunError>;
|
||||
}
|
||||
|
||||
impl_downcast!(Node);
|
||||
|
||||
#[derive(Error, Debug, Eq, PartialEq)]
|
||||
pub enum NodeRunError {
|
||||
#[error("encountered an input slot error")]
|
||||
InputSlotError(#[from] InputSlotError),
|
||||
#[error("encountered an output slot error")]
|
||||
OutputSlotError(#[from] OutputSlotError),
|
||||
#[error("encountered an error when running a sub-graph")]
|
||||
RunSubGraphError(#[from] RunSubGraphError),
|
||||
}
|
||||
|
||||
/// A collection of input and output [`Edges`](Edge) for a [`Node`].
|
||||
#[derive(Debug)]
|
||||
pub struct Edges {
|
||||
id: NodeId,
|
||||
input_edges: Vec<Edge>,
|
||||
output_edges: Vec<Edge>,
|
||||
}
|
||||
|
||||
impl Edges {
|
||||
/// Returns all "input edges" (edges going "in") for this node .
|
||||
#[inline]
|
||||
pub fn input_edges(&self) -> &[Edge] {
|
||||
&self.input_edges
|
||||
}
|
||||
|
||||
/// Returns all "output edges" (edges going "out") for this node .
|
||||
#[inline]
|
||||
pub fn output_edges(&self) -> &[Edge] {
|
||||
&self.output_edges
|
||||
}
|
||||
|
||||
/// Returns this node's id.
|
||||
#[inline]
|
||||
pub fn id(&self) -> NodeId {
|
||||
self.id
|
||||
}
|
||||
|
||||
/// Adds an edge to the `input_edges` if it does not already exist.
|
||||
pub(crate) fn add_input_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> {
|
||||
if self.has_input_edge(&edge) {
|
||||
return Err(RenderGraphError::EdgeAlreadyExists(edge));
|
||||
}
|
||||
self.input_edges.push(edge);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes an edge from the `input_edges` if it exists.
|
||||
pub(crate) fn remove_input_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> {
|
||||
if let Some((index, _)) = self
|
||||
.input_edges
|
||||
.iter()
|
||||
.enumerate()
|
||||
.find(|(_i, e)| **e == edge)
|
||||
{
|
||||
self.input_edges.swap_remove(index);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(RenderGraphError::EdgeDoesNotExist(edge))
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds an edge to the `output_edges` if it does not already exist.
|
||||
pub(crate) fn add_output_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> {
|
||||
if self.has_output_edge(&edge) {
|
||||
return Err(RenderGraphError::EdgeAlreadyExists(edge));
|
||||
}
|
||||
self.output_edges.push(edge);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes an edge from the `output_edges` if it exists.
|
||||
pub(crate) fn remove_output_edge(&mut self, edge: Edge) -> Result<(), RenderGraphError> {
|
||||
if let Some((index, _)) = self
|
||||
.output_edges
|
||||
.iter()
|
||||
.enumerate()
|
||||
.find(|(_i, e)| **e == edge)
|
||||
{
|
||||
self.output_edges.swap_remove(index);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(RenderGraphError::EdgeDoesNotExist(edge))
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks whether the input edge already exists.
|
||||
pub fn has_input_edge(&self, edge: &Edge) -> bool {
|
||||
self.input_edges.contains(edge)
|
||||
}
|
||||
|
||||
/// Checks whether the output edge already exists.
|
||||
pub fn has_output_edge(&self, edge: &Edge) -> bool {
|
||||
self.output_edges.contains(edge)
|
||||
}
|
||||
|
||||
/// Searches the `input_edges` for an [`Edge::SlotEdge`]
|
||||
/// whose `input_index` matches the `index`.
|
||||
pub fn get_input_slot_edge(&self, index: usize) -> Result<&Edge, RenderGraphError> {
|
||||
self.input_edges
|
||||
.iter()
|
||||
.find(|e| {
|
||||
if let Edge::SlotEdge { input_index, .. } = e {
|
||||
*input_index == index
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.ok_or(RenderGraphError::UnconnectedNodeInputSlot {
|
||||
input_slot: index,
|
||||
node: self.id,
|
||||
})
|
||||
}
|
||||
|
||||
/// Searches the `output_edges` for an [`Edge::SlotEdge`]
|
||||
/// whose `output_index` matches the `index`.
|
||||
pub fn get_output_slot_edge(&self, index: usize) -> Result<&Edge, RenderGraphError> {
|
||||
self.output_edges
|
||||
.iter()
|
||||
.find(|e| {
|
||||
if let Edge::SlotEdge { output_index, .. } = e {
|
||||
*output_index == index
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.ok_or(RenderGraphError::UnconnectedNodeOutputSlot {
|
||||
output_slot: index,
|
||||
node: self.id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// The internal representation of a [`Node`], with all data required
|
||||
/// by the [`RenderGraph`](super::RenderGraph).
|
||||
///
|
||||
/// The `input_slots` and `output_slots` are provided by the `node`.
|
||||
pub struct NodeState {
|
||||
pub id: NodeId,
|
||||
pub name: Option<Cow<'static, str>>,
|
||||
/// The name of the type that implements [`Node`].
|
||||
pub type_name: &'static str,
|
||||
pub node: Box<dyn Node>,
|
||||
pub input_slots: SlotInfos,
|
||||
pub output_slots: SlotInfos,
|
||||
pub edges: Edges,
|
||||
}
|
||||
|
||||
impl Debug for NodeState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
writeln!(f, "{:?} ({:?})", self.id, self.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeState {
|
||||
/// Creates a [`NodeState`] without edges, but the `input_slots` and `output_slots`
|
||||
/// are provided by the `node`.
|
||||
pub fn new<T>(id: NodeId, node: T) -> Self
|
||||
where
|
||||
T: Node,
|
||||
{
|
||||
NodeState {
|
||||
id,
|
||||
name: None,
|
||||
input_slots: node.input().into(),
|
||||
output_slots: node.output().into(),
|
||||
node: Box::new(node),
|
||||
type_name: std::any::type_name::<T>(),
|
||||
edges: Edges {
|
||||
id,
|
||||
input_edges: Vec::new(),
|
||||
output_edges: Vec::new(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the [`Node`].
|
||||
pub fn node<T>(&self) -> Result<&T, RenderGraphError>
|
||||
where
|
||||
T: Node,
|
||||
{
|
||||
self.node
|
||||
.downcast_ref::<T>()
|
||||
.ok_or(RenderGraphError::WrongNodeType)
|
||||
}
|
||||
|
||||
/// Retrieves the [`Node`] mutably.
|
||||
pub fn node_mut<T>(&mut self) -> Result<&mut T, RenderGraphError>
|
||||
where
|
||||
T: Node,
|
||||
{
|
||||
self.node
|
||||
.downcast_mut::<T>()
|
||||
.ok_or(RenderGraphError::WrongNodeType)
|
||||
}
|
||||
|
||||
/// Validates that each input slot corresponds to an input edge.
|
||||
pub fn validate_input_slots(&self) -> Result<(), RenderGraphError> {
|
||||
for i in 0..self.input_slots.len() {
|
||||
self.edges.get_input_slot_edge(i)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validates that each output slot corresponds to an output edge.
|
||||
pub fn validate_output_slots(&self) -> Result<(), RenderGraphError> {
|
||||
for i in 0..self.output_slots.len() {
|
||||
self.edges.get_output_slot_edge(i)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`NodeLabel`] is used to reference a [`NodeState`] by either its name or [`NodeId`]
|
||||
/// inside the [`RenderGraph`](super::RenderGraph).
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub enum NodeLabel {
|
||||
Id(NodeId),
|
||||
Name(Cow<'static, str>),
|
||||
}
|
||||
|
||||
impl From<&NodeLabel> for NodeLabel {
|
||||
fn from(value: &NodeLabel) -> Self {
|
||||
value.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for NodeLabel {
|
||||
fn from(value: String) -> Self {
|
||||
NodeLabel::Name(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&'static str> for NodeLabel {
|
||||
fn from(value: &'static str) -> Self {
|
||||
NodeLabel::Name(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<NodeId> for NodeLabel {
|
||||
fn from(value: NodeId) -> Self {
|
||||
NodeLabel::Id(value)
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Node`] without any inputs, outputs or subgraphs, which does nothing when run.
|
||||
/// Used (as a label) to bundle multiple dependencies into one inside
|
||||
/// the [`RenderGraph`](super::RenderGraph).
|
||||
pub struct EmptyNode;
|
||||
|
||||
impl Node for EmptyNode {
|
||||
fn run(
|
||||
&self,
|
||||
_graph: &mut RenderGraphContext,
|
||||
_render_context: &mut RenderContext,
|
||||
_state: &RenderState,
|
||||
) -> Result<(), NodeRunError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
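Since `Node::run` only takes `&self`, any per-frame mutation has to happen in `Node::update`. A minimal, hypothetical sketch of a node with internal state (the `FrameCounterNode` type is illustrative, not part of this commit):

use maplibre::render::graph::{Node, NodeRunError, RenderContext, RenderGraphContext};
use maplibre::render::RenderState;

#[derive(Default)]
struct FrameCounterNode {
    frame_count: u64,
}

impl Node for FrameCounterNode {
    // `update` runs before the graph is executed and is the only place where a
    // node may mutate its own state.
    fn update(&mut self, _state: &mut RenderState) {
        self.frame_count += 1;
    }

    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        _render_context: &mut RenderContext,
        _state: &RenderState,
    ) -> Result<(), NodeRunError> {
        log::debug!("rendering frame {}", self.frame_count);
        Ok(())
    }
}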
191
maplibre/src/render/graph/node_slot.rs
Normal file
@ -0,0 +1,191 @@
|
||||
use crate::render::resource::TextureView;
|
||||
use std::rc::Rc;
|
||||
use std::{borrow::Cow, fmt};
|
||||
|
||||
/// A value passed between render [`Nodes`](super::Node).
|
||||
/// Corresponds to the [`SlotType`] specified in the [`RenderGraph`](super::RenderGraph).
|
||||
///
|
||||
/// Slots can have three different types of values:
|
||||
/// [`Buffer`], [`TextureView`] and [`Sampler`].
|
||||
///
|
||||
/// These values do not contain the actual render data, but reference-counted handles to it.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum SlotValue {
|
||||
/// A GPU-accessible [`Buffer`].
|
||||
Buffer(Rc<wgpu::Buffer>),
|
||||
/// A [`TextureView`] describes a texture used in a pipeline.
|
||||
TextureView(Rc<TextureView>),
|
||||
/// A texture [`Sampler`] defines how a pipeline will sample from a [`TextureView`].
|
||||
Sampler(Rc<wgpu::Sampler>),
|
||||
}
|
||||
|
||||
impl SlotValue {
|
||||
/// Returns the [`SlotType`] of this value.
|
||||
pub fn slot_type(&self) -> SlotType {
|
||||
match self {
|
||||
SlotValue::Buffer(_) => SlotType::Buffer,
|
||||
SlotValue::TextureView(_) => SlotType::TextureView,
|
||||
SlotValue::Sampler(_) => SlotType::Sampler,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wgpu::Buffer> for SlotValue {
|
||||
fn from(value: wgpu::Buffer) -> Self {
|
||||
SlotValue::Buffer(Rc::new(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TextureView> for SlotValue {
|
||||
fn from(value: TextureView) -> Self {
|
||||
SlotValue::TextureView(Rc::new(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wgpu::Sampler> for SlotValue {
|
||||
fn from(value: wgpu::Sampler) -> Self {
|
||||
SlotValue::Sampler(Rc::new(value))
|
||||
}
|
||||
}
|
||||
|
||||
/// Describes the render resources created (output) or used (input) by
|
||||
/// the render [`Nodes`](super::Node).
|
||||
///
|
||||
/// This should not be confused with [`SlotValue`], which actually contains the passed data.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub enum SlotType {
|
||||
/// A GPU-accessible [`Buffer`].
|
||||
Buffer,
|
||||
/// A [`TextureView`] describes a texture used in a pipeline.
|
||||
TextureView,
|
||||
/// A texture [`Sampler`] defines how a pipeline will sample from a [`TextureView`].
|
||||
Sampler,
|
||||
}
|
||||
|
||||
impl fmt::Display for SlotType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let s = match self {
|
||||
SlotType::Buffer => "Buffer",
|
||||
SlotType::TextureView => "TextureView",
|
||||
SlotType::Sampler => "Sampler",
|
||||
};
|
||||
|
||||
f.write_str(s)
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`SlotLabel`] is used to reference a slot by either its name or index
|
||||
/// inside the [`RenderGraph`](super::RenderGraph).
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub enum SlotLabel {
|
||||
Index(usize),
|
||||
Name(Cow<'static, str>),
|
||||
}
|
||||
|
||||
impl From<&SlotLabel> for SlotLabel {
|
||||
fn from(value: &SlotLabel) -> Self {
|
||||
value.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for SlotLabel {
|
||||
fn from(value: String) -> Self {
|
||||
SlotLabel::Name(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&'static str> for SlotLabel {
|
||||
fn from(value: &'static str) -> Self {
|
||||
SlotLabel::Name(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Cow<'static, str>> for SlotLabel {
|
||||
fn from(value: Cow<'static, str>) -> Self {
|
||||
SlotLabel::Name(value.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<usize> for SlotLabel {
|
||||
fn from(value: usize) -> Self {
|
||||
SlotLabel::Index(value)
|
||||
}
|
||||
}
|
||||
|
||||
/// The internal representation of a slot, which specifies its [`SlotType`] and name.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SlotInfo {
|
||||
pub name: Cow<'static, str>,
|
||||
pub slot_type: SlotType,
|
||||
}
|
||||
|
||||
impl SlotInfo {
|
||||
pub fn new(name: impl Into<Cow<'static, str>>, slot_type: SlotType) -> Self {
|
||||
SlotInfo {
|
||||
name: name.into(),
|
||||
slot_type,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A collection of input or output [`SlotInfos`](SlotInfo) for
|
||||
/// a [`NodeState`](super::NodeState).
|
||||
#[derive(Default, Debug)]
|
||||
pub struct SlotInfos {
|
||||
slots: Vec<SlotInfo>,
|
||||
}
|
||||
|
||||
impl<T: IntoIterator<Item = SlotInfo>> From<T> for SlotInfos {
|
||||
fn from(slots: T) -> Self {
|
||||
SlotInfos {
|
||||
slots: slots.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SlotInfos {
|
||||
/// Returns the count of slots.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.slots.len()
|
||||
}
|
||||
|
||||
/// Returns true if there are no slots.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.slots.is_empty()
|
||||
}
|
||||
|
||||
/// Retrieves the [`SlotInfo`] for the provided label.
|
||||
pub fn get_slot(&self, label: impl Into<SlotLabel>) -> Option<&SlotInfo> {
|
||||
let label = label.into();
|
||||
let index = self.get_slot_index(&label)?;
|
||||
self.slots.get(index)
|
||||
}
|
||||
|
||||
/// Retrieves the [`SlotInfo`] for the provided label mutably.
|
||||
pub fn get_slot_mut(&mut self, label: impl Into<SlotLabel>) -> Option<&mut SlotInfo> {
|
||||
let label = label.into();
|
||||
let index = self.get_slot_index(&label)?;
|
||||
self.slots.get_mut(index)
|
||||
}
|
||||
|
||||
/// Retrieves the index (inside input or output slots) of the slot for the provided label.
|
||||
pub fn get_slot_index(&self, label: impl Into<SlotLabel>) -> Option<usize> {
|
||||
let label = label.into();
|
||||
match label {
|
||||
SlotLabel::Index(index) => Some(index),
|
||||
SlotLabel::Name(ref name) => self
|
||||
.slots
|
||||
.iter()
|
||||
.enumerate()
|
||||
.find(|(_i, s)| s.name == *name)
|
||||
.map(|(i, _s)| i),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an iterator over the slot infos.
|
||||
pub fn iter(&self) -> impl Iterator<Item = &SlotInfo> {
|
||||
self.slots.iter()
|
||||
}
|
||||
}
|
||||
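The label conversions above mean that a slot can be addressed either by its name or by its index; here is a short sketch of how `SlotInfos` resolves both (the slot names are illustrative):

use maplibre::render::graph::{SlotInfo, SlotInfos, SlotType};

fn lookup() {
    let infos: SlotInfos = vec![
        SlotInfo::new("target", SlotType::TextureView),
        SlotInfo::new("globals", SlotType::Buffer),
    ]
    .into();

    // `impl Into<SlotLabel>` accepts both &'static str and usize.
    assert_eq!(infos.get_slot_index("globals"), Some(1));
    assert_eq!(infos.get_slot(1).map(|s| s.slot_type), Some(SlotType::Buffer));
}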
211
maplibre/src/render/graph_runner.rs
Normal file
@ -0,0 +1,211 @@
|
||||
//! Executes a [`RenderGraph`]
|
||||
|
||||
use log::error;
|
||||
use smallvec::smallvec;
|
||||
use smallvec::SmallVec;
|
||||
use std::collections::HashMap;
|
||||
use std::{borrow::Cow, collections::VecDeque};
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::render::graph::{
|
||||
Edge, NodeId, NodeRunError, NodeState, RenderContext, RenderGraph, RenderGraphContext,
|
||||
SlotLabel, SlotType, SlotValue,
|
||||
};
|
||||
use crate::render::RenderState;
|
||||
|
||||
pub(crate) struct RenderGraphRunner;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RenderGraphRunnerError {
|
||||
#[error(transparent)]
|
||||
NodeRunError(#[from] NodeRunError),
|
||||
#[error("node output slot not set (index {slot_index}, name {slot_name})")]
|
||||
EmptyNodeOutputSlot {
|
||||
type_name: &'static str,
|
||||
slot_index: usize,
|
||||
slot_name: Cow<'static, str>,
|
||||
},
|
||||
#[error("graph (name: '{graph_name:?}') could not be run because slot '{slot_name}' at index {slot_index} has no value")]
|
||||
MissingInput {
|
||||
slot_index: usize,
|
||||
slot_name: Cow<'static, str>,
|
||||
graph_name: Option<Cow<'static, str>>,
|
||||
},
|
||||
#[error("attempted to use the wrong type for input slot")]
|
||||
MismatchedInputSlotType {
|
||||
slot_index: usize,
|
||||
label: SlotLabel,
|
||||
expected: SlotType,
|
||||
actual: SlotType,
|
||||
},
|
||||
}
|
||||
|
||||
impl RenderGraphRunner {
|
||||
pub fn run(
|
||||
graph: &RenderGraph,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
state: &RenderState,
|
||||
) -> Result<(), RenderGraphRunnerError> {
|
||||
let command_encoder =
|
||||
device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
|
||||
let mut render_context = RenderContext {
|
||||
device,
|
||||
command_encoder,
|
||||
};
|
||||
|
||||
Self::run_graph(graph, None, &mut render_context, state, &[])?;
|
||||
{
|
||||
#[cfg(feature = "trace")]
|
||||
let _span = tracing::info_span!("submit_graph_commands").entered();
|
||||
queue.submit(vec![render_context.command_encoder.finish()]);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_graph(
|
||||
graph: &RenderGraph,
|
||||
graph_name: Option<Cow<'static, str>>,
|
||||
render_context: &mut RenderContext,
|
||||
state: &RenderState,
|
||||
inputs: &[SlotValue],
|
||||
) -> Result<(), RenderGraphRunnerError> {
|
||||
let mut node_outputs: HashMap<NodeId, SmallVec<[SlotValue; 4]>> = HashMap::default();
|
||||
#[cfg(feature = "trace")]
|
||||
let span = if let Some(name) = &graph_name {
|
||||
tracing::info_span!("run_graph", name = name.as_ref())
|
||||
} else {
|
||||
tracing::info_span!("run_graph", name = "main_graph")
|
||||
};
|
||||
#[cfg(feature = "trace")]
|
||||
let _guard = span.enter();
|
||||
|
||||
// Queue up nodes without inputs, which can be run immediately
|
||||
let mut node_queue: VecDeque<&NodeState> = graph
|
||||
.iter_nodes()
|
||||
.filter(|node| node.input_slots.is_empty())
|
||||
.collect();
|
||||
|
||||
// pass inputs into the graph
|
||||
if let Some(input_node) = graph.input_node() {
|
||||
let mut input_values: SmallVec<[SlotValue; 4]> = SmallVec::new();
|
||||
for (i, input_slot) in input_node.input_slots.iter().enumerate() {
|
||||
if let Some(input_value) = inputs.get(i) {
|
||||
if input_slot.slot_type != input_value.slot_type() {
|
||||
return Err(RenderGraphRunnerError::MismatchedInputSlotType {
|
||||
slot_index: i,
|
||||
actual: input_value.slot_type(),
|
||||
expected: input_slot.slot_type,
|
||||
label: input_slot.name.clone().into(),
|
||||
});
|
||||
} else {
|
||||
input_values.push(input_value.clone());
|
||||
}
|
||||
} else {
|
||||
return Err(RenderGraphRunnerError::MissingInput {
|
||||
slot_index: i,
|
||||
slot_name: input_slot.name.clone(),
|
||||
graph_name: graph_name.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
node_outputs.insert(input_node.id, input_values);
|
||||
|
||||
for (_, node_state) in graph.iter_node_outputs(input_node.id).expect("node exists") {
|
||||
node_queue.push_front(node_state);
|
||||
}
|
||||
}
|
||||
|
||||
'handle_node: while let Some(node_state) = node_queue.pop_back() {
|
||||
// skip nodes that are already processed
|
||||
if node_outputs.contains_key(&node_state.id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut slot_indices_and_inputs: SmallVec<[(usize, SlotValue); 4]> = SmallVec::new();
|
||||
// check if all dependencies have finished running
|
||||
for (edge, input_node) in graph
|
||||
.iter_node_inputs(node_state.id)
|
||||
.expect("node is in graph")
|
||||
{
|
||||
match edge {
|
||||
Edge::SlotEdge {
|
||||
output_index,
|
||||
input_index,
|
||||
..
|
||||
} => {
|
||||
if let Some(outputs) = node_outputs.get(&input_node.id) {
|
||||
slot_indices_and_inputs
|
||||
.push((*input_index, outputs[*output_index].clone()));
|
||||
} else {
|
||||
node_queue.push_front(node_state);
|
||||
continue 'handle_node;
|
||||
}
|
||||
}
|
||||
Edge::NodeEdge { .. } => {
|
||||
if !node_outputs.contains_key(&input_node.id) {
|
||||
node_queue.push_front(node_state);
|
||||
continue 'handle_node;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// construct final sorted input list
|
||||
slot_indices_and_inputs.sort_by_key(|(index, _)| *index);
|
||||
let inputs: SmallVec<[SlotValue; 4]> = slot_indices_and_inputs
|
||||
.into_iter()
|
||||
.map(|(_, value)| value)
|
||||
.collect();
|
||||
|
||||
assert_eq!(inputs.len(), node_state.input_slots.len());
|
||||
|
||||
let mut outputs: SmallVec<[Option<SlotValue>; 4]> =
|
||||
smallvec![None; node_state.output_slots.len()];
|
||||
{
|
||||
let mut context = RenderGraphContext::new(graph, node_state, &inputs, &mut outputs);
|
||||
{
|
||||
#[cfg(feature = "trace")]
|
||||
let _span = tracing::info_span!("node", name = node_state.type_name).entered();
|
||||
|
||||
node_state.node.run(&mut context, render_context, state)?;
|
||||
}
|
||||
|
||||
for run_sub_graph in context.finish() {
|
||||
let sub_graph = graph
|
||||
.get_sub_graph(&run_sub_graph.name)
|
||||
.expect("sub graph exists because it was validated when queued.");
|
||||
Self::run_graph(
|
||||
sub_graph,
|
||||
Some(run_sub_graph.name),
|
||||
render_context,
|
||||
state,
|
||||
&run_sub_graph.inputs,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut values: SmallVec<[SlotValue; 4]> = SmallVec::new();
|
||||
for (i, output) in outputs.into_iter().enumerate() {
|
||||
if let Some(value) = output {
|
||||
values.push(value);
|
||||
} else {
|
||||
let empty_slot = node_state.output_slots.get_slot(i).unwrap();
|
||||
return Err(RenderGraphRunnerError::EmptyNodeOutputSlot {
|
||||
type_name: node_state.type_name,
|
||||
slot_index: i,
|
||||
slot_name: empty_slot.name.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
node_outputs.insert(node_state.id, values);
|
||||
|
||||
for (_, node_state) in graph.iter_node_outputs(node_state.id).expect("node exists") {
|
||||
node_queue.push_front(node_state);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
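For context, the runner is meant to be invoked once per frame from the render stages, which are not part of this excerpt. A hedged sketch of such a call site, assuming code living inside the `render` module (since `RenderGraphRunner` is crate-private):

use crate::render::graph::RenderGraph;
use crate::render::graph_runner::{RenderGraphRunner, RenderGraphRunnerError};
use crate::render::Renderer;

fn render_frame(graph: &RenderGraph, renderer: &Renderer) -> Result<(), RenderGraphRunnerError> {
    // A single command encoder is created, every node records into it, and the
    // finished command buffer is submitted to the queue at the end of `run`.
    RenderGraphRunner::run(graph, &renderer.device, &renderer.queue, &renderer.state)
}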
116
maplibre/src/render/main_pass.rs
Normal file
@ -0,0 +1,116 @@
|
||||
//! The main render pass for this application.
|
||||
//!
|
||||
//! Right now there is only one render pass. A use case for multiple render passes would be
|
||||
//! [shadows](https://www.raywenderlich.com/books/metal-by-tutorials/v2.0/chapters/14-multipass-deferred-rendering).
|
||||
|
||||
use crate::render::graph::{Node, NodeRunError, RenderContext, RenderGraphContext, SlotInfo};
|
||||
use crate::render::render_commands::{DrawMasks, DrawTiles};
|
||||
use crate::render::render_phase::{PhaseItem, RenderCommand};
|
||||
use crate::render::resource::TrackedRenderPass;
|
||||
use crate::render::stages::draw_graph;
|
||||
use crate::render::util::FloatOrd;
|
||||
use crate::render::Eventually::Initialized;
|
||||
use crate::render::RenderState;
|
||||
use std::ops::{Deref, Range};
|
||||
|
||||
pub struct MainPassNode {}
|
||||
|
||||
impl MainPassNode {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for MainPassNode {
|
||||
fn input(&self) -> Vec<SlotInfo> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
fn update(&mut self, _state: &mut RenderState) {}
|
||||
|
||||
fn run(
|
||||
&self,
|
||||
_graph: &mut RenderGraphContext,
|
||||
render_context: &mut RenderContext,
|
||||
state: &RenderState,
|
||||
) -> Result<(), NodeRunError> {
|
||||
let (render_target, multisampling_texture, depth_texture) = if let (
|
||||
Initialized(render_target),
|
||||
Initialized(multisampling_texture),
|
||||
Initialized(depth_texture),
|
||||
) = (
|
||||
&state.render_target,
|
||||
&state.multisampling_texture,
|
||||
&state.depth_texture,
|
||||
) {
|
||||
(render_target, multisampling_texture, depth_texture)
|
||||
} else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let color_attachment = if let Some(texture) = multisampling_texture {
|
||||
wgpu::RenderPassColorAttachment {
|
||||
view: &texture.view,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
|
||||
store: true,
|
||||
},
|
||||
resolve_target: Some(render_target.deref()),
|
||||
}
|
||||
} else {
|
||||
wgpu::RenderPassColorAttachment {
|
||||
view: render_target.deref(),
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
|
||||
store: true,
|
||||
},
|
||||
resolve_target: None,
|
||||
}
|
||||
};
|
||||
|
||||
let render_pass =
|
||||
render_context
|
||||
.command_encoder
|
||||
.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: None,
|
||||
color_attachments: &[color_attachment],
|
||||
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
|
||||
view: &depth_texture.view,
|
||||
depth_ops: Some(wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(0.0),
|
||||
store: true,
|
||||
}),
|
||||
stencil_ops: Some(wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(0),
|
||||
store: true,
|
||||
}),
|
||||
}),
|
||||
});
|
||||
|
||||
let mut tracked_pass = TrackedRenderPass::new(render_pass);
|
||||
|
||||
for item in &state.mask_phase.items {
|
||||
DrawMasks::render(state, item, &mut tracked_pass);
|
||||
}
|
||||
|
||||
for item in &state.tile_phase.items {
|
||||
DrawTiles::render(state, item, &mut tracked_pass);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MainPassDriverNode;
|
||||
|
||||
impl Node for MainPassDriverNode {
|
||||
fn run(
|
||||
&self,
|
||||
graph: &mut RenderGraphContext,
|
||||
_render_context: &mut RenderContext,
|
||||
_state: &RenderState,
|
||||
) -> Result<(), NodeRunError> {
|
||||
graph.run_sub_graph(draw_graph::NAME, vec![])?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
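How these two nodes fit together is decided in the `stages` module, which is not shown in this diff. The sketch below reconstructs the likely wiring under that assumption; the node names "main_pass" and "main_pass_driver" are illustrative.

use crate::render::graph::RenderGraph;
use crate::render::main_pass::{MainPassDriverNode, MainPassNode};
use crate::render::stages::draw_graph;

fn build_render_graph() -> RenderGraph {
    // The draw sub graph owns the actual main pass.
    let mut draw = RenderGraph::default();
    draw.add_node("main_pass", MainPassNode::new());

    // The top-level graph only contains the driver, which queues the sub graph
    // via `run_sub_graph(draw_graph::NAME, ..)`.
    let mut graph = RenderGraph::default();
    graph.add_sub_graph(draw_graph::NAME, draw);
    graph.add_node("main_pass_driver", MainPassDriverNode);
    graph
}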
@ -1,15 +1,367 @@
|
||||
//! This module implements the rendering algorithm of maplibre-rs. It manages all of the
|
||||
//! communication with the GPU.
|
||||
//!
|
||||
//! The renderer in this module is largely based on the
|
||||
//! [bevy_render](https://github.com/bevyengine/bevy/tree/aced6a/crates/bevy_render)
|
||||
//! crate with commit `aced6a`.
|
||||
//! It is dual-licensed under MIT and Apache:
|
||||
//!
|
||||
//! ```text
|
||||
//! Bevy is dual-licensed under either
|
||||
//!
|
||||
//! * MIT License (docs/LICENSE-MIT or http://opensource.org/licenses/MIT)
|
||||
//! * Apache License, Version 2.0 (docs/LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
//!
|
||||
//! at your option.
|
||||
//! ```
|
||||
//!
|
||||
//! We appreciate the design and implementation work which has gone into it.
|
||||
//!
|
||||
|
||||
mod buffer_pool;
mod options;
mod piplines;
use crate::render::render_phase::RenderPhase;
use crate::render::resource::{BufferPool, Globals, IndexEntry};
use crate::render::resource::{Head, Surface};
use crate::render::resource::{Texture, TextureView};
use crate::render::settings::{RendererSettings, SurfaceType, WgpuSettings};
use crate::render::shaders::{ShaderFeatureStyle, ShaderLayerMetadata};
use crate::render::tile_view_pattern::{TileInView, TileShape, TileViewPattern};
use crate::render::util::Eventually;
use crate::tessellation::IndexDataType;
use crate::MapWindow;
use log::info;

// Rendering internals
mod graph;
mod graph_runner;
mod main_pass;
mod render_commands;
mod render_phase;
mod resource;
mod shaders;
mod texture;
mod stages;
mod tile_pipeline;
mod tile_view_pattern;
mod util;

// Public API
pub mod camera;
pub mod render_state;
pub mod settings;

// These are created during tessellation and must be public
pub use shaders::ShaderVertex;
pub use stages::register_render_stages;

pub const INDEX_FORMAT: wgpu::IndexFormat = wgpu::IndexFormat::Uint32; // Must match IndexDataType
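
The comment above states an invariant that is easy to break silently when `IndexDataType` changes. A small compile-time guard, shown here only as a sketch and not part of this commit, would make it explicit:

```rust
// Sketch: with IndexFormat::Uint32, the index type produced by tessellation
// must be exactly 4 bytes wide.
const _: () = assert!(std::mem::size_of::<IndexDataType>() == 4);
```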
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RenderState {
|
||||
render_target: Eventually<TextureView>,
|
||||
|
||||
buffer_pool: Eventually<
|
||||
BufferPool<
|
||||
wgpu::Queue,
|
||||
wgpu::Buffer,
|
||||
ShaderVertex,
|
||||
IndexDataType,
|
||||
ShaderLayerMetadata,
|
||||
ShaderFeatureStyle,
|
||||
>,
|
||||
>,
|
||||
tile_view_pattern: Eventually<TileViewPattern<wgpu::Queue, wgpu::Buffer>>,
|
||||
|
||||
tile_pipeline: Eventually<wgpu::RenderPipeline>,
|
||||
mask_pipeline: Eventually<wgpu::RenderPipeline>,
|
||||
|
||||
globals_bind_group: Eventually<Globals>,
|
||||
|
||||
depth_texture: Eventually<Texture>,
|
||||
multisampling_texture: Eventually<Option<Texture>>,
|
||||
|
||||
mask_phase: RenderPhase<TileInView>,
|
||||
tile_phase: RenderPhase<(IndexEntry, TileShape)>,
|
||||
}
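
Most of these fields use the crate's `Eventually` wrapper from `render::util`, which lets `RenderState` derive `Default` and defer GPU resource creation until a device and surface exist. A minimal sketch of such a wrapper follows, assuming a plain two-variant enum; the real type may carry additional helpers.

```rust
// Minimal lazy-initialization wrapper in the spirit of `render::util::Eventually`;
// the render nodes and commands pattern-match on `Initialized(..)` and skip work
// while a resource is still `Uninitialized`.
pub enum Eventually<T> {
    Uninitialized,
    Initialized(T),
}

impl<T> Default for Eventually<T> {
    fn default() -> Self {
        Eventually::Uninitialized
    }
}

impl<T> Eventually<T> {
    /// Creates the value on first use and leaves it untouched afterwards.
    pub fn initialize(&mut self, f: impl FnOnce() -> T) {
        if let Eventually::Uninitialized = self {
            *self = Eventually::Initialized(f());
        }
    }
}
```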
|
||||
|
||||
pub struct Renderer {
|
||||
pub instance: wgpu::Instance,
|
||||
pub device: wgpu::Device,
|
||||
pub queue: wgpu::Queue,
|
||||
pub adapter_info: wgpu::AdapterInfo,
|
||||
|
||||
pub wgpu_settings: WgpuSettings,
|
||||
pub settings: RendererSettings,
|
||||
|
||||
pub state: RenderState,
|
||||
pub surface: Surface,
|
||||
}
|
||||
|
||||
impl Renderer {
|
||||
/// Initializes the renderer by retrieving and preparing the GPU instance, device and queue
|
||||
/// for the specified backend.
|
||||
pub async fn initialize<MW>(
|
||||
window: &MW,
|
||||
wgpu_settings: WgpuSettings,
|
||||
settings: RendererSettings,
|
||||
) -> Result<Self, wgpu::RequestDeviceError>
|
||||
where
|
||||
MW: MapWindow,
|
||||
{
|
||||
let instance = wgpu::Instance::new(wgpu_settings.backends.unwrap_or(wgpu::Backends::all()));
|
||||
|
||||
let maybe_surface = match &settings.surface_type {
|
||||
SurfaceType::Headless => None,
|
||||
SurfaceType::Headed => Some(Surface::from_window(&instance, window, &settings)),
|
||||
};
|
||||
|
||||
let compatible_surface = if let Some(surface) = &maybe_surface {
|
||||
match &surface.head() {
|
||||
Head::Headed(window_head) => Some(window_head.surface()),
|
||||
Head::Headless(_) => None,
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (device, queue, adapter_info) = Self::request_device(
|
||||
&instance,
|
||||
&wgpu_settings,
|
||||
&wgpu::RequestAdapterOptions {
|
||||
power_preference: wgpu_settings.power_preference,
|
||||
force_fallback_adapter: false,
|
||||
compatible_surface,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
let surface = maybe_surface.unwrap_or_else(|| match &settings.surface_type {
|
||||
SurfaceType::Headless => Surface::from_image(&device, window, &settings),
|
||||
SurfaceType::Headed => Surface::from_window(&instance, window, &settings),
|
||||
});
|
||||
|
||||
match surface.head() {
|
||||
Head::Headed(window) => window.configure(&device),
|
||||
Head::Headless(_) => {}
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
instance,
|
||||
device,
|
||||
queue,
|
||||
adapter_info,
|
||||
wgpu_settings,
|
||||
settings,
|
||||
state: Default::default(),
|
||||
surface,
|
||||
})
|
||||
}
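
A hypothetical call site for `initialize`, assuming the window type implements the crate's `MapWindow` trait and that both settings structs provide `Default` implementations (not shown in this diff):

```rust
// Sketch of constructing a Renderer with default settings.
async fn create_renderer<MW: MapWindow>(window: &MW) -> Renderer {
    Renderer::initialize(window, WgpuSettings::default(), RendererSettings::default())
        .await
        .expect("no suitable GPU device found")
}
```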
|
||||
|
||||
pub fn resize(&mut self, width: u32, height: u32) {
|
||||
self.surface.resize(width, height)
|
||||
}
|
||||
|
||||
/// Requests a device
|
||||
async fn request_device(
|
||||
instance: &wgpu::Instance,
|
||||
settings: &WgpuSettings,
|
||||
request_adapter_options: &wgpu::RequestAdapterOptions<'_>,
|
||||
) -> Result<(wgpu::Device, wgpu::Queue, wgpu::AdapterInfo), wgpu::RequestDeviceError> {
|
||||
let adapter = instance
|
||||
.request_adapter(request_adapter_options)
|
||||
.await
|
||||
.expect("Unable to find a GPU! Make sure you have installed required drivers!");
|
||||
|
||||
let adapter_info = adapter.get_info();
|
||||
info!("{:?}", adapter_info);
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
let trace_path = if settings.record_trace {
|
||||
let path = std::path::Path::new("wgpu_trace");
|
||||
// ignore potential error, wgpu will log it
|
||||
let _ = std::fs::create_dir(path);
|
||||
Some(path)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
let trace_path = None;
|
||||
|
||||
// Maybe get features and limits based on what is supported by the adapter/backend
|
||||
let mut features = wgpu::Features::empty();
|
||||
let mut limits = settings.limits.clone();
|
||||
|
||||
features = adapter.features() | wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
|
||||
if adapter_info.device_type == wgpu::DeviceType::DiscreteGpu {
|
||||
// `MAPPABLE_PRIMARY_BUFFERS` can have a significant, negative performance impact for
|
||||
// discrete GPUs due to having to transfer data across the PCI-E bus and so it
|
||||
// should not be automatically enabled in this case. It is however beneficial for
|
||||
// integrated GPUs.
|
||||
features -= wgpu::Features::MAPPABLE_PRIMARY_BUFFERS;
|
||||
}
|
||||
limits = adapter.limits();
|
||||
|
||||
// Enforce the disabled features
|
||||
if let Some(disabled_features) = settings.disabled_features {
|
||||
features -= disabled_features;
|
||||
}
|
||||
// NOTE: |= is used here to ensure that any explicitly-enabled features are respected.
|
||||
features |= settings.features;
|
||||
|
||||
// Enforce the limit constraints
|
||||
if let Some(constrained_limits) = settings.constrained_limits.as_ref() {
|
||||
// NOTE: Respect the configured limits as an 'upper bound'. This means for 'max' limits, we
|
||||
// take the minimum of the calculated limits according to the adapter/backend and the
|
||||
// specified max_limits. For 'min' limits, take the maximum instead. This is intended to
|
||||
// err on the side of being conservative. We can't claim 'higher' limits that are supported
|
||||
// but we can constrain to 'lower' limits.
|
||||
limits = wgpu::Limits {
|
||||
max_texture_dimension_1d: limits
|
||||
.max_texture_dimension_1d
|
||||
.min(constrained_limits.max_texture_dimension_1d),
|
||||
max_texture_dimension_2d: limits
|
||||
.max_texture_dimension_2d
|
||||
.min(constrained_limits.max_texture_dimension_2d),
|
||||
max_texture_dimension_3d: limits
|
||||
.max_texture_dimension_3d
|
||||
.min(constrained_limits.max_texture_dimension_3d),
|
||||
max_texture_array_layers: limits
|
||||
.max_texture_array_layers
|
||||
.min(constrained_limits.max_texture_array_layers),
|
||||
max_bind_groups: limits
|
||||
.max_bind_groups
|
||||
.min(constrained_limits.max_bind_groups),
|
||||
max_dynamic_uniform_buffers_per_pipeline_layout: limits
|
||||
.max_dynamic_uniform_buffers_per_pipeline_layout
|
||||
.min(constrained_limits.max_dynamic_uniform_buffers_per_pipeline_layout),
|
||||
max_dynamic_storage_buffers_per_pipeline_layout: limits
|
||||
.max_dynamic_storage_buffers_per_pipeline_layout
|
||||
.min(constrained_limits.max_dynamic_storage_buffers_per_pipeline_layout),
|
||||
max_sampled_textures_per_shader_stage: limits
|
||||
.max_sampled_textures_per_shader_stage
|
||||
.min(constrained_limits.max_sampled_textures_per_shader_stage),
|
||||
max_samplers_per_shader_stage: limits
|
||||
.max_samplers_per_shader_stage
|
||||
.min(constrained_limits.max_samplers_per_shader_stage),
|
||||
max_storage_buffers_per_shader_stage: limits
|
||||
.max_storage_buffers_per_shader_stage
|
||||
.min(constrained_limits.max_storage_buffers_per_shader_stage),
|
||||
max_storage_textures_per_shader_stage: limits
|
||||
.max_storage_textures_per_shader_stage
|
||||
.min(constrained_limits.max_storage_textures_per_shader_stage),
|
||||
max_uniform_buffers_per_shader_stage: limits
|
||||
.max_uniform_buffers_per_shader_stage
|
||||
.min(constrained_limits.max_uniform_buffers_per_shader_stage),
|
||||
max_uniform_buffer_binding_size: limits
|
||||
.max_uniform_buffer_binding_size
|
||||
.min(constrained_limits.max_uniform_buffer_binding_size),
|
||||
max_storage_buffer_binding_size: limits
|
||||
.max_storage_buffer_binding_size
|
||||
.min(constrained_limits.max_storage_buffer_binding_size),
|
||||
max_vertex_buffers: limits
|
||||
.max_vertex_buffers
|
||||
.min(constrained_limits.max_vertex_buffers),
|
||||
max_vertex_attributes: limits
|
||||
.max_vertex_attributes
|
||||
.min(constrained_limits.max_vertex_attributes),
|
||||
max_vertex_buffer_array_stride: limits
|
||||
.max_vertex_buffer_array_stride
|
||||
.min(constrained_limits.max_vertex_buffer_array_stride),
|
||||
max_push_constant_size: limits
|
||||
.max_push_constant_size
|
||||
.min(constrained_limits.max_push_constant_size),
|
||||
min_uniform_buffer_offset_alignment: limits
|
||||
.min_uniform_buffer_offset_alignment
|
||||
.max(constrained_limits.min_uniform_buffer_offset_alignment),
|
||||
min_storage_buffer_offset_alignment: limits
|
||||
.min_storage_buffer_offset_alignment
|
||||
.max(constrained_limits.min_storage_buffer_offset_alignment),
|
||||
max_inter_stage_shader_components: limits
|
||||
.max_inter_stage_shader_components
|
||||
.min(constrained_limits.max_inter_stage_shader_components),
|
||||
max_compute_workgroup_storage_size: limits
|
||||
.max_compute_workgroup_storage_size
|
||||
.min(constrained_limits.max_compute_workgroup_storage_size),
|
||||
max_compute_invocations_per_workgroup: limits
|
||||
.max_compute_invocations_per_workgroup
|
||||
.min(constrained_limits.max_compute_invocations_per_workgroup),
|
||||
max_compute_workgroup_size_x: limits
|
||||
.max_compute_workgroup_size_x
|
||||
.min(constrained_limits.max_compute_workgroup_size_x),
|
||||
max_compute_workgroup_size_y: limits
|
||||
.max_compute_workgroup_size_y
|
||||
.min(constrained_limits.max_compute_workgroup_size_y),
|
||||
max_compute_workgroup_size_z: limits
|
||||
.max_compute_workgroup_size_z
|
||||
.min(constrained_limits.max_compute_workgroup_size_z),
|
||||
max_compute_workgroups_per_dimension: limits
|
||||
.max_compute_workgroups_per_dimension
|
||||
.min(constrained_limits.max_compute_workgroups_per_dimension),
|
||||
};
|
||||
}
|
||||
|
||||
let (device, queue) = adapter
|
||||
.request_device(
|
||||
&wgpu::DeviceDescriptor {
|
||||
label: settings.device_label.as_ref().map(|a| a.as_ref()),
|
||||
features,
|
||||
limits,
|
||||
},
|
||||
trace_path,
|
||||
)
|
||||
.await?;
|
||||
Ok((device, queue, adapter_info))
|
||||
}
|
||||
|
||||
pub fn instance(&self) -> &wgpu::Instance {
|
||||
&self.instance
|
||||
}
|
||||
pub fn device(&self) -> &wgpu::Device {
|
||||
&self.device
|
||||
}
|
||||
pub fn queue(&self) -> &wgpu::Queue {
|
||||
&self.queue
|
||||
}
|
||||
pub fn state(&self) -> &RenderState {
|
||||
&self.state
|
||||
}
|
||||
pub fn surface(&self) -> &Surface {
|
||||
&self.surface
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::render::graph::RenderGraph;
|
||||
use crate::render::graph_runner::RenderGraphRunner;
|
||||
use crate::render::pass_pipeline::build_graph;
|
||||
use crate::render::World;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_render() {
|
||||
let graph = build_graph();
|
||||
|
||||
let instance = wgpu::Instance::new(wgpu::Backends::all());
|
||||
|
||||
let adapter = instance
|
||||
.request_adapter(&wgpu::RequestAdapterOptions {
|
||||
power_preference: Default::default(),
|
||||
force_fallback_adapter: false,
|
||||
compatible_surface: None,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (device, queue) = adapter
|
||||
.request_device(
|
||||
&wgpu::DeviceDescriptor {
|
||||
label: None,
|
||||
features: wgpu::Features::default(),
|
||||
limits: wgpu::Limits::default(),
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.ok()
|
||||
.unwrap();
|
||||
|
||||
RenderGraphRunner::run(&graph, &device, &queue, &World {});
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,12 +0,0 @@
|
||||
use wgpu::BufferAddress;
|
||||
|
||||
pub const DEBUG_WIREFRAME: bool = false;
|
||||
pub const DEBUG_STENCIL_PATTERN: bool = false;
|
||||
pub const INDEX_FORMAT: wgpu::IndexFormat = wgpu::IndexFormat::Uint32; // Must match IndexDataType
|
||||
|
||||
pub const VERTEX_BUFFER_SIZE: BufferAddress = 1024 * 1024 * 32;
|
||||
pub const FEATURE_METADATA_BUFFER_SIZE: BufferAddress = 1024 * 1024 * 32;
|
||||
pub const INDICES_BUFFER_SIZE: BufferAddress = 1024 * 1024 * 32;
|
||||
pub const LAYER_METADATA_BUFFER_SIZE: BufferAddress = 1024 * 24;
|
||||
|
||||
pub const TILE_VIEW_BUFFER_SIZE: BufferAddress = 4096;
|
||||
@ -1,81 +0,0 @@
|
||||
use crate::render::options::{DEBUG_STENCIL_PATTERN, DEBUG_WIREFRAME};
|
||||
use wgpu::{FragmentState, PipelineLayout, RenderPipelineDescriptor, VertexState};
|
||||
|
||||
use super::texture::DEPTH_TEXTURE_FORMAT;
|
||||
|
||||
///
|
||||
/// Creates a render pipeline description
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `update_stencil`: Fragments passing through the pipeline will be able to update the stencil
|
||||
/// buffer. This is used for masking
|
||||
///
|
||||
/// returns: RenderPipelineDescriptor
|
||||
pub fn create_map_render_pipeline_description<'a>(
|
||||
pipeline_layout: &'a PipelineLayout,
|
||||
vertex_state: VertexState<'a>,
|
||||
fragment_state: FragmentState<'a>,
|
||||
sample_count: u32,
|
||||
update_stencil: bool,
|
||||
) -> RenderPipelineDescriptor<'a> {
|
||||
let stencil_state = if update_stencil {
|
||||
wgpu::StencilFaceState {
|
||||
compare: wgpu::CompareFunction::Always, // Allow ALL values to update the stencil
|
||||
fail_op: wgpu::StencilOperation::Keep,
|
||||
depth_fail_op: wgpu::StencilOperation::Keep, // This is used when the depth test already failed
|
||||
pass_op: wgpu::StencilOperation::Replace,
|
||||
}
|
||||
} else {
|
||||
wgpu::StencilFaceState {
|
||||
compare: if DEBUG_STENCIL_PATTERN {
|
||||
wgpu::CompareFunction::Always
|
||||
} else {
|
||||
wgpu::CompareFunction::Equal
|
||||
},
|
||||
fail_op: wgpu::StencilOperation::Keep,
|
||||
depth_fail_op: wgpu::StencilOperation::Keep,
|
||||
pass_op: wgpu::StencilOperation::Keep,
|
||||
}
|
||||
};
|
||||
|
||||
wgpu::RenderPipelineDescriptor {
|
||||
label: None,
|
||||
layout: Some(pipeline_layout),
|
||||
vertex: vertex_state,
|
||||
fragment: Some(fragment_state),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
topology: wgpu::PrimitiveTopology::TriangleList,
|
||||
polygon_mode: if update_stencil {
|
||||
wgpu::PolygonMode::Fill
|
||||
} else if DEBUG_WIREFRAME {
|
||||
wgpu::PolygonMode::Line
|
||||
} else {
|
||||
wgpu::PolygonMode::Fill
|
||||
},
|
||||
front_face: wgpu::FrontFace::Ccw,
|
||||
strip_index_format: None,
|
||||
cull_mode: None, // TODO Maps look the same from the bottom and above
|
||||
conservative: false,
|
||||
unclipped_depth: false,
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: DEPTH_TEXTURE_FORMAT,
|
||||
depth_write_enabled: !update_stencil,
|
||||
depth_compare: wgpu::CompareFunction::Greater, // FIXME
|
||||
stencil: wgpu::StencilState {
|
||||
front: stencil_state,
|
||||
back: stencil_state,
|
||||
read_mask: 0xff, // Applied to stencil values being read from the stencil buffer
|
||||
write_mask: 0xff, // Applied to fragment stencil values before being written to the stencil buffer
|
||||
},
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState {
|
||||
count: sample_count,
|
||||
mask: !0,
|
||||
alpha_to_coverage_enabled: false,
|
||||
},
|
||||
multiview: None,
|
||||
}
|
||||
}
|
||||
154
maplibre/src/render/render_commands.rs
Normal file
154
maplibre/src/render/render_commands.rs
Normal file
@ -0,0 +1,154 @@
|
||||
//! Specifies the instructions which are going to be sent to the GPU. Render commands can be concatenated
|
||||
//! into a new render command which executes multiple instruction sets.
|
||||
|
||||
use crate::render::render_phase::{PhaseItem, RenderCommand, RenderCommandResult};
|
||||
use crate::render::resource::{Globals, IndexEntry, TrackedRenderPass};
|
||||
use crate::render::tile_view_pattern::{TileInView, TileShape};
|
||||
use crate::render::util::Eventually::Initialized;
|
||||
use crate::render::INDEX_FORMAT;
|
||||
use crate::RenderState;
|
||||
|
||||
impl PhaseItem for TileInView {
|
||||
type SortKey = ();
|
||||
|
||||
fn sort_key(&self) -> Self::SortKey {}
|
||||
}
|
||||
|
||||
impl PhaseItem for (IndexEntry, TileShape) {
|
||||
type SortKey = u32;
|
||||
|
||||
fn sort_key(&self) -> Self::SortKey {
|
||||
self.0.style_layer.index
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SetViewBindGroup<const I: usize>;
|
||||
impl<const I: usize, P: PhaseItem> RenderCommand<P> for SetViewBindGroup<I> {
|
||||
fn render<'w>(
|
||||
state: &'w RenderState,
|
||||
_item: &P,
|
||||
pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult {
|
||||
if let Initialized(Globals { bind_group, .. }) = &state.globals_bind_group {
|
||||
pass.set_bind_group(0, bind_group, &[]);
|
||||
RenderCommandResult::Success
|
||||
} else {
|
||||
RenderCommandResult::Failure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SetMaskPipeline;
|
||||
impl<P: PhaseItem> RenderCommand<P> for SetMaskPipeline {
|
||||
fn render<'w>(
|
||||
state: &'w RenderState,
|
||||
_item: &P,
|
||||
pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult {
|
||||
if let Initialized(pipeline) = &state.mask_pipeline {
|
||||
pass.set_render_pipeline(pipeline);
|
||||
RenderCommandResult::Success
|
||||
} else {
|
||||
RenderCommandResult::Failure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SetTilePipeline;
|
||||
impl<P: PhaseItem> RenderCommand<P> for SetTilePipeline {
|
||||
fn render<'w>(
|
||||
state: &'w RenderState,
|
||||
_item: &P,
|
||||
pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult {
|
||||
if let Initialized(pipeline) = &state.tile_pipeline {
|
||||
pass.set_render_pipeline(pipeline);
|
||||
RenderCommandResult::Success
|
||||
} else {
|
||||
RenderCommandResult::Failure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DrawMask;
|
||||
impl RenderCommand<TileInView> for DrawMask {
|
||||
fn render<'w>(
|
||||
state: &'w RenderState,
|
||||
TileInView { shape, fallback }: &TileInView,
|
||||
pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult {
|
||||
if let Initialized(tile_view_pattern) = &state.tile_view_pattern {
|
||||
tracing::trace!("Drawing mask {}", &shape.coords);
|
||||
|
||||
let shape_to_render = fallback.as_ref().unwrap_or(shape);
|
||||
|
||||
let reference =
|
||||
tile_view_pattern.stencil_reference_value(&shape_to_render.coords) as u32;
|
||||
|
||||
pass.set_stencil_reference(reference);
|
||||
pass.set_vertex_buffer(
|
||||
0,
|
||||
tile_view_pattern.buffer().slice(shape.buffer_range.clone()),
|
||||
);
|
||||
pass.draw(0..6, 0..1);
|
||||
RenderCommandResult::Success
|
||||
} else {
|
||||
RenderCommandResult::Failure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DrawTile;
|
||||
impl RenderCommand<(IndexEntry, TileShape)> for DrawTile {
|
||||
fn render<'w>(
|
||||
state: &'w RenderState,
|
||||
(entry, shape): &(IndexEntry, TileShape),
|
||||
pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult {
|
||||
if let (Initialized(buffer_pool), Initialized(tile_view_pattern)) =
|
||||
(&state.buffer_pool, &state.tile_view_pattern)
|
||||
{
|
||||
let reference = tile_view_pattern.stencil_reference_value(&shape.coords) as u32;
|
||||
|
||||
tracing::trace!(
|
||||
"Drawing layer {:?} at {}",
|
||||
entry.style_layer.source_layer,
|
||||
&entry.coords
|
||||
);
|
||||
|
||||
pass.set_stencil_reference(reference);
|
||||
pass.set_index_buffer(
|
||||
buffer_pool.indices().slice(entry.indices_buffer_range()),
|
||||
INDEX_FORMAT,
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
0,
|
||||
buffer_pool.vertices().slice(entry.vertices_buffer_range()),
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
1,
|
||||
tile_view_pattern.buffer().slice(shape.buffer_range.clone()),
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
2,
|
||||
buffer_pool
|
||||
.metadata()
|
||||
.slice(entry.layer_metadata_buffer_range()),
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
3,
|
||||
buffer_pool
|
||||
.feature_metadata()
|
||||
.slice(entry.feature_metadata_buffer_range()),
|
||||
);
|
||||
pass.draw_indexed(entry.indices_range(), 0, 0..1);
|
||||
RenderCommandResult::Success
|
||||
} else {
|
||||
RenderCommandResult::Failure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type DrawTiles = (SetTilePipeline, SetViewBindGroup<0>, DrawTile);
|
||||
|
||||
pub type DrawMasks = (SetMaskPipeline, DrawMask);
|
||||
93
maplibre/src/render/render_phase/draw.rs
Normal file
93
maplibre/src/render/render_phase/draw.rs
Normal file
@ -0,0 +1,93 @@
|
||||
use crate::render::resource::TrackedRenderPass;
|
||||
use crate::RenderState;
|
||||
use std::collections::HashMap;
|
||||
use std::{any::TypeId, fmt::Debug, hash::Hash};
|
||||
|
||||
/// A draw function which is used to draw a specific [`PhaseItem`].
|
||||
///
|
||||
/// They are the general form of drawing items, whereas [`RenderCommands`](RenderCommand)
|
||||
/// are more modular.
|
||||
pub trait Draw<P: PhaseItem>: 'static {
|
||||
/// Draws the [`PhaseItem`] by issuing draw calls via the [`TrackedRenderPass`].
|
||||
fn draw<'w>(&mut self, pass: &mut TrackedRenderPass<'w>, state: &'w RenderState, item: &P);
|
||||
}
|
||||
|
||||
/// An item which will be drawn to the screen. A phase item should be queued up for rendering
|
||||
/// during the [`RenderStageLabel::Queue`](crate::RenderStageLabel::Queue) stage.
|
||||
/// Afterwards it will be sorted and rendered automatically in the
|
||||
/// [`RenderStageLabel::PhaseSort`](crate::RenderStageLabel::PhaseSort) stage and
|
||||
/// [`RenderStageLabel::Render`](crate::RenderStageLabel::Render) stage, respectively.
|
||||
pub trait PhaseItem {
|
||||
/// The type used for ordering the items. The smallest values are drawn first.
|
||||
type SortKey: Ord;
|
||||
/// Determines the order in which the items are drawn during the corresponding [`RenderPhase`](super::RenderPhase).
|
||||
fn sort_key(&self) -> Self::SortKey;
|
||||
}
|
||||
|
||||
/// [`RenderCommand`] is a trait that reads from the [`RenderState`] and produces one or
/// more [`TrackedRenderPass`] calls. Types implementing this trait can be composed (as tuples).
///
/// They become usable as a [`Draw`] function through the blanket implementation below.
///
/// # Example
/// The `DrawTiles` draw function in `render_commands` is created from the following
/// render command tuple. Const generics are used to set specific bind group locations:
///
/// ```ignore
/// pub type DrawTiles = (SetTilePipeline, SetViewBindGroup<0>, DrawTile);
/// ```
|
||||
pub trait RenderCommand<P: PhaseItem> {
|
||||
/// Renders the [`PhaseItem`] by issuing draw calls via the [`TrackedRenderPass`].
|
||||
fn render<'w>(
|
||||
state: &'w RenderState,
|
||||
item: &P,
|
||||
pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult;
|
||||
}
|
||||
|
||||
pub enum RenderCommandResult {
|
||||
Success,
|
||||
Failure,
|
||||
}
|
||||
|
||||
macro_rules! render_command_tuple_impl {
|
||||
($($name: ident),*) => {
|
||||
impl<P: PhaseItem, $($name: RenderCommand<P>),*> RenderCommand<P> for ($($name,)*) {
|
||||
#[allow(non_snake_case)]
|
||||
fn render<'w>(
|
||||
_state: &'w RenderState,
|
||||
_item: &P,
|
||||
_pass: &mut TrackedRenderPass<'w>,
|
||||
) -> RenderCommandResult{
|
||||
$(if let RenderCommandResult::Failure = $name::render(_state, _item, _pass) {
|
||||
return RenderCommandResult::Failure;
|
||||
})*
|
||||
RenderCommandResult::Success
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
render_command_tuple_impl!(C0);
|
||||
render_command_tuple_impl!(C0, C1);
|
||||
render_command_tuple_impl!(C0, C1, C2);
|
||||
render_command_tuple_impl!(C0, C1, C2, C3);
|
||||
render_command_tuple_impl!(C0, C1, C2, C3, C4);
|
||||
|
||||
impl<P, C: 'static> Draw<P> for C
|
||||
where
|
||||
P: PhaseItem,
|
||||
C: RenderCommand<P>,
|
||||
{
|
||||
/// Renders the [`PhaseItem`] by delegating to the wrapped [`RenderCommand`].
|
||||
fn draw<'w>(&mut self, pass: &mut TrackedRenderPass<'w>, state: &'w RenderState, item: &P) {
|
||||
C::render(state, item, pass);
|
||||
}
|
||||
}
|
||||
29
maplibre/src/render/render_phase/mod.rs
Normal file
29
maplibre/src/render/render_phase/mod.rs
Normal file
@ -0,0 +1,29 @@
|
||||
//! Describes the concept of a [`RenderPhase`] and [`PhaseItem`]
|
||||
|
||||
mod draw;
|
||||
|
||||
pub use draw::*;
|
||||
|
||||
/// A resource to collect and sort draw requests for specific [`PhaseItems`](PhaseItem).
|
||||
pub struct RenderPhase<I: PhaseItem> {
|
||||
pub items: Vec<I>,
|
||||
}
|
||||
|
||||
impl<I: PhaseItem> Default for RenderPhase<I> {
|
||||
fn default() -> Self {
|
||||
Self { items: Vec::new() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: PhaseItem> RenderPhase<I> {
|
||||
/// Adds a [`PhaseItem`] to this render phase.
|
||||
#[inline]
|
||||
pub fn add(&mut self, item: I) {
|
||||
self.items.push(item);
|
||||
}
|
||||
|
||||
/// Sorts all of its [`PhaseItems`](PhaseItem).
|
||||
pub fn sort(&mut self) {
|
||||
self.items.sort_by_key(|d| d.sort_key());
|
||||
}
|
||||
}
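
Because `sort()` simply orders the queued items by `PhaseItem::sort_key`, the contract is easy to illustrate with a toy item type (illustration only, not crate code; the real items in this commit are `TileInView` and `(IndexEntry, TileShape)`):

```rust
// Toy PhaseItem: items are drawn in ascending sort_key order after sort().
struct Labeled(u32);

impl PhaseItem for Labeled {
    type SortKey = u32;
    fn sort_key(&self) -> Self::SortKey {
        self.0
    }
}

fn example() {
    let mut phase = RenderPhase::<Labeled>::default();
    phase.add(Labeled(3));
    phase.add(Labeled(1));
    phase.sort(); // the phase now iterates 1, then 3
}
```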
|
||||
@ -1,654 +0,0 @@
|
||||
use std::default::Default;
|
||||
|
||||
use std::{cmp, iter};
|
||||
|
||||
use tracing;
|
||||
use wgpu::{Buffer, Limits, Queue};
|
||||
|
||||
use crate::style::Style;
|
||||
|
||||
use crate::coords::{ViewRegion, Zoom};
|
||||
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::LayerTessellateMessage;
|
||||
use crate::platform::MIN_BUFFER_SIZE;
|
||||
use crate::render::buffer_pool::{BackingBufferDescriptor, BufferPool, IndexEntry};
|
||||
|
||||
use crate::render::camera::{Camera, ViewProjection};
|
||||
use crate::render::options::{
|
||||
DEBUG_WIREFRAME, FEATURE_METADATA_BUFFER_SIZE, INDEX_FORMAT, INDICES_BUFFER_SIZE,
|
||||
LAYER_METADATA_BUFFER_SIZE, TILE_VIEW_BUFFER_SIZE, VERTEX_BUFFER_SIZE,
|
||||
};
|
||||
use crate::render::tile_view_pattern::{TileInView, TileViewPattern};
|
||||
use crate::tessellation::IndexDataType;
|
||||
use crate::util::FPSMeter;
|
||||
use crate::MapWindow;
|
||||
|
||||
use super::piplines::*;
|
||||
use super::shaders;
|
||||
use super::shaders::*;
|
||||
use super::texture::Texture;
|
||||
|
||||
pub struct RenderState {
|
||||
instance: wgpu::Instance,
|
||||
|
||||
device: wgpu::Device,
|
||||
queue: wgpu::Queue,
|
||||
|
||||
fps_meter: FPSMeter,
|
||||
|
||||
surface: wgpu::Surface,
|
||||
surface_config: wgpu::SurfaceConfiguration,
|
||||
suspended: bool,
|
||||
|
||||
render_pipeline: wgpu::RenderPipeline,
|
||||
mask_pipeline: wgpu::RenderPipeline,
|
||||
bind_group: wgpu::BindGroup,
|
||||
|
||||
sample_count: u32,
|
||||
multisampling_texture: Option<Texture>,
|
||||
|
||||
depth_texture: Texture,
|
||||
|
||||
globals_uniform_buffer: wgpu::Buffer,
|
||||
|
||||
buffer_pool: BufferPool<
|
||||
Queue,
|
||||
Buffer,
|
||||
ShaderVertex,
|
||||
IndexDataType,
|
||||
ShaderLayerMetadata,
|
||||
ShaderFeatureStyle,
|
||||
>,
|
||||
|
||||
tile_view_pattern: TileViewPattern<Queue, Buffer>,
|
||||
}
|
||||
|
||||
impl RenderState {
|
||||
pub async fn initialize(
|
||||
instance: wgpu::Instance,
|
||||
surface: wgpu::Surface,
|
||||
surface_config: wgpu::SurfaceConfiguration,
|
||||
) -> Option<Self> {
|
||||
let sample_count = 4;
|
||||
|
||||
let adapter = instance
|
||||
.request_adapter(&wgpu::RequestAdapterOptions {
|
||||
power_preference: wgpu::PowerPreference::LowPower,
|
||||
compatible_surface: Some(&surface),
|
||||
force_fallback_adapter: false,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let limits = if cfg!(feature = "web-webgl") {
|
||||
Limits {
|
||||
max_texture_dimension_2d: 4096,
|
||||
..wgpu::Limits::downlevel_webgl2_defaults()
|
||||
}
|
||||
} else if cfg!(target_os = "android") {
|
||||
Limits {
|
||||
max_storage_textures_per_shader_stage: 4,
|
||||
max_compute_workgroups_per_dimension: 0,
|
||||
max_compute_workgroup_size_z: 0,
|
||||
max_compute_workgroup_size_y: 0,
|
||||
max_compute_workgroup_size_x: 0,
|
||||
max_compute_workgroup_storage_size: 0,
|
||||
max_compute_invocations_per_workgroup: 0,
|
||||
..wgpu::Limits::downlevel_defaults()
|
||||
}
|
||||
} else {
|
||||
Limits {
|
||||
..wgpu::Limits::default()
|
||||
}
|
||||
};
|
||||
|
||||
// create a device and a queue
|
||||
let features = if DEBUG_WIREFRAME {
|
||||
wgpu::Features::default() | wgpu::Features::POLYGON_MODE_LINE
|
||||
} else {
|
||||
wgpu::Features::default()
|
||||
};
|
||||
|
||||
let (device, queue) = adapter
|
||||
.request_device(
|
||||
&wgpu::DeviceDescriptor {
|
||||
label: None,
|
||||
features,
|
||||
limits,
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.ok()?;
|
||||
|
||||
surface.configure(&device, &surface_config);
|
||||
|
||||
let vertex_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: None,
|
||||
size: VERTEX_BUFFER_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let feature_metadata_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: None,
|
||||
size: FEATURE_METADATA_BUFFER_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let indices_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: None,
|
||||
size: INDICES_BUFFER_SIZE,
|
||||
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let tile_view_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: None,
|
||||
size: TILE_VIEW_BUFFER_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let globals_buffer_byte_size =
|
||||
cmp::max(MIN_BUFFER_SIZE, std::mem::size_of::<ShaderGlobals>() as u64);
|
||||
|
||||
let layer_metadata_buffer_size =
|
||||
std::mem::size_of::<ShaderLayerMetadata>() as u64 * LAYER_METADATA_BUFFER_SIZE;
|
||||
let layer_metadata_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Layer Metadata ubo"),
|
||||
size: layer_metadata_buffer_size,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let globals_uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Globals ubo"),
|
||||
size: globals_buffer_byte_size,
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("Bind group layout"),
|
||||
entries: &[wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::VERTEX,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: wgpu::BufferSize::new(globals_buffer_byte_size),
|
||||
},
|
||||
count: None,
|
||||
}],
|
||||
});
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("Bind group"),
|
||||
layout: &bind_group_layout,
|
||||
entries: &[wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(
|
||||
globals_uniform_buffer.as_entire_buffer_binding(),
|
||||
),
|
||||
}],
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
bind_group_layouts: &[&bind_group_layout],
|
||||
push_constant_ranges: &[],
|
||||
label: None,
|
||||
});
|
||||
|
||||
let mut vertex_shader = shaders::tile::VERTEX;
|
||||
let mut fragment_shader = shaders::tile::FRAGMENT;
|
||||
|
||||
let render_pipeline_descriptor = create_map_render_pipeline_description(
|
||||
&pipeline_layout,
|
||||
vertex_shader.create_vertex_state(&device),
|
||||
fragment_shader.create_fragment_state(&device),
|
||||
sample_count,
|
||||
false,
|
||||
);
|
||||
|
||||
let mut vertex_shader = shaders::tile_mask::VERTEX;
|
||||
let mut fragment_shader = shaders::tile_mask::FRAGMENT;
|
||||
|
||||
let mask_pipeline_descriptor = create_map_render_pipeline_description(
|
||||
&pipeline_layout,
|
||||
vertex_shader.create_vertex_state(&device),
|
||||
fragment_shader.create_fragment_state(&device),
|
||||
sample_count,
|
||||
true,
|
||||
);
|
||||
|
||||
let render_pipeline = device.create_render_pipeline(&render_pipeline_descriptor);
|
||||
let mask_pipeline = device.create_render_pipeline(&mask_pipeline_descriptor);
|
||||
|
||||
let depth_texture = Texture::create_depth_texture(&device, &surface_config, sample_count);
|
||||
|
||||
let multisampling_texture = if sample_count > 1 {
|
||||
Some(Texture::create_multisampling_texture(
|
||||
&device,
|
||||
&surface_config,
|
||||
sample_count,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Some(Self {
|
||||
instance,
|
||||
surface,
|
||||
device,
|
||||
queue,
|
||||
surface_config,
|
||||
render_pipeline,
|
||||
mask_pipeline,
|
||||
bind_group,
|
||||
multisampling_texture,
|
||||
depth_texture,
|
||||
sample_count,
|
||||
globals_uniform_buffer,
|
||||
fps_meter: FPSMeter::new(),
|
||||
suspended: false, // Initially rendering is not suspended
|
||||
buffer_pool: BufferPool::new(
|
||||
BackingBufferDescriptor::new(vertex_buffer, VERTEX_BUFFER_SIZE),
|
||||
BackingBufferDescriptor::new(indices_buffer, INDICES_BUFFER_SIZE),
|
||||
BackingBufferDescriptor::new(layer_metadata_buffer, layer_metadata_buffer_size),
|
||||
BackingBufferDescriptor::new(feature_metadata_buffer, FEATURE_METADATA_BUFFER_SIZE),
|
||||
),
|
||||
tile_view_pattern: TileViewPattern::new(BackingBufferDescriptor::new(
|
||||
tile_view_buffer,
|
||||
TILE_VIEW_BUFFER_SIZE,
|
||||
)),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn recreate_surface<W: MapWindow>(&mut self, window: &W) {
|
||||
// We only create a new surface if we are currently suspended. On Android (and probably iOS)
|
||||
// the surface gets invalid after the app has been suspended.
|
||||
if self.suspended {
|
||||
let surface = unsafe { self.instance.create_surface(window.inner()) };
|
||||
surface.configure(&self.device, &self.surface_config);
|
||||
self.surface = surface;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, width: u32, height: u32) {
|
||||
// While the app is suspended we can not re-configure a surface
|
||||
if self.suspended {
|
||||
return;
|
||||
}
|
||||
|
||||
self.surface_config.width = width;
|
||||
self.surface_config.height = height;
|
||||
|
||||
self.surface.configure(&self.device, &self.surface_config);
|
||||
|
||||
// Re-configure depth buffer
|
||||
self.depth_texture =
|
||||
Texture::create_depth_texture(&self.device, &self.surface_config, self.sample_count);
|
||||
|
||||
// Re-configure multi-sampling buffer
|
||||
self.multisampling_texture = if self.sample_count > 1 {
|
||||
Some(Texture::create_multisampling_texture(
|
||||
&self.device,
|
||||
&self.surface_config,
|
||||
self.sample_count,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
}
|
||||
|
||||
pub fn update_globals(&self, view_proj: &ViewProjection, camera: &Camera) {
|
||||
// Update globals
|
||||
self.queue.write_buffer(
|
||||
&self.globals_uniform_buffer,
|
||||
0,
|
||||
bytemuck::cast_slice(&[ShaderGlobals::new(ShaderCamera::new(
|
||||
view_proj.downcast().into(),
|
||||
camera
|
||||
.position
|
||||
.to_homogeneous()
|
||||
.cast::<f32>()
|
||||
.unwrap()
|
||||
.into(),
|
||||
))]),
|
||||
);
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub(crate) fn update_metadata(&mut self) {
|
||||
/*let animated_one = 0.5
|
||||
* (1.0
|
||||
+ ((SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs_f64()
|
||||
* 10.0)
|
||||
.sin()));*/
|
||||
|
||||
// Factor which determines how much we need to adjust the width of lines for example.
|
||||
// If zoom == z -> zoom_factor == 1
|
||||
|
||||
/* for entries in self.buffer_pool.index().iter() {
|
||||
for entry in entries {
|
||||
let world_coords = entry.coords;*/
|
||||
|
||||
// TODO: Update features
|
||||
/*let source_layer = entry.style_layer.source_layer.as_ref().unwrap();
|
||||
|
||||
if let Some(result) = scheduler
|
||||
.get_tile_cache()
|
||||
.iter_tessellated_layers_at(&world_coords)
|
||||
.unwrap()
|
||||
.find(|layer| source_layer.as_str() == layer.layer_name())
|
||||
{
|
||||
let color: Option<Vec4f32> = entry
|
||||
.style_layer
|
||||
.paint
|
||||
.as_ref()
|
||||
.and_then(|paint| paint.get_color())
|
||||
.map(|mut color| {
|
||||
color.color.b = animated_one as f32;
|
||||
color.into()
|
||||
});
|
||||
|
||||
match result {
|
||||
LayerTessellateResult::UnavailableLayer { .. } => {}
|
||||
LayerTessellateResult::TessellatedLayer {
|
||||
layer_data,
|
||||
feature_indices,
|
||||
..
|
||||
} => {
|
||||
|
||||
let feature_metadata = layer_data
|
||||
.features()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(i, _feature)| {
|
||||
iter::repeat(ShaderFeatureStyle {
|
||||
color: color.unwrap(),
|
||||
})
|
||||
.take(feature_indices[i] as usize)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.buffer_pool.update_feature_metadata(
|
||||
&self.queue,
|
||||
entry,
|
||||
&feature_metadata,
|
||||
);
|
||||
}
|
||||
}
|
||||
}*/
|
||||
/* }
|
||||
}*/
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub fn update_tile_view_pattern(
|
||||
&mut self,
|
||||
view_region: &ViewRegion,
|
||||
view_proj: &ViewProjection,
|
||||
zoom: Zoom,
|
||||
) {
|
||||
self.tile_view_pattern
|
||||
.update_pattern(view_region, &self.buffer_pool, zoom);
|
||||
self.tile_view_pattern
|
||||
.upload_pattern(&self.queue, view_proj);
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub fn upload_tile_geometry(
|
||||
&mut self,
|
||||
view_region: &ViewRegion,
|
||||
style: &Style,
|
||||
tile_cache: &TileCache,
|
||||
) {
|
||||
// Upload all tessellated layers which are in view
|
||||
for world_coords in view_region.iter() {
|
||||
let loaded_layers = self
|
||||
.buffer_pool
|
||||
.get_loaded_layers_at(&world_coords)
|
||||
.unwrap_or_default();
|
||||
if let Some(available_layers) = tile_cache
|
||||
.iter_tessellated_layers_at(&world_coords)
|
||||
.map(|layers| {
|
||||
layers
|
||||
.filter(|result| !loaded_layers.contains(&result.layer_name()))
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
{
|
||||
for style_layer in &style.layers {
|
||||
let source_layer = style_layer.source_layer.as_ref().unwrap();
|
||||
|
||||
if let Some(message) = available_layers
|
||||
.iter()
|
||||
.find(|layer| source_layer.as_str() == layer.layer_name())
|
||||
{
|
||||
let color: Option<Vec4f32> = style_layer
|
||||
.paint
|
||||
.as_ref()
|
||||
.and_then(|paint| paint.get_color())
|
||||
.map(|color| color.into());
|
||||
|
||||
match message {
|
||||
LayerTessellateMessage::UnavailableLayer { coords: _, .. } => {
|
||||
/*self.buffer_pool.mark_layer_unavailable(*coords);*/
|
||||
}
|
||||
LayerTessellateMessage::TessellatedLayer {
|
||||
coords,
|
||||
feature_indices,
|
||||
layer_data,
|
||||
buffer,
|
||||
..
|
||||
} => {
|
||||
let allocate_feature_metadata = tracing::span!(
|
||||
tracing::Level::TRACE,
|
||||
"allocate_feature_metadata"
|
||||
);
|
||||
|
||||
let guard = allocate_feature_metadata.enter();
|
||||
let feature_metadata = layer_data
|
||||
.features
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(i, _feature)| {
|
||||
iter::repeat(ShaderFeatureStyle {
|
||||
color: color.unwrap(),
|
||||
})
|
||||
.take(feature_indices[i] as usize)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
drop(guard);
|
||||
|
||||
tracing::trace!("Allocating geometry at {}", &coords);
|
||||
self.buffer_pool.allocate_layer_geometry(
|
||||
&self.queue,
|
||||
*coords,
|
||||
style_layer.clone(),
|
||||
buffer,
|
||||
ShaderLayerMetadata::new(style_layer.index as f32),
|
||||
&feature_metadata,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
|
||||
let render_setup_span = tracing::span!(tracing::Level::TRACE, "render prepare");
|
||||
let _guard = render_setup_span.enter();
|
||||
|
||||
let frame = self.surface.get_current_texture()?;
|
||||
let frame_view = frame
|
||||
.texture
|
||||
.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
|
||||
let mut encoder = self
|
||||
.device
|
||||
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
|
||||
label: Some("Encoder"),
|
||||
});
|
||||
|
||||
drop(_guard);
|
||||
|
||||
{
|
||||
let _span_ = tracing::span!(tracing::Level::TRACE, "render pass").entered();
|
||||
{
|
||||
let color_attachment =
|
||||
if let Some(multisampling_target) = &self.multisampling_texture {
|
||||
wgpu::RenderPassColorAttachment {
|
||||
view: &multisampling_target.view,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
|
||||
store: true,
|
||||
},
|
||||
resolve_target: Some(&frame_view),
|
||||
}
|
||||
} else {
|
||||
wgpu::RenderPassColorAttachment {
|
||||
view: &frame_view,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
|
||||
store: true,
|
||||
},
|
||||
resolve_target: None,
|
||||
}
|
||||
};
|
||||
|
||||
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: None,
|
||||
color_attachments: &[color_attachment],
|
||||
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
|
||||
view: &self.depth_texture.view,
|
||||
depth_ops: Some(wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(0.0),
|
||||
store: true,
|
||||
}),
|
||||
stencil_ops: Some(wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(0),
|
||||
store: true,
|
||||
}),
|
||||
}),
|
||||
});
|
||||
|
||||
pass.set_bind_group(0, &self.bind_group, &[]);
|
||||
|
||||
{
|
||||
let index = self.buffer_pool.index();
|
||||
|
||||
for TileInView { shape, fallback } in self.tile_view_pattern.iter() {
|
||||
let coords = shape.coords;
|
||||
tracing::trace!("Drawing tile at {coords}");
|
||||
|
||||
let shape_to_render = fallback.as_ref().unwrap_or(shape);
|
||||
|
||||
let reference = self
|
||||
.tile_view_pattern
|
||||
.stencil_reference_value(&shape_to_render.coords)
|
||||
as u32;
|
||||
|
||||
// Draw mask
|
||||
{
|
||||
tracing::trace!("Drawing mask {}", &coords);
|
||||
|
||||
pass.set_pipeline(&self.mask_pipeline);
|
||||
pass.set_stencil_reference(reference);
|
||||
pass.set_vertex_buffer(
|
||||
0,
|
||||
self.tile_view_pattern
|
||||
.buffer()
|
||||
.slice(shape.buffer_range.clone()),
|
||||
);
|
||||
pass.draw(0..6, 0..1);
|
||||
}
|
||||
|
||||
if let Some(entries) = index.get_layers(&shape_to_render.coords) {
|
||||
let mut layers_to_render: Vec<&IndexEntry> = Vec::from_iter(entries);
|
||||
layers_to_render.sort_by_key(|entry| entry.style_layer.index);
|
||||
|
||||
for entry in layers_to_render {
|
||||
// Draw tile
|
||||
{
|
||||
tracing::trace!(
|
||||
"Drawing layer {:?} at {}",
|
||||
entry.style_layer.source_layer,
|
||||
&entry.coords
|
||||
);
|
||||
|
||||
pass.set_pipeline(&self.render_pipeline);
|
||||
pass.set_stencil_reference(reference);
|
||||
pass.set_index_buffer(
|
||||
self.buffer_pool
|
||||
.indices()
|
||||
.slice(entry.indices_buffer_range()),
|
||||
INDEX_FORMAT,
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
0,
|
||||
self.buffer_pool
|
||||
.vertices()
|
||||
.slice(entry.vertices_buffer_range()),
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
1,
|
||||
self.tile_view_pattern
|
||||
.buffer()
|
||||
.slice(shape_to_render.buffer_range.clone()),
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
2,
|
||||
self.buffer_pool
|
||||
.metadata()
|
||||
.slice(entry.layer_metadata_buffer_range()),
|
||||
);
|
||||
pass.set_vertex_buffer(
|
||||
3,
|
||||
self.buffer_pool
|
||||
.feature_metadata()
|
||||
.slice(entry.feature_metadata_buffer_range()),
|
||||
);
|
||||
pass.draw_indexed(entry.indices_range(), 0, 0..1);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tracing::trace!("No layers found at {}", &shape_to_render.coords);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let _span = tracing::span!(tracing::Level::TRACE, "render finish").entered();
|
||||
tracing::trace!("Finished drawing");
|
||||
|
||||
self.queue.submit(Some(encoder.finish()));
|
||||
tracing::trace!("Submitted queue");
|
||||
|
||||
frame.present();
|
||||
tracing::trace!("Presented frame");
|
||||
}
|
||||
|
||||
self.fps_meter.update_and_print();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn suspend(&mut self) {
|
||||
self.suspended = true;
|
||||
}
|
||||
|
||||
pub fn resume(&mut self) {
|
||||
self.suspended = false;
|
||||
}
|
||||
}
|
||||
@ -1,15 +1,20 @@
|
||||
//! A ring-buffer-like pool of [buffers](wgpu::Buffer).
|
||||
|
||||
use crate::coords::{Quadkey, WorldTileCoords};
|
||||
use crate::style::layer::StyleLayer;
|
||||
use crate::tessellation::OverAlignedVertexBuffer;
|
||||
use bytemuck::Pod;
|
||||
use std::collections::{btree_map, BTreeMap, HashSet, VecDeque};
|
||||
use std::fmt::Debug;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem::size_of;
|
||||
use std::ops::Range;
|
||||
|
||||
use crate::style::layer::StyleLayer;
|
||||
use wgpu::BufferAddress;
|
||||
pub const VERTEX_SIZE: wgpu::BufferAddress = 1_000_000;
|
||||
pub const INDICES_SIZE: wgpu::BufferAddress = 1_000_000;
|
||||
|
||||
use crate::coords::{Quadkey, WorldTileCoords};
|
||||
|
||||
use crate::tessellation::OverAlignedVertexBuffer;
|
||||
pub const FEATURE_METADATA_SIZE: wgpu::BufferAddress = 1024 * 1000;
|
||||
pub const LAYER_METADATA_SIZE: wgpu::BufferAddress = 1024;
|
||||
|
||||
pub trait Queue<B> {
|
||||
fn write_buffer(&self, buffer: &B, offset: wgpu::BufferAddress, data: &[u8]);
|
||||
@ -24,7 +29,7 @@ impl Queue<wgpu::Buffer> for wgpu::Queue {
|
||||
/// This is inspired by the memory pool in Vulkan documented
|
||||
/// [here](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/custom_memory_pools.html).
|
||||
#[derive(Debug)]
|
||||
pub struct BufferPool<Q, B, V, I, M, FM> {
|
||||
pub struct BufferPool<Q, B, V, I, TM, FM> {
|
||||
vertices: BackingBuffer<B>,
|
||||
indices: BackingBuffer<B>,
|
||||
layer_metadata: BackingBuffer<B>,
|
||||
@ -34,7 +39,7 @@ pub struct BufferPool<Q, B, V, I, M, FM> {
|
||||
phantom_v: PhantomData<V>,
|
||||
phantom_i: PhantomData<I>,
|
||||
phantom_q: PhantomData<Q>,
|
||||
phantom_m: PhantomData<M>,
|
||||
phantom_m: PhantomData<TM>,
|
||||
phantom_fm: PhantomData<FM>,
|
||||
}
|
||||
|
||||
@ -46,9 +51,57 @@ enum BackingBufferType {
|
||||
FeatureMetadata,
|
||||
}
|
||||
|
||||
impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM: bytemuck::Pod>
|
||||
BufferPool<Q, B, V, I, TM, FM>
|
||||
{
|
||||
impl<V: Pod, I: Pod, TM: Pod, FM: Pod> BufferPool<wgpu::Queue, wgpu::Buffer, V, I, TM, FM> {
|
||||
pub fn from_device(device: &wgpu::Device) -> Self {
|
||||
let vertex_buffer_desc = wgpu::BufferDescriptor {
|
||||
label: Some("vertex buffer"),
|
||||
size: size_of::<V>() as wgpu::BufferAddress * VERTEX_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
};
|
||||
|
||||
let indices_buffer_desc = wgpu::BufferDescriptor {
|
||||
label: Some("indices buffer"),
|
||||
size: size_of::<I>() as wgpu::BufferAddress * INDICES_SIZE,
|
||||
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
};
|
||||
|
||||
let feature_metadata_desc = wgpu::BufferDescriptor {
|
||||
label: Some("feature metadata buffer"),
|
||||
size: size_of::<FM>() as wgpu::BufferAddress * FEATURE_METADATA_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
};
|
||||
|
||||
let layer_metadata_desc = wgpu::BufferDescriptor {
|
||||
label: Some("layer metadata buffer"),
|
||||
size: size_of::<TM>() as wgpu::BufferAddress * LAYER_METADATA_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
};
|
||||
|
||||
BufferPool::new(
|
||||
BackingBufferDescriptor::new(
|
||||
device.create_buffer(&vertex_buffer_desc),
|
||||
vertex_buffer_desc.size,
|
||||
),
|
||||
BackingBufferDescriptor::new(
|
||||
device.create_buffer(&indices_buffer_desc),
|
||||
indices_buffer_desc.size,
|
||||
),
|
||||
BackingBufferDescriptor::new(
|
||||
device.create_buffer(&layer_metadata_desc),
|
||||
layer_metadata_desc.size,
|
||||
),
|
||||
BackingBufferDescriptor::new(
|
||||
device.create_buffer(&feature_metadata_desc),
|
||||
feature_metadata_desc.size,
|
||||
),
|
||||
)
|
||||
}
|
||||
}
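
With that constructor, the concrete pool used by `RenderState` (see the `buffer_pool` field in `render/mod.rs`) can be built from a device alone; a short usage sketch:

```rust
// Sketch: instantiate the pool with the same type parameters RenderState uses.
fn create_pool(
    device: &wgpu::Device,
) -> BufferPool<
    wgpu::Queue,
    wgpu::Buffer,
    ShaderVertex,
    IndexDataType,
    ShaderLayerMetadata,
    ShaderFeatureStyle,
> {
    BufferPool::from_device(device)
}
```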
|
||||
impl<Q: Queue<B>, B, V: Pod, I: Pod, TM: Pod, FM: Pod> BufferPool<Q, B, V, I, TM, FM> {
|
||||
pub fn new(
|
||||
vertices: BackingBufferDescriptor<B>,
|
||||
indices: BackingBufferDescriptor<B>,
|
||||
@ -120,7 +173,7 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM:
|
||||
stride: wgpu::BufferAddress,
|
||||
elements: wgpu::BufferAddress,
|
||||
usable_elements: wgpu::BufferAddress,
|
||||
) -> (BufferAddress, BufferAddress) {
|
||||
) -> (wgpu::BufferAddress, wgpu::BufferAddress) {
|
||||
let bytes = elements * stride;
|
||||
|
||||
let usable_bytes = (usable_elements * stride) as wgpu::BufferAddress;
|
||||
@ -164,21 +217,21 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM:
|
||||
|
||||
let (vertices_bytes, aligned_vertices_bytes) = Self::align(
|
||||
vertices_stride,
|
||||
geometry.buffer.vertices.len() as BufferAddress,
|
||||
geometry.buffer.vertices.len() as BufferAddress,
|
||||
geometry.buffer.vertices.len() as wgpu::BufferAddress,
|
||||
geometry.buffer.vertices.len() as wgpu::BufferAddress,
|
||||
);
|
||||
let (indices_bytes, aligned_indices_bytes) = Self::align(
|
||||
indices_stride,
|
||||
geometry.buffer.indices.len() as BufferAddress,
|
||||
geometry.usable_indices as BufferAddress,
|
||||
geometry.buffer.indices.len() as wgpu::BufferAddress,
|
||||
geometry.usable_indices as wgpu::BufferAddress,
|
||||
);
|
||||
let (layer_metadata_bytes, aligned_layer_metadata_bytes) =
|
||||
Self::align(layer_metadata_stride, 1, 1);
|
||||
|
||||
let (feature_metadata_bytes, aligned_feature_metadata_bytes) = Self::align(
|
||||
feature_metadata_stride,
|
||||
feature_metadata.len() as BufferAddress,
|
||||
feature_metadata.len() as BufferAddress,
|
||||
feature_metadata.len() as wgpu::BufferAddress,
|
||||
feature_metadata.len() as wgpu::BufferAddress,
|
||||
);
|
||||
|
||||
if feature_metadata_bytes != aligned_feature_metadata_bytes {
|
||||
@ -255,8 +308,8 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM:
|
||||
|
||||
let (feature_metadata_bytes, aligned_feature_metadata_bytes) = Self::align(
|
||||
feature_metadata_stride,
|
||||
feature_metadata.len() as BufferAddress,
|
||||
feature_metadata.len() as BufferAddress,
|
||||
feature_metadata.len() as wgpu::BufferAddress,
|
||||
feature_metadata.len() as wgpu::BufferAddress,
|
||||
);
|
||||
|
||||
if entry.buffer_feature_metadata.end - entry.buffer_feature_metadata.start
|
||||
@ -321,7 +374,10 @@ impl<B> BackingBuffer<B> {
|
||||
index: &mut RingIndex,
|
||||
) -> Range<wgpu::BufferAddress> {
|
||||
if new_data > self.inner_size {
|
||||
panic!("can not allocate because backing buffers are too small")
|
||||
panic!(
|
||||
"can not allocate because backing buffer {:?} are too small",
|
||||
self.typ
|
||||
)
|
||||
}
|
||||
|
||||
let mut available_gap = self.find_largest_gap(index);
|
||||
@ -380,7 +436,7 @@ impl<B> BackingBuffer<B> {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IndexEntry {
|
||||
pub coords: WorldTileCoords,
|
||||
pub style_layer: StyleLayer,
|
||||
@ -524,7 +580,6 @@ impl RingIndex {
|
||||
mod tests {
|
||||
use crate::style::layer::StyleLayer;
|
||||
use lyon::tessellation::VertexBuffers;
|
||||
use wgpu::BufferAddress;
|
||||
|
||||
use crate::render::buffer_pool::{
|
||||
BackingBufferDescriptor, BackingBufferType, BufferPool, Queue,
|
||||
@ -532,13 +587,13 @@ mod tests {
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestBuffer {
|
||||
size: BufferAddress,
|
||||
size: wgpu::BufferAddress,
|
||||
}
|
||||
struct TestQueue;
|
||||
|
||||
impl Queue<TestBuffer> for TestQueue {
|
||||
fn write_buffer(&self, buffer: &TestBuffer, offset: BufferAddress, data: &[u8]) {
|
||||
if offset + data.len() as BufferAddress > buffer.size {
|
||||
fn write_buffer(&self, buffer: &TestBuffer, offset: wgpu::BufferAddress, data: &[u8]) {
|
||||
if offset + data.len() as wgpu::BufferAddress > buffer.size {
|
||||
panic!("write out of bounds");
|
||||
}
|
||||
}
|
||||
37
maplibre/src/render/resource/globals.rs
Normal file
37
maplibre/src/render/resource/globals.rs
Normal file
@ -0,0 +1,37 @@
|
||||
//! A bind group which binds a buffer with global data like the current camera transformations.
|
||||
|
||||
use crate::platform::MIN_BUFFER_SIZE;
|
||||
use crate::render::shaders::ShaderGlobals;
|
||||
use std::cmp;
|
||||
use std::mem::size_of;
|
||||
|
||||
pub struct Globals {
|
||||
pub uniform_buffer: wgpu::Buffer,
|
||||
pub bind_group: wgpu::BindGroup,
|
||||
}
|
||||
|
||||
impl Globals {
|
||||
pub fn from_device(device: &wgpu::Device, group: &wgpu::BindGroupLayout) -> Self {
|
||||
let globals_buffer_byte_size = cmp::max(MIN_BUFFER_SIZE, size_of::<ShaderGlobals>() as u64);
|
||||
|
||||
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Globals ubo"),
|
||||
size: globals_buffer_byte_size,
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("Bind group"),
|
||||
layout: group,
|
||||
entries: &[wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(uniform_buffer.as_entire_buffer_binding()),
|
||||
}],
|
||||
});
|
||||
Self {
|
||||
uniform_buffer,
|
||||
bind_group,
|
||||
}
|
||||
}
|
||||
}
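
The bind group layout passed to `from_device` has to describe the single uniform binding created above. A hedged usage sketch follows; the layout mirrors the one the removed `render_state.rs` used, but the label and the `min_binding_size` of `None` are placeholder choices.

```rust
// Hypothetical construction of a matching layout: binding 0 is a vertex-stage
// uniform buffer, which is exactly what from_device binds the globals buffer to.
fn create_globals(device: &wgpu::Device) -> Globals {
    let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("Globals bind group layout"),
        entries: &[wgpu::BindGroupLayoutEntry {
            binding: 0,
            visibility: wgpu::ShaderStages::VERTEX,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Uniform,
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        }],
    });
    Globals::from_device(device, &layout)
}
```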
|
||||
18
maplibre/src/render/resource/mod.rs
Normal file
18
maplibre/src/render/resource/mod.rs
Normal file
@ -0,0 +1,18 @@
|
||||
//! Utilities which hold references to GPU-owned objects. Usually a resource is a wrapper which makes using
|
||||
//! buffers or textures simpler.
|
||||
|
||||
mod buffer_pool;
|
||||
mod globals;
|
||||
mod pipeline;
|
||||
mod shader;
|
||||
mod surface;
|
||||
mod texture;
|
||||
mod tracked_render_pass;
|
||||
|
||||
pub use buffer_pool::*;
|
||||
pub use globals::*;
|
||||
pub use pipeline::*;
|
||||
pub use shader::*;
|
||||
pub use surface::*;
|
||||
pub use texture::*;
|
||||
pub use tracked_render_pass::*;
|
||||
89
maplibre/src/render/resource/pipeline.rs
Normal file
89
maplibre/src/render/resource/pipeline.rs
Normal file
@ -0,0 +1,89 @@
|
||||
//! Utility for creating [RenderPipelines](wgpu::RenderPipeline)
|
||||
|
||||
use crate::render::resource::shader::{FragmentState, VertexState};
|
||||
use std::borrow::Cow;
|
||||
|
||||
pub trait RenderPipeline {
|
||||
fn describe_render_pipeline(self) -> RenderPipelineDescriptor;
|
||||
}
|
||||
|
||||
pub struct RenderPipelineDescriptor {
|
||||
/// Debug label of the pipeline. This will show up in graphics debuggers for easy identification.
|
||||
pub label: Option<Cow<'static, str>>,
|
||||
/// The layout of bind groups for this pipeline.
|
||||
pub layout: Option<Vec<Vec<wgpu::BindGroupLayoutEntry>>>,
|
||||
/// The compiled vertex stage, its entry point, and the input buffers layout.
|
||||
pub vertex: VertexState,
|
||||
/// The properties of the pipeline at the primitive assembly and rasterization level.
|
||||
pub primitive: wgpu::PrimitiveState,
|
||||
/// The effect of draw calls on the depth and stencil aspects of the output target, if any.
|
||||
pub depth_stencil: Option<wgpu::DepthStencilState>,
|
||||
/// The multi-sampling properties of the pipeline.
|
||||
pub multisample: wgpu::MultisampleState,
|
||||
/// The compiled fragment stage, its entry point, and the color targets.
|
||||
pub fragment: FragmentState,
|
||||
}
|
||||
|
||||
impl RenderPipelineDescriptor {
|
||||
pub fn initialize(&self, device: &wgpu::Device) -> wgpu::RenderPipeline {
|
||||
let bind_group_layouts = if let Some(layout) = &self.layout {
|
||||
layout
|
||||
.iter()
|
||||
.map(|entries| {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: None,
|
||||
entries: entries.as_ref(),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
bind_group_layouts: &bind_group_layouts.iter().collect::<Vec<_>>(),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let vertex_shader_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
|
||||
label: None,
|
||||
source: wgpu::ShaderSource::Wgsl(self.vertex.source.into()),
|
||||
});
|
||||
let fragment_shader_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
|
||||
label: None,
|
||||
source: wgpu::ShaderSource::Wgsl(self.fragment.source.into()),
|
||||
});
|
||||
|
||||
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: self.label.as_ref().map(|label| label.as_ref()),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &vertex_shader_module,
|
||||
entry_point: self.vertex.entry_point,
|
||||
buffers: self
|
||||
.vertex
|
||||
.buffers
|
||||
.iter()
|
||||
.map(|layout| wgpu::VertexBufferLayout {
|
||||
array_stride: layout.array_stride,
|
||||
step_mode: layout.step_mode,
|
||||
attributes: layout.attributes.as_slice(),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.as_slice(),
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &fragment_shader_module,
|
||||
entry_point: self.fragment.entry_point,
|
||||
targets: self.fragment.targets.as_slice(),
|
||||
}),
|
||||
primitive: self.primitive,
|
||||
depth_stencil: self.depth_stencil.clone(),
|
||||
multisample: self.multisample,
|
||||
|
||||
multiview: None,
|
||||
});
|
||||
|
||||
pipeline
|
||||
}
|
||||
}
|
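A sketch of how the descriptor is intended to be used: a type implements RenderPipeline, returns a RenderPipelineDescriptor, and initialize() turns it into a wgpu pipeline. ExamplePipeline and its field values are illustrative only, not part of this change.

struct ExamplePipeline {
    vertex: VertexState,
    fragment: FragmentState,
}

impl RenderPipeline for ExamplePipeline {
    fn describe_render_pipeline(self) -> RenderPipelineDescriptor {
        RenderPipelineDescriptor {
            label: Some("example pipeline".into()),
            // No bind group layouts in this sketch; a real pipeline would list its
            // wgpu::BindGroupLayoutEntry sets here.
            layout: None,
            vertex: self.vertex,
            fragment: self.fragment,
            primitive: wgpu::PrimitiveState::default(),
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
        }
    }
}

// Usage: ExamplePipeline { vertex, fragment }.describe_render_pipeline().initialize(&device);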
||||
35
maplibre/src/render/resource/shader.rs
Normal file
@ -0,0 +1,35 @@
|
||||
//! Utilities for creating shader states.
|
||||
|
||||
/// Describes how the vertex buffer is interpreted.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct VertexBufferLayout {
|
||||
/// The stride, in bytes, between elements of this buffer.
|
||||
pub array_stride: wgpu::BufferAddress,
|
||||
/// How often this vertex buffer is "stepped" forward.
|
||||
pub step_mode: wgpu::VertexStepMode,
|
||||
/// The list of attributes which comprise a single vertex.
|
||||
pub attributes: Vec<wgpu::VertexAttribute>,
|
||||
}
|
||||
|
||||
/// Describes the fragment process in a render pipeline.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct FragmentState {
|
||||
/// The shader source
|
||||
pub source: &'static str,
|
||||
/// The name of the entry point in the compiled shader. There must be a
|
||||
/// function with this name in the shader.
|
||||
pub entry_point: &'static str,
|
||||
/// The color state of the render targets.
|
||||
pub targets: Vec<wgpu::ColorTargetState>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct VertexState {
|
||||
/// The shader source
|
||||
pub source: &'static str,
|
||||
/// The name of the entry point in the compiled shader. There must be a
|
||||
/// function with this name in the shader.
|
||||
pub entry_point: &'static str,
|
||||
/// The format of any vertex buffers used with this pipeline.
|
||||
pub buffers: Vec<VertexBufferLayout>,
|
||||
}
|
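These structs mirror wgpu::VertexState and wgpu::FragmentState but own their data (Vec instead of borrowed slices), so shader descriptions can be built at runtime and stored without lifetime gymnastics; pipeline.rs converts them into the borrowed wgpu types when the pipeline is created. A hypothetical position-only vertex description, for illustration only:

fn example_vertex_state() -> VertexState {
    VertexState {
        // A real shader would use include_str!("...") here.
        source: "/* WGSL vertex shader source */",
        entry_point: "main",
        buffers: vec![VertexBufferLayout {
            array_stride: 2 * std::mem::size_of::<f32>() as wgpu::BufferAddress,
            step_mode: wgpu::VertexStepMode::Vertex,
            attributes: vec![wgpu::VertexAttribute {
                offset: 0,
                format: wgpu::VertexFormat::Float32x2,
                shader_location: 0,
            }],
        }],
    }
}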
||||
221
maplibre/src/render/resource/surface.rs
Normal file
@ -0,0 +1,221 @@
|
||||
//! Utilities for handling surfaces which can be either headless or headed. A headed surface has
|
||||
//! a handle to a window. A headless surface renders to a texture.
|
||||
|
||||
use crate::render::resource::texture::TextureView;
|
||||
use crate::render::settings::RendererSettings;
|
||||
use crate::render::util::HasChanged;
|
||||
use crate::{MapWindow, WindowSize};
|
||||
use std::mem::size_of;
|
||||
|
||||
struct BufferDimensions {
|
||||
width: usize,
|
||||
height: usize,
|
||||
unpadded_bytes_per_row: usize,
|
||||
padded_bytes_per_row: usize,
|
||||
}
|
||||
|
||||
impl BufferDimensions {
|
||||
fn new(width: usize, height: usize) -> Self {
|
||||
let bytes_per_pixel = size_of::<u32>();
|
||||
let unpadded_bytes_per_row = width * bytes_per_pixel;
|
||||
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
|
||||
let padded_bytes_per_row_padding = (align - unpadded_bytes_per_row % align) % align;
|
||||
let padded_bytes_per_row = unpadded_bytes_per_row + padded_bytes_per_row_padding;
|
||||
Self {
|
||||
width,
|
||||
height,
|
||||
unpadded_bytes_per_row,
|
||||
padded_bytes_per_row,
|
||||
}
|
||||
}
|
||||
}
|
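A sketch of a unit test that pins down the padding arithmetic, assuming it lives in this module next to BufferDimensions: 300 px * 4 bytes = 1200 unpadded bytes per row, rounded up to the next multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT (256) gives 1280.

#[cfg(test)]
mod buffer_dimensions_tests {
    use super::BufferDimensions;

    #[test]
    fn pads_rows_to_copy_alignment() {
        let dims = BufferDimensions::new(300, 1);
        assert_eq!(dims.unpadded_bytes_per_row, 1200);
        assert_eq!(dims.padded_bytes_per_row, 1280);
    }
}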
||||
|
||||
pub struct WindowHead {
|
||||
surface: wgpu::Surface,
|
||||
surface_config: wgpu::SurfaceConfiguration,
|
||||
}
|
||||
|
||||
impl WindowHead {
|
||||
pub fn configure(&self, device: &wgpu::Device) {
|
||||
self.surface.configure(device, &self.surface_config);
|
||||
}
|
||||
|
||||
pub fn recreate_surface<MW>(&mut self, window: &MW, instance: &wgpu::Instance)
|
||||
where
|
||||
MW: MapWindow,
|
||||
{
|
||||
self.surface = unsafe { instance.create_surface(window.inner()) };
|
||||
}
|
||||
pub fn surface(&self) -> &wgpu::Surface {
|
||||
&self.surface
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BufferedTextureHead {
|
||||
texture: wgpu::Texture,
|
||||
output_buffer: wgpu::Buffer,
|
||||
buffer_dimensions: BufferDimensions,
|
||||
}
|
||||
|
||||
pub enum Head {
|
||||
Headed(WindowHead),
|
||||
Headless(BufferedTextureHead),
|
||||
}
|
||||
|
||||
pub struct Surface {
|
||||
size: WindowSize,
|
||||
head: Head,
|
||||
}
|
||||
|
||||
impl Surface {
|
||||
pub fn from_window<MW>(
|
||||
instance: &wgpu::Instance,
|
||||
window: &MW,
|
||||
settings: &RendererSettings,
|
||||
) -> Self
|
||||
where
|
||||
MW: MapWindow,
|
||||
{
|
||||
let size = window.size();
|
||||
let surface_config = wgpu::SurfaceConfiguration {
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
format: settings.texture_format,
|
||||
width: size.width(),
|
||||
height: size.height(),
|
||||
//present_mode: wgpu::PresentMode::Immediate,
|
||||
present_mode: wgpu::PresentMode::Fifo, // VSync
|
||||
};
|
||||
|
||||
let surface = unsafe { instance.create_surface(window.inner()) };
|
||||
|
||||
Self {
|
||||
size,
|
||||
head: Head::Headed(WindowHead {
|
||||
surface,
|
||||
surface_config,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_image<MW>(device: &wgpu::Device, window: &MW, settings: &RendererSettings) -> Self
|
||||
where
|
||||
MW: MapWindow,
|
||||
{
|
||||
let size = window.size();
|
||||
|
||||
// It is a WebGPU requirement that ImageCopyBuffer.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
|
||||
// So we calculate padded_bytes_per_row by rounding unpadded_bytes_per_row
|
||||
// up to the next multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
|
||||
// https://en.wikipedia.org/wiki/Data_structure_alignment#Computing_padding
|
||||
let buffer_dimensions =
|
||||
BufferDimensions::new(size.width() as usize, size.height() as usize);
|
||||
// The output buffer lets us retrieve the data as an array
|
||||
let output_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: None,
|
||||
size: (buffer_dimensions.padded_bytes_per_row * buffer_dimensions.height) as u64,
|
||||
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some("Surface texture"),
|
||||
size: wgpu::Extent3d {
|
||||
width: size.width(),
|
||||
height: size.height(),
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: settings.texture_format,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
|
||||
});
|
||||
|
||||
Self {
|
||||
size,
|
||||
head: Head::Headless(BufferedTextureHead {
|
||||
texture,
|
||||
output_buffer,
|
||||
buffer_dimensions,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(name = "create_view", skip_all)]
|
||||
pub fn create_view(&self, device: &wgpu::Device) -> TextureView {
|
||||
match &self.head {
|
||||
Head::Headed(window) => {
|
||||
let WindowHead { surface, .. } = window;
|
||||
let frame = match surface.get_current_texture() {
|
||||
Ok(view) => view,
|
||||
Err(wgpu::SurfaceError::Outdated) => {
|
||||
tracing::trace!("surface outdated");
|
||||
window.configure(device);
|
||||
surface
|
||||
.get_current_texture()
|
||||
.expect("Error reconfiguring surface")
|
||||
}
|
||||
err => err.expect("Failed to acquire next swap chain texture!"),
|
||||
};
|
||||
frame.into()
|
||||
}
|
||||
Head::Headless(BufferedTextureHead { texture, .. }) => texture
|
||||
.create_view(&wgpu::TextureViewDescriptor::default())
|
||||
.into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn size(&self) -> WindowSize {
|
||||
self.size
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, width: u32, height: u32) {
|
||||
self.size = WindowSize::new(width, height).expect("Invalid size for resizing the surface.");
|
||||
match &mut self.head {
|
||||
Head::Headed(window) => {
|
||||
window.surface_config.height = height;
|
||||
window.surface_config.width = width;
|
||||
}
|
||||
Head::Headless(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reconfigure(&mut self, device: &wgpu::Device) {
|
||||
match &mut self.head {
|
||||
Head::Headed(window) => {
|
||||
if window.has_changed(&(self.size.width(), self.size.height())) {
|
||||
window.configure(device);
|
||||
}
|
||||
}
|
||||
Head::Headless(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn recreate<MW>(&mut self, window: &MW, instance: &wgpu::Instance)
|
||||
where
|
||||
MW: MapWindow,
|
||||
{
|
||||
match &mut self.head {
|
||||
Head::Headed(head) => {
|
||||
head.recreate_surface(window, instance);
|
||||
}
|
||||
Head::Headless(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn head(&self) -> &Head {
|
||||
&self.head
|
||||
}
|
||||
|
||||
pub fn head_mut(&mut self) -> &mut Head {
|
||||
&mut self.head
|
||||
}
|
||||
}
|
||||
|
||||
impl HasChanged for WindowHead {
|
||||
type Criteria = (u32, u32);
|
||||
|
||||
fn has_changed(&self, criteria: &Self::Criteria) -> bool {
|
||||
self.surface_config.width != criteria.0 || self.surface_config.height != criteria.1
|
||||
}
|
||||
}
|
||||
110
maplibre/src/render/resource/texture.rs
Normal file
@ -0,0 +1,110 @@
|
||||
//! Utility for a texture view which can either be created from a [`TextureView`](wgpu::TextureView)
|
||||
//! or [`SurfaceTexture`](wgpu::SurfaceTexture)
|
||||
|
||||
use crate::render::settings::Msaa;
|
||||
use crate::render::util::HasChanged;
|
||||
use std::ops::Deref;
|
||||
|
||||
/// Describes a [`TextureView`].
|
||||
///
|
||||
/// May be converted from a [`TextureView`](wgpu::TextureView) or [`SurfaceTexture`](wgpu::SurfaceTexture)
|
||||
/// or dereferences to a wgpu [`TextureView`](wgpu::TextureView).
|
||||
#[derive(Debug)]
|
||||
pub enum TextureView {
|
||||
/// The value is an actual wgpu [`TextureView`](wgpu::TextureView).
|
||||
TextureView(wgpu::TextureView),
|
||||
|
||||
/// The value is a wgpu [`SurfaceTexture`](wgpu::SurfaceTexture), but dereferences to
|
||||
/// a [`TextureView`](wgpu::TextureView).
|
||||
SurfaceTexture {
|
||||
// NOTE: The order of these fields is important because the view must be dropped before the
|
||||
// frame is dropped
|
||||
view: wgpu::TextureView,
|
||||
texture: wgpu::SurfaceTexture,
|
||||
},
|
||||
}
|
||||
|
||||
impl TextureView {
|
||||
/// Returns the [`SurfaceTexture`](wgpu::SurfaceTexture) of the texture view if it is of that type.
|
||||
#[inline]
|
||||
pub fn take_surface_texture(self) -> Option<wgpu::SurfaceTexture> {
|
||||
match self {
|
||||
TextureView::TextureView(_) => None,
|
||||
TextureView::SurfaceTexture { texture, .. } => Some(texture),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wgpu::TextureView> for TextureView {
|
||||
fn from(value: wgpu::TextureView) -> Self {
|
||||
TextureView::TextureView(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wgpu::SurfaceTexture> for TextureView {
|
||||
fn from(surface_texture: wgpu::SurfaceTexture) -> Self {
|
||||
let view = surface_texture.texture.create_view(&Default::default());
|
||||
|
||||
TextureView::SurfaceTexture {
|
||||
texture: surface_texture,
|
||||
view,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for TextureView {
|
||||
type Target = wgpu::TextureView;
|
||||
|
||||
#[inline]
|
||||
fn deref(&self) -> &Self::Target {
|
||||
match &self {
|
||||
TextureView::TextureView(value) => value,
|
||||
TextureView::SurfaceTexture { view, .. } => view,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Texture {
|
||||
pub size: (u32, u32),
|
||||
pub texture: wgpu::Texture,
|
||||
pub view: TextureView,
|
||||
}
|
||||
|
||||
impl Texture {
|
||||
pub fn new(
|
||||
label: wgpu::Label,
|
||||
device: &wgpu::Device,
|
||||
format: wgpu::TextureFormat,
|
||||
width: u32,
|
||||
height: u32,
|
||||
msaa: Msaa,
|
||||
) -> Texture {
|
||||
let texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label,
|
||||
size: wgpu::Extent3d {
|
||||
width,
|
||||
height,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: msaa.samples,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
});
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
Self {
|
||||
size: (width, height),
|
||||
texture,
|
||||
view: TextureView::TextureView(view),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HasChanged for Texture {
|
||||
type Criteria = (u32, u32);
|
||||
|
||||
fn has_changed(&self, criteria: &Self::Criteria) -> bool {
|
||||
!self.size.eq(criteria)
|
||||
}
|
||||
}
|
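Because TextureView derefs to wgpu::TextureView, a value of either variant can be handed to wgpu wherever a &wgpu::TextureView is expected. A hedged sketch (field values are illustrative, wgpu 0.12-style API assumed):

fn example_color_attachment(view: &TextureView) -> wgpu::RenderPassColorAttachment<'_> {
    wgpu::RenderPassColorAttachment {
        view, // &TextureView coerces to &wgpu::TextureView via Deref
        resolve_target: None,
        ops: wgpu::Operations {
            load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
            store: true,
        },
    }
}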
||||
246
maplibre/src/render/resource/tracked_render_pass.rs
Normal file
@ -0,0 +1,246 @@
|
||||
//! A render pass which allows tracking, for example using a tracing framework.
|
||||
|
||||
use log::trace;
|
||||
use std::ops::Range;
|
||||
|
||||
/// A wrapper around a [`RenderPass`](wgpu::RenderPass) which traces the commands that are issued.
|
||||
/// It is used to set the current [`RenderPipeline`](wgpu::RenderPipeline), [`BindGroup`s](wgpu::BindGroup) and buffers.
|
||||
/// After all requirements are specified, draw calls can be issued.
|
||||
pub struct TrackedRenderPass<'a> {
|
||||
pass: wgpu::RenderPass<'a>,
|
||||
}
|
||||
|
||||
impl<'a> TrackedRenderPass<'a> {
|
||||
/// Tracks the supplied render pass.
|
||||
pub fn new(pass: wgpu::RenderPass<'a>) -> Self {
|
||||
Self { pass }
|
||||
}
|
||||
|
||||
/// Sets the active [`RenderPipeline`].
|
||||
///
|
||||
/// Subsequent draw calls will exhibit the behavior defined by the `pipeline`.
|
||||
pub fn set_render_pipeline(&mut self, pipeline: &'a wgpu::RenderPipeline) {
|
||||
trace!("set pipeline: {:?}", pipeline);
|
||||
self.pass.set_pipeline(pipeline);
|
||||
}
|
||||
|
||||
/// Sets the active [`BindGroup`] for a given bind group index. The bind group layout in the
|
||||
/// active pipeline when any `draw()` function is called must match the layout of this `bind group`.
|
||||
pub fn set_bind_group(
|
||||
&mut self,
|
||||
index: usize,
|
||||
bind_group: &'a wgpu::BindGroup,
|
||||
dynamic_uniform_indices: &[u32],
|
||||
) {
|
||||
self.pass
|
||||
.set_bind_group(index as u32, bind_group, dynamic_uniform_indices);
|
||||
}
|
||||
|
||||
/// Assign a vertex buffer to a slot.
|
||||
///
|
||||
/// Subsequent calls to [`TrackedRenderPass::draw`] and [`TrackedRenderPass::draw_indexed`]
|
||||
/// will use the buffer referenced by `buffer_slice` as one of the source vertex buffer(s).
|
||||
///
|
||||
/// The `slot_index` refers to the index of the matching descriptor in
|
||||
/// [`VertexState::buffers`](crate::render::resource::VertexState::buffers).
|
||||
pub fn set_vertex_buffer(&mut self, slot_index: usize, buffer_slice: wgpu::BufferSlice<'a>) {
|
||||
self.pass.set_vertex_buffer(slot_index as u32, buffer_slice);
|
||||
}
|
||||
|
||||
/// Sets the active index buffer.
|
||||
///
|
||||
/// Subsequent calls to [`TrackedRenderPass::draw_indexed`] will use the buffer referenced by
|
||||
/// `buffer_slice` as the source index buffer.
|
||||
pub fn set_index_buffer(
|
||||
&mut self,
|
||||
buffer_slice: wgpu::BufferSlice<'a>,
|
||||
index_format: wgpu::IndexFormat,
|
||||
) {
|
||||
self.pass.set_index_buffer(buffer_slice, index_format);
|
||||
}
|
||||
|
||||
/// Draws primitives from the active vertex buffer(s).
|
||||
///
|
||||
/// The active vertex buffer(s) can be set with [`TrackedRenderPass::set_vertex_buffer`].
|
||||
pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
|
||||
trace!("draw: {:?} {:?}", vertices, instances);
|
||||
self.pass.draw(vertices, instances);
|
||||
}
|
||||
|
||||
/// Draws indexed primitives using the active index buffer and the active vertex buffer(s).
|
||||
///
|
||||
/// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the
|
||||
/// active vertex buffer(s) can be set with [`TrackedRenderPass::set_vertex_buffer`].
|
||||
pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
|
||||
trace!(
|
||||
"draw indexed: {:?} {} {:?}",
|
||||
indices,
|
||||
base_vertex,
|
||||
instances
|
||||
);
|
||||
self.pass.draw_indexed(indices, base_vertex, instances);
|
||||
}
|
||||
|
||||
/// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
|
||||
///
|
||||
/// The active vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`].
|
||||
///
|
||||
/// The structure expected in `indirect_buffer` is the following:
|
||||
///
|
||||
/// ```rust
|
||||
/// #[repr(C)]
|
||||
/// struct DrawIndirect {
|
||||
/// vertex_count: u32, // The number of vertices to draw.
|
||||
/// instance_count: u32, // The number of instances to draw.
|
||||
/// first_vertex: u32, // The index of the first vertex to draw.
|
||||
/// first_instance: u32, // The instance ID of the first instance to draw.
|
||||
/// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled.
|
||||
/// }
|
||||
/// ```
|
||||
pub fn draw_indirect(&mut self, indirect_buffer: &'a wgpu::Buffer, indirect_offset: u64) {
|
||||
trace!("draw indirect: {:?} {}", indirect_buffer, indirect_offset);
|
||||
self.pass.draw_indirect(indirect_buffer, indirect_offset);
|
||||
}
|
||||
|
||||
/// Draws indexed primitives using the active index buffer and the active vertex buffers,
|
||||
/// based on the contents of the `indirect_buffer`.
|
||||
///
|
||||
/// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the active
|
||||
/// vertex buffers can be set with [`TrackedRenderPass::set_vertex_buffer`].
|
||||
///
|
||||
/// The structure expected in `indirect_buffer` is the following:
|
||||
///
|
||||
/// ```rust
|
||||
/// #[repr(C)]
|
||||
/// struct DrawIndexedIndirect {
|
||||
/// vertex_count: u32, // The number of vertices to draw.
|
||||
/// instance_count: u32, // The number of instances to draw.
|
||||
/// first_index: u32, // The base index within the index buffer.
|
||||
/// vertex_offset: i32, // The value added to the vertex index before indexing into the vertex buffer.
|
||||
/// first_instance: u32, // The instance ID of the first instance to draw.
|
||||
/// // has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`] is enabled.
|
||||
/// }
|
||||
/// ```
|
||||
pub fn draw_indexed_indirect(
|
||||
&mut self,
|
||||
indirect_buffer: &'a wgpu::Buffer,
|
||||
indirect_offset: u64,
|
||||
) {
|
||||
trace!(
|
||||
"draw indexed indirect: {:?} {}",
|
||||
indirect_buffer,
|
||||
indirect_offset
|
||||
);
|
||||
self.pass
|
||||
.draw_indexed_indirect(indirect_buffer, indirect_offset);
|
||||
}
|
||||
|
||||
/// Sets the stencil reference.
|
||||
///
|
||||
/// Subsequent stencil tests will test against this value.
|
||||
pub fn set_stencil_reference(&mut self, reference: u32) {
|
||||
trace!("set stencil reference: {}", reference);
|
||||
self.pass.set_stencil_reference(reference);
|
||||
}
|
||||
|
||||
/// Sets the scissor region.
|
||||
///
|
||||
/// Subsequent draw calls will discard any fragments that fall outside this region.
|
||||
pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) {
|
||||
trace!("set_scissor_rect: {} {} {} {}", x, y, width, height);
|
||||
self.pass.set_scissor_rect(x, y, width, height);
|
||||
}
|
||||
|
||||
/// Set push constant data.
|
||||
///
|
||||
/// `Features::PUSH_CONSTANTS` must be enabled on the device in order to call these functions.
|
||||
pub fn set_push_constants(&mut self, stages: wgpu::ShaderStages, offset: u32, data: &[u8]) {
|
||||
trace!(
|
||||
"set push constants: {:?} offset: {} data.len: {}",
|
||||
stages,
|
||||
offset,
|
||||
data.len()
|
||||
);
|
||||
self.pass.set_push_constants(stages, offset, data);
|
||||
}
|
||||
|
||||
/// Set the rendering viewport.
|
||||
///
|
||||
/// Subsequent draw calls will be projected into that viewport.
|
||||
pub fn set_viewport(
|
||||
&mut self,
|
||||
x: f32,
|
||||
y: f32,
|
||||
width: f32,
|
||||
height: f32,
|
||||
min_depth: f32,
|
||||
max_depth: f32,
|
||||
) {
|
||||
trace!(
|
||||
"set viewport: {} {} {} {} {} {}",
|
||||
x,
|
||||
y,
|
||||
width,
|
||||
height,
|
||||
min_depth,
|
||||
max_depth
|
||||
);
|
||||
self.pass
|
||||
.set_viewport(x, y, width, height, min_depth, max_depth);
|
||||
}
|
||||
|
||||
/// Insert a single debug marker.
|
||||
///
|
||||
/// This is a GPU debugging feature. This has no effect on the rendering itself.
|
||||
pub fn insert_debug_marker(&mut self, label: &str) {
|
||||
trace!("insert debug marker: {}", label);
|
||||
self.pass.insert_debug_marker(label);
|
||||
}
|
||||
|
||||
/// Start a new debug group.
|
||||
///
|
||||
/// Push a new debug group over the internal stack. Subsequent render commands and debug
|
||||
/// markers are grouped into this new group, until [`pop_debug_group`] is called.
|
||||
///
|
||||
/// ```ignore
|
||||
/// # fn example(mut pass: TrackedRenderPass<'static>) {
|
||||
/// pass.push_debug_group("Render the car");
|
||||
/// // [setup pipeline etc...]
|
||||
/// pass.draw(0..64, 0..1);
|
||||
/// pass.pop_debug_group();
|
||||
/// # }
|
||||
/// ```
|
||||
///
|
||||
/// Note that [`push_debug_group`] and [`pop_debug_group`] must always be called in pairs.
|
||||
///
|
||||
/// This is a GPU debugging feature. This has no effect on the rendering itself.
|
||||
///
|
||||
/// [`push_debug_group`]: TrackedRenderPass::push_debug_group
|
||||
/// [`pop_debug_group`]: TrackedRenderPass::pop_debug_group
|
||||
pub fn push_debug_group(&mut self, label: &str) {
|
||||
trace!("push_debug_group marker: {}", label);
|
||||
self.pass.push_debug_group(label);
|
||||
}
|
||||
|
||||
/// End the current debug group.
|
||||
///
|
||||
/// Subsequent render commands and debug markers are not grouped anymore in
|
||||
/// this group, but in the previous one (if any) or the default top-level one
|
||||
/// if the debug group was the last one on the stack.
|
||||
///
|
||||
/// Note that [`push_debug_group`] and [`pop_debug_group`] must always be called in pairs.
|
||||
///
|
||||
/// This is a GPU debugging feature. This has no effect on the rendering itself.
|
||||
///
|
||||
/// [`push_debug_group`]: TrackedRenderPass::push_debug_group
|
||||
/// [`pop_debug_group`]: TrackedRenderPass::pop_debug_group
|
||||
pub fn pop_debug_group(&mut self) {
|
||||
trace!("pop_debug_group");
|
||||
self.pass.pop_debug_group();
|
||||
}
|
||||
|
||||
pub fn set_blend_constant(&mut self, color: wgpu::Color) {
|
||||
trace!("set blend constant: {:?}", color);
|
||||
self.pass.set_blend_constant(color);
|
||||
}
|
||||
}
|
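A sketch of a typical call sequence through the wrapper, assuming the pipeline, bind group and buffer slices already exist; all parameter names are illustrative.

fn example_draw<'a>(
    mut pass: TrackedRenderPass<'a>,
    pipeline: &'a wgpu::RenderPipeline,
    globals: &'a wgpu::BindGroup,
    vertices: wgpu::BufferSlice<'a>,
    indices: wgpu::BufferSlice<'a>,
) {
    pass.set_render_pipeline(pipeline);
    pass.set_bind_group(0, globals, &[]);
    pass.set_vertex_buffer(0, vertices);
    pass.set_index_buffer(indices, wgpu::IndexFormat::Uint32);
    // Draw 6 indices (two triangles), one instance.
    pass.draw_indexed(0..6, 0, 0..1);
}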
||||
116
maplibre/src/render/settings.rs
Normal file
@ -0,0 +1,116 @@
|
||||
//! Settings for the renderer
|
||||
|
||||
use crate::platform::COLOR_TEXTURE_FORMAT;
|
||||
use std::borrow::Cow;
|
||||
|
||||
pub use wgpu::Backends;
|
||||
|
||||
/// Provides configuration for renderer initialization. Use [`wgpu::Device::features`],
|
||||
/// [`wgpu::Device::limits`], and [`wgpu::AdapterInfo`]
|
||||
/// to get runtime information about the actual adapter, backend, features, and limits.
|
||||
#[derive(Clone)]
|
||||
pub struct WgpuSettings {
|
||||
pub device_label: Option<Cow<'static, str>>,
|
||||
pub backends: Option<wgpu::Backends>,
|
||||
pub power_preference: wgpu::PowerPreference,
|
||||
/// The features to ensure are enabled regardless of what the adapter/backend supports.
|
||||
/// Setting these explicitly may cause renderer initialization to fail.
|
||||
pub features: wgpu::Features,
|
||||
/// The features to ensure are disabled regardless of what the adapter/backend supports
|
||||
pub disabled_features: Option<wgpu::Features>,
|
||||
/// The imposed limits.
|
||||
pub limits: wgpu::Limits,
|
||||
/// The constraints on limits allowed regardless of what the adapter/backend supports
|
||||
pub constrained_limits: Option<wgpu::Limits>,
|
||||
|
||||
/// Whether a trace is recorded and stored in the current working directory
|
||||
pub record_trace: bool,
|
||||
}
|
||||
|
||||
impl Default for WgpuSettings {
|
||||
fn default() -> Self {
|
||||
let backends = Some(wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all()));
|
||||
|
||||
let limits = if cfg!(feature = "web-webgl") {
|
||||
wgpu::Limits {
|
||||
max_texture_dimension_2d: 4096,
|
||||
..wgpu::Limits::downlevel_webgl2_defaults()
|
||||
}
|
||||
} else if cfg!(target_os = "android") {
|
||||
wgpu::Limits {
|
||||
max_storage_textures_per_shader_stage: 4,
|
||||
max_compute_workgroups_per_dimension: 0,
|
||||
max_compute_workgroup_size_z: 0,
|
||||
max_compute_workgroup_size_y: 0,
|
||||
max_compute_workgroup_size_x: 0,
|
||||
max_compute_workgroup_storage_size: 0,
|
||||
max_compute_invocations_per_workgroup: 0,
|
||||
..wgpu::Limits::downlevel_defaults()
|
||||
}
|
||||
} else {
|
||||
wgpu::Limits {
|
||||
..wgpu::Limits::default()
|
||||
}
|
||||
};
|
||||
|
||||
Self {
|
||||
device_label: Default::default(),
|
||||
backends,
|
||||
power_preference: wgpu::PowerPreference::HighPerformance,
|
||||
features: wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
|
||||
disabled_features: None,
|
||||
limits,
|
||||
constrained_limits: None,
|
||||
record_trace: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum SurfaceType {
|
||||
Headless,
|
||||
Headed,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
/// Configuration resource for [Multi-Sample Anti-Aliasing](https://en.wikipedia.org/wiki/Multisample_anti-aliasing).
|
||||
///
|
||||
pub struct Msaa {
|
||||
/// The number of samples to run for Multi-Sample Anti-Aliasing. Higher numbers result in
|
||||
/// smoother edges.
|
||||
/// Defaults to 4.
|
||||
///
|
||||
/// Note that WGPU currently only supports 1 or 4 samples.
|
||||
/// Ultimately we plan on supporting whatever is natively supported on a given device.
|
||||
/// Check out this issue for more info: <https://github.com/gfx-rs/wgpu/issues/1832>
|
||||
pub samples: u32,
|
||||
}
|
||||
|
||||
impl Msaa {
|
||||
pub fn is_active(&self) -> bool {
|
||||
self.samples > 1
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Msaa {
|
||||
fn default() -> Self {
|
||||
Self { samples: 4 }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RendererSettings {
|
||||
pub msaa: Msaa,
|
||||
pub texture_format: wgpu::TextureFormat,
|
||||
pub surface_type: SurfaceType,
|
||||
}
|
||||
|
||||
impl Default for RendererSettings {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
msaa: Msaa::default(),
|
||||
texture_format: COLOR_TEXTURE_FORMAT,
|
||||
surface_type: SurfaceType::Headed,
|
||||
}
|
||||
}
|
||||
}
|
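A sketch of overriding a few defaults with struct-update syntax; the chosen values are illustrative, not recommendations.

fn example_settings() -> (WgpuSettings, RendererSettings) {
    (
        WgpuSettings {
            power_preference: wgpu::PowerPreference::LowPower,
            record_trace: true,
            ..WgpuSettings::default()
        },
        RendererSettings {
            msaa: Msaa { samples: 1 }, // effectively disables multi-sampling
            ..RendererSettings::default()
        },
    )
}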
||||
@ -1,9 +1,7 @@
|
||||
#![allow(clippy::identity_op)]
|
||||
use wgpu::{
|
||||
ColorTargetState, Device, FragmentState, ShaderModule, VertexBufferLayout, VertexState,
|
||||
};
|
||||
|
||||
use crate::coords::WorldCoords;
|
||||
use crate::render::resource::{FragmentState, VertexBufferLayout, VertexState};
|
||||
use bytemuck_derive::{Pod, Zeroable};
|
||||
use cgmath::SquareMatrix;
|
||||
|
||||
@ -18,101 +16,25 @@ impl From<WorldCoords> for Vec3f32 {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FragmentShaderState {
|
||||
source: &'static str,
|
||||
targets: &'static [ColorTargetState],
|
||||
module: Option<ShaderModule>,
|
||||
pub trait Shader {
|
||||
fn describe_vertex(&self) -> VertexState;
|
||||
fn describe_fragment(&self) -> FragmentState;
|
||||
}
|
||||
|
||||
pub struct VertexShaderState {
|
||||
source: &'static str,
|
||||
buffers: &'static [VertexBufferLayout<'static>],
|
||||
module: Option<ShaderModule>,
|
||||
pub struct TileMaskShader {
|
||||
pub format: wgpu::TextureFormat,
|
||||
pub draw_colors: bool,
|
||||
}
|
||||
|
||||
impl FragmentShaderState {
|
||||
pub const fn new(source: &'static str, targets: &'static [ColorTargetState]) -> Self {
|
||||
Self {
|
||||
source,
|
||||
targets,
|
||||
module: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_fragment_state(&mut self, device: &Device) -> FragmentState {
|
||||
self.module = Some(device.create_shader_module(&wgpu::ShaderModuleDescriptor {
|
||||
label: Some("fragment shader"),
|
||||
source: wgpu::ShaderSource::Wgsl(self.source.into()),
|
||||
}));
|
||||
|
||||
wgpu::FragmentState {
|
||||
module: self.module.as_ref().unwrap(),
|
||||
impl Shader for TileMaskShader {
|
||||
fn describe_vertex(&self) -> VertexState {
|
||||
VertexState {
|
||||
source: include_str!("tile_mask.vertex.wgsl"),
|
||||
entry_point: "main",
|
||||
targets: self.targets,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl VertexShaderState {
|
||||
pub const fn new(
|
||||
source: &'static str,
|
||||
buffers: &'static [VertexBufferLayout<'static>],
|
||||
) -> Self {
|
||||
Self {
|
||||
source,
|
||||
buffers,
|
||||
module: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_vertex_state(&mut self, device: &Device) -> VertexState {
|
||||
self.module = Some(device.create_shader_module(&wgpu::ShaderModuleDescriptor {
|
||||
label: Some("vertex shader"),
|
||||
source: wgpu::ShaderSource::Wgsl(self.source.into()),
|
||||
}));
|
||||
|
||||
wgpu::VertexState {
|
||||
module: self.module.as_ref().unwrap(),
|
||||
entry_point: "main",
|
||||
buffers: self.buffers,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub mod tile {
|
||||
use super::{ShaderLayerMetadata, ShaderVertex};
|
||||
use crate::platform::COLOR_TEXTURE_FORMAT;
|
||||
use crate::render::shaders::{ShaderFeatureStyle, ShaderTileMetadata};
|
||||
|
||||
use super::{FragmentShaderState, VertexShaderState};
|
||||
|
||||
pub const VERTEX: VertexShaderState = VertexShaderState::new(
|
||||
include_str!("tile.vertex.wgsl"),
|
||||
&[
|
||||
// vertex data
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderVertex>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &[
|
||||
// position
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32x2,
|
||||
shader_location: 0,
|
||||
},
|
||||
// normal
|
||||
wgpu::VertexAttribute {
|
||||
offset: wgpu::VertexFormat::Float32x2.size(),
|
||||
format: wgpu::VertexFormat::Float32x2,
|
||||
shader_location: 1,
|
||||
},
|
||||
],
|
||||
},
|
||||
// tile metadata
|
||||
wgpu::VertexBufferLayout {
|
||||
buffers: vec![VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderTileMetadata>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Instance,
|
||||
attributes: &[
|
||||
attributes: vec![
|
||||
// translate
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
@ -134,118 +56,142 @@ pub mod tile {
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 7,
|
||||
},
|
||||
// zoom_factor
|
||||
wgpu::VertexAttribute {
|
||||
offset: 4 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32,
|
||||
shader_location: 9,
|
||||
},
|
||||
],
|
||||
},
|
||||
// layer metadata
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderLayerMetadata>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Instance,
|
||||
attributes: &[
|
||||
// z_index
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32,
|
||||
shader_location: 10,
|
||||
},
|
||||
],
|
||||
},
|
||||
// features
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderFeatureStyle>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &[
|
||||
// color
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 8,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
);
|
||||
}],
|
||||
}
|
||||
}
|
||||
|
||||
pub const FRAGMENT: FragmentShaderState = FragmentShaderState::new(
|
||||
include_str!("tile.fragment.wgsl"),
|
||||
&[wgpu::ColorTargetState {
|
||||
format: COLOR_TEXTURE_FORMAT,
|
||||
/*blend: Some(wgpu::BlendState {
|
||||
color: wgpu::BlendComponent {
|
||||
src_factor: wgpu::BlendFactor::SrcAlpha,
|
||||
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
|
||||
operation: wgpu::BlendOperation::Add,
|
||||
fn describe_fragment(&self) -> FragmentState {
|
||||
FragmentState {
|
||||
source: include_str!("tile_mask.fragment.wgsl"),
|
||||
entry_point: "main",
|
||||
targets: vec![wgpu::ColorTargetState {
|
||||
format: self.format,
|
||||
blend: None,
|
||||
write_mask: if self.draw_colors {
|
||||
wgpu::ColorWrites::ALL
|
||||
} else {
|
||||
wgpu::ColorWrites::empty()
|
||||
},
|
||||
alpha: wgpu::BlendComponent {
|
||||
src_factor: wgpu::BlendFactor::SrcAlpha,
|
||||
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
|
||||
operation: wgpu::BlendOperation::Add,
|
||||
},
|
||||
}),*/
|
||||
blend: None,
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
}],
|
||||
);
|
||||
}],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub mod tile_mask {
|
||||
use crate::platform::COLOR_TEXTURE_FORMAT;
|
||||
use crate::render::options::DEBUG_STENCIL_PATTERN;
|
||||
use crate::render::shaders::ShaderTileMetadata;
|
||||
use wgpu::ColorWrites;
|
||||
pub struct TileShader {
|
||||
pub format: wgpu::TextureFormat,
|
||||
}
|
||||
|
||||
use super::{FragmentShaderState, VertexShaderState};
|
||||
|
||||
pub const VERTEX: VertexShaderState = VertexShaderState::new(
|
||||
include_str!("tile_mask.vertex.wgsl"),
|
||||
&[wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderTileMetadata>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Instance,
|
||||
attributes: &[
|
||||
// translate
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 4,
|
||||
impl Shader for TileShader {
|
||||
fn describe_vertex(&self) -> VertexState {
|
||||
VertexState {
|
||||
source: include_str!("tile.vertex.wgsl"),
|
||||
entry_point: "main",
|
||||
buffers: vec![
|
||||
// vertex data
|
||||
VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderVertex>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: vec![
|
||||
// position
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32x2,
|
||||
shader_location: 0,
|
||||
},
|
||||
// normal
|
||||
wgpu::VertexAttribute {
|
||||
offset: wgpu::VertexFormat::Float32x2.size(),
|
||||
format: wgpu::VertexFormat::Float32x2,
|
||||
shader_location: 1,
|
||||
},
|
||||
],
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: 1 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 5,
|
||||
// tile metadata
|
||||
VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderTileMetadata>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Instance,
|
||||
attributes: vec![
|
||||
// translate
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 4,
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: 1 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 5,
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: 2 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 6,
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: 3 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 7,
|
||||
},
|
||||
// zoom_factor
|
||||
wgpu::VertexAttribute {
|
||||
offset: 4 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32,
|
||||
shader_location: 9,
|
||||
},
|
||||
],
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: 2 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 6,
|
||||
// layer metadata
|
||||
VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderLayerMetadata>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Instance,
|
||||
attributes: vec![
|
||||
// z_index
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32,
|
||||
shader_location: 10,
|
||||
},
|
||||
],
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: 3 * wgpu::VertexFormat::Float32x4.size(),
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 7,
|
||||
// features
|
||||
VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<ShaderFeatureStyle>() as u64,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: vec![
|
||||
// color
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
format: wgpu::VertexFormat::Float32x4,
|
||||
shader_location: 8,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}],
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub const FRAGMENT: FragmentShaderState = FragmentShaderState::new(
|
||||
include_str!("tile_mask.fragment.wgsl"),
|
||||
&[wgpu::ColorTargetState {
|
||||
format: COLOR_TEXTURE_FORMAT,
|
||||
blend: None,
|
||||
write_mask: mask_write_mask(),
|
||||
}],
|
||||
);
|
||||
|
||||
pub const fn mask_write_mask() -> ColorWrites {
|
||||
if DEBUG_STENCIL_PATTERN {
|
||||
wgpu::ColorWrites::ALL
|
||||
} else {
|
||||
wgpu::ColorWrites::empty()
|
||||
fn describe_fragment(&self) -> FragmentState {
|
||||
FragmentState {
|
||||
source: include_str!("tile.fragment.wgsl"),
|
||||
entry_point: "main",
|
||||
targets: vec![wgpu::ColorTargetState {
|
||||
format: self.format,
|
||||
/*blend: Some(wgpu::BlendState {
|
||||
color: wgpu::BlendComponent {
|
||||
src_factor: wgpu::BlendFactor::SrcAlpha,
|
||||
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
|
||||
operation: wgpu::BlendOperation::Add,
|
||||
},
|
||||
alpha: wgpu::BlendComponent {
|
||||
src_factor: wgpu::BlendFactor::SrcAlpha,
|
||||
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
|
||||
operation: wgpu::BlendOperation::Add,
|
||||
},
|
||||
}),*/
|
||||
blend: None,
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
}],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
106
maplibre/src/render/stages/graph_runner_stage.rs
Normal file
@ -0,0 +1,106 @@
|
||||
//! Executes the current [`RenderGraph`].
|
||||
|
||||
// Plugins that contribute to the RenderGraph should use the following label conventions:
|
||||
// 1. Graph modules should have a NAME, input module, and node module (where relevant)
|
||||
// 2. The "top level" graph is the plugin module root. Just add things like `pub mod node` directly under the plugin module
|
||||
// 3. "sub graph" modules should be nested beneath their parent graph module
|
||||
|
||||
use crate::context::MapContext;
|
||||
use crate::render::graph::{EmptyNode, RenderGraph};
|
||||
use crate::render::graph_runner::RenderGraphRunner;
|
||||
use crate::render::main_pass::{MainPassDriverNode, MainPassNode};
|
||||
use crate::render::util::Eventually::Initialized;
|
||||
use crate::schedule::Stage;
|
||||
use crate::Renderer;
|
||||
use log::error;
|
||||
|
||||
pub mod node {
|
||||
pub const MAIN_PASS_DEPENDENCIES: &str = "main_pass_dependencies";
|
||||
pub const MAIN_PASS_DRIVER: &str = "main_pass_driver";
|
||||
}
|
||||
|
||||
pub mod draw_graph {
|
||||
pub const NAME: &str = "draw";
|
||||
pub mod input {}
|
||||
pub mod node {
|
||||
pub const MAIN_PASS: &str = "main_pass";
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates the [`RenderGraph`] with all of its nodes and then runs it to render the entire frame.
|
||||
pub struct GraphRunnerStage {
|
||||
graph: RenderGraph,
|
||||
}
|
||||
|
||||
impl Default for GraphRunnerStage {
|
||||
fn default() -> Self {
|
||||
let pass_node = MainPassNode::new();
|
||||
let mut graph = RenderGraph::default();
|
||||
|
||||
let mut draw_graph = RenderGraph::default();
|
||||
draw_graph.add_node(draw_graph::node::MAIN_PASS, pass_node);
|
||||
let input_node_id = draw_graph.set_input(vec![]);
|
||||
draw_graph
|
||||
.add_node_edge(input_node_id, draw_graph::node::MAIN_PASS)
|
||||
.unwrap();
|
||||
graph.add_sub_graph(draw_graph::NAME, draw_graph);
|
||||
|
||||
graph.add_node(node::MAIN_PASS_DEPENDENCIES, EmptyNode);
|
||||
graph.add_node(node::MAIN_PASS_DRIVER, MainPassDriverNode);
|
||||
graph
|
||||
.add_node_edge(node::MAIN_PASS_DEPENDENCIES, node::MAIN_PASS_DRIVER)
|
||||
.unwrap();
|
||||
Self { graph }
|
||||
}
|
||||
}
|
||||
|
||||
impl Stage for GraphRunnerStage {
|
||||
fn run(
|
||||
&mut self,
|
||||
MapContext {
|
||||
renderer:
|
||||
Renderer {
|
||||
device,
|
||||
queue,
|
||||
state,
|
||||
..
|
||||
},
|
||||
..
|
||||
}: &mut MapContext,
|
||||
) {
|
||||
self.graph.update(state);
|
||||
|
||||
if let Err(e) = RenderGraphRunner::run(&self.graph, device, queue, state) {
|
||||
error!("Error running render graph:");
|
||||
{
|
||||
let mut src: &dyn std::error::Error = &e;
|
||||
loop {
|
||||
error!("> {}", src);
|
||||
match src.source() {
|
||||
Some(s) => src = s,
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
panic!("Error running render graph: {:?}", e);
|
||||
}
|
||||
|
||||
{
|
||||
let _span = tracing::info_span!("present_frames").entered();
|
||||
|
||||
if let Initialized(render_target) = state.render_target.take() {
|
||||
if let Some(surface_texture) = render_target.take_surface_texture() {
|
||||
surface_texture.present();
|
||||
}
|
||||
|
||||
#[cfg(feature = "tracing-tracy")]
|
||||
tracing::event!(
|
||||
tracing::Level::INFO,
|
||||
message = "finished frame",
|
||||
tracy.frame_mark = true
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
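A sketch of how a hypothetical additional pass could follow the label conventions listed at the top of this file; MyPassNode does not exist in this change, so the wiring is shown as comments only.

pub mod my_pass_graph {
    pub const NAME: &str = "my_pass";
    pub mod input {}
    pub mod node {
        pub const MY_PASS: &str = "my_pass";
    }
}

// Wiring mirrors the draw graph construction above:
// let mut my_graph = RenderGraph::default();
// my_graph.add_node(my_pass_graph::node::MY_PASS, MyPassNode::default());
// let input_node_id = my_graph.set_input(vec![]);
// my_graph.add_node_edge(input_node_id, my_pass_graph::node::MY_PASS).unwrap();
// graph.add_sub_graph(my_pass_graph::NAME, my_graph);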
||||
56
maplibre/src/render/stages/mod.rs
Normal file
@ -0,0 +1,56 @@
|
||||
//! Rendering-specific [Stages](Stage).
|
||||
|
||||
use crate::context::MapContext;
|
||||
use crate::schedule::{MultiStage, Schedule, Stage, StageLabel};
|
||||
use graph_runner_stage::GraphRunnerStage;
|
||||
use resource_stage::ResourceStage;
|
||||
use upload_stage::UploadStage;
|
||||
|
||||
mod graph_runner_stage;
|
||||
mod phase_sort_stage;
|
||||
mod queue_stage;
|
||||
mod resource_stage;
|
||||
mod upload_stage;
|
||||
|
||||
use crate::multi_stage;
|
||||
use crate::render::stages::phase_sort_stage::PhaseSortStage;
|
||||
use crate::render::stages::queue_stage::QueueStage;
|
||||
pub use graph_runner_stage::{draw_graph, node};
|
||||
|
||||
/// The labels of the default App rendering stages.
|
||||
#[derive(Debug, Hash, PartialEq, Eq, Clone)]
|
||||
pub enum RenderStageLabel {
|
||||
/// Prepare render resources from the extracted data for the GPU.
|
||||
/// For example during this phase textures are created, buffers are allocated and written.
|
||||
Prepare,
|
||||
|
||||
/// Queues [PhaseItems](crate::render::render_phase::draw::PhaseItem) that depend on
|
||||
/// [`Prepare`](RenderStageLabel::Prepare) data and queue up draw calls to run during the
|
||||
/// [`Render`](RenderStageLabel::Render) stage.
|
||||
Queue,
|
||||
|
||||
/// Sort the [`RenderPhases`](crate::render::render_phase::RenderPhase) here.
|
||||
PhaseSort,
|
||||
|
||||
/// Actual rendering happens here.
|
||||
/// In most cases, only the render backend should insert resources here.
|
||||
Render,
|
||||
|
||||
/// Cleanup render resources here.
|
||||
Cleanup,
|
||||
}
|
||||
|
||||
impl StageLabel for RenderStageLabel {
|
||||
fn dyn_clone(&self) -> Box<dyn StageLabel> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
multi_stage!(PrepareStage, upload: UploadStage, resource: ResourceStage);
|
||||
|
||||
pub fn register_render_stages(schedule: &mut Schedule) {
|
||||
schedule.add_stage(RenderStageLabel::Prepare, PrepareStage::default());
|
||||
schedule.add_stage(RenderStageLabel::Queue, QueueStage::default());
|
||||
schedule.add_stage(RenderStageLabel::PhaseSort, PhaseSortStage::default());
|
||||
schedule.add_stage(RenderStageLabel::Render, GraphRunnerStage::default());
|
||||
}
|
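A sketch of a custom stage that could be registered alongside the defaults; DebugStage is illustrative and assumes the `log` crate and the types used above are in scope.

#[derive(Default)]
struct DebugStage;

impl Stage for DebugStage {
    fn run(&mut self, _context: &mut MapContext) {
        // A real stage would read or mutate the MapContext here.
        log::trace!("render stages finished");
    }
}

// schedule.add_stage(RenderStageLabel::Cleanup, DebugStage::default());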
||||
35
maplibre/src/render/stages/phase_sort_stage.rs
Normal file
@ -0,0 +1,35 @@
|
||||
//! Sorts items of the [RenderPhases](RenderPhase).
|
||||
|
||||
use crate::context::MapContext;
|
||||
use crate::coords::{ViewRegion, Zoom};
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::LayerTessellateMessage;
|
||||
use crate::render::camera::ViewProjection;
|
||||
use crate::render::render_phase::RenderPhase;
|
||||
use crate::render::resource::IndexEntry;
|
||||
use crate::render::shaders::{
|
||||
ShaderCamera, ShaderFeatureStyle, ShaderGlobals, ShaderLayerMetadata, Vec4f32,
|
||||
};
|
||||
use crate::render::tile_view_pattern::TileInView;
|
||||
use crate::render::util::Eventually::Initialized;
|
||||
use crate::schedule::Stage;
|
||||
use crate::{RenderState, Renderer, Style};
|
||||
use std::iter;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct PhaseSortStage;
|
||||
|
||||
impl Stage for PhaseSortStage {
|
||||
fn run(
|
||||
&mut self,
|
||||
MapContext {
|
||||
renderer: Renderer { state, .. },
|
||||
..
|
||||
}: &mut MapContext,
|
||||
) {
|
||||
let mask_phase: &mut RenderPhase<_> = &mut state.mask_phase;
|
||||
mask_phase.sort();
|
||||
let tile_phase = &mut state.tile_phase;
|
||||
tile_phase.sort();
|
||||
}
|
||||
}
|
||||
64
maplibre/src/render/stages/queue_stage.rs
Normal file
@ -0,0 +1,64 @@
|
||||
//! Queues [PhaseItems](crate::render::render_phase::PhaseItem) for rendering.
|
||||
|
||||
use crate::context::MapContext;
|
||||
use crate::coords::{ViewRegion, Zoom};
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::LayerTessellateMessage;
|
||||
use crate::render::camera::ViewProjection;
|
||||
use crate::render::resource::IndexEntry;
|
||||
use crate::render::shaders::{
|
||||
ShaderCamera, ShaderFeatureStyle, ShaderGlobals, ShaderLayerMetadata, Vec4f32,
|
||||
};
|
||||
use crate::render::tile_view_pattern::TileInView;
|
||||
use crate::render::util::Eventually::Initialized;
|
||||
use crate::schedule::Stage;
|
||||
use crate::{RenderState, Renderer, Style};
|
||||
use std::iter;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct QueueStage;
|
||||
|
||||
impl Stage for QueueStage {
|
||||
#[tracing::instrument(name = "QueueStage", skip_all)]
|
||||
fn run(
|
||||
&mut self,
|
||||
MapContext {
|
||||
renderer: Renderer { state, .. },
|
||||
..
|
||||
}: &mut MapContext,
|
||||
) {
|
||||
state.mask_phase.items.clear();
|
||||
state.tile_phase.items.clear();
|
||||
|
||||
if let (Initialized(tile_view_pattern), Initialized(buffer_pool)) =
|
||||
(&state.tile_view_pattern, &state.buffer_pool)
|
||||
{
|
||||
let index = buffer_pool.index();
|
||||
|
||||
for tile_in_view in tile_view_pattern.iter() {
|
||||
let TileInView { shape, fallback } = &tile_in_view;
|
||||
let coords = shape.coords;
|
||||
tracing::trace!("Drawing tile at {coords}");
|
||||
|
||||
let shape_to_render = fallback.as_ref().unwrap_or(shape);
|
||||
|
||||
// Draw mask
|
||||
state.mask_phase.add(tile_in_view.clone());
|
||||
|
||||
if let Some(entries) = index.get_layers(&shape_to_render.coords) {
|
||||
let mut layers_to_render: Vec<&IndexEntry> = Vec::from_iter(entries);
|
||||
layers_to_render.sort_by_key(|entry| entry.style_layer.index);
|
||||
|
||||
for entry in layers_to_render {
|
||||
// Draw tile
|
||||
state
|
||||
.tile_phase
|
||||
.add((entry.clone(), shape_to_render.clone()))
|
||||
}
|
||||
} else {
|
||||
tracing::trace!("No layers found at {}", &shape_to_render.coords);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
139
maplibre/src/render/stages/resource_stage.rs
Normal file
@ -0,0 +1,139 @@
|
||||
//! Prepares GPU-owned resources by initializing them if they are uninitialized or out-of-date.
|
||||
|
||||
use crate::context::MapContext;
|
||||
use crate::platform::MIN_BUFFER_SIZE;
|
||||
use crate::render::resource::Texture;
|
||||
use crate::render::resource::{BackingBufferDescriptor, BufferPool};
|
||||
use crate::render::resource::{Globals, RenderPipeline};
|
||||
use crate::render::shaders;
|
||||
use crate::render::shaders::{Shader, ShaderGlobals, ShaderTileMetadata};
|
||||
use crate::render::tile_pipeline::TilePipeline;
|
||||
use crate::render::tile_view_pattern::TileViewPattern;
|
||||
use crate::schedule::Stage;
|
||||
use crate::Renderer;
|
||||
use std::cmp;
|
||||
use std::mem::size_of;
|
||||
|
||||
pub const TILE_VIEW_SIZE: wgpu::BufferAddress = 32;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ResourceStage;
|
||||
|
||||
impl Stage for ResourceStage {
|
||||
#[tracing::instrument(name = "ResourceStage", skip_all)]
|
||||
fn run(
|
||||
&mut self,
|
||||
MapContext {
|
||||
renderer:
|
||||
Renderer {
|
||||
settings,
|
||||
device,
|
||||
surface,
|
||||
state,
|
||||
..
|
||||
},
|
||||
..
|
||||
}: &mut MapContext,
|
||||
) {
|
||||
let size = surface.size();
|
||||
|
||||
surface.reconfigure(device);
|
||||
|
||||
state
|
||||
.render_target
|
||||
.initialize(|| surface.create_view(device));
|
||||
|
||||
state.depth_texture.reinitialize(
|
||||
|| {
|
||||
Texture::new(
|
||||
Some("depth texture"),
|
||||
device,
|
||||
wgpu::TextureFormat::Depth24PlusStencil8,
|
||||
size.width(),
|
||||
size.height(),
|
||||
settings.msaa,
|
||||
)
|
||||
},
|
||||
&(size.width(), size.height()),
|
||||
);
|
||||
|
||||
state.multisampling_texture.reinitialize(
|
||||
|| {
|
||||
if settings.msaa.is_active() {
|
||||
Some(Texture::new(
|
||||
Some("multisampling texture"),
|
||||
device,
|
||||
settings.texture_format,
|
||||
size.width(),
|
||||
size.height(),
|
||||
settings.msaa,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
},
|
||||
&(size.width(), size.height()),
|
||||
);
|
||||
|
||||
state
|
||||
.buffer_pool
|
||||
.initialize(|| BufferPool::from_device(device));
|
||||
|
||||
state.tile_view_pattern.initialize(|| {
|
||||
let tile_view_buffer_desc = wgpu::BufferDescriptor {
|
||||
label: Some("tile view buffer"),
|
||||
size: size_of::<ShaderTileMetadata>() as wgpu::BufferAddress * TILE_VIEW_SIZE,
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
};
|
||||
|
||||
TileViewPattern::new(BackingBufferDescriptor::new(
|
||||
device.create_buffer(&tile_view_buffer_desc),
|
||||
tile_view_buffer_desc.size,
|
||||
))
|
||||
});
|
||||
|
||||
state.tile_pipeline.initialize(|| {
|
||||
let tile_shader = shaders::TileShader {
|
||||
format: settings.texture_format,
|
||||
};
|
||||
|
||||
let pipeline = TilePipeline::new(
|
||||
settings.msaa,
|
||||
tile_shader.describe_vertex(),
|
||||
tile_shader.describe_fragment(),
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
.describe_render_pipeline()
|
||||
.initialize(device);
|
||||
|
||||
state
|
||||
.globals_bind_group
|
||||
.initialize(|| Globals::from_device(device, &pipeline.get_bind_group_layout(0)));
|
||||
|
||||
pipeline
|
||||
});
|
||||
|
||||
state.mask_pipeline.initialize(|| {
|
||||
let mask_shader = shaders::TileMaskShader {
|
||||
format: settings.texture_format,
|
||||
draw_colors: false,
|
||||
};
|
||||
|
||||
TilePipeline::new(
|
||||
settings.msaa,
|
||||
mask_shader.describe_vertex(),
|
||||
mask_shader.describe_fragment(),
|
||||
false,
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
.describe_render_pipeline()
|
||||
.initialize(device)
|
||||
});
|
||||
}
|
||||
}
|
||||
253
maplibre/src/render/stages/upload_stage.rs
Normal file
@ -0,0 +1,253 @@
|
||||
//! Uploads data needed for rendering to the GPU.
|
||||
|
||||
use crate::context::MapContext;
|
||||
use crate::coords::{ViewRegion, Zoom};
|
||||
use crate::io::tile_cache::TileCache;
|
||||
use crate::io::LayerTessellateMessage;
|
||||
use crate::render::camera::ViewProjection;
|
||||
use crate::render::resource::IndexEntry;
|
||||
use crate::render::shaders::{
|
||||
ShaderCamera, ShaderFeatureStyle, ShaderGlobals, ShaderLayerMetadata, Vec4f32,
|
||||
};
|
||||
use crate::render::tile_view_pattern::TileInView;
|
||||
use crate::render::util::Eventually::Initialized;
|
||||
use crate::schedule::Stage;
|
||||
use crate::{RenderState, Renderer, Style};
|
||||
|
||||
use std::iter;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct UploadStage;
|
||||
|
||||
impl Stage for UploadStage {
|
||||
#[tracing::instrument(name = "UploadStage", skip_all)]
|
||||
fn run(
|
||||
&mut self,
|
||||
MapContext {
|
||||
view_state,
|
||||
style,
|
||||
tile_cache,
|
||||
renderer:
|
||||
Renderer {
|
||||
settings: _,
|
||||
device: _,
|
||||
queue,
|
||||
surface: _,
|
||||
state,
|
||||
..
|
||||
},
|
||||
..
|
||||
}: &mut MapContext,
|
||||
) {
|
||||
let visible_level = view_state.visible_level();
|
||||
|
||||
let view_proj = view_state.view_projection();
|
||||
|
||||
if let Initialized(globals_bind_group) = &state.globals_bind_group {
|
||||
// Update globals
|
||||
queue.write_buffer(
|
||||
&globals_bind_group.uniform_buffer,
|
||||
0,
|
||||
bytemuck::cast_slice(&[ShaderGlobals::new(ShaderCamera::new(
|
||||
view_proj.downcast().into(),
|
||||
view_state
|
||||
.camera
|
||||
.position
|
||||
.to_homogeneous()
|
||||
.cast::<f32>()
|
||||
.unwrap()
|
||||
.into(),
|
||||
))]),
|
||||
);
|
||||
}
|
||||
|
||||
let view_region = view_state
|
||||
.camera
|
||||
.view_region_bounding_box(&view_proj.invert())
|
||||
.map(|bounding_box| ViewRegion::new(bounding_box, 0, *view_state.zoom, visible_level));
|
||||
|
||||
if let Some(view_region) = &view_region {
|
||||
let zoom = view_state.zoom();
|
||||
|
||||
self.upload_tile_geometry(state, queue, tile_cache, style, view_region);
|
||||
self.update_tile_view_pattern(state, queue, view_region, &view_proj, zoom);
|
||||
self.update_metadata();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UploadStage {
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub(crate) fn update_metadata(&self) {
|
||||
/*let animated_one = 0.5
|
||||
* (1.0
|
||||
+ ((SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs_f64()
|
||||
* 10.0)
|
||||
.sin()));*/
|
||||
|
||||
// Factor which determines how much we need to adjust the width of lines for example.
|
||||
// If zoom == z -> zoom_factor == 1
|
||||
|
||||
/* for entries in self.buffer_pool.index().iter() {
|
||||
for entry in entries {
|
||||
let world_coords = entry.coords;*/
|
||||
|
||||
// TODO: Update features
|
||||
/*let source_layer = entry.style_layer.source_layer.as_ref().unwrap();
|
||||
|
||||
if let Some(result) = scheduler
|
||||
.get_tile_cache()
|
||||
.iter_tessellated_layers_at(&world_coords)
|
||||
.unwrap()
|
||||
.find(|layer| source_layer.as_str() == layer.layer_name())
|
||||
{
|
||||
let color: Option<Vec4f32> = entry
|
||||
.style_layer
|
||||
.paint
|
||||
.as_ref()
|
||||
.and_then(|paint| paint.get_color())
|
||||
.map(|mut color| {
|
||||
color.color.b = animated_one as f32;
|
||||
color.into()
|
||||
});
|
||||
|
||||
match result {
|
||||
LayerTessellateResult::UnavailableLayer { .. } => {}
|
||||
LayerTessellateResult::TessellatedLayer {
|
||||
layer_data,
|
||||
feature_indices,
|
||||
..
|
||||
} => {
|
||||
|
||||
let feature_metadata = layer_data
|
||||
.features()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(i, _feature)| {
|
||||
iter::repeat(ShaderFeatureStyle {
|
||||
color: color.unwrap(),
|
||||
})
|
||||
.take(feature_indices[i] as usize)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.buffer_pool.update_feature_metadata(
|
||||
&self.queue,
|
||||
entry,
|
||||
&feature_metadata,
|
||||
);
|
||||
}
|
||||
}
|
||||
}*/
|
||||
/* }
|
||||
}*/
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub fn update_tile_view_pattern(
|
||||
&self,
|
||||
RenderState {
|
||||
tile_view_pattern,
|
||||
buffer_pool,
|
||||
..
|
||||
}: &mut RenderState,
|
||||
queue: &wgpu::Queue,
|
||||
view_region: &ViewRegion,
|
||||
view_proj: &ViewProjection,
|
||||
zoom: Zoom,
|
||||
) {
|
||||
if let (Initialized(tile_view_pattern), Initialized(buffer_pool)) =
|
||||
(tile_view_pattern, buffer_pool)
|
||||
{
|
||||
tile_view_pattern.update_pattern(view_region, buffer_pool, zoom);
|
||||
tile_view_pattern.upload_pattern(queue, view_proj);
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
pub fn upload_tile_geometry(
|
||||
&self,
|
||||
RenderState { buffer_pool, .. }: &mut RenderState,
|
||||
queue: &wgpu::Queue,
|
||||
tile_cache: &TileCache,
|
||||
style: &Style,
|
||||
view_region: &ViewRegion,
|
||||
) {
|
||||
if let Initialized(buffer_pool) = buffer_pool {
|
||||
// Upload all tessellated layers which are in view
|
||||
for world_coords in view_region.iter() {
|
||||
let loaded_layers = buffer_pool
|
||||
.get_loaded_layers_at(&world_coords)
|
||||
.unwrap_or_default();
|
||||
if let Some(available_layers) = tile_cache
|
||||
.iter_tessellated_layers_at(&world_coords)
|
||||
.map(|layers| {
|
||||
layers
|
||||
.filter(|result| !loaded_layers.contains(&result.layer_name()))
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
{
|
||||
for style_layer in &style.layers {
|
||||
let source_layer = style_layer.source_layer.as_ref().unwrap();
|
||||
|
||||
if let Some(message) = available_layers
|
||||
.iter()
|
||||
.find(|layer| source_layer.as_str() == layer.layer_name())
|
||||
{
|
||||
let color: Option<Vec4f32> = style_layer
|
||||
.paint
|
||||
.as_ref()
|
||||
.and_then(|paint| paint.get_color())
|
||||
.map(|color| color.into());
|
||||
|
||||
match message {
|
||||
LayerTessellateMessage::UnavailableLayer { coords: _, .. } => {
|
||||
/*self.buffer_pool.mark_layer_unavailable(*coords);*/
|
||||
}
|
||||
LayerTessellateMessage::TessellatedLayer {
|
||||
coords,
|
||||
feature_indices,
|
||||
layer_data,
|
||||
buffer,
|
||||
..
|
||||
} => {
|
||||
let allocate_feature_metadata = tracing::span!(
|
||||
tracing::Level::TRACE,
|
||||
"allocate_feature_metadata"
|
||||
);
|
||||
|
||||
let guard = allocate_feature_metadata.enter();
|
||||
let feature_metadata = layer_data
|
||||
.features
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(i, _feature)| {
|
||||
iter::repeat(ShaderFeatureStyle {
|
||||
color: color.unwrap(),
|
||||
})
|
||||
.take(feature_indices[i] as usize)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
drop(guard);
|
||||
|
||||
tracing::trace!("Allocating geometry at {}", &coords);
|
||||
buffer_pool.allocate_layer_geometry(
|
||||
queue,
|
||||
*coords,
|
||||
style_layer.clone(),
|
||||
buffer,
|
||||
ShaderLayerMetadata::new(style_layer.index as f32),
|
||||
&feature_metadata,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
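The per-feature style expansion above repeats one ShaderFeatureStyle entry per vertex index of each feature. A minimal standalone sketch of that expansion (the function name and plain array types are illustrative, not part of this commit):

fn expand_feature_styles(colors: &[[f32; 4]], feature_indices: &[u32]) -> Vec<[f32; 4]> {
    // One metadata entry per index of feature i, mirroring
    // iter::repeat(..).take(feature_indices[i] as usize) above.
    colors
        .iter()
        .enumerate()
        .flat_map(|(i, color)| std::iter::repeat(*color).take(feature_indices[i] as usize))
        .collect()
}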
@@ -1,59 +0,0 @@
pub struct Texture {
    pub texture: wgpu::Texture,
    pub view: wgpu::TextureView,
}

pub const DEPTH_TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth24PlusStencil8;

impl Texture {
    pub fn create_depth_texture(
        device: &wgpu::Device,
        config: &wgpu::SurfaceConfiguration,
        sample_count: u32,
    ) -> Self {
        let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Depth texture"),
            size: wgpu::Extent3d {
                width: config.width,
                height: config.height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count,
            dimension: wgpu::TextureDimension::D2,
            format: DEPTH_TEXTURE_FORMAT,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
        });
        let view = depth_texture.create_view(&wgpu::TextureViewDescriptor::default());

        Self {
            texture: depth_texture,
            view,
        }
    }

    /// Creates a texture that uses MSAA and fits a given swap chain.
    pub fn create_multisampling_texture(
        device: &wgpu::Device,
        desc: &wgpu::SurfaceConfiguration,
        sample_count: u32,
    ) -> Texture {
        let multisampling_texture = &wgpu::TextureDescriptor {
            label: Some("Multisampled frame descriptor"),
            size: wgpu::Extent3d {
                width: desc.width,
                height: desc.height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count,
            dimension: wgpu::TextureDimension::D2,
            format: desc.format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
        };

        let texture = device.create_texture(multisampling_texture);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        Self { texture, view }
    }
}
120 maplibre/src/render/tile_pipeline.rs Normal file
@@ -0,0 +1,120 @@
//! Utility for declaring pipelines.

use crate::platform::MIN_BUFFER_SIZE;
use crate::render::resource::{FragmentState, VertexState};
use crate::render::resource::{RenderPipeline, RenderPipelineDescriptor};
use crate::render::settings::Msaa;
use crate::render::shaders::ShaderGlobals;
use std::cmp;

pub struct TilePipeline {
    bind_globals: bool,
    update_stencil: bool,
    debug_stencil: bool,
    wireframe: bool,
    msaa: Msaa,

    vertex_state: VertexState,
    fragment_state: FragmentState,
}

impl TilePipeline {
    pub(crate) fn new(
        msaa: Msaa,
        vertex_state: VertexState,
        fragment_state: FragmentState,
        bind_globals: bool,
        update_stencil: bool,
        debug_stencil: bool,
        wireframe: bool,
    ) -> Self {
        TilePipeline {
            bind_globals,
            update_stencil,
            debug_stencil,
            wireframe,
            msaa,
            vertex_state,
            fragment_state,
        }
    }
}

impl RenderPipeline for TilePipeline {
    fn describe_render_pipeline(self) -> RenderPipelineDescriptor {
        let stencil_state = if self.update_stencil {
            wgpu::StencilFaceState {
                compare: wgpu::CompareFunction::Always, // Allow ALL values to update the stencil
                fail_op: wgpu::StencilOperation::Keep,
                depth_fail_op: wgpu::StencilOperation::Keep, // This is used when the depth test already failed
                pass_op: wgpu::StencilOperation::Replace,
            }
        } else {
            wgpu::StencilFaceState {
                compare: if self.debug_stencil {
                    wgpu::CompareFunction::Always
                } else {
                    wgpu::CompareFunction::Equal
                },
                fail_op: wgpu::StencilOperation::Keep,
                depth_fail_op: wgpu::StencilOperation::Keep,
                pass_op: wgpu::StencilOperation::Keep,
            }
        };

        let globals_buffer_byte_size =
            cmp::max(MIN_BUFFER_SIZE, std::mem::size_of::<ShaderGlobals>() as u64);

        RenderPipelineDescriptor {
            label: None,
            layout: if self.bind_globals {
                Some(vec![vec![wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: wgpu::BufferSize::new(globals_buffer_byte_size),
                    },
                    count: None,
                }]])
            } else {
                None
            },
            vertex: self.vertex_state,
            fragment: self.fragment_state,
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                polygon_mode: if self.update_stencil {
                    wgpu::PolygonMode::Fill
                } else if self.wireframe {
                    wgpu::PolygonMode::Line
                } else {
                    wgpu::PolygonMode::Fill
                },
                front_face: wgpu::FrontFace::Ccw,
                strip_index_format: None,
                cull_mode: None, // Maps look the same from the bottom and above -> No culling needed
                conservative: false,
                unclipped_depth: false,
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: wgpu::TextureFormat::Depth24PlusStencil8,
                depth_write_enabled: !self.update_stencil,
                depth_compare: wgpu::CompareFunction::Greater,
                stencil: wgpu::StencilState {
                    front: stencil_state,
                    back: stencil_state,
                    read_mask: 0xff, // Applied to stencil values being read from the stencil buffer
                    write_mask: 0xff, // Applied to fragment stencil values before being written to the stencil buffer
                },
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState {
                count: self.msaa.samples,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
        }
    }
}
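A sketch of how a TilePipeline is built from these flags, matching the mask-pipeline call at the top of this diff (the msaa value and the mask_shader's describe_vertex()/describe_fragment() calls are assumptions about surrounding code):

let mask_pipeline = TilePipeline::new(
    msaa,                            // multisampling settings
    mask_shader.describe_vertex(),   // VertexState
    mask_shader.describe_fragment(), // FragmentState
    false, // bind_globals
    true,  // update_stencil (this is the stencil-writing mask pass)
    false, // debug_stencil
    false, // wireframe
)
.describe_render_pipeline();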
@@ -1,7 +1,8 @@
use crate::coords::{ViewRegion, WorldTileCoords, Zoom};
//! Utility for generating a tile pattern which can be used for masking.

use crate::render::buffer_pool::{BackingBufferDescriptor, BufferPool, Queue};
use crate::coords::{ViewRegion, WorldTileCoords, Zoom};
use crate::render::camera::ViewProjection;
use crate::render::resource::{BackingBufferDescriptor, BufferPool, Queue};
use crate::render::shaders::{ShaderFeatureStyle, ShaderLayerMetadata, ShaderTileMetadata};
use cgmath::Matrix4;

@@ -10,7 +11,6 @@ use crate::tessellation::IndexDataType;
use std::marker::PhantomData;
use std::mem::size_of;
use std::ops::Range;
use wgpu::Buffer;

/// The tile mask pattern assigns each tile a value which can be used for stencil testing.
pub struct TileViewPattern<Q, B> {
@@ -19,6 +19,7 @@ pub struct TileViewPattern<Q, B> {
    phantom_q: PhantomData<Q>,
}

#[derive(Clone)]
pub struct TileShape {
    pub zoom_factor: f64,

@@ -40,6 +41,7 @@ impl TileShape {
    }
}

#[derive(Clone)]
pub struct TileInView {
    pub shape: TileShape,

@@ -75,7 +77,7 @@ impl<Q: Queue<B>, B> TileViewPattern<Q, B> {
        view_region: &ViewRegion,
        buffer_pool: &BufferPool<
            wgpu::Queue,
            Buffer,
            wgpu::Buffer,
            ShaderVertex,
            IndexDataType,
            ShaderLayerMetadata,
103 maplibre/src/render/util/mod.rs Normal file
@@ -0,0 +1,103 @@
use std::cmp::Ordering;
use std::mem;

/// A wrapper type that enables ordering floats. This is a workaround for the famous "rust float
/// ordering" problem. By using it, you acknowledge that sorting NaN is undefined according to spec.
/// This implementation treats NaN as the "smallest" float.
#[derive(Debug, Copy, Clone, PartialOrd)]
pub struct FloatOrd(pub f32);

impl PartialEq for FloatOrd {
    fn eq(&self, other: &Self) -> bool {
        if self.0.is_nan() && other.0.is_nan() {
            true
        } else {
            self.0 == other.0
        }
    }
}

impl Eq for FloatOrd {}

#[allow(clippy::derive_ord_xor_partial_ord)]
impl Ord for FloatOrd {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.partial_cmp(&other.0).unwrap_or_else(|| {
            if self.0.is_nan() && !other.0.is_nan() {
                Ordering::Less
            } else if !self.0.is_nan() && other.0.is_nan() {
                Ordering::Greater
            } else {
                Ordering::Equal
            }
        })
    }
}
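A minimal usage sketch: FloatOrd gives f32 keys a total order, so they work with sort() and min()/max().

let mut widths = vec![FloatOrd(3.0), FloatOrd(f32::NAN), FloatOrd(1.5)];
widths.sort(); // NaN compares as the smallest value, then 1.5, then 3.0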
/// Wrapper around a resource which can be initialized or uninitialized.
/// Uninitialized resources can be initialized by calling [`Eventually::initialize()`].
pub enum Eventually<T> {
    Initialized(T),
    Uninitialized,
}

pub trait HasChanged {
    type Criteria: Eq;

    fn has_changed(&self, criteria: &Self::Criteria) -> bool;
}

impl<T> HasChanged for Option<T>
where
    T: HasChanged,
{
    type Criteria = T::Criteria;

    fn has_changed(&self, criteria: &Self::Criteria) -> bool {
        match self {
            None => true,
            Some(value) => value.has_changed(criteria),
        }
    }
}

impl<T> Eventually<T>
where
    T: HasChanged,
{
    #[tracing::instrument(name = "reinitialize", skip_all)]
    pub fn reinitialize(&mut self, f: impl FnOnce() -> T, criteria: &T::Criteria) {
        let should_replace = match &self {
            Eventually::Initialized(current) => current.has_changed(criteria),
            Eventually::Uninitialized => true,
        };

        if should_replace {
            mem::replace(self, Eventually::Initialized(f()));
        }
    }
}
impl<T> Eventually<T> {
    #[tracing::instrument(name = "initialize", skip_all)]
    pub fn initialize(&mut self, f: impl FnOnce() -> T) {
        if let Eventually::Uninitialized = self {
            mem::replace(self, Eventually::Initialized(f()));
        }
    }

    pub fn take(&mut self) -> Eventually<T> {
        mem::replace(self, Eventually::Uninitialized)
    }
}

impl<T> Default for Eventually<T> {
    fn default() -> Self {
        Eventually::Uninitialized
    }
}
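A small self-contained sketch of the intended use (the Extent type and its criteria are made up for illustration): initialize() creates the resource once, reinitialize() re-creates it only when the criteria changed.

struct Extent { width: u32, height: u32 }

impl HasChanged for Extent {
    type Criteria = (u32, u32);
    fn has_changed(&self, criteria: &Self::Criteria) -> bool {
        (self.width, self.height) != *criteria
    }
}

let mut target: Eventually<Extent> = Eventually::default();
target.initialize(|| Extent { width: 800, height: 600 });                  // created once
target.reinitialize(|| Extent { width: 1024, height: 768 }, &(1024, 768)); // replaced only if the criteria differ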
272 maplibre/src/schedule.rs Normal file
@@ -0,0 +1,272 @@
use crate::context::MapContext;
use crate::define_label;
use downcast_rs::{impl_downcast, Downcast};
use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use std::rc::Rc;

pub struct NopStage;

impl Stage for NopStage {
    fn run(&mut self, _context: &mut MapContext) {}
}

#[macro_export]
macro_rules! multi_stage {
    ($multi_stage:ident, $($stage:ident: $stage_ty:ty),*) => {
        pub struct $multi_stage {
            $($stage: $stage_ty),*
        }

        impl Stage for $multi_stage {
            fn run(&mut self, context: &mut MapContext) {
                $(self.$stage.run(context);)*
            }
        }

        impl Default for $multi_stage {
            fn default() -> Self {
                $multi_stage {
                    $($stage: <$stage_ty>::default()),*
                }
            }
        }
    };
}
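Usage sketch for the macro (FooStage and BarStage are hypothetical stages implementing Stage + Default):

multi_stage!(FooBarStage, foo: FooStage, bar: BarStage);
// FooBarStage::default() now owns both stages, and its run() calls foo.run() then bar.run().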
pub struct MultiStage<const I: usize, S>
where
    S: Stage,
{
    stages: [S; I],
}

impl<const I: usize, S> MultiStage<I, S>
where
    S: Stage,
{
    pub fn new(stages: [S; I]) -> Self {
        Self { stages }
    }
}

impl<const I: usize, S> Stage for MultiStage<I, S>
where
    S: Stage,
{
    fn run(&mut self, context: &mut MapContext) {
        for stage in self.stages.iter_mut() {
            stage.run(context)
        }
    }
}

define_label!(StageLabel);
pub(crate) type BoxedStageLabel = Box<dyn StageLabel>;

pub trait Stage: Downcast {
    /// Runs the stage; this happens once per update.
    /// Implementors must initialize all of their state before running the first time.
    fn run(&mut self, context: &mut MapContext);
}

impl_downcast!(Stage);

/// A container of [`Stage`]s set to be run in a linear order.
///
/// Since `Schedule` implements the [`Stage`] trait, it can be inserted into another schedule.
/// In this way, the properties of the child schedule can be set differently from the parent.
/// For example, it can be set to run only once during app execution, while the parent schedule
/// runs indefinitely.
#[derive(Default)]
pub struct Schedule {
    stages: HashMap<BoxedStageLabel, Box<dyn Stage>>,
    stage_order: Vec<BoxedStageLabel>,
}

impl Schedule {
    /// Adds the given `stage` at the last position of the schedule.
    ///
    /// # Example
    ///
    /// ```
    /// # use maplibre::schedule::{Schedule, NopStage};
    /// #
    /// # let mut schedule = Schedule::default();
    /// schedule.add_stage("my_stage", NopStage);
    /// ```
    pub fn add_stage<S: Stage>(&mut self, label: impl StageLabel, stage: S) -> &mut Self {
        let label: Box<dyn StageLabel> = Box::new(label);
        self.stage_order.push(label.clone());
        let prev = self.stages.insert(label.clone(), Box::new(stage));
        assert!(prev.is_none(), "Stage already exists: {:?}.", label);
        self
    }

    /// Adds the given `stage` immediately after the `target` stage.
    ///
    /// # Example
    ///
    /// ```
    /// # use maplibre::schedule::{Schedule, NopStage};
    /// #
    /// # let mut schedule = Schedule::default();
    /// # schedule.add_stage("target_stage", NopStage);
    /// schedule.add_stage_after("target_stage", "my_stage", NopStage);
    /// ```
    pub fn add_stage_after<S: Stage>(
        &mut self,
        target: impl StageLabel,
        label: impl StageLabel,
        stage: S,
    ) -> &mut Self {
        let label: Box<dyn StageLabel> = Box::new(label);
        let target = &target as &dyn StageLabel;
        let target_index = self
            .stage_order
            .iter()
            .enumerate()
            .find(|(_i, stage_label)| &***stage_label == target)
            .map(|(i, _)| i)
            .unwrap_or_else(|| panic!("Target stage does not exist: {:?}.", target));

        self.stage_order.insert(target_index + 1, label.clone());
        let prev = self.stages.insert(label.clone(), Box::new(stage));
        assert!(prev.is_none(), "Stage already exists: {:?}.", label);
        self
    }

    /// Adds the given `stage` immediately before the `target` stage.
    ///
    /// # Example
    ///
    /// ```
    /// # use maplibre::schedule::{Schedule, NopStage};
    /// #
    /// # let mut schedule = Schedule::default();
    /// # schedule.add_stage("target_stage", NopStage);
    /// #
    /// schedule.add_stage_before("target_stage", "my_stage", NopStage);
    /// ```
    pub fn add_stage_before<S: Stage>(
        &mut self,
        target: impl StageLabel,
        label: impl StageLabel,
        stage: S,
    ) -> &mut Self {
        let label: Box<dyn StageLabel> = Box::new(label);
        let target = &target as &dyn StageLabel;
        let target_index = self
            .stage_order
            .iter()
            .enumerate()
            .find(|(_i, stage_label)| &***stage_label == target)
            .map(|(i, _)| i)
            .unwrap_or_else(|| panic!("Target stage does not exist: {:?}.", target));

        self.stage_order.insert(target_index, label.clone());
        let prev = self.stages.insert(label.clone(), Box::new(stage));
        assert!(prev.is_none(), "Stage already exists: {:?}.", label);
        self
    }

    /// Fetches the [`Stage`] of type `T` marked with `label`, then executes the provided
    /// `func` passing the fetched stage to it as an argument.
    ///
    /// The `func` argument should be a function or a closure that accepts a mutable reference
    /// to a struct implementing `Stage` and returns the same type. That means that it should
    /// also assume that the stage has already been fetched successfully.
    ///
    /// # Example
    ///
    /// ```
    /// # use maplibre::schedule::{Schedule, NopStage};
    /// # let mut schedule = Schedule::default();
    ///
    /// # schedule.add_stage("my_stage", NopStage);
    /// #
    /// schedule.stage("my_stage", |stage: &mut NopStage| {
    ///     // modify stage
    ///     stage
    /// });
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `label` refers to a non-existing stage, or if it's not of type `T`.
    pub fn stage<T: Stage, F: FnOnce(&mut T) -> &mut T>(
        &mut self,
        label: impl StageLabel,
        func: F,
    ) -> &mut Self {
        let stage = self.get_stage_mut::<T>(&label).unwrap_or_else(move || {
            panic!("stage '{:?}' does not exist or is the wrong type", label)
        });
        func(stage);
        self
    }

    /// Returns a shared reference to the stage identified by `label`, if it exists.
    ///
    /// If the requested stage does not exist, `None` is returned instead.
    ///
    /// # Example
    ///
    /// ```
    /// # use maplibre::schedule::{Schedule, NopStage};
    /// #
    /// # let mut schedule = Schedule::default();
    /// # schedule.add_stage("my_stage", NopStage);
    /// #
    /// let stage = schedule.get_stage::<NopStage>(&"my_stage").unwrap();
    /// ```
    pub fn get_stage<T: Stage>(&self, label: &dyn StageLabel) -> Option<&T> {
        self.stages
            .get(label)
            .and_then(|stage| stage.downcast_ref::<T>())
    }

    /// Returns a unique, mutable reference to the stage identified by `label`, if it exists.
    ///
    /// If the requested stage does not exist, `None` is returned instead.
    ///
    /// # Example
    ///
    /// ```
    /// # use maplibre::schedule::{Schedule, NopStage};
    /// #
    /// # let mut schedule = Schedule::default();
    /// # schedule.add_stage("my_stage", NopStage);
    /// #
    /// let stage = schedule.get_stage_mut::<NopStage>(&"my_stage").unwrap();
    /// ```
    pub fn get_stage_mut<T: Stage>(&mut self, label: &dyn StageLabel) -> Option<&mut T> {
        self.stages
            .get_mut(label)
            .and_then(|stage| stage.downcast_mut::<T>())
    }

    /// Executes each [`Stage`] contained in the schedule, one at a time.
    pub fn run_once(&mut self, context: &mut MapContext) {
        for label in &self.stage_order {
            #[cfg(feature = "trace")]
            let _stage_span = tracing::info_span!("stage", name = ?label).entered();
            let stage = self.stages.get_mut(label).unwrap();
            stage.run(context);
        }
    }

    /// Iterates over all of the schedule's stages and their labels, in execution order.
    pub fn iter_stages(&self) -> impl Iterator<Item = (&dyn StageLabel, &dyn Stage)> {
        self.stage_order
            .iter()
            .map(move |label| (&**label, &*self.stages[label]))
    }
}

impl Stage for Schedule {
    fn run(&mut self, context: &mut MapContext) {
        self.run_once(context);
    }
}
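A short sketch of assembling and driving a schedule with the API above (map_context stands in for a real MapContext):

let mut schedule = Schedule::default();
schedule
    .add_stage("request", NopStage)
    .add_stage_after("request", "upload", NopStage)
    .add_stage_before("upload", "populate_tile_store", NopStage);
// Resulting order: request, populate_tile_store, upload.
// Once per frame: schedule.run_once(&mut map_context);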
15 maplibre/src/stages/mod.rs Normal file
@@ -0,0 +1,15 @@
//! [Stages](Stage) for requesting and preparing data

use crate::io::source_client::SourceClient;
use crate::schedule::Schedule;
use crate::stages::populate_tile_store_stage::PopulateTileStore;
use crate::HTTPClient;
use request_stage::RequestStage;

mod populate_tile_store_stage;
mod request_stage;

pub fn register_stages<HC: HTTPClient>(schedule: &mut Schedule, source_client: SourceClient<HC>) {
    schedule.add_stage("request", RequestStage::new(source_client));
    schedule.add_stage("populate_tile_store", PopulateTileStore::default());
}
42 maplibre/src/stages/populate_tile_store_stage.rs Normal file
@@ -0,0 +1,42 @@
//! Receives data from async threads and populates the [`crate::io::tile_cache::TileCache`].

use crate::context::MapContext;
use crate::io::{TessellateMessage, TileTessellateMessage};
use crate::schedule::Stage;

#[derive(Default)]
pub struct PopulateTileStore {}

impl Stage for PopulateTileStore {
    fn run(
        &mut self,
        MapContext {
            tile_cache,
            shared_thread_state,
            message_receiver,
            ..
        }: &mut MapContext,
    ) {
        if let Ok(result) = message_receiver.try_recv() {
            match result {
                TessellateMessage::Layer(layer_result) => {
                    tracing::trace!(
                        "Layer {} at {} reached main thread",
                        layer_result.layer_name(),
                        layer_result.get_coords()
                    );
                    tile_cache.put_tessellated_layer(layer_result);
                }
                TessellateMessage::Tile(TileTessellateMessage { request_id, coords }) => loop {
                    if let Ok(mut tile_request_state) =
                        shared_thread_state.tile_request_state.try_lock()
                    {
                        tile_request_state.finish_tile_request(request_id);
                        tracing::trace!("Tile at {} finished loading", coords);
                        break;
                    }
                },
            }
        }
    }
}
172 maplibre/src/stages/request_stage.rs Normal file
@@ -0,0 +1,172 @@
//! Requests tiles which are currently in view

use crate::context::MapContext;
use crate::coords::{ViewRegion, WorldTileCoords};
use crate::error::Error;
use crate::io::shared_thread_state::SharedThreadState;
use crate::io::source_client::SourceClient;
use crate::io::tile_cache::TileCache;
use crate::io::TileRequest;
use crate::schedule::Stage;
use crate::{HTTPClient, ScheduleMethod, Style};
use std::collections::HashSet;

pub struct RequestStage<HC>
where
    HC: HTTPClient,
{
    pub source_client: SourceClient<HC>,
    pub try_failed: bool,
}

impl<HC> RequestStage<HC>
where
    HC: HTTPClient,
{
    pub fn new(source_client: SourceClient<HC>) -> Self {
        Self {
            source_client,
            try_failed: false,
        }
    }
}

impl<HC> Stage for RequestStage<HC>
where
    HC: HTTPClient,
{
    fn run(
        &mut self,
        MapContext {
            view_state,
            style,
            tile_cache,
            scheduler,
            shared_thread_state,
            ..
        }: &mut MapContext,
    ) {
        let visible_level = view_state.visible_level();

        let view_proj = view_state.view_projection();

        let view_region = view_state
            .camera
            .view_region_bounding_box(&view_proj.invert())
            .map(|bounding_box| ViewRegion::new(bounding_box, 0, *view_state.zoom, visible_level));

        if view_state.camera.did_change(0.05) || view_state.zoom.did_change(0.05) || self.try_failed
        {
            if let Some(view_region) = &view_region {
                // FIXME: We also need to request tiles from layers above if we are over the maximum zoom level
                self.try_failed = self.request_tiles_in_view(
                    tile_cache,
                    style,
                    shared_thread_state,
                    scheduler,
                    view_region,
                );
            }
        }

        view_state.camera.update_reference();
        view_state.zoom.update_reference();
    }
}

impl<HC> RequestStage<HC>
where
    HC: HTTPClient,
{
    /// Request tiles which are currently in view.
    #[tracing::instrument(skip_all)]
    fn request_tiles_in_view(
        &self,
        tile_cache: &TileCache,
        style: &Style,
        shared_thread_state: &SharedThreadState,
        scheduler: &Box<dyn ScheduleMethod>,
        view_region: &ViewRegion,
    ) -> bool {
        let mut try_failed = false;
        let source_layers: HashSet<String> = style
            .layers
            .iter()
            .filter_map(|layer| layer.source_layer.clone())
            .collect();

        for coords in view_region.iter() {
            if coords.build_quad_key().is_some() {
                // TODO: Make tessellation depend on style?
                try_failed = self
                    .try_request_tile(
                        tile_cache,
                        shared_thread_state,
                        scheduler,
                        &coords,
                        &source_layers,
                    )
                    .unwrap();
            }
        }
        try_failed
    }

    fn try_request_tile(
        &self,
        tile_cache: &TileCache,
        shared_thread_state: &SharedThreadState,
        scheduler: &Box<dyn ScheduleMethod>,
        coords: &WorldTileCoords,
        layers: &HashSet<String>,
    ) -> Result<bool, Error> {
        if !tile_cache.is_layers_missing(coords, layers) {
            return Ok(false);
        }

        if let Ok(mut tile_request_state) = shared_thread_state.tile_request_state.try_lock() {
            if let Some(request_id) = tile_request_state.start_tile_request(TileRequest {
                coords: *coords,
                layers: layers.clone(),
            }) {
                tracing::info!("new tile request: {}", &coords);

                // The following snippet can be added instead of the next code block to demonstrate
                // an understandable approach to fetching
                /*#[cfg(target_arch = "wasm32")]
                if let Some(tile_coords) = coords.into_tile(TileAddressingScheme::TMS) {
                    crate::platform::legacy_webworker_fetcher::request_tile(
                        request_id,
                        tile_coords,
                    );
                }*/

                let client = self.source_client.clone();
                let coords = *coords;

                scheduler
                    .schedule(
                        shared_thread_state.clone(),
                        Box::new(move |state: SharedThreadState| {
                            Box::pin(async move {
                                match client.fetch(&coords).await {
                                    Ok(data) => state
                                        .process_tile(request_id, data.into_boxed_slice())
                                        .unwrap(),
                                    Err(e) => {
                                        log::error!("{:?}", &e);
                                        state.tile_unavailable(&coords, request_id).unwrap()
                                    }
                                }
                            })
                        }),
                    )
                    .unwrap();
            }

            Ok(false)
        } else {
            Ok(true)
        }
    }
}
@@ -3,13 +3,12 @@
use bytemuck::Pod;
use std::ops::Add;

use crate::render::ShaderVertex;
use lyon::tessellation::{
    FillVertex, FillVertexConstructor, StrokeVertex, StrokeVertexConstructor, VertexBuffers,
};

use crate::error::Error;
use wgpu::BufferAddress;
use crate::render::ShaderVertex;

pub mod zero_tessellator;

@@ -80,8 +79,8 @@ trait Align<V: Pod, I: Pod> {
impl<V: Pod, I: Pod> Align<V, I> for VertexBuffers<V, I> {
    fn align_vertices(&mut self) {
        let align = wgpu::COPY_BUFFER_ALIGNMENT;
        let stride = std::mem::size_of::<ShaderVertex>() as BufferAddress;
        let unpadded_bytes = self.vertices.len() as BufferAddress * stride;
        let stride = std::mem::size_of::<ShaderVertex>() as wgpu::BufferAddress;
        let unpadded_bytes = self.vertices.len() as wgpu::BufferAddress * stride;
        let padding_bytes = (align - unpadded_bytes % align) % align;

        if padding_bytes != 0 {
@@ -94,8 +93,8 @@ impl<V: Pod, I: Pod> Align<V, I> for VertexBuffers<V, I> {

    fn align_indices(&mut self) {
        let align = wgpu::COPY_BUFFER_ALIGNMENT;
        let stride = std::mem::size_of::<I>() as BufferAddress;
        let unpadded_bytes = self.indices.len() as BufferAddress * stride;
        let stride = std::mem::size_of::<I>() as wgpu::BufferAddress;
        let unpadded_bytes = self.indices.len() as wgpu::BufferAddress * stride;
        let padding_bytes = (align - unpadded_bytes % align) % align;
        let overpad = (padding_bytes + stride - 1) / stride; // Divide by stride but round up

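A worked example of the alignment arithmetic above, with illustrative numbers (wgpu::COPY_BUFFER_ALIGNMENT is 4 bytes):

let align: u64 = 4;              // wgpu::COPY_BUFFER_ALIGNMENT
let stride: u64 = 2;             // e.g. size_of::<u16>() per index
let unpadded_bytes = 7 * stride; // 14 bytes for 7 indices
let padding_bytes = (align - unpadded_bytes % align) % align; // (4 - 14 % 4) % 4 = 2
let overpad = (padding_bytes + stride - 1) / stride;          // 1 extra index, rounded up
assert_eq!((unpadded_bytes + overpad * stride) % align, 0);   // buffer is now copy-aligned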
@@ -3,6 +3,7 @@
use geozero::{FeatureProcessor, GeomProcessor, PropertyProcessor};
use lyon::geom;

use crate::render::ShaderVertex;
use lyon::lyon_tessellation::VertexBuffers;
use lyon::path::path::Builder;
use lyon::path::Path;
@@ -12,7 +13,6 @@ use lyon::tessellation::{
};
use std::cell::RefCell;

use crate::render::ShaderVertex;
use crate::tessellation::{VertexConstructor, DEFAULT_TOLERANCE};

type GeoResult<T> = geozero::error::Result<T>;

101 maplibre/src/util/label.rs Normal file
@@ -0,0 +1,101 @@
//! Traits used by label implementations

use std::{
    any::Any,
    hash::{Hash, Hasher},
};

pub trait DynEq: Any {
    fn as_any(&self) -> &dyn Any;

    fn dyn_eq(&self, other: &dyn DynEq) -> bool;
}

impl<T> DynEq for T
where
    T: Any + Eq,
{
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn dyn_eq(&self, other: &dyn DynEq) -> bool {
        if let Some(other) = other.as_any().downcast_ref::<T>() {
            return self == other;
        }
        false
    }
}

pub trait DynHash: DynEq {
    fn as_dyn_eq(&self) -> &dyn DynEq;

    fn dyn_hash(&self, state: &mut dyn Hasher);
}

impl<T> DynHash for T
where
    T: DynEq + Hash,
{
    fn as_dyn_eq(&self) -> &dyn DynEq {
        self
    }

    fn dyn_hash(&self, mut state: &mut dyn Hasher) {
        T::hash(self, &mut state);
        self.type_id().hash(&mut state);
    }
}

/// Macro to define a new label trait
///
/// # Example
///
/// ```
/// # use maplibre::define_label;
/// define_label!(MyNewLabelTrait);
/// ```
#[macro_export]
macro_rules! define_label {
    ($label_trait_name:ident) => {
        /// Defines a set of strongly-typed labels for a class of objects
        pub trait $label_trait_name:
            $crate::util::label::DynHash + ::std::fmt::Debug + Send + Sync + 'static
        {
            #[doc(hidden)]
            fn dyn_clone(&self) -> Box<dyn $label_trait_name>;
        }

        impl PartialEq for dyn $label_trait_name {
            fn eq(&self, other: &Self) -> bool {
                self.dyn_eq(other.as_dyn_eq())
            }
        }

        impl Eq for dyn $label_trait_name {}

        impl ::std::hash::Hash for dyn $label_trait_name {
            fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
                self.dyn_hash(state);
            }
        }

        impl Clone for Box<dyn $label_trait_name> {
            fn clone(&self) -> Self {
                self.dyn_clone()
            }
        }

        impl $label_trait_name for ::std::borrow::Cow<'static, str> {
            fn dyn_clone(&self) -> Box<dyn $label_trait_name> {
                Box::new(self.clone())
            }
        }

        impl $label_trait_name for &'static str {
            fn dyn_clone(&self) -> Box<dyn $label_trait_name> {
                Box::new(<&str>::clone(self))
            }
        }
    };
}
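A usage sketch for the generated trait (MyStageLabel is an arbitrary name): plain &'static str values can serve as labels, and boxed labels stay clonable, hashable and comparable.

define_label!(MyStageLabel);

let a: Box<dyn MyStageLabel> = Box::new("upload");
let b = a.clone(); // Clone for Box<dyn MyStageLabel> goes through dyn_clone()
assert!(a == b);   // PartialEq/Hash are forwarded via DynEq/DynHash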
@@ -2,6 +2,7 @@

mod fps_meter;
pub mod grid;
pub mod label;
pub mod math;

use crate::coords::WorldTileCoords;

@@ -1,11 +1,11 @@
//! Utilities for the window system.

use crate::{HTTPClient, MapState, ScheduleMethod};
use crate::{HTTPClient, MapSchedule, ScheduleMethod};

/// Window with an optional [crate::window::WindowSize].
pub trait MapWindow {
    type EventLoop;
    type Window: raw_window_handle::HasRawWindowHandle;
    type Window: raw_window_handle::HasRawWindowHandle; // FIXME: Not true for headless
    type MapWindowConfig: MapWindowConfig<MapWindow = Self>;

    fn create(map_window_config: &Self::MapWindowConfig) -> Self;
@@ -25,11 +25,11 @@ where
    SM: ScheduleMethod,
    HC: HTTPClient,
{
    fn run(self, map_state: MapState<MWC, SM, HC>, max_frames: Option<u64>);
    fn run(self, map_state: MapSchedule<MWC, SM, HC>, max_frames: Option<u64>);
}

/// Window size with a width and a height in pixels.
#[derive(Clone, Copy)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct WindowSize {
    width: u32,
    height: u32,

@@ -8,7 +8,7 @@ publish = false

[features]
web-webgl = ["maplibre/web-webgl"]
enable-tracing = ["maplibre/enable-tracing", "tracing-wasm"]
trace = ["maplibre/trace", "tracing-wasm"]
default = []

[package.metadata.wasm-pack.profile.release]

@@ -138,9 +138,6 @@ export const startMapLibre = async (wasmPath: string | undefined, workerPath: st
const memory = new WebAssembly.Memory({initial: 1024, maximum: MEMORY_PAGES, shared: true})
await init(wasmPath, memory)

// TODO: Inline is not yet working
// let worker = new Worker(new URL('blob-url:./test_worker.js', import.meta.url), {type: 'module'});

const schedulerPtr = create_pool_scheduler(() => {
    return workerPath ? new Worker(workerPath, {
        type: 'module'

@@ -14,7 +14,7 @@ mod platform;
#[cfg(not(target_arch = "wasm32"))]
compile_error!("web works only on wasm32.");

#[cfg(feature = "enable-tracing")]
#[cfg(feature = "trace")]
fn enable_tracing() {
    use tracing_subscriber::layer::SubscriberExt;
    use tracing_subscriber::Registry;
@@ -35,7 +35,7 @@ pub fn wasm_bindgen_start() {
    }
    panic::set_hook(Box::new(console_error_panic_hook::hook));

    #[cfg(any(feature = "enable-tracing"))]
    #[cfg(any(feature = "trace"))]
    enable_tracing();
}

@@ -1,4 +1,5 @@
use std::future::Future;
use std::pin::Pin;

use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
@@ -34,14 +35,15 @@ impl WebWorkerPoolScheduleMethod {
}

impl ScheduleMethod for WebWorkerPoolScheduleMethod {
    fn schedule<T>(
    fn schedule(
        &self,
        shared_thread_state: SharedThreadState,
        future_factory: impl (FnOnce(SharedThreadState) -> T) + Send + 'static,
    ) -> Result<(), Error>
    where
        T: Future<Output = ()> + 'static,
    {
        future_factory: Box<
            (dyn (FnOnce(SharedThreadState) -> Pin<Box<dyn Future<Output = ()> + 'static>>)
                + Send
                + 'static),
        >,
    ) -> Result<(), Error> {
        self.pool
            .run(move || {
                wasm_bindgen_futures::future_to_promise(async move {