Show existing tiles while loading others

Maximilian Ammann 2022-03-31 19:12:14 +02:00
parent 2f517cce6b
commit d8c2844d3d
4 changed files with 128 additions and 49 deletions

View File

@@ -86,7 +86,7 @@ impl Default for StyleLayer {
             metadata: None,
             paint: None,
             source: None,
-            source_layer: None,
+            source_layer: Some("does not exist".to_string()),
         }
     }
 }
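
The new default deliberately points at a `source_layer` that never exists, so a default-constructed `StyleLayer` selects no features at all. A minimal sketch of the assumed matching rule (hypothetical function; the real filtering happens during tessellation):

```rust
// Sketch, assuming features are kept only when their source layer name
// matches the style layer's `source_layer` filter.
fn layer_selects(source_layer: &Option<String>, feature_layer: &str) -> bool {
    match source_layer {
        // `Some("does not exist")` never matches a real layer name.
        Some(name) => name == feature_layer,
        // `None` would act as "no filter" and match everything.
        None => true,
    }
}
```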

View File

@@ -158,6 +158,7 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM: bytemuck::Pod>
         geometry: &OverAlignedVertexBuffer<V, I>,
         tile_metadata: TM,
         feature_metadata: &[FM],
+        empty: bool,
     ) {
         let vertices_stride = size_of::<V>() as wgpu::BufferAddress;
         let indices_stride = size_of::<I>() as wgpu::BufferAddress;
@@ -202,29 +203,40 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM: bytemuck::Pod>
             buffer_feature_metadata: self
                 .feature_metadata
                 .make_room(feature_metadata_bytes, &mut self.index),
+            empty,
         };
         // write_buffer() is the preferred method for WASM: https://toji.github.io/webgpu-best-practices/buffer-uploads.html#when-in-doubt-writebuffer
-        queue.write_buffer(
-            &self.vertices.inner,
-            maybe_entry.buffer_vertices.start,
-            &bytemuck::cast_slice(&geometry.buffer.vertices)[0..aligned_vertices_bytes as usize],
-        );
-        queue.write_buffer(
-            &self.indices.inner,
-            maybe_entry.buffer_indices.start,
-            &bytemuck::cast_slice(&geometry.buffer.indices)[0..aligned_indices_bytes as usize],
-        );
-        queue.write_buffer(
-            &self.metadata.inner,
-            maybe_entry.buffer_tile_metadata.start,
-            &bytemuck::cast_slice(&[tile_metadata])[0..aligned_tile_metadata_bytes as usize],
-        );
-        queue.write_buffer(
-            &self.feature_metadata.inner,
-            maybe_entry.buffer_feature_metadata.start,
-            &bytemuck::cast_slice(feature_metadata)[0..aligned_feature_metadata_bytes as usize],
-        );
+        if !maybe_entry.buffer_vertices.is_empty() {
+            queue.write_buffer(
+                &self.vertices.inner,
+                maybe_entry.buffer_vertices.start,
+                &bytemuck::cast_slice(&geometry.buffer.vertices)
+                    [0..aligned_vertices_bytes as usize],
+            );
+        }
+        if !maybe_entry.buffer_indices.is_empty() {
+            queue.write_buffer(
+                &self.indices.inner,
+                maybe_entry.buffer_indices.start,
+                &bytemuck::cast_slice(&geometry.buffer.indices)[0..aligned_indices_bytes as usize],
+            );
+        }
+        if !maybe_entry.buffer_tile_metadata.is_empty() {
+            queue.write_buffer(
+                &self.metadata.inner,
+                maybe_entry.buffer_tile_metadata.start,
+                &bytemuck::cast_slice(&[tile_metadata])[0..aligned_tile_metadata_bytes as usize],
+            );
+        }
+        if !maybe_entry.buffer_feature_metadata.is_empty() {
+            queue.write_buffer(
+                &self.feature_metadata.inner,
+                maybe_entry.buffer_feature_metadata.start,
+                &bytemuck::cast_slice(feature_metadata)[0..aligned_feature_metadata_bytes as usize],
+            );
+        }
         self.index.push_back(maybe_entry);
     }
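
Each upload is now guarded, since a placeholder tile allocates zero-length ranges. A condensed sketch of the guard idiom (hypothetical helper; `wgpu::Queue::write_buffer` is the real API):

```rust
use std::ops::Range;

// Sketch: skip the copy entirely when `make_room` handed back a zero-length
// range, which is what happens for the empty placeholder tiles below.
fn upload_if_nonempty(
    queue: &wgpu::Queue,
    buffer: &wgpu::Buffer,
    range: &Range<wgpu::BufferAddress>,
    data: &[u8],
) {
    if !range.is_empty() {
        queue.write_buffer(buffer, range.start, data);
    }
}
```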
@@ -361,6 +373,7 @@ pub struct IndexEntry {
     // Amount of actually usable indices. Each index has the size/format `IndexDataType`.
     // Can be lower than size(buffer_indices) / indices_stride because of alignment.
     usable_indices: u32,
+    pub empty: bool,
 }

 impl IndexEntry {
@@ -417,14 +430,40 @@ impl RingIndex {
             .and_then(|key| self.tree_index.get(&key))
     }

-    pub fn get_layers_fallback(&self, coords: &WorldTileCoords) -> Option<&VecDeque<IndexEntry>> {
+    pub fn get_layers_fallback(
+        &self,
+        coords: &WorldTileCoords,
+    ) -> Option<(&VecDeque<IndexEntry>, &IndexEntry)> {
         let mut current = *coords;
+        let mut first_mask_entry: Option<&IndexEntry> = None;
         loop {
-            if let Some(entries) = self.get_layers(&current) {
-                return Some(entries);
-            } else if let Some(parent) = current.get_parent() {
-                current = parent
+            if let Some((entries, mask_entry)) = self
+                .get_layers(&current)
+                .map(|entries| (entries, entries.front()))
+            {
+                if let Some(mask_entry) = mask_entry {
+                    if first_mask_entry.is_none() {
+                        first_mask_entry = Some(mask_entry);
+                    }
+                    if mask_entry.empty {
+                        if let Some(parent) = current.get_parent() {
+                            // Continue with the parent because the current tile has no data
+                            current = parent
+                        } else {
+                            // There is no parent tile with actual data, so do not render
+                            return None;
+                        }
+                    } else {
+                        // Return actual data
+                        return Some((entries, first_mask_entry.unwrap()));
+                    }
+                } else {
+                    // No data yet, so we can not draw this tile
+                    return None;
+                }
             } else {
+                // No data yet, so we can not draw this tile
                 return None;
             }
         }
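
Stripped of the ring-buffer details, the fallback walk looks like this (hypothetical `Coords` and `Tile` types for illustration). A tile with no entry at all is still loading, so nothing is drawn; a tile whose entry is flagged `empty` defers to its parent; the first real data found on the way up is returned, together with the originally requested tile's mask entry so the stencil still clips to the requested tile:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Coords { x: u32, y: u32, z: u8 }

impl Coords {
    // Parent tile at the next coarser zoom level.
    fn parent(self) -> Option<Coords> {
        (self.z > 0).then(|| Coords { x: self.x / 2, y: self.y / 2, z: self.z - 1 })
    }
}

struct Tile { empty: bool }

fn fallback(index: &HashMap<Coords, Tile>, start: Coords) -> Option<&Tile> {
    let mut current = start;
    loop {
        let tile = index.get(&current)?; // nothing loaded yet: cannot draw
        if !tile.empty {
            return Some(tile);           // real data found
        }
        current = current.parent()?;     // placeholder: try the coarser tile
    }
}
```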

View File

@@ -7,7 +7,7 @@ use crate::coords::{ViewRegion, TILE_SIZE};
 use crate::io::scheduler::IOScheduler;
 use crate::io::LayerTessellateResult;
-use style_spec::layer::LayerPaint;
+use style_spec::layer::{LayerPaint, StyleLayer};
 use style_spec::{EncodedSrgb, Style};
 use wgpu::{Buffer, Limits, Queue};
 use winit::dpi::PhysicalSize;
@@ -21,7 +21,7 @@ use crate::render::options::{
     TILE_META_COUNT, VERTEX_BUFFER_SIZE,
 };
 use crate::render::tile_mask_pattern::TileMaskPattern;
-use crate::tessellation::IndexDataType;
+use crate::tessellation::{IndexDataType, OverAlignedVertexBuffer};
 use crate::util::FPSMeter;
@@ -371,7 +371,9 @@ impl RenderState {
             .collect();

         for coords in view_region.iter() {
-            scheduler.try_request_tile(&coords, &source_layers).unwrap();
+            if coords.build_quad_key().is_some() {
+                scheduler.try_request_tile(&coords, &source_layers).unwrap();
+            }
         }
     }
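
`build_quad_key` presumably returns `None` for coordinates that cannot be addressed as a tile, e.g. positions outside the world that a view region can produce at its edges; those are now skipped instead of being requested. A hedged sketch of such a check (the real method lives on `WorldTileCoords`):

```rust
// Sketch: a Bing-style quad key exists only for tiles inside the world grid.
fn build_quad_key(x: i32, y: i32, z: u8) -> Option<String> {
    let max = 1i32 << z; // tiles per axis at zoom level z
    if x < 0 || y < 0 || x >= max || y >= max {
        return None; // out of bounds: there is no tile to request
    }
    // One digit per zoom level, from coarsest to finest bit.
    let mut key = String::with_capacity(z as usize);
    for level in (1..=z).rev() {
        let mask = 1 << (level - 1);
        let digit = ((x & mask) != 0) as u8 + 2 * (((y & mask) != 0) as u8);
        key.push(char::from(b'0' + digit));
    }
    Some(key)
}
```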
@@ -432,7 +434,31 @@ impl RenderState {
                     .map(|color| color.into());

                 match result {
-                    LayerTessellateResult::UnavailableLayer { .. } => {}
+                    LayerTessellateResult::UnavailableLayer { coords, .. } => {
+                        // We are casting here from 64 bit to 32 bit, because 32 bit is
+                        // more performant and better supported.
+                        let transform: Matrix4<f32> = (view_proj.to_model_view_projection(
+                            world_coords.transform_for_zoom(self.zoom),
+                        ))
+                        .downcast();
+
+                        let tile_metadata = ShaderTileMetadata::new(
+                            transform.into(),
+                            zoom_factor,
+                            style_layer.index as f32,
+                        );
+
+                        println!("unavailable layer");
+                        self.buffer_pool.allocate_tile_geometry(
+                            &self.queue,
+                            *coords,
+                            style_layer.clone(),
+                            &OverAlignedVertexBuffer::empty(),
+                            tile_metadata,
+                            &[],
+                            true,
+                        );
+                    }
                     LayerTessellateResult::TessellatedLayer {
                         coords,
                         feature_indices,
@@ -474,6 +500,7 @@ impl RenderState {
                             buffer,
                             tile_metadata,
                             &feature_metadata,
+                            false,
                         );
                     }
                 }
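
The placeholder allocation gives the index a third state besides "still loading" and "ready": "loaded but empty". Only that middle state should trigger the parent fallback; a missing entry means data may still arrive. A tiny sketch of the distinction (hypothetical enum; the commit encodes it as the `empty` flag on `IndexEntry`):

```rust
// Hypothetical tri-state; in the real code this is the presence of an
// IndexEntry combined with its `empty` flag.
enum TileState {
    Loading, // no index entry yet: draw nothing, data may still arrive
    Empty,   // placeholder entry (`empty: true`): fall back to the parent tile
    Ready,   // tessellated geometry uploaded: draw it directly
}

fn should_fall_back(state: &TileState) -> bool {
    matches!(state, TileState::Empty)
}
```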
@@ -554,13 +581,15 @@ impl RenderState {
             let index = self.buffer_pool.index();

-            /*println!("Render pass start");*/
+            /* println!("Render pass start");*/
             if let Some(view_region) = &view_region {
                 for world_coords in view_region.iter() {
-                    /*println!("Render coordinate {:?}", world_coords);*/
-                    if let Some(entries) = index.get_layers_fallback(&world_coords) {
+                    /* println!("Render coordinate {:?}", world_coords);*/
+                    if let Some((entries, mask_entry)) =
+                        index.get_layers_fallback(&world_coords)
+                    {
                         let mut to_render: Vec<&IndexEntry> = Vec::from_iter(entries);
                         to_render.sort_by_key(|entry| entry.style_layer.index);
@@ -569,27 +598,29 @@ impl RenderState {
                             .stencil_reference_value(&world_coords)
                             as u32;

-                        /*println!("Render mask");*/
-                        if let Some(entry) = entries.front() {
-                            // Draw mask
-                            {
-                                pass.set_pipeline(&self.mask_pipeline);
-                                pass.set_stencil_reference(reference);
-                                pass.set_vertex_buffer(
-                                    0,
-                                    self.buffer_pool
-                                        .metadata()
-                                        .slice(entry.tile_metadata_buffer_range()),
-                                );
-                                pass.draw(0..6, 0..1);
-                            }
+                        /* println!("Render mask");*/
+                        // Draw mask
+                        {
+                            pass.set_pipeline(&self.mask_pipeline);
+                            pass.set_stencil_reference(reference);
+                            pass.set_vertex_buffer(
+                                0,
+                                self.buffer_pool
+                                    .metadata()
+                                    .slice(mask_entry.tile_metadata_buffer_range()),
+                            );
+                            pass.draw(0..6, 0..1);
                         }

                         for entry in to_render {
+                            if entry.empty {
+                                continue;
+                            }
                             // Draw tile
                             {
-                                /*println!("Render tile");*/
+                                /* println!("Render tile");*/
                                 pass.set_pipeline(&self.render_pipeline);
                                 pass.set_stencil_reference(reference);
@@ -624,7 +655,7 @@ impl RenderState {
                 }
             }
-            /*println!("Render pass end");*/
+            /* println!("Render pass end");*/
         }
     }
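
The draw order relies on the stencil buffer: the mask pass writes the requested tile's reference value over its quad, and the geometry pass only shades fragments whose stencil matches, so geometry borrowed from a parent tile is clipped to exactly the requested tile's footprint. A hedged sketch of the stencil states this implies (the crate's actual pipeline descriptors are defined elsewhere):

```rust
use wgpu::{CompareFunction, StencilFaceState, StencilOperation, StencilState};

// Mask pipeline: always pass, and replace the stencil value with the
// per-tile reference set via set_stencil_reference().
fn mask_stencil() -> StencilState {
    let face = StencilFaceState {
        compare: CompareFunction::Always,
        fail_op: StencilOperation::Keep,
        depth_fail_op: StencilOperation::Keep,
        pass_op: StencilOperation::Replace,
    };
    StencilState { front: face, back: face, read_mask: 0xff, write_mask: 0xff }
}

// Tile pipeline: only shade fragments whose stencil equals the reference,
// i.e. fragments inside the mask drawn for this tile.
fn tile_stencil() -> StencilState {
    let face = StencilFaceState {
        compare: CompareFunction::Equal,
        fail_op: StencilOperation::Keep,
        depth_fail_op: StencilOperation::Keep,
        pass_op: StencilOperation::Keep,
    };
    StencilState { front: face, back: face, read_mask: 0xff, write_mask: 0 }
}
```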

View File

@@ -48,6 +48,15 @@ pub struct OverAlignedVertexBuffer<V, I> {
     pub usable_indices: u32,
 }

+impl<V, I> OverAlignedVertexBuffer<V, I> {
+    pub fn empty() -> Self {
+        Self {
+            buffer: VertexBuffers::with_capacity(0, 0),
+            usable_indices: 0,
+        }
+    }
+}
+
 impl<V: Pod, I: Pod> From<VertexBuffers<V, I>> for OverAlignedVertexBuffer<V, I> {
     fn from(mut buffer: VertexBuffers<V, I>) -> Self {
         let usable_indices = buffer.indices.len() as u32;
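
Usage as in the `UnavailableLayer` arm above: the zero-capacity buffer is passed to `allocate_tile_geometry` together with `empty: true`, so an index entry is reserved without uploading any vertex or index data. A quick sketch with hypothetical type parameters (the renderer instantiates the pool with its own vertex type and `IndexDataType`):

```rust
let placeholder = OverAlignedVertexBuffer::<[f32; 2], u32>::empty();
assert_eq!(placeholder.usable_indices, 0);
assert!(placeholder.buffer.vertices.is_empty());
```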