Show existing tiles while loading others

Maximilian Ammann 2022-03-31 19:12:14 +02:00
parent 2f517cce6b
commit d8c2844d3d
4 changed files with 128 additions and 49 deletions
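
In short, the change works in three steps: layers that are not yet available are allocated as empty placeholder entries in the buffer pool, the tile index falls back from an empty tile to the nearest parent tile that has real geometry, and the render pass skips empty entries while still drawing the stencil mask for the requested tile. Below is a minimal, self-contained sketch of the fallback walk; `Coords` and `Tile` are simplified stand-ins for the real `WorldTileCoords` and `IndexEntry` types, whose actual implementation (`RingIndex::get_layers_fallback`) appears in the second file of this diff.

use std::collections::HashMap;

/// Simplified stand-in for WorldTileCoords.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Coords {
    x: i32,
    y: i32,
    z: u8,
}

impl Coords {
    /// Parent tile at the next lower zoom level; None at the root.
    fn parent(self) -> Option<Coords> {
        if self.z == 0 {
            return None;
        }
        Some(Coords {
            x: self.x / 2,
            y: self.y / 2,
            z: self.z - 1,
        })
    }
}

/// Simplified stand-in for IndexEntry: `empty` marks a placeholder tile.
struct Tile {
    empty: bool,
}

/// Walk up the tile pyramid until a tile with real data is found.
fn get_fallback(index: &HashMap<Coords, Tile>, start: Coords) -> Option<(Coords, &Tile)> {
    let mut current = start;
    loop {
        match index.get(&current) {
            // Placeholder entry: keep climbing to the parent tile.
            Some(tile) if tile.empty => current = current.parent()?,
            // Real data: render this tile in place of the missing one.
            Some(tile) => return Some((current, tile)),
            // Tile was never indexed: nothing we can draw yet.
            None => return None,
        }
    }
}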

View File

@@ -86,7 +86,7 @@ impl Default for StyleLayer {
metadata: None,
paint: None,
source: None,
source_layer: None,
source_layer: Some("does not exist".to_string()),
}
}
}

View File

@@ -158,6 +158,7 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM:
geometry: &OverAlignedVertexBuffer<V, I>,
tile_metadata: TM,
feature_metadata: &[FM],
empty: bool,
) {
let vertices_stride = size_of::<V>() as wgpu::BufferAddress;
let indices_stride = size_of::<I>() as wgpu::BufferAddress;
@@ -202,29 +203,40 @@ impl<Q: Queue<B>, B, V: bytemuck::Pod, I: bytemuck::Pod, TM: bytemuck::Pod, FM:
buffer_feature_metadata: self
.feature_metadata
.make_room(feature_metadata_bytes, &mut self.index),
empty,
};
// write_buffer() is the preferred method for WASM: https://toji.github.io/webgpu-best-practices/buffer-uploads.html#when-in-doubt-writebuffer
queue.write_buffer(
&self.vertices.inner,
maybe_entry.buffer_vertices.start,
&bytemuck::cast_slice(&geometry.buffer.vertices)[0..aligned_vertices_bytes as usize],
);
queue.write_buffer(
&self.indices.inner,
maybe_entry.buffer_indices.start,
&bytemuck::cast_slice(&geometry.buffer.indices)[0..aligned_indices_bytes as usize],
);
queue.write_buffer(
&self.metadata.inner,
maybe_entry.buffer_tile_metadata.start,
&bytemuck::cast_slice(&[tile_metadata])[0..aligned_tile_metadata_bytes as usize],
);
queue.write_buffer(
&self.feature_metadata.inner,
maybe_entry.buffer_feature_metadata.start,
&bytemuck::cast_slice(feature_metadata)[0..aligned_feature_metadata_bytes as usize],
);
if !maybe_entry.buffer_vertices.is_empty() {
queue.write_buffer(
&self.vertices.inner,
maybe_entry.buffer_vertices.start,
&bytemuck::cast_slice(&geometry.buffer.vertices)
[0..aligned_vertices_bytes as usize],
);
}
if !maybe_entry.buffer_indices.is_empty() {
queue.write_buffer(
&self.indices.inner,
maybe_entry.buffer_indices.start,
&bytemuck::cast_slice(&geometry.buffer.indices)[0..aligned_indices_bytes as usize],
);
}
if !maybe_entry.buffer_tile_metadata.is_empty() {
queue.write_buffer(
&self.metadata.inner,
maybe_entry.buffer_tile_metadata.start,
&bytemuck::cast_slice(&[tile_metadata])[0..aligned_tile_metadata_bytes as usize],
);
}
if !maybe_entry.buffer_feature_metadata.is_empty() {
queue.write_buffer(
&self.feature_metadata.inner,
maybe_entry.buffer_feature_metadata.start,
&bytemuck::cast_slice(feature_metadata)[0..aligned_feature_metadata_bytes as usize],
);
}
self.index.push_back(maybe_entry);
}
@@ -361,6 +373,7 @@ pub struct IndexEntry {
// Number of actually usable indices. Each index has the size/format `IndexDataType`.
// Can be lower than size(buffer_indices) / indices_stride because of alignment.
usable_indices: u32,
pub empty: bool,
}
impl IndexEntry {
@@ -417,14 +430,40 @@ impl RingIndex {
.and_then(|key| self.tree_index.get(&key))
}
pub fn get_layers_fallback(&self, coords: &WorldTileCoords) -> Option<&VecDeque<IndexEntry>> {
pub fn get_layers_fallback(
&self,
coords: &WorldTileCoords,
) -> Option<(&VecDeque<IndexEntry>, &IndexEntry)> {
let mut current = *coords;
let mut first_mask_entry: Option<&IndexEntry> = None;
loop {
if let Some(entries) = self.get_layers(&current) {
return Some(entries);
} else if let Some(parent) = current.get_parent() {
current = parent
if let Some((entries, mask_entry)) = self
.get_layers(&current)
.map(|entries| (entries, entries.front()))
{
if let Some(mask_entry) = mask_entry {
if first_mask_entry.is_none() {
first_mask_entry = Some(mask_entry);
}
if mask_entry.empty {
if let Some(parent) = current.get_parent() {
// Continue with the parent because there is no data in the current tile
current = parent
} else {
// There is no parent tile with actual data, so do not render
return None;
}
} else {
// Return actual data
return Some((entries, first_mask_entry.unwrap()));
}
} else {
// The tile has no layer entries yet, so we cannot draw it
return None;
}
} else {
// Nothing is indexed for this tile yet, so we cannot draw it
return None;
}
}
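
A note on the changed contract: `get_layers_fallback` now returns both the layer entries of the tile that actually holds data and the mask entry remembered from the first tile that had any entry, i.e. the most detailed one. This lets the render pass (next file) draw the stencil mask at the requested tile's footprint even when a parent tile's geometry is rendered in its place.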

View File

@@ -7,7 +7,7 @@ use crate::coords::{ViewRegion, TILE_SIZE};
use crate::io::scheduler::IOScheduler;
use crate::io::LayerTessellateResult;
use style_spec::layer::LayerPaint;
use style_spec::layer::{LayerPaint, StyleLayer};
use style_spec::{EncodedSrgb, Style};
use wgpu::{Buffer, Limits, Queue};
use winit::dpi::PhysicalSize;
@@ -21,7 +21,7 @@ use crate::render::options::{
TILE_META_COUNT, VERTEX_BUFFER_SIZE,
};
use crate::render::tile_mask_pattern::TileMaskPattern;
use crate::tessellation::IndexDataType;
use crate::tessellation::{IndexDataType, OverAlignedVertexBuffer};
use crate::util::FPSMeter;
@@ -371,7 +371,9 @@ impl RenderState {
.collect();
for coords in view_region.iter() {
scheduler.try_request_tile(&coords, &source_layers).unwrap();
if coords.build_quad_key().is_some() {
scheduler.try_request_tile(&coords, &source_layers).unwrap();
}
}
}
@@ -432,7 +434,31 @@ impl RenderState {
.map(|color| color.into());
match result {
LayerTessellateResult::UnavailableLayer { .. } => {}
LayerTessellateResult::UnavailableLayer { coords, .. } => {
// We are casting here from 64 bit to 32 bit, because 32 bit is more performant
// and better supported.
let transform: Matrix4<f32> = (view_proj
.to_model_view_projection(
world_coords.transform_for_zoom(self.zoom),
))
.downcast();
let tile_metadata = ShaderTileMetadata::new(
transform.into(),
zoom_factor,
style_layer.index as f32,
);
println!("unavailable layer");
self.buffer_pool.allocate_tile_geometry(
&self.queue,
*coords,
style_layer.clone(),
&OverAlignedVertexBuffer::empty(),
tile_metadata,
&[],
true,
);
}
LayerTessellateResult::TessellatedLayer {
coords,
feature_indices,
@@ -474,6 +500,7 @@ impl RenderState {
buffer,
tile_metadata,
&feature_metadata,
false,
);
}
}
@@ -554,13 +581,15 @@ impl RenderState {
let index = self.buffer_pool.index();
/*println!("Render pass start");*/
/* println!("Render pass start");*/
if let Some(view_region) = &view_region {
for world_coords in view_region.iter() {
/*println!("Render coordinate {:?}", world_coords);*/
/* println!("Render coordinate {:?}", world_coords);*/
if let Some(entries) = index.get_layers_fallback(&world_coords) {
if let Some((entries, mask_entry)) =
index.get_layers_fallback(&world_coords)
{
let mut to_render: Vec<&IndexEntry> = Vec::from_iter(entries);
to_render.sort_by_key(|entry| entry.style_layer.index);
@@ -569,27 +598,29 @@ impl RenderState {
.stencil_reference_value(&world_coords)
as u32;
/*println!("Render mask");*/
/* println!("Render mask");*/
if let Some(entry) = entries.front() {
// Draw mask
{
pass.set_pipeline(&self.mask_pipeline);
pass.set_stencil_reference(reference);
pass.set_vertex_buffer(
0,
self.buffer_pool
.metadata()
.slice(entry.tile_metadata_buffer_range()),
);
pass.draw(0..6, 0..1);
}
// Draw mask
{
pass.set_pipeline(&self.mask_pipeline);
pass.set_stencil_reference(reference);
pass.set_vertex_buffer(
0,
self.buffer_pool
.metadata()
.slice(mask_entry.tile_metadata_buffer_range()),
);
pass.draw(0..6, 0..1);
}
for entry in to_render {
if entry.empty {
continue;
}
// Draw tile
{
/*println!("Render tile");*/
/* println!("Render tile");*/
pass.set_pipeline(&self.render_pipeline);
pass.set_stencil_reference(reference);
@@ -624,7 +655,7 @@ impl RenderState {
}
}
/*println!("Render pass end");*/
/* println!("Render pass end");*/
}
}
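
Two details worth noting in the render pass above: the mask is now drawn from the `mask_entry` returned by `get_layers_fallback` rather than from `entries.front()`, and entries flagged as `empty` are skipped before any geometry is drawn, so placeholder tiles cost one stencil write but no tile draw calls.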

View File

@@ -48,6 +48,15 @@ pub struct OverAlignedVertexBuffer<V, I> {
pub usable_indices: u32,
}
impl<V, I> OverAlignedVertexBuffer<V, I> {
pub fn empty() -> Self {
Self {
buffer: VertexBuffers::with_capacity(0, 0),
usable_indices: 0,
}
}
}
impl<V: Pod, I: Pod> From<VertexBuffers<V, I>> for OverAlignedVertexBuffer<V, I> {
fn from(mut buffer: VertexBuffers<V, I>) -> Self {
let usable_indices = buffer.indices.len() as u32;
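
Finally, `OverAlignedVertexBuffer::empty()` is backed by zero-capacity `VertexBuffers`, so the byte ranges reserved in `allocate_tile_geometry` come out empty for placeholder tiles; the new `is_empty()` guards around the `write_buffer()` calls then skip the GPU uploads entirely.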