buffer for 3D HiPSes + prepare refactoring of HiPS2D and its buffer + hpx_unproj GPU side (not tested)

Matthieu Baumann
2024-10-14 18:22:40 +02:00
committed by Matthieu Baumann
parent 97b51f1567
commit 8ca394459d
26 changed files with 1804 additions and 1125 deletions

View File

@@ -47,6 +47,7 @@ pub struct HiPSProperties {
hips_initial_fov: Option<f64>,
hips_initial_ra: Option<f64>,
hips_initial_dec: Option<f64>,
hips_cube_depth: Option<u32>,
// Parameterizable by the user
#[allow(unused)]
@@ -78,6 +79,11 @@ impl HiPSProperties {
self.min_order
}
#[inline(always)]
pub fn get_cube_depth(&self) -> Option<u32> {
self.hips_cube_depth
}
#[inline(always)]
pub fn get_bitpix(&self) -> Option<i32> {
self.bitpix

View File

@@ -0,0 +1,252 @@
use crate::image::format::ImageFormat;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
use crate::texture::Texture2DMeta;
use crate::webgl_ctx::WebGlContext;
use crate::webgl_ctx::WebGlRenderingCtx;
use crate::Abort;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use web_sys::HtmlImageElement;
pub struct Texture2DArray {
gl: WebGlContext,
texture: Option<WebGlTexture>,
metadata: Option<Rc<RefCell<Texture2DMeta>>>,
pub num_slices: i32,
}
impl Texture2DArray {
pub fn create_empty<F: ImageFormat>(
gl: &WebGlContext,
// The width of the individual textures
width: i32,
// Their height
height: i32,
// How many texture slices it contains
num_slices: i32,
tex_params: &'static [(u32, u32)],
) -> Result<Texture2DArray, JsValue> {
let texture = gl.create_texture();
gl.bind_texture(WebGlRenderingCtx::TEXTURE_2D_ARRAY, texture.as_ref());
for (pname, param) in tex_params.iter() {
gl.tex_parameteri(WebGlRenderingCtx::TEXTURE_2D_ARRAY, *pname, *param as i32);
}
gl.tex_storage_3d(
WebGlRenderingCtx::TEXTURE_2D_ARRAY,
1,
F::INTERNAL_FORMAT as u32,
width,
height,
num_slices,
);
let gl = gl.clone();
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
type_: F::TYPE,
})));
Ok(Texture2DArray {
texture,
gl,
num_slices,
metadata,
})
}
pub fn generate_mipmap(&self) {
self.gl.generate_mipmap(WebGlRenderingCtx::TEXTURE_2D_ARRAY);
}
pub fn bind(&self) -> Texture2DArrayBound {
self.gl
.bind_texture(WebGlRenderingCtx::TEXTURE_2D_ARRAY, self.texture.as_ref());
Texture2DArrayBound { tex: self }
}
pub fn active_texture(&self, idx_tex_unit: u8) -> &Self {
self.gl
.active_texture(WebGlRenderingCtx::TEXTURE0 + idx_tex_unit as u32);
self
}
}
impl Drop for Texture2DArray {
fn drop(&mut self) {
self.gl.delete_texture(self.texture.as_ref());
}
}
use super::CUR_IDX_TEX_UNIT;
use crate::shader::UniformType;
use web_sys::WebGlUniformLocation;
impl UniformType for Texture2DArray {
fn uniform(gl: &WebGlContext, location: Option<&WebGlUniformLocation>, tex: &Self) {
unsafe {
let _ = tex
// 1. Activate the texture unit of the texture
.active_texture(CUR_IDX_TEX_UNIT)
// 2. Bind the texture to that texture unit
.bind();
gl.uniform1i(location, CUR_IDX_TEX_UNIT as i32);
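// 3. Advance the global texture unit counter so the next attached texture uniform gets its own unit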
CUR_IDX_TEX_UNIT += 1;
};
}
}
pub struct Texture2DArrayBound<'a> {
tex: &'a Texture2DArray,
}
impl<'a> Texture2DArrayBound<'a> {
pub fn tex_sub_image_3d_with_html_image_element(
&self,
dx: i32,
dy: i32,
idx: i32,
image: &HtmlImageElement,
) {
let metadata = self.tex.metadata.as_ref().unwrap_abort().borrow();
self.tex
.gl
.tex_sub_image_3d_with_html_image_element(
WebGlRenderingCtx::TEXTURE_2D_ARRAY,
0,
dx,
dy,
idx,
image.width() as i32,
image.height() as i32,
1,
metadata.format,
metadata.type_,
image,
)
.expect("Sub texture 3d");
}
pub fn tex_sub_image_3d_with_html_canvas_element(
&self,
dx: i32,
dy: i32,
idx: i32,
canvas: &HtmlCanvasElement,
) {
let metadata = self.tex.metadata.as_ref().unwrap_abort().borrow();
self.tex
.gl
.tex_sub_image_3d_with_html_canvas_element(
WebGlRenderingCtx::TEXTURE_2D_ARRAY,
0,
dx,
dy,
idx,
canvas.width() as i32,
canvas.height() as i32,
1,
metadata.format,
metadata.type_,
canvas,
)
.expect("Sub texture 3d");
}
pub fn tex_sub_image_3d_with_image_bitmap(
&self,
dx: i32,
dy: i32,
idx: i32,
image: &web_sys::ImageBitmap,
) {
let metadata = self.tex.metadata.as_ref().unwrap_abort().borrow();
self.tex
.gl
.tex_sub_image_3d_with_image_bitmap(
WebGlRenderingCtx::TEXTURE_2D_ARRAY,
0,
dx,
dy,
idx,
image.width() as i32,
image.height() as i32,
1,
metadata.format,
metadata.type_,
image,
)
.expect("Sub texture 3d");
}
pub fn tex_sub_image_3d_with_opt_array_buffer_view(
&self,
dx: i32,
dy: i32,
idx: i32,
w: i32,
h: i32,
image: Option<&js_sys::Object>,
) {
let metadata = self.tex.metadata.as_ref().unwrap_abort().borrow();
self.tex
.gl
.tex_sub_image_3d_with_opt_array_buffer_view(
WebGlRenderingCtx::TEXTURE_2D_ARRAY,
0,
dx,
dy,
idx,
w,
h,
1,
metadata.format,
metadata.type_,
image,
)
.expect("Sub texture 3d");
}
#[allow(dead_code)]
pub fn tex_sub_image_3d_with_opt_u8_array(
&self,
dx: i32,
dy: i32,
idx: i32,
w: i32,
h: i32,
pixels: Option<&[u8]>,
) {
let metadata = self.tex.metadata.as_ref().unwrap_abort().borrow();
self.tex
.gl
.tex_sub_image_3d_with_opt_u8_array(
WebGlRenderingCtx::TEXTURE_2D_ARRAY,
0,
dx,
dy,
idx,
w,
h,
1,
metadata.format,
metadata.type_,
pixels,
)
.expect("Sub texture 3d");
}
}

View File

@@ -329,6 +329,7 @@ impl Texture2D {
let metadata = self.metadata.as_ref().unwrap_abort().borrow();
self.gl
.viewport(x, y, metadata.width as i32, metadata.height as i32);
#[cfg(feature = "webgl2")]
let value = match (metadata.format, metadata.type_) {
(WebGlRenderingCtx::RED_INTEGER, WebGlRenderingCtx::UNSIGNED_BYTE) => {
@@ -359,24 +360,6 @@ impl Texture2D {
"Pixel retrieval not implemented for that texture format.",
)),
};
#[cfg(feature = "webgl1")]
let value = match (*format, *type_) {
(WebGlRenderingCtx::LUMINANCE_ALPHA, WebGlRenderingCtx::FLOAT) => {
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
(WebGlRenderingCtx::RGB, WebGlRenderingCtx::UNSIGNED_BYTE) => {
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
(WebGlRenderingCtx::RGBA, WebGlRenderingCtx::UNSIGNED_BYTE) => {
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
_ => Err(JsValue::from_str(
"Pixel retrieval not implemented for that texture format.",
)),
};
// Unbind the framebuffer
self.gl

View File

@@ -259,25 +259,24 @@ impl App {
self.tile_fetcher.clear();
// Loop over the surveys
for survey in self.layers.values_mut_hips() {
let cfg = survey.get_config();
if self.camera.get_texture_depth() == 0
&& self
.downloader
.borrow()
.is_queried(&query::Allsky::new(survey.get_config()).id)
.is_queried(&query::Allsky::new(cfg).id)
{
// do not ask for tiles if we download the allsky
continue;
}
let min_tile_depth = survey
.get_config()
.delta_depth()
.max(survey.get_config().get_min_depth_tile());
let min_tile_depth = cfg.delta_depth().max(cfg.get_min_depth_tile());
let mut ancestors = HashSet::new();
let creator_did = survey.get_config().get_creator_did().to_string();
let root_url = survey.get_config().get_root_url().to_string();
let format = survey.get_config().get_format();
let creator_did = cfg.get_creator_did().to_string();
let root_url = cfg.get_root_url().to_string();
let format = cfg.get_format();
if let Some(tiles_iter) = survey.look_for_new_tiles(&mut self.camera, &self.projection)
{
@@ -287,6 +286,7 @@ impl App {
creator_did.clone(),
root_url.clone(),
format,
None,
));
// check if we are starting aladin lite or not.
@@ -308,6 +308,7 @@ impl App {
creator_did.clone(),
root_url.clone(),
format,
None,
));
}
}
@@ -732,6 +733,7 @@ impl App {
cfg.get_creator_did().to_string(),
cfg.get_root_url().to_string(),
cfg.get_format(),
None,
);
self.tile_fetcher.append_base_tile(query);
}

View File

@@ -22,14 +22,16 @@ pub struct Tile {
pub id: QueryId,
}
use crate::healpix::cell::HEALPixCell;
use crate::renderable::hips::config::HiPSConfig;
use crate::renderable::CreatorDid;
use crate::{healpix::cell::HEALPixCell, survey::config::HiPSConfig};
impl Tile {
pub fn new(
cell: &HEALPixCell,
hips_cdid: String,
hips_url: String,
format: ImageFormatType,
channel: Option<u32>,
) -> Self {
let ext = format.get_ext_file();
@@ -37,12 +39,19 @@ impl Tile {
let dir_idx = (idx / 10000) * 10000;
let url = format!(
"{}/Norder{}/Dir{}/Npix{}.{}",
hips_url, depth, dir_idx, idx, ext
);
let mut url = format!("{}/Norder{}/Dir{}/Npix{}", hips_url, depth, dir_idx, idx);
let id = format!("{}{}{}{}", hips_cdid, depth, idx, ext);
// handle cube case
if let Some(channel) = channel {
url.push_str(&format!("_{:?}", channel));
}
// add the tile format
url.push_str(&format!(".{}", ext));
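// Illustrative example (hypothetical values): with depth=3, idx=12345 and ext="jpg",
// this builds "<root>/Norder3/Dir10000/Npix12345.jpg", or "Npix12345_2.jpg" when channel=Some(2).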
let channel = channel.unwrap_or(0);
let id = format!("{}{}{}{}{}", hips_cdid, depth, idx, channel, ext);
Tile {
hips_cdid,

View File

@@ -218,11 +218,6 @@ impl Tile {
pub fn cell(&self) -> &HEALPixCell {
&self.cell
}
/*#[inline(always)]
pub fn query(&self) -> query::Tile {
query::Tile::new(&self.cell, self.hips_url.clone(), self.format)
}*/
}
impl<'a> From<&'a TileRequest> for Option<Tile> {

View File

@@ -106,7 +106,6 @@ mod inertia;
pub mod math;
pub mod renderable;
mod shader;
mod survey;
mod tile_fetcher;
mod time;

View File

@@ -2,83 +2,7 @@ use al_api::hips::ImageExt;
use al_core::{image::format::ImageFormat, image::raw::ImageBuffer};
#[derive(Debug)]
pub struct EmptyTileImage {
inner: ImageType,
}
use al_core::{image::ImageType, pixel::Pixel};
impl EmptyTileImage {
fn new(size: i32, channel: ChannelType) -> EmptyTileImage {
debug_assert!(math::utils::is_power_of_two(size));
let inner = match channel {
ChannelType::RGBA8U => {
let image = ImageBuffer::<RGBA8U>::allocate(
&<<RGBA8U as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawRgba8u { image }
}
ChannelType::RGB8U => {
let image = ImageBuffer::<RGB8U>::allocate(
&<<RGB8U as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawRgb8u { image }
}
ChannelType::R32F => {
let image = ImageBuffer::<R32F>::allocate(
&<<R32F as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawR32f { image }
}
ChannelType::R64F => {
let image = ImageBuffer::<R32F>::allocate(
&<<R32F as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawR32f { image }
}
#[cfg(feature = "webgl2")]
ChannelType::R8UI => {
let image = ImageBuffer::<R8UI>::allocate(
&<<R8UI as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawR8ui { image }
}
#[cfg(feature = "webgl2")]
ChannelType::R16I => {
let image = ImageBuffer::<R16I>::allocate(
&<<R16I as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawR16i { image }
}
#[cfg(feature = "webgl2")]
ChannelType::R32I => {
let image = ImageBuffer::<R32I>::allocate(
&<<R32I as ImageFormat>::P as Pixel>::BLACK,
size,
size,
);
ImageType::RawR32i { image }
}
_ => todo!(),
};
EmptyTileImage {
inner,
//pixel_fill,
}
}
}
use al_core::{
image::{
@@ -89,55 +13,12 @@ use al_core::{
};
use cgmath::Vector3;
/*
impl Image for EmptyTileImage {
fn insert(
&self,
// The texture array
textures: &Texture2DArray,
// An offset to write the image in the texture array
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
self.inner.tex_sub_image_3d(textures, offset)
}
}
*/
use al_core::image::format::{ChannelType, ImageFormatType, RGB8U, RGBA8U};
//use super::TileArrayBuffer;
/*use super::{ArrayF32, ArrayF64, ArrayI16, ArrayI32, ArrayU8};
fn create_black_tile(format: FormatImageType, width: i32, value: f32) -> TileArrayBufferImage {
let _num_channels = format.get_num_channels() as i32;
match format {
FormatImageType::JPG => TileArrayBufferImage::U8(JPG::create_black_tile(width)),
FormatImageType::PNG => TileArrayBufferImage::U8(PNG::create_black_tile(width)),
FormatImageType::FITS(_fits) => match format.get_type() {
WebGl2RenderingContext::FLOAT => {
TileArrayBufferImage::F32(FITS::create_black_tile(width, value))
}
WebGl2RenderingContext::INT => {
TileArrayBufferImage::I32(FITS::create_black_tile(width, value as i32))
}
WebGl2RenderingContext::SHORT => {
TileArrayBufferImage::I16(FITS::create_black_tile(width, value as i16))
}
WebGl2RenderingContext::UNSIGNED_BYTE => {
TileArrayBufferImage::U8(FITS::create_black_tile(width, value as u8))
}
_ => unimplemented!(),
},
_ => unimplemented!(),
}
}*/
#[derive(Debug)]
pub struct HiPSConfig {
pub root_url: String,
// HiPS image format
// TODO: Make that independent of the HiPS and dependent on the ImageFormat instead
pub empty_image: EmptyTileImage,
// The size of the texture images
pub texture_size: i32,
@@ -147,6 +28,9 @@ pub struct HiPSConfig {
delta_depth: u8,
min_depth_tile: u8,
min_depth_texture: u8,
// the number of slices for cubes
cube_depth: Option<u32>,
// Num tiles per texture
num_tiles_per_texture: usize,
// Max depth of the current HiPS tiles
@@ -191,6 +75,7 @@ impl HiPSConfig {
pub fn new(properties: &HiPSProperties, img_ext: ImageExt) -> Result<HiPSConfig, JsValue> {
let root_url = properties.get_url();
let creator_did = properties.get_creator_did().to_string();
let cube_depth = properties.get_cube_depth();
// Define the size of the 2d texture array depending on the
// characteristics of the client
@@ -281,8 +166,6 @@ impl HiPSConfig {
}
};*/
let empty_image = EmptyTileImage::new(tile_size, format.get_channel());
let texture_size = std::cmp::min(512, tile_size << max_depth_tile);
//let texture_size = tile_size;
let num_tile_per_side_texture = (texture_size / tile_size) as usize;
@@ -308,8 +191,6 @@ impl HiPSConfig {
creator_did,
// HiPS name
root_url: root_url.to_string(),
// Tile size & blank tile data
empty_image,
// Texture config
// The size of the texture images
texture_size,
@@ -334,13 +215,14 @@ impl HiPSConfig {
tex_storing_integers,
tex_storing_unsigned_int,
// the number of slices in a cube
cube_depth,
size_tile_uv,
frame,
bitpix,
format,
tile_size,
//dataproduct_subtype,
//colored,
};
Ok(hips_config)
@@ -415,9 +297,6 @@ impl HiPSConfig {
self.format = format;
// Recompute the empty image
self.empty_image = EmptyTileImage::new(self.tile_size, self.format.get_channel());
// Recompute if the survey will be colored or not
/*self.colored = if self.tex_storing_fits {
false
@@ -509,11 +388,6 @@ impl HiPSConfig {
pub fn is_colored(&self) -> bool {
self.format.is_colored()
}
/*#[inline(always)]
pub fn get_default_image(&self) -> &EmptyTileImage {
&self.empty_image
}*/
}
use al_core::shader::{SendUniforms, ShaderBound};

View File

@@ -16,13 +16,13 @@ use al_core::shader::{SendUniforms, ShaderBound};
use al_core::Texture2DArray;
use al_core::WebGlContext;
use super::config::HiPSConfig;
use super::texture::Texture;
use super::texture::TextureUniforms;
use super::texture::{Texture, TextureUniforms};
use crate::downloader::request::allsky::Allsky;
use crate::healpix::cell::HEALPixCell;
use crate::healpix::cell::NUM_HPX_TILES_DEPTH_ZERO;
use crate::math::lonlat::LonLatT;
use crate::renderable::hips::config::HiPSConfig;
use crate::time::Time;
use crate::Abort;
use crate::JsValue;
@@ -305,7 +305,7 @@ impl HiPS2DBuffer {
// This method pushes a new downloaded tile into the buffer
// It must be ensured that the tile is not already contained in the buffer
pub fn push<I: Image + std::fmt::Debug>(
pub fn push<I: Image>(
&mut self,
cell: &HEALPixCell,
image: I,

View File

@@ -0,0 +1,825 @@
pub mod buffer;
pub mod texture;
use al_api::hips::ImageExt;
use al_api::hips::ImageMetadata;
use al_core::colormap::Colormap;
use al_core::colormap::Colormaps;
use al_core::image::format::ChannelType;
use al_core::image::Image;
use al_core::shader::Shader;
use al_core::webgl_ctx::GlWrapper;
use al_core::VecData;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
use crate::math::{angle::Angle, vector::dist2};
use crate::ProjectionType;
use crate::camera::CameraViewPort;
use crate::shader::ShaderManager;
use crate::{math::lonlat::LonLatT, utils};
use crate::downloader::request::allsky::Allsky;
use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
use crate::math::lonlat::LonLat;
use crate::renderable::utils::index_patch::DefaultPatchIndexIter;
use crate::time::Time;
use super::config::HiPSConfig;
use std::collections::HashSet;
// Recursively compute the number of subdivisions needed for a cell
// so that it does not get too skewed
use buffer::HiPS2DBuffer;
use texture::Texture;
use super::raytracing::RayTracer;
use super::uv::{TileCorner, TileUVW};
use cgmath::Matrix;
use wasm_bindgen::JsValue;
use web_sys::WebGl2RenderingContext;
const M: f64 = 280.0 * 280.0;
const N: f64 = 150.0 * 150.0;
const RAP: f64 = 0.7;
fn is_too_large(cell: &HEALPixCell, camera: &CameraViewPort, projection: &ProjectionType) -> bool {
let vertices = cell
.vertices()
.iter()
.filter_map(|(lon, lat)| {
let vertex = crate::math::lonlat::radec_to_xyzw(Angle(*lon), Angle(*lat));
projection.icrs_celestial_to_screen_space(&vertex, camera)
})
.collect::<Vec<_>>();
if vertices.len() < 4 {
false
} else {
let d1 = dist2(vertices[0].as_ref(), &vertices[2].as_ref());
let d2 = dist2(vertices[1].as_ref(), &vertices[3].as_ref());
if d1 > M || d2 > M {
true
} else if d1 < N && d2 < N {
false
} else {
let rap = if d2 > d1 { d1 / d2 } else { d2 / d1 };
rap < RAP
}
}
}
fn num_subdivision(cell: &HEALPixCell, camera: &CameraViewPort, projection: &ProjectionType) -> u8 {
let d = cell.depth();
// Subdivide all cells at least one time.
// TODO: use a single subdivision number computed from the current cells inside the view
// i.e. subdivide all cells in the view with the cell that has to be the most subdivided
let mut num_sub = 1;
if d < 2 {
num_sub = 2 - d;
}
// Largest deformation cell among the cells of a specific depth
let largest_center_to_vertex_dist =
healpix::largest_center_to_vertex_distance(d, 0.0, healpix::TRANSITION_LATITUDE);
let smallest_center_to_vertex_dist =
healpix::largest_center_to_vertex_distance(d, 0.0, healpix::LAT_OF_SQUARE_CELL);
let (lon, lat) = cell.center();
let center_to_vertex_dist = healpix::largest_center_to_vertex_distance(d, lon, lat);
let skewed_factor = (center_to_vertex_dist - smallest_center_to_vertex_dist)
/ (largest_center_to_vertex_dist - smallest_center_to_vertex_dist);
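// skewed_factor normalizes the cell's center-to-vertex distance between the smallest
// and largest values at this depth: ~0 for square-like cells, ~1 for the most skewed ones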
if skewed_factor > 0.25 || is_too_large(cell, camera, projection) || cell.is_on_pole() {
num_sub += 1;
}
num_sub
}
pub struct TextureToDraw<'a, 'b> {
pub starting_texture: &'a Texture,
pub ending_texture: &'a Texture,
pub cell: &'b HEALPixCell,
}
impl<'a, 'b> TextureToDraw<'a, 'b> {
fn new(
starting_texture: &'a Texture,
ending_texture: &'a Texture,
cell: &'b HEALPixCell,
) -> TextureToDraw<'a, 'b> {
TextureToDraw {
starting_texture,
ending_texture,
cell,
}
}
}
pub fn get_raster_shader<'a>(
cmap: &Colormap,
gl: &WebGlContext,
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
if config.get_format().is_colored() && cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color.frag",
)
} else {
if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap.frag",
)
}
}
}
pub fn get_raytracer_shader<'a>(
cmap: &Colormap,
gl: &WebGlContext,
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
//let colored_hips = config.is_colored();
if config.get_format().is_colored() && cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color.frag",
)
} else {
if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap.frag",
)
}
}
}
pub struct HiPS2D {
//color: Color,
// The image survey texture buffer
textures: HiPS2DBuffer,
// The projected vertices data
// For WebGL2 wasm, the data are interleaved
//#[cfg(feature = "webgl2")]
//vertices: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 0) in vec3 position;
position: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 1) in vec3 uv_start;
uv_start: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 2) in vec3 uv_end;
uv_end: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 3) in float time_tile_received;
time_tile_received: Vec<f32>,
idx_vertices: Vec<u16>,
num_idx: usize,
vao: VertexArrayObject,
gl: WebGlContext,
footprint_moc: Option<HEALPixCoverage>,
// A buffer storing the cells in the view
hpx_cells_in_view: Vec<HEALPixCell>,
}
impl HiPS2D {
pub fn new(config: HiPSConfig, gl: &WebGlContext) -> Result<Self, JsValue> {
let mut vao = VertexArrayObject::new(gl);
// layout (location = 0) in vec2 lonlat;
// layout (location = 1) in vec3 position;
// layout (location = 2) in vec3 uv_start;
// layout (location = 3) in vec3 uv_end;
// layout (location = 4) in float time_tile_received;
//let vertices = vec![0.0; MAX_NUM_FLOATS_TO_DRAW];
//let indices = vec![0_u16; MAX_NUM_INDICES_TO_DRAW];
//let vertices = vec![];
let position = vec![];
let uv_start = vec![];
let uv_end = vec![];
let time_tile_received = vec![];
let idx_vertices = vec![];
#[cfg(feature = "webgl2")]
vao.bind_for_update()
.add_array_buffer_single(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
.add_array_buffer_single(
3,
"uv_start",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&uv_start),
)
.add_array_buffer_single(
3,
"uv_end",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&uv_end),
)
.add_array_buffer_single(
1,
"time_tile_received",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&time_tile_received),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u16>(&idx_vertices),
)
.unbind();
let num_idx = 0;
let textures = HiPS2DBuffer::new(gl, config)?;
let gl = gl.clone();
let footprint_moc = None;
let hpx_cells_in_view = vec![];
// request the allsky texture
Ok(Self {
// The image survey texture buffer
textures,
num_idx,
vao,
gl,
position,
uv_start,
uv_end,
time_tile_received,
idx_vertices,
footprint_moc,
hpx_cells_in_view,
})
}
pub fn look_for_new_tiles<'a>(
&'a mut self,
camera: &'a CameraViewPort,
proj: &ProjectionType,
) -> Option<impl Iterator<Item = HEALPixCell> + 'a> {
// do not add tiles if the view is already at depth 0
let cfg = self.get_config();
let mut depth_tile = (camera.get_texture_depth() + cfg.delta_depth())
.min(cfg.get_max_depth_tile())
.max(cfg.get_min_depth_tile());
let dd = cfg.delta_depth();
//let min_depth_tile = self.get_min_depth_tile();
//let delta_depth = self.get_config().delta_depth();
//let min_bound_depth = min_depth_tile.max(delta_depth);
// do not ask to query tiles that:
// * either do not exist because < to min_depth_tile
// * either are part of a base tile already handled i.e. tiles < delta_depth
//console_log(depth_tile);
//console_log(min_bound_depth);
//if depth_tile >= min_bound_depth {
//let depth_tile = depth_tile.max(min_bound_depth);
let survey_frame = cfg.get_frame();
let mut already_considered_tiles = HashSet::new();
// raytracer is rendering and the shader only renders HPX texture cells of depth 0
if camera.is_raytracing(proj) {
depth_tile = 0;
}
let tile_cells_iter = camera
.get_hpx_cells(depth_tile, survey_frame)
//.flat_map(move |cell| {
// let texture_cell = cell.get_texture_cell(delta_depth);
// texture_cell.get_tile_cells(delta_depth)
//})
.into_iter()
.flat_map(move |tile_cell| {
let tex_cell = tile_cell.get_texture_cell(dd);
tex_cell.get_tile_cells(dd)
})
.filter(move |tile_cell| {
if already_considered_tiles.contains(tile_cell) {
return false;
}
already_considered_tiles.insert(*tile_cell);
if let Some(moc) = self.footprint_moc.as_ref() {
moc.intersects_cell(tile_cell) && !self.update_priority_tile(tile_cell)
} else {
!self.update_priority_tile(tile_cell)
}
});
Some(tile_cells_iter)
}
pub fn contains_tile(&self, cell: &HEALPixCell) -> bool {
self.textures.contains_tile(cell)
}
pub fn update(&mut self, camera: &mut CameraViewPort, projection: &ProjectionType) {
let raytracing = camera.is_raytracing(projection);
if raytracing {
return;
}
// rasterizer mode
let available_tiles = self.textures.reset_available_tiles();
let new_cells_in_view = self.retrieve_cells_in_camera(camera);
if new_cells_in_view || available_tiles {
self.recompute_vertices(camera, projection);
}
}
// Returns true if the cells in the view have changed with respect to the last frame
pub fn retrieve_cells_in_camera(&mut self, camera: &CameraViewPort) -> bool {
let cfg = self.get_config();
// Get the coo system transformation matrix
let hips_frame = cfg.get_frame();
let depth = camera.get_texture_depth().min(cfg.get_max_depth_texture());
let hpx_cells_in_view = camera.get_hpx_cells(depth, hips_frame);
let new_cells = if hpx_cells_in_view.len() != self.hpx_cells_in_view.len() {
true
} else {
!self
.hpx_cells_in_view
.iter()
.zip(hpx_cells_in_view.iter())
.all(|(&a, &b)| a == b)
};
self.hpx_cells_in_view = hpx_cells_in_view;
new_cells
}
#[inline]
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
self.footprint_moc = Some(moc);
}
#[inline]
pub fn get_moc(&self) -> Option<&HEALPixCoverage> {
self.footprint_moc.as_ref()
}
pub fn set_img_format(&mut self, ext: ImageExt) -> Result<(), JsValue> {
self.textures.set_format(&self.gl, ext)
}
pub fn is_allsky(&self) -> bool {
self.textures.config().is_allsky
}
// Position given is in the camera space
pub fn read_pixel(
&self,
pos: &LonLatT<f64>,
camera: &CameraViewPort,
) -> Result<JsValue, JsValue> {
// 1. Convert it to the hips frame system
let cfg = self.textures.config();
let camera_frame = camera.get_coo_system();
let hips_frame = cfg.get_frame();
let pos = crate::coosys::apply_coo_system(camera_frame, hips_frame, &pos.vector());
// Get the array of textures from that survey
let tile_depth = camera.get_texture_depth().min(cfg.get_max_depth_texture());
let pos_tex = self
.textures
.get_pixel_position_in_texture(&pos.lonlat(), tile_depth)?;
let slice_idx = pos_tex.z as usize;
let texture_array = self.textures.get_texture_array();
unimplemented!();
/*let value = texture_array[slice_idx].read_pixel(pos_tex.x, pos_tex.y)?;
if cfg.tex_storing_fits {
let value = value
.as_f64()
.ok_or_else(|| JsValue::from_str("Error unwraping the pixel read value."))?;
let scale = cfg.scale as f64;
let offset = cfg.offset as f64;
Ok(JsValue::from_f64(value * scale + offset))
} else {
Ok(value)
}*/
}
fn recompute_vertices(&mut self, camera: &mut CameraViewPort, projection: &ProjectionType) {
self.position.clear();
self.uv_start.clear();
self.uv_end.clear();
self.time_tile_received.clear();
self.idx_vertices.clear();
let cfg = self.textures.config();
// Get the coo system transformation matrix
let channel = cfg.get_format().get_channel();
// Retrieve the model and inverse model matrix
let mut off_indices = 0;
for cell in &self.hpx_cells_in_view {
// filter textures that are not in the moc
let cell = if let Some(moc) = self.footprint_moc.as_ref() {
if moc.intersects_cell(&cell) {
Some(&cell)
} else {
if channel == ChannelType::RGB8U {
// The rasterizer does not render tiles that are not in the MOC.
// This is not a problem for HiPSes rendered with transparency (FITS or PNG),
// but JPEG tiles show up black where no pixel data is found.
// We therefore must draw the tiles outside the HiPS MOC in black.
Some(&cell)
} else {
None
}
}
} else {
Some(&cell)
};
if let Some(cell) = cell {
let texture_to_draw = if self.textures.contains(cell) {
if let Some(ending_cell_in_tex) = self.textures.get(cell) {
if let Some(parent_cell) = self.textures.get_nearest_parent(cell) {
if let Some(starting_cell_in_tex) = self.textures.get(&parent_cell) {
Some(TextureToDraw::new(
starting_cell_in_tex,
ending_cell_in_tex,
cell,
))
} else {
// no blending here
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
None
}
} else {
if let Some(parent_cell) = self.textures.get_nearest_parent(cell) {
if let Some(ending_cell_in_tex) = self.textures.get(&parent_cell) {
if let Some(grand_parent_cell) =
self.textures.get_nearest_parent(&parent_cell)
{
if let Some(starting_cell_in_tex) =
self.textures.get(&grand_parent_cell)
{
Some(TextureToDraw::new(
starting_cell_in_tex,
ending_cell_in_tex,
cell,
))
} else {
// no blending
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
unreachable!();
}
} else {
None
}
};
if let Some(TextureToDraw {
cell,
starting_texture,
ending_texture,
}) = texture_to_draw
{
let uv_0 = TileUVW::new(cell, starting_texture, cfg);
let uv_1 = TileUVW::new(cell, ending_texture, cfg);
let d01s = uv_0[TileCorner::BottomRight].x - uv_0[TileCorner::BottomLeft].x;
let d02s = uv_0[TileCorner::TopLeft].y - uv_0[TileCorner::BottomLeft].y;
let d01e = uv_1[TileCorner::BottomRight].x - uv_1[TileCorner::BottomLeft].x;
let d02e = uv_1[TileCorner::TopLeft].y - uv_1[TileCorner::BottomLeft].y;
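// d01*/d02* are the spans of the tile's UV quad along its two axes; combined with
// (hj0, hi0) in [0, 1] below, they linearly interpolate the UV of every grid vertex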
let start_time = ending_texture.start_time().as_millis();
let num_subdivision = num_subdivision(cell, camera, projection);
let n_segments_by_side: usize = 1 << (num_subdivision as usize);
let n_segments_by_side_f32 = n_segments_by_side as f32;
let n_vertices_per_segment = n_segments_by_side + 1;
let mut pos = Vec::with_capacity((n_segments_by_side + 1) * 4);
let grid_lonlat =
healpix::nested::grid(cell.depth(), cell.idx(), n_segments_by_side as u16);
let grid_lonlat_iter = grid_lonlat.iter();
for (idx, &(lon, lat)) in grid_lonlat_iter.enumerate() {
let i: usize = idx / n_vertices_per_segment;
let j: usize = idx % n_vertices_per_segment;
let hj0 = (j as f32) / n_segments_by_side_f32;
let hi0 = (i as f32) / n_segments_by_side_f32;
let uv_start = [
uv_0[TileCorner::BottomLeft].x + hj0 * d01s,
uv_0[TileCorner::BottomLeft].y + hi0 * d02s,
uv_0[TileCorner::BottomLeft].z,
];
let uv_end = [
uv_1[TileCorner::BottomLeft].x + hj0 * d01e,
uv_1[TileCorner::BottomLeft].y + hi0 * d02e,
uv_1[TileCorner::BottomLeft].z,
];
self.uv_start.extend(uv_start);
self.uv_end.extend(uv_end);
self.time_tile_received.push(start_time);
pos.push([lon as f32, lat as f32]);
}
let patch_indices_iter = DefaultPatchIndexIter::new(
&(0..=n_segments_by_side),
&(0..=n_segments_by_side),
n_vertices_per_segment,
)
.flatten()
.map(|indices| {
[
indices.0 + off_indices,
indices.1 + off_indices,
indices.2 + off_indices,
]
})
.flatten();
self.idx_vertices.extend(patch_indices_iter);
off_indices += pos.len() as u16;
// Replace options with an arbitrary vertex
let position_iter = pos
.into_iter()
//.map(|ndc| ndc.unwrap_or([0.0, 0.0]))
.flatten();
self.position.extend(position_iter);
}
}
}
self.num_idx = self.idx_vertices.len();
let mut vao = self.vao.bind_for_update();
vao.update_array(
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.position),
)
.update_array(
"uv_start",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.uv_start),
)
.update_array(
"uv_end",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.uv_end),
)
.update_array(
"time_tile_received",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.time_tile_received),
)
.update_element_array(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.idx_vertices),
);
}
// Returns true if the tile is present in the survey
pub fn update_priority_tile(&mut self, cell: &HEALPixCell) -> bool {
if self.textures.contains_tile(cell) {
// The cell is present in the survey, we update its priority
self.textures.update_priority(cell);
true
} else {
false
}
}
pub fn add_tile<I: Image>(
&mut self,
cell: &HEALPixCell,
image: I,
time_request: Time,
) -> Result<(), JsValue> {
self.textures.push(&cell, image, time_request)
}
pub fn add_allsky(&mut self, allsky: Allsky) -> Result<(), JsValue> {
self.textures.push_allsky(allsky)
}
/* Accessors */
#[inline]
pub fn get_config(&self) -> &HiPSConfig {
self.textures.config()
}
#[inline]
pub fn get_config_mut(&mut self) -> &mut HiPSConfig {
self.textures.config_mut()
}
pub fn draw(
&self,
shaders: &mut ShaderManager,
colormaps: &Colormaps,
camera: &CameraViewPort,
raytracer: &RayTracer,
cfg: &ImageMetadata,
proj: &ProjectionType,
) -> Result<(), JsValue> {
// Get the coo system transformation matrix
let selected_frame = camera.get_coo_system();
let hips_cfg = self.textures.config();
let hips_frame = hips_cfg.get_frame();
let c = selected_frame.to(hips_frame);
let raytracing = camera.is_raytracing(proj);
let config = self.get_config();
//self.gl.enable(WebGl2RenderingContext::BLEND);
let ImageMetadata {
color,
opacity,
blend_cfg,
..
} = cfg;
// Add starting fading
//let fading = self.get_fading_factor();
//let opacity = opacity * fading;
// Get the colormap from the color
let cmap = colormaps.get(color.cmap_name.as_ref());
blend_cfg.enable(&self.gl, || {
if raytracing {
let w2v = c * (*camera.get_w2m());
let shader = get_raytracer_shader(cmap, &self.gl, shaders, &config)?;
let shader = shader.bind(&self.gl);
shader
.attach_uniforms_from(camera)
.attach_uniforms_from(&self.textures)
// send the cmap apart from the color config
.attach_uniforms_with_params_from(cmap, colormaps)
.attach_uniforms_from(color)
.attach_uniform("model", &w2v)
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniforms_from(colormaps);
raytracer.draw(&shader);
} else {
let v2w = (*camera.get_m2w()) * c.transpose();
// The rasterizer has a buffer containing:
// - The vertices of the HEALPix cells for the most refined survey
// - The starting and ending uv for the blending animation
// - The time for each HEALPix cell at which the animation begins
//
// Each of these data can change under different circumstances:
// - The vertices are changed if:
//   * new cells are added/removed with respect to the previous frame
// - The UVs are changed if:
//   * new cells are added/removed with respect to the previous frame
//   * new tiles became available for the GPU
let shader = get_raster_shader(cmap, &self.gl, shaders, &config)?.bind(&self.gl);
shader
.attach_uniforms_from(&self.textures)
// send the cmap apart from the color config
.attach_uniforms_with_params_from(cmap, colormaps)
.attach_uniforms_from(color)
.attach_uniforms_from(camera)
.attach_uniform("inv_model", &v2w)
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniform("u_proj", proj)
.attach_uniforms_from(colormaps)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
Some(self.num_idx as i32),
WebGl2RenderingContext::UNSIGNED_SHORT,
0,
);
}
Ok(())
})?;
//self.gl.disable(WebGl2RenderingContext::BLEND);
Ok(())
}
}

View File

@@ -34,7 +34,7 @@ pub struct Texture {
//missing: bool,
}
use super::config::HiPSConfig;
use crate::renderable::hips::config::HiPSConfig;
impl Texture {
pub fn new(texture_cell: &HEALPixCell, idx: i32, time_request: Time) -> Texture {

View File

@@ -0,0 +1,289 @@
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use al_core::image::format::ChannelType;
use cgmath::Vector3;
use al_api::hips::ImageExt;
use al_core::webgl_ctx::WebGlRenderingCtx;
use al_core::image::format::ImageFormat;
use al_core::image::format::{R16I, R32F, R32I, R64F, R8UI, RGB8U, RGBA8U};
use al_core::image::Image;
use al_core::shader::{SendUniforms, ShaderBound};
use al_core::Texture2DArray;
use al_core::WebGlContext;
use super::texture::HEALPixTexturedCube;
use crate::downloader::request::allsky::Allsky;
use crate::healpix::cell::HEALPixCell;
use crate::healpix::cell::NUM_HPX_TILES_DEPTH_ZERO;
use crate::math::lonlat::LonLatT;
use crate::renderable::hips::config::HiPSConfig;
use crate::time::Time;
use crate::Abort;
use crate::JsValue;
// Fixed sized binary heap
pub struct HiPS3DBuffer {
// Some information about the HiPS
textures: HashMap<HEALPixCell, HEALPixTexturedCube>,
config: HiPSConfig,
num_root_textures_available: u8,
available_tiles_during_frame: bool,
gl: WebGlContext,
}
impl HiPS3DBuffer {
pub fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
let textures = HashMap::new();
let num_root_textures_available = 0;
let available_tiles_during_frame = false;
let gl = gl.clone();
Ok(Self {
config,
num_root_textures_available,
textures,
available_tiles_during_frame,
gl,
})
}
/*
pub fn set_format(&mut self, gl: &WebGlContext, ext: ImageExt) -> Result<(), JsValue> {
self.config.set_image_fmt(ext)?;
let channel = self.config.get_format().get_channel();
self.texture_2d_array = match channel {
ChannelType::RGBA32F => unimplemented!(),
ChannelType::RGB32F => unimplemented!(),
ChannelType::RGBA8U => create_texture_array::<RGBA8U>(gl, &self.config)?,
ChannelType::RGB8U => create_texture_array::<RGB8U>(gl, &self.config)?,
ChannelType::R32F => create_texture_array::<R32F>(gl, &self.config)?,
#[cfg(feature = "webgl2")]
ChannelType::R8UI => create_texture_array::<R8UI>(gl, &self.config)?,
#[cfg(feature = "webgl2")]
ChannelType::R16I => create_texture_array::<R16I>(gl, &self.config)?,
#[cfg(feature = "webgl2")]
ChannelType::R32I => create_texture_array::<R32I>(gl, &self.config)?,
#[cfg(feature = "webgl2")]
ChannelType::R64F => create_texture_array::<R64F>(gl, &self.config)?,
};
let now = Time::now();
self.base_textures = [
Texture::new(&HEALPixCell(0, 0), 0, now),
Texture::new(&HEALPixCell(0, 1), 1, now),
Texture::new(&HEALPixCell(0, 2), 2, now),
Texture::new(&HEALPixCell(0, 3), 3, now),
Texture::new(&HEALPixCell(0, 4), 4, now),
Texture::new(&HEALPixCell(0, 5), 5, now),
Texture::new(&HEALPixCell(0, 6), 6, now),
Texture::new(&HEALPixCell(0, 7), 7, now),
Texture::new(&HEALPixCell(0, 8), 8, now),
Texture::new(&HEALPixCell(0, 9), 9, now),
Texture::new(&HEALPixCell(0, 10), 10, now),
Texture::new(&HEALPixCell(0, 11), 11, now),
];
self.heap.clear();
self.textures.clear();
//self.ready = false;
self.num_root_textures_available = 0;
self.available_tiles_during_frame = false;
Ok(())
}*/
pub fn push_allsky(&mut self, allsky: Allsky, slice_idx: u16) -> Result<(), JsValue> {
let Allsky {
image,
time_req,
depth_tile,
..
} = allsky;
{
let mutex_locked = image.borrow();
let images = mutex_locked.as_ref().unwrap_abort();
for (idx, image) in images.iter().enumerate() {
self.push(
&HEALPixCell(depth_tile, idx as u64),
image,
time_req,
slice_idx,
)?;
}
}
Ok(())
}
// This method pushes a new downloaded tile into the buffer
// It must be ensured that the tile is not already contained in the buffer
pub fn push<I: Image>(
&mut self,
cell: &HEALPixCell,
image: I,
time_request: Time,
slice_idx: u16,
) -> Result<(), JsValue> {
let tex = if let Some(tex) = self.textures.get_mut(cell) {
tex
} else {
self.textures
.insert(*cell, HEALPixTexturedCube::new(*cell, time_request));
self.textures.get_mut(cell).unwrap()
};
// copy to the 3D textured block
tex.append_slice(image, slice_idx, &self.config, &self.gl)?;
self.available_tiles_during_frame = true;
Ok(())
}
// Returns whether new tiles became available during the frame
pub fn reset_available_tiles(&mut self) -> bool {
let available_tiles_during_frame = self.available_tiles_during_frame;
self.available_tiles_during_frame = false;
available_tiles_during_frame
}
// Tells whether a texture is available, meaning all its sub-tiles
// have been written to the GPU
pub fn contains(&self, texture_cell: &HEALPixCell) -> bool {
self.get(texture_cell).is_some()
}
// lonlat is given in the
/*pub fn get_pixel_position_in_texture(
&self,
lonlat: &LonLatT<f64>,
depth: u8,
) -> Result<Vector3<i32>, JsValue> {
let (pix, dx, dy) = crate::healpix::utils::hash_with_dxdy(depth, lonlat);
let texture_cell = HEALPixCell(depth, pix);
if let Some(texture) = self.get(&texture_cell) {
let cfg = &self.config;
// Index of the texture in the total set of textures
let texture_idx = texture.idx();
// The size of the global texture containing the tiles
let texture_size = cfg.get_texture_size();
// Offset in the slice in pixels
let mut offset = Vector3::new(
(dy * (texture_size as f64)) as i32,
(dx * (texture_size as f64)) as i32,
texture_idx,
);
// Offset in the slice in pixels
if self.config.tex_storing_fits {
let texture_size = self.config.get_texture_size() as f32;
let mut uvy = offset.y as f32 / texture_size;
uvy = self.config.size_tile_uv
+ 2.0 * self.config.size_tile_uv * (uvy / self.config.size_tile_uv).floor()
- uvy;
offset.y = (uvy * texture_size) as i32;
}
Ok(offset)
} else {
Err(JsValue::from_str(&format!(
"{:?} not loaded in the GPU, please wait before trying again.",
texture_cell
)))
}
}*/
/// Accessors
pub fn get(&self, cell: &HEALPixCell) -> Option<&HEALPixTexturedCube> {
self.textures.get(cell)
}
// Get the nearest parent tile found in the CPU buffer
pub fn get_nearest_parent(&self, cell: &HEALPixCell) -> Option<HEALPixCell> {
if cell.is_root() {
// Root cells are in the buffer by definition
Some(*cell)
} else {
let mut parent_cell = cell.parent();
while !self.contains(&parent_cell) && !parent_cell.is_root() {
parent_cell = parent_cell.parent();
}
if self.contains(&parent_cell) {
Some(parent_cell)
} else {
None
}
}
}
pub fn config(&self) -> &HiPSConfig {
&self.config
}
pub fn config_mut(&mut self) -> &mut HiPSConfig {
&mut self.config
}
/*pub fn get_texture_array(&self) -> &Texture2DArray {
&self.texture_2d_array
}*/
}
/*
impl SendUniforms for HiPS3DBuffer {
// Send only the allsky textures
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
// Send the textures
/*let textures = &self.base_textures;
for (idx, texture) in textures.iter().enumerate() {
let texture_uniforms = TextureUniforms::new(texture, idx as i32);
shader.attach_uniforms_from(&texture_uniforms);
}*/
//if self.raytracing {
for idx in 0..NUM_HPX_TILES_DEPTH_ZERO {
let cell = HEALPixCell(0, idx as u64);
let texture = self.get(&cell).unwrap();
let texture_uniforms = TextureUniforms::new(texture, idx as i32);
shader.attach_uniforms_from(&texture_uniforms);
}
//}
let shader = shader
.attach_uniforms_from(&self.config)
.attach_uniform("tex", &self.texture_2d_array)
.attach_uniform("num_slices", &(self.texture_2d_array.num_slices as i32));
shader
}
}
*/
impl Drop for HiPS3DBuffer {
fn drop(&mut self) {
// drop all the 3D block textures
self.textures.clear();
}
}

View File

@@ -0,0 +1,2 @@
pub mod buffer;
pub mod texture;

View File

@@ -0,0 +1,251 @@
use crate::{healpix::cell::HEALPixCell, time::Time};
use al_core::image::format::{
ChannelType, ImageFormatType, R16I, R32F, R32I, R64F, R8UI, RGB32F, RGB8U, RGBA32F, RGBA8U,
};
use al_core::image::Image;
use al_core::texture::Texture3D;
use al_core::webgl_ctx::WebGlRenderingCtx;
use cgmath::Vector3;
use std::collections::HashSet;
use wasm_bindgen::JsValue;
pub struct HEALPixTexturedCube {
tile_cell: HEALPixCell,
// Precomputed uniq number
uniq: i32,
// The time the texture has been received
// If the texture contains multiple tiles, then the receiving time
// is set when all the tiles have been copied to the buffer
start_time: Option<Time>,
// The time request of the texture is the time request
// of the first tile being inserted in it
// It is then only given in the constructor of Texture
// This is approximate, it should correspond to the minimum
// of the time requests of the cells currently contained in the
// texture. But this is too expensive because at each tile inserted
// in the buffer, one should re-evaluate the priority of the texture
// in the buffer's binary heap.
time_request: Time,
// We allow 512 cubic tiles of 32 slices each, which allows storing at most 16384 slices
textures: Vec<Option<Texture3D>>,
// A set of already inserted slices. Each cubic tile can hold 32 slices. The occupancy of the
// slices inside a cubic tile is tracked with a u32 bitmask
slices: [u32; 512],
}
use crate::renderable::hips::config::HiPSConfig;
use crate::WebGlContext;
impl HEALPixTexturedCube {
pub fn new(tile_cell: HEALPixCell, time_request: Time) -> Self {
let start_time = None;
let uniq = tile_cell.uniq();
let textures = std::iter::repeat(None).take(512).collect();
let slices = [0; 512];
Self {
tile_cell,
uniq,
time_request,
start_time,
textures,
slices,
}
}
// Get the cubic texture containing the given slice, and the slice index inside it
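// A cubic tile holds 32 slices, so for a global slice index s:
// cube_idx = s / 32 (i.e. s >> 5) and slice_idx = s % 32 (i.e. s & 0x1f)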
pub fn get_cubic_texture_from_slice(&self, slice: u16) -> (Option<&Texture3D>, u8) {
let cube_idx = slice >> 5;
let slice_idx = (slice & 0x1f) as u8;
(self.textures[cube_idx as usize].as_ref(), slice_idx)
}
// Append an image as a slice of the cubic tile it belongs to,
// allocating the underlying 3D texture for that cubic tile if needed.
// Does nothing if the slice has already been uploaded.
pub fn append_slice<I: Image>(
&mut self,
image: I,
slice: u16,
cfg: &HiPSConfig,
gl: &WebGlContext,
) -> Result<(), JsValue> {
let cube_idx = (slice >> 5) as usize;
let texture = if let Some(texture) = self.textures[cube_idx as usize].as_ref() {
texture
} else {
let tile_size = cfg.get_tile_size();
let params = &[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents r-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_R,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
];
let texture = match cfg.get_format().get_channel() {
ChannelType::RGBA32F => {
Texture3D::create_empty::<RGBA32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGB32F => {
Texture3D::create_empty::<RGB32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGBA8U => {
Texture3D::create_empty::<RGBA8U>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGB8U => {
Texture3D::create_empty::<RGB8U>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R32F => {
Texture3D::create_empty::<R32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R64F => {
Texture3D::create_empty::<R64F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R8UI => {
Texture3D::create_empty::<R8UI>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R16I => {
Texture3D::create_empty::<R16I>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R32I => {
Texture3D::create_empty::<R32I>(gl, tile_size, tile_size, 32, params)
}
};
self.textures[cube_idx] = Some(texture?);
self.textures[cube_idx].as_ref().unwrap()
};
let slice_idx = slice & 0x1f;
// if the slice has already been uploaded, do not tex sub-image again
if self.slices[cube_idx] & (1 << slice_idx) == 0 {
image.insert_into_3d_texture(texture, &Vector3::<i32>::new(0, 0, slice_idx as i32))?;
// mark the slice as written
self.slices[cube_idx] |= 1 << slice_idx;
}
self.start_time = Some(Time::now());
Ok(())
}
// Tells whether the given slice has already been uploaded
pub fn contains_slice(&self, slice: u16) -> bool {
let cube_idx = (slice >> 5) as usize;
let slice_idx = slice & 0x1f;
self.slices[cube_idx] & (1 << slice_idx) != 0
}
// Getter
// Returns the current time if no slice has been received yet
pub fn start_time(&self) -> Time {
if let Some(t) = self.start_time {
t
} else {
Time::now()
}
}
pub fn time_request(&self) -> Time {
self.time_request
}
pub fn cell(&self) -> &HEALPixCell {
&self.tile_cell
}
// Setter
/*pub fn replace(&mut self, texture_cell: &HEALPixCell, time_request: Time) {
// Cancel the tasks copying the tiles contained in the texture
// which have not yet been completed.
//self.clear_tasks_in_progress(config, exec);
self.texture_cell = *texture_cell;
self.uniq = texture_cell.uniq();
self.full = false;
self.start_time = None;
self.time_request = time_request;
self.tiles.clear();
//self.missing = true;
self.num_tiles_written = 0;
}*/
}
use std::cmp::Ordering;
impl PartialOrd for HEALPixTexturedCube {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.uniq.partial_cmp(&other.uniq)
}
}
use crate::Abort;
impl Ord for HEALPixTexturedCube {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap_abort()
}
}
impl PartialEq for HEALPixTexturedCube {
fn eq(&self, other: &Self) -> bool {
self.uniq == other.uniq
}
}
impl Eq for HEALPixTexturedCube {}
/*
pub struct TextureUniforms<'a> {
texture: &'a HEALPixTexturedCube,
name: String,
}
impl<'a> TextureUniforms<'a> {
pub fn new(texture: &Texture, idx_texture: i32) -> TextureUniforms {
let name = format!("textures_tiles[{}].", idx_texture);
TextureUniforms { texture, name }
}
}
use al_core::shader::{SendUniforms, ShaderBound};
impl<'a> SendUniforms for TextureUniforms<'a> {
fn attach_uniforms<'b>(&self, shader: &'b ShaderBound<'b>) -> &'b ShaderBound<'b> {
shader
.attach_uniform(&format!("{}{}", self.name, "uniq"), &self.texture.uniq)
.attach_uniform(
&format!("{}{}", self.name, "texture_idx"),
&self.texture.idx,
)
.attach_uniform(
&format!("{}{}", self.name, "empty"),
//&((self.texture.full as u8) as f32),
&0.0,
)
.attach_uniform(
&format!("{}{}", self.name, "start_time"),
&self.texture.start_time(),
);
shader
}
}
*/

View File

@@ -1,885 +1,9 @@
pub mod config;
pub mod d2;
pub mod d3;
pub mod raytracing;
mod triangulation;
pub mod uv;
use al_api::hips::ImageExt;
use al_api::hips::ImageMetadata;
use al_core::colormap::Colormap;
use al_core::colormap::Colormaps;
use al_core::image::format::ChannelType;
use al_core::image::Image;
use al_core::shader::Shader;
use al_core::webgl_ctx::GlWrapper;
use al_core::VecData;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
use crate::math::{angle::Angle, vector::dist2};
use crate::ProjectionType;
use crate::camera::CameraViewPort;
use crate::{math::lonlat::LonLatT, utils};
use crate::{shader::ShaderManager, survey::config::HiPSConfig};
use crate::downloader::request::allsky::Allsky;
use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
use crate::math::lonlat::LonLat;
use crate::renderable::utils::index_patch::DefaultPatchIndexIter;
use crate::time::Time;
use std::collections::HashSet;
// Recursively compute the number of subdivisions needed for a cell
// so that it does not get too skewed
use crate::survey::buffer::HiPS2DBuffer;
use crate::survey::texture::Texture;
use raytracing::RayTracer;
use uv::{TileCorner, TileUVW};
use cgmath::Matrix;
use std::fmt::Debug;
use wasm_bindgen::JsValue;
use web_sys::WebGl2RenderingContext;
const M: f64 = 280.0 * 280.0;
const N: f64 = 150.0 * 150.0;
const RAP: f64 = 0.7;
fn is_too_large(cell: &HEALPixCell, camera: &CameraViewPort, projection: &ProjectionType) -> bool {
let vertices = cell
.vertices()
.iter()
.filter_map(|(lon, lat)| {
let vertex = crate::math::lonlat::radec_to_xyzw(Angle(*lon), Angle(*lat));
projection.icrs_celestial_to_screen_space(&vertex, camera)
})
.collect::<Vec<_>>();
if vertices.len() < 4 {
false
} else {
let d1 = dist2(vertices[0].as_ref(), &vertices[2].as_ref());
let d2 = dist2(vertices[1].as_ref(), &vertices[3].as_ref());
if d1 > M || d2 > M {
true
} else if d1 < N && d2 < N {
false
} else {
let rap = if d2 > d1 { d1 / d2 } else { d2 / d1 };
rap < RAP
}
}
}
fn num_subdivision(cell: &HEALPixCell, camera: &CameraViewPort, projection: &ProjectionType) -> u8 {
let d = cell.depth();
// Subdivide all cells at least one time.
// TODO: use a single subdivision number computed from the current cells inside the view
// i.e. subdivide all cells in the view with the cell that has to be the most subdivided
let mut num_sub = 1;
if d < 2 {
num_sub = 2 - d;
}
// Largest deformation cell among the cells of a specific depth
let largest_center_to_vertex_dist =
healpix::largest_center_to_vertex_distance(d, 0.0, healpix::TRANSITION_LATITUDE);
let smallest_center_to_vertex_dist =
healpix::largest_center_to_vertex_distance(d, 0.0, healpix::LAT_OF_SQUARE_CELL);
let (lon, lat) = cell.center();
let center_to_vertex_dist = healpix::largest_center_to_vertex_distance(d, lon, lat);
let skewed_factor = (center_to_vertex_dist - smallest_center_to_vertex_dist)
/ (largest_center_to_vertex_dist - smallest_center_to_vertex_dist);
if skewed_factor > 0.25 || is_too_large(cell, camera, projection) || cell.is_on_pole() {
num_sub += 1;
}
num_sub
}
pub struct TextureToDraw<'a, 'b> {
pub starting_texture: &'a Texture,
pub ending_texture: &'a Texture,
pub cell: &'b HEALPixCell,
}
impl<'a, 'b> TextureToDraw<'a, 'b> {
fn new(
starting_texture: &'a Texture,
ending_texture: &'a Texture,
cell: &'b HEALPixCell,
) -> TextureToDraw<'a, 'b> {
TextureToDraw {
starting_texture,
ending_texture,
cell,
}
}
}
pub fn get_raster_shader<'a>(
cmap: &Colormap,
gl: &WebGlContext,
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
if config.get_format().is_colored() && cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color.frag",
)
} else {
if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap.frag",
)
}
}
}
pub fn get_raytracer_shader<'a>(
cmap: &Colormap,
gl: &WebGlContext,
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
//let colored_hips = config.is_colored();
if config.get_format().is_colored() && cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color.frag",
)
} else {
if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap.frag",
)
}
}
}
pub struct HiPS {
//color: Color,
// The image survey texture buffer
textures: HiPS2DBuffer,
// The projected vertices data
// For WebGL2 wasm, the data are interleaved
//#[cfg(feature = "webgl2")]
//vertices: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 0) in vec3 position;
position: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 1) in vec3 uv_start;
uv_start: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 2) in vec3 uv_end;
uv_end: Vec<f32>,
//#[cfg(feature = "webgl1")]
// layout (location = 3) in float time_tile_received;
time_tile_received: Vec<f32>,
idx_vertices: Vec<u16>,
num_idx: usize,
vao: VertexArrayObject,
gl: WebGlContext,
//min_depth_tile: u8,
footprint_moc: Option<HEALPixCoverage>,
// A buffer storing the cells in the view
hpx_cells_in_view: Vec<HEALPixCell>,
}
impl HiPS {
pub fn new(config: HiPSConfig, gl: &WebGlContext) -> Result<Self, JsValue> {
let mut vao = VertexArrayObject::new(gl);
// layout (location = 0) in vec2 lonlat;
// layout (location = 1) in vec3 position;
// layout (location = 2) in vec3 uv_start;
// layout (location = 3) in vec3 uv_end;
// layout (location = 4) in float time_tile_received;
//let vertices = vec![0.0; MAX_NUM_FLOATS_TO_DRAW];
//let indices = vec![0_u16; MAX_NUM_INDICES_TO_DRAW];
//let vertices = vec![];
let position = vec![];
let uv_start = vec![];
let uv_end = vec![];
let time_tile_received = vec![];
let idx_vertices = vec![];
#[cfg(feature = "webgl2")]
vao.bind_for_update()
.add_array_buffer_single(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
.add_array_buffer_single(
3,
"uv_start",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&uv_start),
)
.add_array_buffer_single(
3,
"uv_end",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&uv_end),
)
.add_array_buffer_single(
1,
"time_tile_received",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&time_tile_received),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u16>(&idx_vertices),
)
.unbind();
#[cfg(feature = "webgl1")]
vao.bind_for_update()
.add_array_buffer(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
.add_array_buffer(
3,
"uv_start",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&uv_start),
)
.add_array_buffer(
3,
"uv_end",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&uv_end),
)
.add_array_buffer(
1,
"time_tile_received",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&time_tile_received),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u16>(&idx_vertices),
)
.unbind();
let num_idx = 0;
let textures = HiPS2DBuffer::new(gl, config)?;
let gl = gl.clone();
let footprint_moc = None;
let hpx_cells_in_view = vec![];
// request the allsky texture
Ok(HiPS {
// The image survey texture buffer
textures,
num_idx,
vao,
gl,
position,
uv_start,
uv_end,
time_tile_received,
idx_vertices,
footprint_moc,
hpx_cells_in_view,
})
}
pub fn look_for_new_tiles<'a>(
&'a mut self,
camera: &'a CameraViewPort,
proj: &ProjectionType,
) -> Option<impl Iterator<Item = HEALPixCell> + 'a> {
// do not add tiles if the view is already at depth 0
let mut depth_tile = (camera.get_texture_depth() + self.get_config().delta_depth())
.min(self.get_config().get_max_depth_tile())
.max(self.get_config().get_min_depth_tile());
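        // Tiles are delta_depth levels deeper than textures: the tile depth is the camera
        // texture depth plus delta_depth, clamped to the depth range provided by the survey.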
let dd = self.get_config().delta_depth();
//let min_depth_tile = self.get_min_depth_tile();
//let delta_depth = self.get_config().delta_depth();
//let min_bound_depth = min_depth_tile.max(delta_depth);
        // do not query tiles that:
        // * either do not exist because their depth is < min_depth_tile
        // * or belong to a base tile already handled, i.e. tiles of depth < delta_depth
//console_log(depth_tile);
//console_log(min_bound_depth);
//if depth_tile >= min_bound_depth {
//let depth_tile = depth_tile.max(min_bound_depth);
let survey_frame = self.get_config().get_frame();
let mut already_considered_tiles = HashSet::new();
        // when raytracing, the shader only renders HEALPix texture cells of depth 0
if camera.is_raytracing(proj) {
depth_tile = 0;
}
let tile_cells_iter = camera
.get_hpx_cells(depth_tile, survey_frame)
//.flat_map(move |cell| {
// let texture_cell = cell.get_texture_cell(delta_depth);
// texture_cell.get_tile_cells(delta_depth)
//})
.into_iter()
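            // Expand each cell into the full set of tile cells covering its parent texture
            // cell (at depth - delta_depth), so that a texture is always requested as a whole.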
.flat_map(move |tile_cell| {
let tex_cell = tile_cell.get_texture_cell(dd);
tex_cell.get_tile_cells(dd)
})
.filter(move |tile_cell| {
if already_considered_tiles.contains(tile_cell) {
return false;
}
already_considered_tiles.insert(*tile_cell);
if let Some(moc) = self.footprint_moc.as_ref() {
moc.intersects_cell(tile_cell) && !self.update_priority_tile(tile_cell)
} else {
!self.update_priority_tile(tile_cell)
}
});
/*if depth_tile >= min_depth_tile + 3 {
// Retrieve the grand-grand parent cells but not if it is root ones as it may interfere with already done requests
let tile_cells_ancestor_iter =
(&tile_cells_iter).map(|tile_cell| tile_cell.ancestor(3));
tile_cells_iter.chain(tile_cells_ancestor_iter);
}*/
/*let tile_cells: HashSet<_> = if let Some(moc) = survey.get_moc() {
tile_cells_iter
.filter(|tile_cell| moc.intersects_cell(tile_cell))
.collect()
} else {
tile_cells_iter.collect()
};*/
Some(tile_cells_iter)
//} else {
// None
//}
}
pub fn contains_tile(&self, cell: &HEALPixCell) -> bool {
self.textures.contains_tile(cell)
}
pub fn update(&mut self, camera: &mut CameraViewPort, projection: &ProjectionType) {
let raytracing = camera.is_raytracing(projection);
if raytracing {
return;
}
// rasterizer mode
let available_tiles = self.textures.reset_available_tiles();
let new_cells_in_view = self.retrieve_cells_in_camera(camera);
if new_cells_in_view || available_tiles {
self.recompute_vertices(camera, projection);
}
}
    // Returns true if the set of cells in the view has changed since the last frame
pub fn retrieve_cells_in_camera(&mut self, camera: &CameraViewPort) -> bool {
let cfg = self.textures.config();
        // Get the frame in which the HiPS is stored
let hips_frame = cfg.get_frame();
let depth = camera.get_texture_depth().min(cfg.get_max_depth_texture());
let hpx_cells_in_view = camera.get_hpx_cells(depth, hips_frame);
let new_cells = if hpx_cells_in_view.len() != self.hpx_cells_in_view.len() {
true
} else {
!self
.hpx_cells_in_view
.iter()
.zip(hpx_cells_in_view.iter())
.all(|(&a, &b)| a == b)
};
self.hpx_cells_in_view = hpx_cells_in_view;
new_cells
}
#[inline]
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
self.footprint_moc = Some(moc);
}
#[inline]
pub fn get_moc(&self) -> Option<&HEALPixCoverage> {
self.footprint_moc.as_ref()
}
pub fn set_img_format(&mut self, ext: ImageExt) -> Result<(), JsValue> {
self.textures.set_format(&self.gl, ext)
}
pub fn is_allsky(&self) -> bool {
self.textures.config().is_allsky
}
    // The given position is expressed in the camera frame
pub fn read_pixel(
&self,
pos: &LonLatT<f64>,
camera: &CameraViewPort,
) -> Result<JsValue, JsValue> {
// 1. Convert it to the hips frame system
let cfg = self.textures.config();
let camera_frame = camera.get_coo_system();
let hips_frame = cfg.get_frame();
let pos = crate::coosys::apply_coo_system(camera_frame, hips_frame, &pos.vector());
// Get the array of textures from that survey
let tile_depth = camera.get_texture_depth().min(cfg.get_max_depth_texture());
let pos_tex = self
.textures
.get_pixel_position_in_texture(&pos.lonlat(), tile_depth)?;
let slice_idx = pos_tex.z as usize;
let texture_array = self.textures.get_texture_array();
unimplemented!();
/*let value = texture_array[slice_idx].read_pixel(pos_tex.x, pos_tex.y)?;
if cfg.tex_storing_fits {
let value = value
.as_f64()
                .ok_or_else(|| JsValue::from_str("Error unwrapping the pixel read value."))?;
let scale = cfg.scale as f64;
let offset = cfg.offset as f64;
Ok(JsValue::from_f64(value * scale + offset))
} else {
Ok(value)
}*/
}
pub fn recompute_vertices(&mut self, camera: &mut CameraViewPort, projection: &ProjectionType) {
self.position.clear();
self.uv_start.clear();
self.uv_end.clear();
self.time_tile_received.clear();
self.idx_vertices.clear();
let cfg = self.textures.config();
        // Get the channel type of the image format (e.g. RGB8U for JPEG tiles)
        let channel = cfg.get_format().get_channel();
        // Running vertex offset applied to the indices of each cell patch
        let mut off_indices = 0;
for cell in &self.hpx_cells_in_view {
            // filter out cells that are not covered by the MOC
let cell = if let Some(moc) = self.footprint_moc.as_ref() {
if moc.intersects_cell(&cell) {
Some(&cell)
} else {
if channel == ChannelType::RGB8U {
                        // The rasterizer does not draw tiles lying outside the MOC.
                        // This is fine for HiPSes rendered with transparency (FITS or PNG),
                        // but JPEG tiles are opaque black where no pixel data is found.
                        // We therefore still draw the cells outside the HiPS MOC, in black.
Some(&cell)
} else {
None
}
}
} else {
Some(&cell)
};
if let Some(cell) = cell {
let texture_to_draw = if self.textures.contains(cell) {
if let Some(ending_cell_in_tex) = self.textures.get(cell) {
if let Some(parent_cell) = self.textures.get_nearest_parent(cell) {
if let Some(starting_cell_in_tex) = self.textures.get(&parent_cell) {
Some(TextureToDraw::new(
starting_cell_in_tex,
ending_cell_in_tex,
cell,
))
} else {
// no blending here
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
None
}
} else {
if let Some(parent_cell) = self.textures.get_nearest_parent(cell) {
if let Some(ending_cell_in_tex) = self.textures.get(&parent_cell) {
if let Some(grand_parent_cell) =
self.textures.get_nearest_parent(&parent_cell)
{
if let Some(starting_cell_in_tex) =
self.textures.get(&grand_parent_cell)
{
Some(TextureToDraw::new(
starting_cell_in_tex,
ending_cell_in_tex,
cell,
))
} else {
// no blending
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
Some(TextureToDraw::new(
ending_cell_in_tex,
ending_cell_in_tex,
cell,
))
}
} else {
unreachable!();
}
} else {
None
}
};
if let Some(TextureToDraw {
cell,
starting_texture,
ending_texture,
}) = texture_to_draw
{
let uv_0 = TileUVW::new(cell, starting_texture, cfg);
let uv_1 = TileUVW::new(cell, ending_texture, cfg);
let start_time = ending_texture.start_time().as_millis();
let num_subdivision = num_subdivision(cell, camera, projection);
let n_segments_by_side: usize = 1 << (num_subdivision as usize);
let n_segments_by_side_f32 = n_segments_by_side as f32;
let n_vertices_per_segment = n_segments_by_side + 1;
let mut pos = Vec::with_capacity((n_segments_by_side + 1) * 4);
let grid_lonlat =
healpix::nested::grid(cell.depth(), cell.idx(), n_segments_by_side as u16);
let grid_lonlat_iter = grid_lonlat.iter();
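                    // The grid is assumed to be laid out row-major with
                    // (n_segments_by_side + 1)^2 vertices; i and j below recover the
                    // 2D grid indices from the flat index.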
for (idx, &(lon, lat)) in grid_lonlat_iter.enumerate() {
//let xyzw = crate::math::lonlat::radec_to_xyzw(lon, lat);
//let xyzw =
// crate::coosys::apply_coo_system(hips_frame, selected_frame, &xyzw);
//let ndc = projection
// .model_to_normalized_device_space(&xyzw, camera)
// .map(|v| [v.x as f32, v.y as f32]);
let i: usize = idx / n_vertices_per_segment;
let j: usize = idx % n_vertices_per_segment;
let hj0 = (j as f32) / n_segments_by_side_f32;
let hi0 = (i as f32) / n_segments_by_side_f32;
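                        // d01*/d02* are the extents of the start/end texture UV rectangles
                        // along x and y; each vertex UV is interpolated from the BottomLeft
                        // corner using the normalized grid coordinates (hj0, hi0).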
let d01s = uv_0[TileCorner::BottomRight].x - uv_0[TileCorner::BottomLeft].x;
let d02s = uv_0[TileCorner::TopLeft].y - uv_0[TileCorner::BottomLeft].y;
let d01e = uv_1[TileCorner::BottomRight].x - uv_1[TileCorner::BottomLeft].x;
let d02e = uv_1[TileCorner::TopLeft].y - uv_1[TileCorner::BottomLeft].y;
let uv_start = [
uv_0[TileCorner::BottomLeft].x + hj0 * d01s,
uv_0[TileCorner::BottomLeft].y + hi0 * d02s,
uv_0[TileCorner::BottomLeft].z,
];
let uv_end = [
uv_1[TileCorner::BottomLeft].x + hj0 * d01e,
uv_1[TileCorner::BottomLeft].y + hi0 * d02e,
uv_1[TileCorner::BottomLeft].z,
];
self.uv_start.extend(uv_start);
self.uv_end.extend(uv_end);
self.time_tile_received.push(start_time);
pos.push([lon as f32, lat as f32]);
}
let patch_indices_iter = DefaultPatchIndexIter::new(
&(0..=n_segments_by_side),
&(0..=n_segments_by_side),
n_vertices_per_segment,
)
.flatten()
.map(|indices| {
[
indices.0 + off_indices,
indices.1 + off_indices,
indices.2 + off_indices,
]
})
.flatten();
self.idx_vertices.extend(patch_indices_iter);
off_indices += pos.len() as u16;
// Replace options with an arbitrary vertex
let position_iter = pos
.into_iter()
//.map(|ndc| ndc.unwrap_or([0.0, 0.0]))
.flatten();
self.position.extend(position_iter);
}
}
}
self.num_idx = self.idx_vertices.len();
let mut vao = self.vao.bind_for_update();
vao.update_array(
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.position),
)
.update_array(
"uv_start",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.uv_start),
)
.update_array(
"uv_end",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.uv_end),
)
.update_array(
"time_tile_received",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.time_tile_received),
)
.update_element_array(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.idx_vertices),
);
}
    // Returns whether the tile is present in the survey; if it is, its priority is updated
pub fn update_priority_tile(&mut self, cell: &HEALPixCell) -> bool {
if self.textures.contains_tile(cell) {
// The cell is present in the survey, we update its priority
self.textures.update_priority(cell);
true
} else {
false
}
}
pub fn add_tile<I: Image + Debug>(
&mut self,
cell: &HEALPixCell,
image: I,
time_request: Time,
) -> Result<(), JsValue> {
self.textures.push(&cell, image, time_request)
}
pub fn add_allsky(&mut self, allsky: Allsky) -> Result<(), JsValue> {
self.textures.push_allsky(allsky)
}
/* Accessors */
#[inline]
pub fn get_config(&self) -> &HiPSConfig {
self.textures.config()
}
#[inline]
pub fn get_config_mut(&mut self) -> &mut HiPSConfig {
self.textures.config_mut()
}
pub fn draw(
&self,
shaders: &mut ShaderManager,
colormaps: &Colormaps,
camera: &CameraViewPort,
raytracer: &RayTracer,
cfg: &ImageMetadata,
proj: &ProjectionType,
) -> Result<(), JsValue> {
// Get the coo system transformation matrix
let selected_frame = camera.get_coo_system();
let hips_cfg = self.textures.config();
let hips_frame = hips_cfg.get_frame();
let c = selected_frame.to(hips_frame);
let raytracing = camera.is_raytracing(proj);
let config = self.get_config();
//self.gl.enable(WebGl2RenderingContext::BLEND);
let ImageMetadata {
color,
opacity,
blend_cfg,
..
} = cfg;
// Add starting fading
//let fading = self.get_fading_factor();
//let opacity = opacity * fading;
// Get the colormap from the color
let cmap = colormaps.get(color.cmap_name.as_ref());
blend_cfg.enable(&self.gl, || {
if raytracing {
let w2v = c * (*camera.get_w2m());
let shader = get_raytracer_shader(cmap, &self.gl, shaders, &config)?;
let shader = shader.bind(&self.gl);
shader
.attach_uniforms_from(camera)
.attach_uniforms_from(&self.textures)
                    // send the cmap apart from the color config
.attach_uniforms_with_params_from(cmap, colormaps)
.attach_uniforms_from(color)
.attach_uniform("model", &w2v)
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniforms_from(colormaps);
raytracer.draw(&shader);
} else {
let v2w = (*camera.get_m2w()) * c.transpose();
                // The rasterizer has a buffer containing:
                // - the vertices of the HEALPix cells at the currently rendered depth,
                // - the starting and ending UVs used for the blending animation,
                // - the time at which the blending animation begins for each cell.
                //
                // Each of these pieces of data can change under different circumstances:
                // - the vertices change when cells are added to or removed from the view
                //   with respect to the previous frame,
                // - the UVs change when cells are added/removed or when new tiles
                //   become available for the GPU.
let shader = get_raster_shader(cmap, &self.gl, shaders, &config)?.bind(&self.gl);
shader
.attach_uniforms_from(&self.textures)
                    // send the cmap apart from the color config
.attach_uniforms_with_params_from(cmap, colormaps)
.attach_uniforms_from(color)
.attach_uniforms_from(camera)
.attach_uniform("inv_model", &v2w)
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniform("u_proj", proj)
.attach_uniforms_from(colormaps)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
Some(self.num_idx as i32),
WebGl2RenderingContext::UNSIGNED_SHORT,
0,
);
}
Ok(())
})?;
//self.gl.disable(WebGl2RenderingContext::BLEND);
Ok(())
}
}
pub use d2::HiPS2D;

View File

@@ -12,10 +12,9 @@ impl<T> Deref for UV<T> {
}
}
use crate::{
healpix::cell::HEALPixCell,
survey::{config::HiPSConfig, texture::Texture},
};
use super::config::HiPSConfig;
use super::d2::texture::Texture;
use crate::healpix::cell::HEALPixCell;
pub struct TileUVW([Vector3<f32>; 4]);
impl TileUVW {
// The texture cell passed must be a child of texture

View File

@@ -14,7 +14,7 @@ use crate::tile_fetcher::TileFetcherQueue;
use al_core::image::format::ChannelType;
pub use hips::HiPS;
pub use hips::HiPS2D;
pub use catalog::Manager;
@@ -30,10 +30,11 @@ use al_core::VertexArrayObject;
use al_core::WebGlContext;
use crate::camera::CameraViewPort;
use crate::renderable::hips::config::HiPSConfig;
use crate::shader::ShaderId;
use crate::shader::ShaderManager;
use crate::Abort;
use crate::ProjectionType;
use crate::{shader::ShaderManager, survey::config::HiPSConfig};
// Recursively compute the number of subdivision needed for a cell
// to not be too much skewed
@@ -56,7 +57,7 @@ pub(crate) type CreatorDid = String;
type LayerId = String;
pub struct Layers {
// Surveys to query
surveys: HashMap<CreatorDid, HiPS>,
surveys: HashMap<CreatorDid, HiPS2D>,
images: HashMap<Id, Vec<Image>>, // an url can contain multiple images i.e. a fits file can contain
// multiple image extensions
// The meta data associated with a layer
@@ -414,7 +415,7 @@ impl Layers {
camera: &mut CameraViewPort,
proj: &ProjectionType,
tile_fetcher: &mut TileFetcherQueue,
) -> Result<&HiPS, JsValue> {
) -> Result<&HiPS2D, JsValue> {
let HiPSCfg {
layer,
properties,
@@ -468,7 +469,7 @@ impl Layers {
}*/
camera.register_view_frame(cfg.get_frame(), proj);
let hips = HiPS::new(cfg, gl)?;
let hips = HiPS2D::new(cfg, gl)?;
// add the frame to the camera
self.surveys.insert(creator_did.clone(), hips);
@@ -563,7 +564,7 @@ impl Layers {
) -> Result<(), JsValue> {
let layer_ref = layer.as_str();
if let Some(meta_old) = self.meta.get(layer_ref) {
/*if let Some(meta_old) = self.meta.get(layer_ref) {
if !meta_old.visible() && meta.visible() {
if let Some(survey) = self.get_mut_hips_from_layer(layer_ref) {
survey.recompute_vertices(camera, projection);
@@ -595,7 +596,7 @@ impl Layers {
}
}
}
}
}*/
// Expect the image survey to be found in the hash map
self.meta.insert(layer.clone(), meta).ok_or_else(|| {
@@ -607,14 +608,14 @@ impl Layers {
// Accessors
// HiPSes getters
pub fn get_hips_from_layer(&self, layer: &str) -> Option<&HiPS> {
pub fn get_hips_from_layer(&self, layer: &str) -> Option<&HiPS2D> {
self.ids
.get(layer)
.map(|cdid| self.surveys.get(cdid))
.flatten()
}
pub fn get_mut_hips_from_layer(&mut self, layer: &str) -> Option<&mut HiPS> {
pub fn get_mut_hips_from_layer(&mut self, layer: &str) -> Option<&mut HiPS2D> {
if let Some(cdid) = self.ids.get_mut(layer) {
self.surveys.get_mut(cdid)
} else {
@@ -622,19 +623,19 @@ impl Layers {
}
}
pub fn get_mut_hips_from_cdid(&mut self, cdid: &str) -> Option<&mut HiPS> {
pub fn get_mut_hips_from_cdid(&mut self, cdid: &str) -> Option<&mut HiPS2D> {
self.surveys.get_mut(cdid)
}
pub fn get_hips_from_cdid(&mut self, cdid: &str) -> Option<&HiPS> {
pub fn get_hips_from_cdid(&mut self, cdid: &str) -> Option<&HiPS2D> {
self.surveys.get(cdid)
}
pub fn values_hips(&self) -> impl Iterator<Item = &HiPS> {
pub fn values_hips(&self) -> impl Iterator<Item = &HiPS2D> {
self.surveys.values()
}
pub fn values_mut_hips(&mut self) -> impl Iterator<Item = &mut HiPS> {
pub fn values_mut_hips(&mut self) -> impl Iterator<Item = &mut HiPS2D> {
self.surveys.values_mut()
}

View File

@@ -1,40 +0,0 @@
struct BitVector<const N: usize>([u64; N]);
impl<const N: usize> Default for BitVector<N> {
fn default() -> Self {
Self([0_u64; N])
}
}
impl<const N: usize> BitVector<N> {
pub fn new_empty() -> Self {
Self::default()
}
pub fn set(&mut self, i: usize) {
debug_assert!(i < (N << 6));
let j = i >> 6;
let k = i & 0x3f;
self.0[j] |= 1 << k;
}
pub fn get(&self, i: usize) -> bool {
debug_assert!(i < (N << 6));
let j = i >> 6;
let k = i & 0x3f;
(self.0[j] >> k) & 0x1 == 1
}
}
#[cfg(test)]
mod tests {
use super::BitVector;
#[test]
fn test_bitvector_basic_op() {
let mut bv: BitVector<32> = BitVector::new_empty();
bv.set(64);
assert!(bv.get(64));
}
}

View File

@@ -1,5 +0,0 @@
pub mod bitvector;
pub mod buffer;
pub mod config;
pub mod hpx_cubic_tile;
pub mod texture;

View File

@@ -1,5 +1,5 @@
use crate::downloader::{query, Downloader};
use crate::renderable::HiPS;
use crate::renderable::HiPS2D;
use crate::time::{DeltaTime, Time};
use crate::Abort;
@@ -190,7 +190,7 @@ impl TileFetcherQueue {
pub fn launch_starting_hips_requests(
&mut self,
hips: &HiPS,
hips: &HiPS2D,
downloader: Rc<RefCell<Downloader>>,
) {
let cfg = hips.get_config();
@@ -236,6 +236,7 @@ impl TileFetcherQueue {
hips_cdid.clone(),
hips_url.clone(),
hips_fmt,
None,
)) {
let dl = downloader.clone();

View File

@@ -21,7 +21,7 @@ struct Tile {
uniform Tile textures_tiles[12];
#include ../color.glsl;
#include ../../projection/hpx.glsl;
#include ../../projection/hpx_proj.glsl;
uniform float opacity;

View File

@@ -28,7 +28,7 @@ struct TileColor {
};
#include ../color.glsl;
#include ../../projection/hpx.glsl;
#include ../../projection/hpx_proj.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);

View File

@@ -23,7 +23,7 @@ uniform Tile textures_tiles[12];
uniform float opacity;
#include ../color_i.glsl;
#include ../../projection/hpx.glsl;
#include ../../projection/hpx_proj.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);

View File

@@ -23,7 +23,7 @@ uniform Tile textures_tiles[12];
uniform float opacity;
#include ../color_u.glsl;
#include ../../projection/hpx.glsl;
#include ../../projection/hpx_proj.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);

View File

@@ -0,0 +1,112 @@
const float ONE_OVER_SQRT6 = 0.408248290463863;
const float FRAC_PI_2 = 1.57079632679489661923132169163975144;
const float FRAC_PI_4 = 0.785398163397448309615660845819875721;
const float TRANSITION_Z = 0.66666666666;
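// Decompose the projection-plane abscissa x (expected in [0, 8)) into the odd integer
// o = (uint(x) | 1), i.e. one of {1, 3, 5, 7} modulo 8 (which, in these normalized
// coordinates, is the x-coordinate of a polar-cap base-cell center), and the signed
// offset x - o lying in [-1, 1).
// Examples: pm1_offset_decompose(3.5) == vec2(3.0, 0.5), pm1_offset_decompose(2.5) == vec2(3.0, -0.5).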
vec2 pm1_offset_decompose(float x) {
    uint fl = uint(x);
    uint odd_fl = fl | 1u;
    return vec2(
        float(odd_fl & 7u), // offset: value modulo 8 = 1/3/5/7
        x - float(odd_fl)   // pm1, in [-1, 1)
    );
}
/// Returns the position on the unit sphere `(x, y, z)` of the given position in the HEALPix
/// 2D Euclidean projection plane.
/// # Inputs
/// - `X`: coordinate along the X-axis in the projection plane, in `[-4, 4]`
/// - `Y`: coordinate along the Y-axis in the projection plane, in `[-2, 2]`
/// # Output:
/// - `x`: in `[-1.0, 1.0]`
/// - `y`: in `[-1.0, 1.0]`
/// - `z`: in `[-1.0, 1.0]`
/// # Remark
/// From the HPX projection as defined in Calabretta, use:
/// - `X /= PI / 4`
/// - `Y /= PI / 4`
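// Hand-derived sanity checks (the GPU path is not yet tested):
// - hpx_unproj(vec2(0.0, 0.0)) should give (1, 0, 0)   (lon = 0, lat = 0)
// - hpx_unproj(vec2(0.0, 2.0)) should give (0, 0, 1)   (north pole)
// - hpx_unproj(vec2(0.0, 1.5)) should give z = 1 - (0.5 * 0.5) / 3 ~= 0.9167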
vec3 hpx_unproj(vec2 p) {
if (p.y > 1.0) {
// North Polar Cap
float x = (p.x < 0.0) ? (8.0 + p.x) : p.x;
vec2 offset_pm1 = pm1_offset_decompose(x);
float sqrt_of_three_time_one_minus_sin_of = 2.0 - p.y;
x = 0.0;
if (sqrt_of_three_time_one_minus_sin_of > 1e-6) {
x = deal_with_numerical_approx_in_edges(offset_pm1.y / sqrt_of_three_time_one_minus_sin_of);
} else {
            x = offset_pm1.y;
}
x += offset_pm1.x;
// It would be faster, but less accurate, to use:
// let z = 1.0 - sqrt_of_three_time_one_minus_sin_of.pow2() / 3.0;
// let cos_lat = sqrt(1 - z^2);
float lat = 2.0 * acos(sqrt_of_three_time_one_minus_sin_of * ONE_OVER_SQRT6) - FRAC_PI_2;
float lon = x * FRAC_PI_4;
float sin_lon = sin(lon);
float cos_lon = cos(lon);
float sin_lat = sin(lat);
float cos_lat = cos(lat);
return vec3(
cos_lon * cos_lat,
sin_lon * cos_lat,
sin_lat
);
} else if (p.y < -1.0) {
// South polar cap
float x = (p.x < 0.0) ? (8.0 + p.x) : p.x;
vec2 offset_pm1 = pm1_offset_decompose(x);
float sqrt_of_three_time_one_minus_sin_of = 2.0 + p.y;
x = 0.0;
if (sqrt_of_three_time_one_minus_sin_of > 1e-6) {
x = deal_with_numerical_approx_in_edges(offset_pm1.y / sqrt_of_three_time_one_minus_sin_of);
} else {
x = offset_pm1.y;
}
x += offset_pm1.x;
// It would be faster, but less accurate, to use:
// let z = -1.0 + sqrt_of_three_time_one_minus_sin_of.pow2() / 3.0;
// let cos_lat = sqrt(1 - z^2);
float lat = FRAC_PI_2 - 2.0 * acos(sqrt_of_three_time_one_minus_sin_of * ONE_OVER_SQRT6);
float lon = x * FRAC_PI_4;
float sin_lon = sin(lon);
float cos_lon = cos(lon);
float sin_lat = sin(lat);
float cos_lat = cos(lat);
return vec3(
cos_lon * cos_lat,
sin_lon * cos_lat,
sin_lat
);
} else {
// Equatorial region
float z = p.y * TRANSITION_Z; // = sin(lat)
float cos_lat = 0.0;
        if (abs(z) < 1e-2) {
// sqrt(1 - x²) = 1 - x²/2 - x⁴/8 - x⁶/16
float tmp = 0.5 * z * z;
cos_lat = 1.0 - tmp - 0.5 * tmp * tmp;
} else {
            cos_lat = sqrt(1.0 - z * z);
}
        float lon = p.x * FRAC_PI_4;
float sin_lon = sin(lon);
float cos_lon = cos(lon);
return vec3(
cos_lon * cos_lat,
sin_lon * cos_lat,
z
);
}
}