Compare commits

...

12 Commits

Author SHA1 Message Date
Matthieu Baumann
e89769c87d update changelog for v3.4.2-beta 2024-07-02 18:57:51 +02:00
Matthieu Baumann
253c272262 remove some rust warnings 2024-07-02 18:55:38 +02:00
Matthieu Baumann
05c3eb5911 handle rotation for ICRS frame when exporting the WCS. CRVAL on the equator and Galactic frame with cylindrical projection are not handled. This targets issue https://github.com/cds-astro/aladin-lite/issues/170 2024-07-02 18:50:10 +02:00
Matthieu Baumann
631b2cdf4b WIP: polyline catalog renderer 2024-07-02 18:50:10 +02:00
Matthieu Baumann
3fee4a345d propose a removeHiPSFromFavorites method on the aladin object. Targets issue: https://github.com/cds-astro/aladin-lite/issues/171 2024-07-02 18:50:10 +02:00
Matthieu Baumann
2b69ae8a0d move grid rendering from line rasterizer to grid module 2024-07-02 18:50:10 +02:00
Matthieu Baumann
957f2b2414 rename coverage -> moc 2024-07-02 18:50:09 +02:00
Matthieu Baumann
d8cb01ddef cache the 12 base cell textures 2024-07-02 18:50:09 +02:00
Matthieu Baumann
1ad97180f3 move rendering part from line rasterizer to the moc renderable 2024-07-02 18:50:09 +02:00
Matthieu Baumann
8d9ca2e2b9 store shaders in the wasm, add a build:dbg vite bundle mode, projections on the gpu 2024-07-02 18:50:09 +02:00
Matthieu Baumann
776cd36969 use of instancing and impl inside the line rasterizer. Lyon is not needed anymore for plotting lines, but still used for plotting filled paths 2024-07-02 18:50:09 +02:00
Matthieu Baumann
49061a746b first commit 2024-07-02 18:50:06 +02:00
150 changed files with 2915 additions and 3625 deletions

View File

@@ -1,9 +1,11 @@
# Changelogs
## Unreleased
## 3.4.2-beta
* [impr] Improve `WCS` view export by encoding the 3rd Euler rotation: <https://github.com/cds-astro/aladin-lite/issues/170>. Some cases remain unhandled, e.g. a CRVAL lying on the equator, or a cylindrical projection combined with a Galactic frame rotation.
* [fixed] Change `RADECSYS` to `RADESYS` in `Aladin#getViewWCS` output, following the FITS standard deprecation of the former keyword
* [feat] Add new method `Aladin#getViewImageBuffer` to get the current view as a PNG buffer
* [feat] New line rasterizer using GL instancing. This enhances the rendering speed of MOCs.
## 3.3.3
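
The WCS export improvement above boils down to recovering the view's third Euler angle (the position angle of north) from the camera's world-to-model rotation matrix; later in this diff the new `getViewCenterFromNorthPoleAngle` binding reads it as `atan2(w2m.x.y, w2m.y.y)`. A minimal standalone sketch of that extraction, using a plain column-major 3x3 matrix instead of the project's `cgmath` types (the exporter's CRVAL/LONPOLE handling is more involved, as the caveat above notes):

```rust
// Sketch: read the position angle of north (the "3rd Euler rotation") out of a
// column-major world-to-model rotation matrix, mirroring the
// `(w2m.x.y).atan2(w2m.y.y)` access in the camera code further down this diff.
fn north_shift_angle(m: [[f64; 3]; 3]) -> f64 {
    m[0][1].atan2(m[1][1])
}

fn main() {
    // A pure 30° rotation about the view (z) axis gives a 30° position angle.
    let (s, c) = 30.0_f64.to_radians().sin_cos();
    let rot_z = [[c, s, 0.0], [-s, c, 0.0], [0.0, 0.0, 1.0]];
    assert!((north_shift_angle(rot_z).to_degrees() - 30.0).abs() < 1e-9);
}
```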

View File

@@ -96,27 +96,6 @@
background-color: pink;
}
@media screen and (max-width:480px) {
/* smartphones, Android phones, landscape iPhone */
.aladin-cooFrame {
top: unset;
position: absolute;
bottom: 0;
}
.aladin-location {
left: 0.2rem;
}
.aladin-fov {
left: 6rem;
}
.aladin-projection-control {
display: none;
}
}
</style>
</body>

View File

@@ -26,7 +26,6 @@ A.init.then(() => {
aladin.addCatalog(A.catalogFromURL('https://vizier.u-strasbg.fr/viz-bin/votable?-source=HIP2&-c=LMC&-out.add=_RAJ,_DEJ&-oc.form=dm&-out.meta=DhuL&-out.max=9999&-c.rm=180', {sourceSize:12, color: '#f08080'}));
aladin.addCatalog(A.catalogFromURL(vmc_cepheids, {onClick: 'showTable', sourceSize:14, color: '#fff080'}));

View File

@@ -14,7 +14,7 @@
A.init.then(() => {
aladin = A.aladin('#aladin-lite-div', {showReticle: true, showSurveyStackControl: true, showOverlayStackControl: false, projection: "TAN", target: '15 16 57.636 -60 55 7.49', showProjectionControl: true, realFullscreen: true, showZoomControl: true, showSimbadPointerControl: true, showShareControl: true, showContextMenu: true, showCooGridControl: true, fullScreen: true, showCooGrid: true, fov: 90});
var moc_0_99 = A.MOCFromURL("./data//gw/gw_0.9.fits",{ name: "GW 90%", color: "#ff0000", opacity: 0.0, lineWidth: 3, fill: false, perimeter: true});
var moc_0_99 = A.MOCFromURL("./data//gw/gw_0.9.fits",{ name: "GW 90%", color: "#ff0000", opacity: 0.0, lineWidth: 10, fill: false, perimeter: true});
var moc_0_95 = A.MOCFromURL("./data/gw/gw_0.6.fits",{ name: "GW 60%", color: "#00ff00", opacity: 0.5, lineWidth: 3, fill: true, perimeter: true});
var moc_0_5 = A.MOCFromURL("./data/gw/gw_0.3.fits",{ name: "GW 30%", color: "#00ffff", opacity: 0.5, lineWidth: 3, fill: true, perimeter: true});
var moc_0_2 = A.MOCFromURL("./data/gw/gw_0.1.fits",{ name: "GW 10%", color: "#ff00ff", opacity: 0.5, lineWidth: 3, fill: true, perimeter: true});

View File

@@ -7,10 +7,9 @@
<div id="aladin-lite-div" style="width: 768px; height: 512px"></div>
<script>let aladin;</script>
<script type="module">
import A from '../src/js/A.js';
let aladin;
A.init.then(() => {
aladin = A.aladin(
'#aladin-lite-div',
@@ -23,20 +22,13 @@
reticleColor: '#ff89ff', // change reticle color
reticleSize: 64, // change reticle size
showContextMenu: true,
showCooGrid: true,
showFrame: true,
}
);
});
</script>
<style>
.aladin-location {
position: absolute;
left: 0.2rem;
top: 0.2rem;
}
.aladin-cooFrame {
display: none;
}
</style>
</body>
</html>

View File

@@ -18,7 +18,7 @@
console.log(moc.contains(205.9019247, +2.4492764));
console.log(moc.contains(-205.9019247, +2.4492764));
});
var moc10 = A.MOCFromURL('https://alasky.unistra.fr/MocServer/query?ivorn=ivo%3A%2F%2FCDS%2FV%2F139%2Fsdss9&get=moc&order=11&fmt=fits', {color: '#ffffff', perimeter: true, fillColor: '#aabbcc', opacity: 0.1, lineWidth: 3});
var moc10 = A.MOCFromURL('https://alasky.unistra.fr/MocServer/query?ivorn=ivo%3A%2F%2FCDS%2FV%2F139%2Fsdss9&get=moc&order=11&fmt=fits', {color: '#ffffff', perimeter: true, fillColor: '#aabbcc', opacity: 0.3, lineWidth: 3});
var moc9 = A.MOCFromURL('https://alasky.unistra.fr/MocServer/query?ivorn=ivo%3A%2F%2FCDS%2FV%2F139%2Fsdss9&get=moc&order=4&fmt=fits', {color: '#00ff00', opacity: 0.5, lineWidth: 3, perimeter: true});
aladin.addMOC(moc11);

View File

@@ -25,6 +25,8 @@
survey3.setColormap('cubehelix', {stretch: 'asinh'});
aladin.setImageLayer(survey2);
aladin.removeHiPSFromFavorites(survey3);
});
</script>

View File

@@ -11,7 +11,7 @@
A.init.then(() => {
// Start up Aladin Lite
let aladin = A.aladin('#aladin-lite-div', {survey: "CDS/P/DSS2/color", target: 'Sgr a*', fov: 0.5, showContextMenu: true});
let aladin = A.aladin('#aladin-lite-div', {survey: "CDS/P/DSS2/color", target: 'Sgr a*', fov: 0.5, showContextMenu: true, showCooGrid: true});
// This table contains a s_region column containing stcs expressed regions
// that are automatically parsed
aladin.addCatalog(A.catalogFromURL('https://aladin.cds.unistra.fr/AladinLite/doc/API/examples/data/alma-footprints.xml', {name: 'ALMA footprints', onClick: 'showTable', hoverColor: 'lightgreen'}));

View File

@@ -33,11 +33,15 @@
],
"scripts": {
"wasm": "wasm-pack build ./src/core --target web --release --out-name core -- --features webgl2 -Z build-std=panic_abort,std -Z build-std-features=panic_immediate_abort ",
"wasm:dbg": "wasm-pack build --dev ./src/core --target web --out-name core -- --features=webgl2,dbg -Z build-std=panic_abort,std -Z build-std-features=panic_immediate_abort ",
"predeploy": "npm run build && rm -rf aladin-lite*.tgz && npm pack",
"deploy": "python3 deploy/deploy.py",
"build": "npm run wasm && vite build && cp examples/index.html dist/index.html",
"build:dbg": "npm run wasm:dbg && vite build && cp examples/index.html dist/index.html",
"dev": "npm run build && vite",
"dev:dbg": "npm run build:dbg && vite",
"serve": "npm run dev",
"serve:dbg": "npm run dev:dbg",
"preview": "vite preview",
"test:build": "cd src/core && cargo test --release --features webgl2",
"test:unit": "vitest run",

View File

@@ -29,11 +29,13 @@ mapproj = "0.3.0"
fitsrs = "0.2.9"
wcs = "0.2.8"
colorgrad = "0.6.2"
lyon = "1.0.1"
#lyon = "1.0.1"
console_error_panic_hook = {version = "0.1.7", optional = true}
[features]
webgl1 = [ "al-core/webgl1", "al-api/webgl1", "web-sys/WebGlRenderingContext", "web-sys/AngleInstancedArrays", "web-sys/ExtSRgb", "web-sys/OesTextureFloat",]
webgl2 = [ "al-core/webgl2", "al-api/webgl2", "web-sys/WebGl2RenderingContext", "web-sys/WebGlVertexArrayObject", "web-sys/ExtColorBufferFloat",]
dbg = ['dep:console_error_panic_hook']
[dev-dependencies]
rand = "0.8"
@@ -43,10 +45,6 @@ package = "cdshealpix"
git = "https://github.com/cds-astro/cds-healpix-rust"
branch = "master"
#[dependencies.moclib]
#package = "moc"
#version = "0.14.2"
[dependencies.moclib]
package = "moc"
#path = "../../../cds-moc-rust/"
@@ -73,16 +71,12 @@ version = "0.24.2"
default-features = false
features = [ "jpeg", "png",]
[build-dependencies]
# Shader preprocessing
walkdir = "2.3.2"
[profile.dev]
opt-level = "z"
debug = true
debug-assertions = true
overflow-checks = true
lto = true
panic = "unwind"
incremental = true
codegen-units = 256
rpath = false
[profile.release]
opt-level = "z"

View File

@@ -6,6 +6,7 @@ use crate::webgl_ctx::WebGlContext;
pub struct ArrayBufferInstanced {
buffer: WebGlBuffer,
len: usize,
num_packed_data: usize,
offset_idx: u32,
@@ -39,7 +40,7 @@ impl ArrayBufferInstanced {
offset_idx: u32,
stride: usize,
sizes: &[usize],
_offsets: &[usize],
offsets: &[usize],
usage: u32,
data: B,
) -> ArrayBufferInstanced {
@@ -49,29 +50,43 @@ impl ArrayBufferInstanced {
let num_f32_in_buf = data.len() as i32;
let num_instances = num_f32_in_buf / (num_f32_per_instance as i32);
let len = data.len();
let buffer = gl.create_buffer().ok_or("failed to create buffer").unwrap_abort();
let buffer = gl
.create_buffer()
.ok_or("failed to create buffer")
.unwrap_abort();
// Bind the buffer
gl.bind_buffer(WebGlRenderingCtx::ARRAY_BUFFER, Some(buffer.as_ref()));
// Pass the vertices data to the buffer
f32::buffer_data_with_array_buffer_view(gl, data, WebGlRenderingCtx::ARRAY_BUFFER, usage);
// Link to the shader
let idx = offset_idx;
for (idx, (size, offset)) in sizes.iter().zip(offsets.iter()).enumerate() {
let idx = (idx as u32) + offset_idx;
f32::vertex_attrib_pointer_with_i32(gl, idx, *sizes.first().unwrap_abort() as i32, 0, 0);
gl.enable_vertex_attrib_array(idx);
f32::vertex_attrib_pointer_with_i32(
gl,
idx,
*size as i32,
stride as i32,
*offset as i32,
);
#[cfg(feature = "webgl2")]
gl.vertex_attrib_divisor(idx, 1);
#[cfg(feature = "webgl1")]
gl.ext.angles.vertex_attrib_divisor_angle(idx, 1);
gl.enable_vertex_attrib_array(idx);
#[cfg(feature = "webgl2")]
gl.vertex_attrib_divisor(idx, 1);
#[cfg(feature = "webgl1")]
gl.ext.angles.vertex_attrib_divisor_angle(idx, 1);
}
let num_packed_data = sizes.len();
let gl = gl.clone();
// Returns an instance that keeps only the buffer
ArrayBufferInstanced {
buffer,
len,
num_packed_data,
offset_idx,
@@ -119,13 +134,30 @@ impl ArrayBufferInstanced {
self.gl.disable_vertex_attrib_array(loc as u32);
}
pub fn update<'a, B: BufferDataStorage<'a, f32>>(&self, buffer: B) {
pub fn update<'a, B: BufferDataStorage<'a, f32>>(&mut self, usage: u32, data: B) {
self.bind();
f32::buffer_sub_data_with_i32_and_array_buffer_view(
if self.len >= data.len() {
f32::buffer_sub_data_with_i32_and_array_buffer_view(
&self.gl,
data,
WebGlRenderingCtx::ARRAY_BUFFER,
);
} else {
self.len = data.len();
f32::buffer_data_with_array_buffer_view(
&self.gl,
data,
WebGlRenderingCtx::ARRAY_BUFFER,
usage,
);
}
/*f32::buffer_sub_data_with_i32_and_array_buffer_view(
&self.gl,
buffer,
WebGlRenderingCtx::ARRAY_BUFFER,
);
);*/
/*self.gl.buffer_sub_data_with_i32_and_array_buffer_view(
WebGlRenderingCtx::ARRAY_BUFFER,
0,

View File

@@ -41,3 +41,20 @@ where
self.0.as_ptr()
}
}
impl<'a, T> BufferDataStorage<'a, T> for &'a [T]
where
T: VertexAttribPointerType,
{
fn get_slice(&self) -> &[T] {
self
}
fn len(&self) -> usize {
self.as_ref().len()
}
fn ptr(&self) -> *const T {
self.as_ptr()
}
}

View File

@@ -9,8 +9,8 @@ pub mod vao {
use crate::object::element_array_buffer::ElementArrayBuffer;
use crate::webgl_ctx::WebGlContext;
use std::collections::HashMap;
use crate::Abort;
use std::collections::HashMap;
pub struct VertexArrayObject {
array_buffer: HashMap<&'static str, ArrayBuffer>,
@@ -88,7 +88,10 @@ pub mod vao {
}*/
pub fn num_elements(&self) -> usize {
self.element_array_buffer.as_ref().unwrap_abort().num_elements()
self.element_array_buffer
.as_ref()
.unwrap_abort()
.num_elements()
}
pub fn num_instances(&self) -> i32 {
@@ -143,13 +146,14 @@ pub mod vao {
pub fn update_instanced_array<B: BufferDataStorage<'a, f32>>(
&mut self,
attr: &'static str,
usage: u32,
array_data: B,
) -> &mut Self {
self.vao
.array_buffer_instanced
.get_mut(attr)
.unwrap_abort()
.update(array_data);
.update(usage, array_data);
self
}
@@ -333,13 +337,14 @@ pub mod vao {
pub fn update_instanced_array<B: BufferDataStorage<'a, f32>>(
&mut self,
attr: &'static str,
usage: u32,
array_data: B,
) -> &mut Self {
self.vao
.array_buffer_instanced
.get_mut(attr)
.unwrap_abort()
.update(array_data);
.update(usage, array_data);
self
}
@@ -444,7 +449,10 @@ pub mod vao {
}*/
pub fn num_elements(&self) -> usize {
self.element_array_buffer.as_ref().unwrap_abort().num_elements()
self.element_array_buffer
.as_ref()
.unwrap_abort()
.num_elements()
}
pub fn num_instances(&self) -> i32 {
@@ -694,13 +702,14 @@ pub mod vao {
pub fn update_instanced_array<B: BufferDataStorage<'a, f32>>(
&mut self,
attr: &'static str,
usage: u32,
array_data: B,
) -> &mut Self {
self.vao
.array_buffer_instanced
.get_mut(attr)
.expect("cannot get attribute from the array buffer")
.update(array_data);
.update(usage, array_data);
self
}

View File

@@ -1,8 +1,8 @@
use web_sys::{WebGlProgram, WebGlShader, WebGlUniformLocation};
use wasm_bindgen::JsValue;
use web_sys::{WebGlProgram, WebGlShader, WebGlUniformLocation};
use crate::Colormaps;
use crate::webgl_ctx::WebGlRenderingCtx;
use crate::Colormaps;
fn compile_shader(
gl: &WebGlContext,
shader_type: u32,
@@ -289,17 +289,6 @@ impl UniformType for TransferFunction {
}
}
/*use al_api::hips::GrayscaleParameter;
impl SendUniforms for GrayscaleParameter {
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader
.attach_uniforms_from(&self.h)
.attach_uniform("min_value", &self.min_value)
.attach_uniform("max_value", &self.max_value);
shader
}
}*/
use al_api::hips::HiPSColor;
use al_api::hips::ImageMetadata;
@@ -314,7 +303,7 @@ impl SendUniforms for ImageMetadata {
}
impl SendUniforms for HiPSColor {
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
let reversed = self.reversed as u8 as f32;
shader
@@ -326,14 +315,17 @@ impl SendUniforms for HiPSColor {
.attach_uniform("k_brightness", &self.k_brightness)
.attach_uniform("k_contrast", &self.k_contrast)
.attach_uniform("reversed", &reversed);
shader
}
}
impl SendUniformsWithParams<Colormaps> for HiPSColor {
fn attach_uniforms_with_params<'a>(&self, shader: &'a ShaderBound<'a>, cmaps: &Colormaps) -> &'a ShaderBound<'a> {
fn attach_uniforms_with_params<'a>(
&self,
shader: &'a ShaderBound<'a>,
cmaps: &Colormaps,
) -> &'a ShaderBound<'a> {
let reversed = self.reversed as u8 as f32;
let cmap = cmaps.get(&self.cmap_name.as_ref());
@@ -347,7 +339,7 @@ impl SendUniformsWithParams<Colormaps> for HiPSColor {
.attach_uniform("k_brightness", &self.k_brightness)
.attach_uniform("k_contrast", &self.k_contrast)
.attach_uniform("reversed", &reversed);
shader
}
}
@@ -375,7 +367,11 @@ impl<'a> ShaderBound<'a> {
self
}
pub fn attach_uniforms_with_params_from<P, T: SendUniformsWithParams<P>>(&'a self, t: &T, params: &P) -> &'a Self {
pub fn attach_uniforms_with_params_from<P, T: SendUniformsWithParams<P>>(
&'a self,
t: &T,
params: &P,
) -> &'a Self {
t.attach_uniforms_with_params(self, params);
self
@@ -422,5 +418,9 @@ pub trait SendUniforms {
}
pub trait SendUniformsWithParams<T> {
fn attach_uniforms_with_params<'a>(&self, shader: &'a ShaderBound<'a>, params: &T) -> &'a ShaderBound<'a>;
fn attach_uniforms_with_params<'a>(
&self,
shader: &'a ShaderBound<'a>,
params: &T,
) -> &'a ShaderBound<'a>;
}

src/core/build.rs (new file, 105 lines)
View File

@@ -0,0 +1,105 @@
use std::{error::Error, fs};
use walkdir::WalkDir;
extern crate walkdir;
use std::io::BufRead;
// All the GLSL sources live under '../glsl/webgl2/'; this build script inlines them into src/shaders.rs
fn generate_shaders() -> std::result::Result<(), Box<dyn Error>> {
println!("generate shaders");
let mut shaders = HashMap::new();
for entry in WalkDir::new("../glsl/webgl2/")
.into_iter()
.filter_map(|e| e.ok())
{
if entry.file_type().is_file() {
let path = entry.path();
if let Some(ext) = path.extension() {
if ext == "vert" || ext == "frag" {
let file_name = path.file_name().unwrap().to_str().unwrap();
let out_file_name = path
.strip_prefix("../glsl/webgl2/")
.unwrap()
//.with_extension("")
.to_string_lossy()
.to_owned()
.replace("/", "_");
//let out_name = format!("{}/{}", OUT_PATH, out_file_name);
let src = read_shader(path)?;
shaders.insert(out_file_name, src);
//fs::write(&out_name, result)?;
println!("cargo:rerun-if-changed=src/shaders/{}", file_name);
}
}
}
}
write("src/shaders.rs".into(), shaders)?;
Ok(())
}
fn read_shader<P: AsRef<std::path::Path>>(path: P) -> std::io::Result<String> {
let path = path.as_ref();
let file = fs::File::open(path.to_str().unwrap())?;
let shader_src = std::io::BufReader::new(file)
.lines()
.flatten()
.map(|l| {
if l.starts_with("#include") {
let incl_file_names: Vec<_> = l.split_terminator(&[';', ' '][..]).collect();
let incl_file_name_rel = incl_file_names[1];
let incl_file_name = path.parent().unwrap().join(incl_file_name_rel);
read_shader(incl_file_name.to_str().unwrap()).unwrap()
} else {
l
}
})
.collect::<Vec<_>>()
.join("\n");
Ok(shader_src)
}
use std::collections::HashMap;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
pub fn write(path: PathBuf, entries: HashMap<String, String>) -> Result<(), Box<dyn Error>> {
let mut all_the_files = File::create(&path)?;
writeln!(&mut all_the_files, r#"use std::collections::HashMap;"#,)?;
writeln!(&mut all_the_files, r#""#,)?;
writeln!(&mut all_the_files, r#"#[allow(dead_code)]"#,)?;
writeln!(
&mut all_the_files,
r#"pub fn get_all() -> HashMap<&'static str, &'static str> {{"#,
)?;
writeln!(&mut all_the_files, r#" let mut out = HashMap::new();"#,)?;
for (name, content) in entries {
writeln!(
&mut all_the_files,
r##" out.insert("{name}", r#"{content}"#);"##,
)?;
}
writeln!(&mut all_the_files, r#" out"#,)?;
writeln!(&mut all_the_files, r#"}}"#,)?;
Ok(())
}
fn main() {
if let Err(err) = generate_shaders() {
// panic here for a nicer error message, otherwise it will
// be flattened to one line for some reason
panic!("Unable to generate shaders\n{}", err);
}
}
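
Given the `write` helper above, the generated `src/shaders.rs` is expected to look roughly like the following (shader name and GLSL body are invented for illustration). Embedding the sources in the wasm is what later lets `ShaderManager::new()` drop its `shaders: JsValue` argument in the `lib.rs` hunk:

```rust
// Hypothetical excerpt of the generated src/shaders.rs.
use std::collections::HashMap;

#[allow(dead_code)]
pub fn get_all() -> HashMap<&'static str, &'static str> {
    let mut out = HashMap::new();
    out.insert(
        "line_inst.vert",
        r#"#version 300 es
in vec2 ndc_pos;
void main() { gl_Position = vec4(ndc_pos, 0.0, 1.0); }"#,
    );
    out
}
```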

View File

@@ -2,7 +2,6 @@ use crate::{
//async_task::{BuildCatalogIndex, ParseTableTask, TaskExecutor, TaskResult, TaskType},
camera::CameraViewPort,
downloader::Downloader,
grid::ProjetedGrid,
healpix::coverage::HEALPixCoverage,
inertia::Inertia,
math::{
@@ -10,9 +9,10 @@ use crate::{
angle::{Angle, ArcDeg},
lonlat::{LonLat, LonLatT},
},
renderable::grid::ProjetedGrid,
renderable::Layers,
renderable::{
catalog::Manager, coverage::MOCRenderer, line::RasterizedLineRenderer, ImageCfg, Renderer,
catalog::Manager, line::RasterizedLineRenderer, moc::MOCRenderer, ImageCfg, Renderer,
},
shader::ShaderManager,
tile_fetcher::TileFetcherQueue,
@@ -52,7 +52,7 @@ pub struct App {
//ui: GuiRef,
shaders: ShaderManager,
camera: CameraViewPort,
pub camera: CameraViewPort,
downloader: Downloader,
tile_fetcher: TileFetcherQueue,
@@ -94,7 +94,7 @@ pub struct App {
colormaps: Colormaps,
projection: ProjectionType,
pub projection: ProjectionType,
// Async data receivers
fits_send: async_channel::Sender<ImageCfg>,
@@ -145,6 +145,7 @@ impl App {
//gl.enable(WebGl2RenderingContext::CULL_FACE);
//gl.cull_face(WebGl2RenderingContext::BACK);
//gl.enable(WebGl2RenderingContext::CULL_FACE);
// The tile buffer responsible for the tile requests
let downloader = Downloader::new();
@@ -165,7 +166,7 @@ impl App {
let manager = Manager::new(&gl, &mut shaders, &camera, &resources)?;
// Grid definition
let grid = ProjetedGrid::new(aladin_div)?;
let grid = ProjetedGrid::new(gl.clone(), aladin_div)?;
// Variable storing the location to move to
let inertia = None;
@@ -190,7 +191,7 @@ impl App {
let request_for_new_tiles = true;
let moc = MOCRenderer::new()?;
let moc = MOCRenderer::new(&gl)?;
gl.clear_color(0.15, 0.15, 0.15, 1.0);
let (fits_send, fits_recv) = async_channel::unbounded::<ImageCfg>();
@@ -522,12 +523,7 @@ impl App {
pub(crate) fn set_moc_cfg(&mut self, cfg: al_api::moc::MOC) -> Result<(), JsValue> {
self.moc
.set_cfg(
cfg,
&mut self.camera,
&self.projection,
&mut self.line_renderer,
)
.set_cfg(cfg, &mut self.camera, &self.projection, &mut self.shaders)
.ok_or_else(|| JsValue::from_str("MOC not found"))?;
self.request_redraw = true;
@@ -853,16 +849,6 @@ impl App {
Ok(has_camera_moved)
}
pub(crate) fn reset_north_orientation(&mut self) {
// Reset the rotation around the center if there is one
self.camera
.set_rotation_around_center(Angle(0.0), &self.projection);
// Reset the camera position to its current position
// this will keep the current position but reset the orientation
// so that the north pole is at the top of the center.
self.set_center(&self.get_center());
}
pub(crate) fn read_pixel(&self, pos: &Vector2<f64>, layer: &str) -> Result<JsValue, JsValue> {
if let Some(lonlat) = self.screen_to_world(pos) {
if let Some(survey) = self.layers.get_hips_from_layer(layer) {
@@ -962,26 +948,24 @@ impl App {
//let fbo_view = &self.fbo_view;
//catalogs.draw(&gl, shaders, camera, colormaps, fbo_view)?;
//catalogs.draw(&gl, shaders, camera, colormaps, None, self.projection)?;
self.line_renderer.begin();
//Time::measure_perf("moc draw", || {
self.moc.draw(
&mut self.shaders,
&mut self.camera,
&self.projection,
&mut self.line_renderer,
);
&mut self.shaders,
//&mut self.line_renderer,
)?;
self.line_renderer.begin();
//Time::measure_perf("moc draw", || {
// Ok(())
//})?;
self.grid.draw(
&self.camera,
&mut self.shaders,
&self.projection,
&mut self.line_renderer,
)?;
self.grid
.draw(&self.camera, &self.projection, &mut self.shaders)?;
self.line_renderer.end();
self.line_renderer.draw(&self.camera)?;
self.line_renderer
.draw(&mut self.shaders, &self.camera, &self.projection)?;
//let dpi = self.camera.get_dpi();
//ui.draw(&gl, dpi)?;
@@ -1411,10 +1395,10 @@ impl App {
pub(crate) fn world_to_screen(&self, ra: f64, dec: f64) -> Option<Vector2<f64>> {
let lonlat = LonLatT::new(ArcDeg(ra).into(), ArcDeg(dec).into());
let model_pos_xyz = lonlat.vector();
let icrs_pos = lonlat.vector();
self.projection
.view_to_screen_space(&model_pos_xyz, &self.camera)
.icrs_celestial_to_screen_space(&icrs_pos, &self.camera)
}
pub(crate) fn screen_to_world(&self, pos: &Vector2<f64>) -> Option<LonLatT<f64>> {
@@ -1445,11 +1429,11 @@ impl App {
LonLatT::new(ra, dec)
}
/// lonlat must be given in icrs frame
pub(crate) fn set_center(&mut self, lonlat: &LonLatT<f64>) {
self.prev_cam_position = self.camera.get_center().truncate();
self.camera
.set_center(lonlat, CooSystem::ICRS, &self.projection);
self.camera.set_center(lonlat, &self.projection);
self.request_for_new_tiles = true;
// And stop the current inertia as well if there is one
@@ -1540,17 +1524,17 @@ impl App {
self.inertia = Some(Inertia::new(ampl.to_radians(), axis))
}
pub(crate) fn rotate_around_center(&mut self, theta: ArcDeg<f64>) {
pub(crate) fn set_view_center_pos_angle(&mut self, theta: ArcDeg<f64>) {
self.camera
.set_rotation_around_center(theta.into(), &self.projection);
.set_view_center_pos_angle(theta.into(), &self.projection);
// New tiles can be needed and some tiles can be removed
self.request_for_new_tiles = true;
self.request_redraw = true;
}
pub(crate) fn get_rotation_around_center(&self) -> &Angle<f64> {
self.camera.get_rotation_around_center()
pub(crate) fn get_north_shift_angle(&self) -> Angle<f64> {
self.camera.get_north_shift_angle()
}
pub(crate) fn set_fov(&mut self, fov: Angle<f64>) {

View File

@@ -6,16 +6,15 @@ use crate::math::sph_geom::region::{Intersection, PoleContained, Region};
use crate::math::{projection::Projection, sph_geom::bbox::BoundingBox};
use crate::LonLatT;
use crate::ProjectionType;
use std::iter;
fn ndc_to_world(
ndc_coo: &[XYNDC],
ndc_coo: &[XYNDC<f64>],
ndc_to_clip: &Vector2<f64>,
clip_zoom_factor: f64,
projection: &ProjectionType,
) -> Option<Vec<XYZWWorld>> {
) -> Option<Vec<XYZWWorld<f64>>> {
// Deproject the FOV from ndc to the world space
let mut world_coo = Vec::with_capacity(ndc_coo.len());
@@ -35,7 +34,7 @@ fn ndc_to_world(
Some(world_coo)
}
fn world_to_model(world_coo: &[XYZWWorld], w2m: &Matrix4<f64>) -> Vec<XYZWModel> {
fn world_to_model(world_coo: &[XYZWWorld<f64>], w2m: &Matrix4<f64>) -> Vec<XYZWModel<f64>> {
let mut model_coo = Vec::with_capacity(world_coo.len());
for w in world_coo.iter() {
@@ -61,9 +60,9 @@ const NUM_VERTICES: usize = 4 + 2 * NUM_VERTICES_WIDTH + 2 * NUM_VERTICES_HEIGHT
// This struct belongs to the CameraViewPort
pub struct FieldOfView {
// Vertices
ndc_vertices: Vec<XYNDC>,
world_vertices: Option<Vec<XYZWWorld>>,
model_vertices: Option<Vec<XYZWModel>>,
ndc_vertices: Vec<XYNDC<f64>>,
world_vertices: Option<Vec<XYZWWorld<f64>>>,
model_vertices: Option<Vec<XYZWModel<f64>>>,
reg: Region,
}
@@ -183,7 +182,7 @@ impl FieldOfView {
}
}
pub fn get_vertices(&self) -> Option<&Vec<XYZWModel>> {
pub fn get_vertices(&self) -> Option<&Vec<XYZWModel<f64>>> {
self.model_vertices.as_ref()
}

View File

@@ -1,7 +1,7 @@
pub mod viewport;
use crate::math::lonlat::LonLat;
use crate::math::projection::coo_space::XYZWModel;
pub use viewport::{CameraViewPort};
pub use viewport::CameraViewPort;
pub mod fov;
pub use fov::FieldOfView;
@@ -14,7 +14,7 @@ use crate::ProjectionType;
pub fn build_fov_coverage(
depth: u8,
fov: &FieldOfView,
camera_center: &XYZWModel,
camera_center: &XYZWModel<f64>,
camera_frame: CooSystem,
frame: CooSystem,
proj: &ProjectionType,

View File

@@ -1,13 +1,11 @@
use crate::healpix::cell::HEALPixCell;
use crate::healpix::cell::MAX_HPX_DEPTH;
use crate::camera::XYZWModel;
use crate::healpix::cell::HEALPixCell;
use crate::math::projection::*;
use crate::HEALPixCoverage;
use std::ops::Range;
use moclib::moc::{range::op::degrade::degrade, RangeMOCIterator};
pub(super) struct ViewHpxCells {
hpx_cells: [HpxCells; NUM_COOSYSTEM],
@@ -32,7 +30,7 @@ impl ViewHpxCells {
&mut self,
camera_depth: u8,
fov: &FieldOfView,
center: &XYZWModel,
center: &XYZWModel<f64>,
camera_frame: CooSystem,
proj: &ProjectionType,
// survey frame
@@ -50,7 +48,7 @@ impl ViewHpxCells {
&mut self,
camera_depth: u8,
fov: &FieldOfView,
center: &XYZWModel,
center: &XYZWModel<f64>,
camera_frame: CooSystem,
proj: &ProjectionType,
// survey frame
@@ -70,7 +68,7 @@ impl ViewHpxCells {
&mut self,
camera_depth: u8,
fov: &FieldOfView,
center: &XYZWModel,
center: &XYZWModel<f64>,
camera_frame: CooSystem,
proj: &ProjectionType,
) {
@@ -82,28 +80,38 @@ impl ViewHpxCells {
}
}
pub(super) fn get_cells<'a>(
&'a mut self,
depth: u8,
frame: CooSystem,
) -> impl Iterator<Item = &'a HEALPixCell> {
pub(super) fn get_cells(&self, depth: u8, frame: CooSystem) -> Vec<HEALPixCell> {
self.hpx_cells[frame as usize].get_cells(depth)
}
pub(super) fn get_cov(&self, frame: CooSystem) -> &HEALPixCoverage {
self.hpx_cells[frame as usize].get_cov()
}
/*pub(super) fn has_changed(&mut self) -> bool {
let mut c = false;
for (frame, num_req) in self.reg_frames.iter().enumerate() {
// if there are surveys/camera requesting the coverage
if *num_req > 0 {
c |= self.hpx_cells[frame].has_view_changed();
}
}
c
}*/
}
// Contains the cells being in the FOV for a specific
pub struct HpxCells {
frame: CooSystem,
// the set of cells all depth
cells: Vec<HEALPixCell>,
//cells: Vec<HEALPixCell>,
// An index vector referring to the indices of each depth cells
idx_rng: [Option<Range<usize>>; MAX_HPX_DEPTH as usize + 1],
//idx_rng: [Option<Range<usize>>; MAX_HPX_DEPTH as usize + 1],
// Coverage created in the frame
cov: HEALPixCoverage,
// boolean refering to if the cells in the view has changed
//new_cells: bool,
}
impl Default for HpxCells {
@@ -113,22 +121,23 @@ impl Default for HpxCells {
}
use al_api::coo_system::{CooSystem, NUM_COOSYSTEM};
use moclib::moc::RangeMOCIntoIterator;
use super::FieldOfView;
impl HpxCells {
pub fn new(frame: CooSystem) -> Self {
let cells = Vec::new();
//let cells = Vec::new();
let cov = HEALPixCoverage::empty(29);
let idx_rng = Default::default();
//let idx_rng = Default::default();
Self {
cells,
idx_rng,
//cells,
//idx_rng,
cov,
frame,
//new_cells: true,
}
}
@@ -140,7 +149,7 @@ impl HpxCells {
&mut self,
camera_depth: u8,
fov: &FieldOfView,
center: &XYZWModel,
center: &XYZWModel<f64>,
camera_frame: CooSystem,
proj: &ProjectionType,
) {
@@ -149,63 +158,72 @@ impl HpxCells {
super::build_fov_coverage(camera_depth, fov, center, camera_frame, self.frame, proj);
// Clear the old cells
self.cells.clear();
/*let r = self.idx_rng[camera_depth as usize]
.as_ref()
.unwrap_or(&(0..0));
let old_cells = &self.cells[r.clone()];
self.idx_rng = Default::default();
let mut new_cells = false;
// Compute the cells at the tile_depth
let tile_depth_cells_iter = self
let cells = self
.cov
.flatten_to_fixed_depth_cells()
.map(|idx| HEALPixCell(camera_depth, idx));
.enumerate()
.map(|(j, idx)| {
let c = HEALPixCell(camera_depth, idx);
let num_past = self.cells.len();
self.cells.extend(tile_depth_cells_iter);
if j >= old_cells.len() || old_cells[j] != c {
new_cells = true;
}
c
})
.collect::<Vec<_>>();
if cells.len() != old_cells.len() {
new_cells = true;
}
self.cells = cells;
let num_cur = self.cells.len();
self.idx_rng[camera_depth as usize] = Some(0..num_cur);
self.idx_rng[camera_depth as usize] = Some(num_past..num_cur);
if new_cells {
self.new_cells = true;
}*/
}
// Accessors
// depth MUST be < to camera tile depth
pub fn get_cells<'a>(&'a mut self, depth: u8) -> impl Iterator<Item = &'a HEALPixCell> {
let Range { start, end } = if let Some(idx) = self.idx_rng[depth as usize].as_ref() {
idx.start..idx.end
pub fn get_cells(&self, depth: u8) -> Vec<HEALPixCell> {
let cov_depth = self.cov.depth_max();
if depth == cov_depth {
self.cov
.flatten_to_fixed_depth_cells()
.map(move |idx| HEALPixCell(depth, idx))
.collect()
} else if depth > self.cov.depth_max() {
let cov_d = self.cov.depth_max();
let dd = depth - cov_d;
// compute the cells from the coverage
let cells_iter = self
.cov
self.cov
.flatten_to_fixed_depth_cells()
.map(|idx| {
.flat_map(move |idx| {
// idx is at depth_max
HEALPixCell(cov_d, idx).get_children_cells(dd)
})
.flatten();
// add them and store the cells for latter reuse
let num_past = self.cells.len();
self.cells.extend(cells_iter);
let num_cur = self.cells.len();
self.idx_rng[depth as usize] = Some(num_past..num_cur);
num_past..num_cur
.collect()
} else {
// compute the cells from the coverage
let degraded_moc = self.cov.degraded(depth);
let cells_iter = degraded_moc
degrade((&self.cov.0).into_range_moc_iter(), depth)
.flatten_to_fixed_depth_cells()
.map(|idx| HEALPixCell(depth, idx));
// add them and store the cells for latter reuse
let num_past = self.cells.len();
self.cells.extend(cells_iter);
let num_cur = self.cells.len();
self.idx_rng[depth as usize] = Some(num_past..num_cur);
num_past..num_cur
};
self.cells[start..end].iter()
.map(move |idx| HEALPixCell(depth, idx))
.collect()
}
}
/*
@@ -250,8 +268,9 @@ impl HpxCells {
}*/
/*#[inline]
pub fn has_view_changed(&self) -> bool {
//self.new_cells.is_there_new_cells_added()
!self.view_unchanged
pub fn has_view_changed(&mut self) -> bool {
let new_cells = self.new_cells;
self.new_cells = false;
new_cells
}*/
}
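
`get_cells` now recomputes the cell list from the coverage on every call instead of caching per-depth index ranges: an equal depth flattens the MOC directly, a deeper request expands each cell into its children, and a shallower one degrades the MOC first. The children expansion relies on the NESTED indexing property sketched below (a standalone helper, not the project's `get_children_cells`):

```rust
// Sketch: in the NESTED scheme, descending `dd` orders maps a cell's index to a
// contiguous block of 4^dd child indices.
fn children_idx_range(depth: u8, idx: u64, dd: u8) -> (u8, std::ops::Range<u64>) {
    let shift = 2 * dd as u64;
    (depth + dd, (idx << shift)..((idx + 1) << shift))
}

fn main() {
    // Base cell 3 (depth 0) covers the 16 cells 48..64 at depth 2.
    let (depth, range) = children_idx_range(0, 3, 2);
    assert_eq!((depth, range), (2, 48..64));
}
```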

View File

@@ -6,22 +6,25 @@ pub enum UserAction {
Starting = 4,
}
// Longitude reversed identity matrix
const ID_R: &Matrix4<f64> = &Matrix4::new(
-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0,
);
use super::{fov::FieldOfView, view_hpx_cells::ViewHpxCells};
use crate::healpix::cell::HEALPixCell;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::angle::ToAngle;
use crate::math::{projection::coo_space::XYZWModel, projection::domain::sdf::ProjDef};
use cgmath::{Matrix4, Vector2};
pub struct CameraViewPort {
// The field of view angle
aperture: Angle<f64>,
center: Vector4<f64>,
// The rotation of the camera
rotation_center_angle: Angle<f64>,
center: Vector4<f64>,
w2m_rot: Rotation<f64>,
center_rot: Angle<f64>,
w2m: Matrix4<f64>,
m2w: Matrix4<f64>,
@@ -98,8 +101,8 @@ impl CameraViewPort {
let w2m = Matrix4::identity();
let m2w = w2m;
let center = Vector4::new(0.0, 0.0, 1.0, 1.0);
let center_rot = Angle(0.0);
let center = Vector4::new(0.0, 0.0, 0.0, 1.0);
let moved = false;
let zoomed = false;
@@ -119,9 +122,6 @@ impl CameraViewPort {
let width = width * dpi;
let height = height * dpi;
//let dpi = 1.0;
//gl.scissor(0, 0, width as i32, height as i32);
let aspect = height / width;
let ndc_to_clip = Vector2::new(1.0, (height as f64) / (width as f64));
let clip_zoom_factor = 1.0;
@@ -131,7 +131,6 @@ impl CameraViewPort {
let is_allsky = true;
let time_last_move = Time::now();
let rotation_center_angle = Angle(0.0);
let reversed_longitude = false;
let texture_depth = 0;
@@ -140,6 +139,7 @@ impl CameraViewPort {
CameraViewPort {
// The field of view angle
aperture,
center_rot,
center,
// The rotation of the camera
w2m_rot,
@@ -147,7 +147,6 @@ impl CameraViewPort {
m2w,
dpi,
rotation_center_angle,
// The width over height ratio
aspect,
// The width of the screen in pixels
@@ -206,15 +205,15 @@ impl CameraViewPort {
);
}
/*pub fn has_new_hpx_cells(&mut self) -> bool {
self.view_hpx_cells.has_changed()
}*/
pub fn get_cov(&self, frame: CooSystem) -> &HEALPixCoverage {
self.view_hpx_cells.get_cov(frame)
}
pub fn get_hpx_cells<'a>(
&'a mut self,
depth: u8,
frame: CooSystem,
) -> impl Iterator<Item = &'a HEALPixCell> {
pub fn get_hpx_cells(&self, depth: u8, frame: CooSystem) -> Vec<HEALPixCell> {
self.view_hpx_cells.get_cells(depth, frame)
}
@@ -228,12 +227,12 @@ impl CameraViewPort {
// check the projection
match proj {
ProjectionType::Tan(_) => self.aperture >= 100.0_f64.to_radians().to_angle(),
ProjectionType::Mer(_) => self.aperture >= 200.0_f64.to_radians().to_angle(),
ProjectionType::Mer(_) => self.aperture >= 120.0_f64.to_radians().to_angle(),
ProjectionType::Stg(_) => self.aperture >= 200.0_f64.to_radians().to_angle(),
ProjectionType::Sin(_) => false,
ProjectionType::Ait(_) => false,
ProjectionType::Mol(_) => false,
ProjectionType::Zea(_) => false,
ProjectionType::Ait(_) => self.aperture >= 100.0_f64.to_radians().to_angle(),
ProjectionType::Mol(_) => self.aperture >= 100.0_f64.to_radians().to_angle(),
ProjectionType::Zea(_) => self.aperture >= 140.0_f64.to_radians().to_angle(),
}
}
@@ -474,10 +473,11 @@ impl CameraViewPort {
self.update_rot_matrices(proj);
}
pub fn set_center(&mut self, lonlat: &LonLatT<f64>, coo_sys: CooSystem, proj: &ProjectionType) {
/// lonlat must be given in icrs frame
pub fn set_center(&mut self, lonlat: &LonLatT<f64>, proj: &ProjectionType) {
let icrs_pos: Vector4<_> = lonlat.vector();
let view_pos = coosys::apply_coo_system(coo_sys, self.get_coo_system(), &icrs_pos);
let view_pos = CooSystem::ICRS.to(self.get_coo_system()) * icrs_pos;
let rot = Rotation::from_sky_position(&view_pos);
// Apply the rotation to the camera to go
@@ -524,13 +524,8 @@ impl CameraViewPort {
if self.reversed_longitude != reversed_longitude {
self.reversed_longitude = reversed_longitude;
self.rotation_center_angle = -self.rotation_center_angle;
self.update_rot_matrices(proj);
}
// The camera is reversed => it has moved
self.moved = true;
self.time_last_move = Time::now();
}
pub fn get_longitude_reversed(&self) -> bool {
@@ -558,7 +553,7 @@ impl CameraViewPort {
self.clip_zoom_factor
}
pub fn get_vertices(&self) -> Option<&Vec<XYZWModel>> {
pub fn get_vertices(&self) -> Option<&Vec<XYZWModel<f64>>> {
self.fov.get_vertices()
}
@@ -596,14 +591,17 @@ impl CameraViewPort {
self.zoomed = false;
}
#[inline]
pub fn get_aperture(&self) -> Angle<f64> {
self.aperture
}
#[inline]
pub fn get_center(&self) -> &Vector4<f64> {
&self.center
}
#[inline]
pub fn is_allsky(&self) -> bool {
self.is_allsky
}
@@ -616,13 +614,14 @@ impl CameraViewPort {
self.coo_sys
}
pub fn set_rotation_around_center(&mut self, theta: Angle<f64>, proj: &ProjectionType) {
self.rotation_center_angle = theta;
pub fn set_view_center_pos_angle(&mut self, phi: Angle<f64>, proj: &ProjectionType) {
self.center_rot = phi;
self.update_rot_matrices(proj);
}
pub fn get_rotation_around_center(&self) -> &Angle<f64> {
&self.rotation_center_angle
pub fn get_north_shift_angle(&self) -> Angle<f64> {
(self.w2m.x.y).atan2(self.w2m.y.y).to_angle()
}
}
use crate::ProjectionType;
@@ -654,21 +653,14 @@ impl CameraViewPort {
}
fn update_center(&mut self) {
// Longitude reversed identity matrix
const ID_R: &Matrix4<f64> = &Matrix4::new(
-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0,
);
// The center position is on the 3rd column of the w2m matrix
self.center = self.w2m.z;
let axis = &self.center.truncate();
let center_rot = Rotation::from_axis_angle(axis, self.rotation_center_angle);
// The center position is on the 3rd column of the w2m matrix
let center_axis = &self.center.truncate();
// Re-update the model matrix to take into account the rotation
// by theta around the center axis
let final_rot = center_rot * self.w2m_rot;
self.w2m = (&final_rot).into();
let r = Rotation::from_axis_angle(center_axis, self.center_rot) * self.w2m_rot;
self.w2m = (&r).into();
if self.reversed_longitude {
self.w2m = self.w2m * ID_R;
}
@@ -681,15 +673,8 @@ use al_core::shader::{SendUniforms, ShaderBound};
impl SendUniforms for CameraViewPort {
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader
//.attach_uniforms_from(&self.last_user_action)
//.attach_uniform("to_icrs", &self.system.to_icrs_j2000::<f32>())
//.attach_uniform("to_galactic", &self.system.to_gal::<f32>())
//.attach_uniform("model", &self.w2m)
//.attach_uniform("inv_model", &self.m2w)
.attach_uniform("ndc_to_clip", &self.ndc_to_clip) // Send ndc to clip
.attach_uniform("czf", &self.clip_zoom_factor) // Send clip zoom factor
.attach_uniform("window_size", &self.get_screen_size()) // Window size
.attach_uniform("fov", &self.aperture);
.attach_uniform("czf", &self.clip_zoom_factor); // Send clip zoom factor
shader
}

View File

@@ -30,6 +30,7 @@ pub fn vertices_lonlat<S: BaseFloat>(cell: &HEALPixCell) -> [LonLatT<S>; 4] {
}
use crate::Abort;
/// Get the grid
#[allow(dead_code)]
pub fn grid_lonlat<S: BaseFloat>(cell: &HEALPixCell, n_segments_by_side: u16) -> Vec<LonLatT<S>> {
debug_assert!(n_segments_by_side > 0);
healpix::nested::grid(cell.depth(), cell.idx(), n_segments_by_side)

View File

@@ -1,2 +0,0 @@
pub mod fits;
pub mod jpg;

View File

@@ -16,7 +16,8 @@
//extern crate itertools_num;
//extern crate num;
//extern crate num_traits;
use crate::time::Time;
//use crate::time::Time;
#[cfg(feature = "dbg")]
use std::panic;
pub trait Abort {
@@ -84,11 +85,11 @@ use crate::math::angle::ToAngle;
mod app;
pub mod async_task;
mod camera;
mod shaders;
mod coosys;
mod downloader;
mod fifo_cache;
mod grid;
mod healpix;
mod inertia;
pub mod math;
@@ -140,7 +141,6 @@ pub struct WebClient {
use al_api::hips::ImageMetadata;
use std::convert::TryInto;
#[wasm_bindgen]
impl WebClient {
/// Create the Aladin Lite webgl backend
@@ -153,23 +153,19 @@ impl WebClient {
#[wasm_bindgen(constructor)]
pub fn new(
aladin_div: &HtmlElement,
shaders: JsValue,
//_shaders: JsValue,
resources: JsValue,
) -> Result<WebClient, JsValue> {
//panic::set_hook(Box::new(console_error_panic_hook::hook));
#[cfg(feature = "dbg")]
panic::set_hook(Box::new(console_error_panic_hook::hook));
let shaders = serde_wasm_bindgen::from_value(shaders)?;
//let shaders = serde_wasm_bindgen::from_value(shaders)?;
let resources = serde_wasm_bindgen::from_value(resources)?;
let gl = WebGlContext::new(aladin_div)?;
let shaders = ShaderManager::new(&gl, shaders).unwrap_abort();
let shaders = ShaderManager::new().unwrap_abort();
// Event listeners callbacks
//let callback_position_changed = js_sys::Function::new_no_args("");
let app = App::new(
&gl, aladin_div, shaders, resources,
//callback_position_changed,
)?;
let app = App::new(&gl, aladin_div, shaders, resources)?;
let dt = DeltaTime::zero();
@@ -498,20 +494,30 @@ impl WebClient {
/// # Arguments
///
/// * `theta` - The rotation angle in degrees
#[wasm_bindgen(js_name = setRotationAroundCenter)]
pub fn rotate_around_center(&mut self, theta: f64) -> Result<(), JsValue> {
#[wasm_bindgen(js_name = setViewCenterPosAngle)]
pub fn set_view_center_pos_angle(&mut self, theta: f64) -> Result<(), JsValue> {
let theta = ArcDeg(theta);
self.app.rotate_around_center(theta);
self.app.set_view_center_pos_angle(theta);
Ok(())
}
/// Get the absolute orientation angle of the view
#[wasm_bindgen(js_name = getRotationAroundCenter)]
pub fn get_rotation_around_center(&mut self) -> Result<f64, JsValue> {
let theta = self.app.get_rotation_around_center();
#[wasm_bindgen(js_name = getViewCenterFromNorthPoleAngle)]
pub fn get_north_shift_angle(&mut self) -> Result<f64, JsValue> {
let phi = self.app.get_north_shift_angle();
Ok(phi.to_degrees())
}
Ok(theta.0 * 360.0 / (2.0 * std::f64::consts::PI))
#[wasm_bindgen(js_name = getNorthPoleCelestialPosition)]
pub fn get_north_pole_celestial_position(&mut self) -> Result<Box<[f64]>, JsValue> {
let np = self
.app
.projection
.north_pole_celestial_space(&self.app.camera);
let (lon, lat) = (np.lon().to_degrees(), np.lat().to_degrees());
Ok(Box::new([lon, lat]))
}
/// Get if the longitude axis is reversed
@@ -575,12 +581,6 @@ impl WebClient {
Ok(Box::new([lon_deg.0, lat_deg.0]))
}
/// Rest the north pole orientation to the top of the screen
#[wasm_bindgen(js_name = resetNorthOrientation)]
pub fn reset_north_orientation(&mut self) {
self.app.reset_north_orientation();
}
/// Go from a location to another one
///
/// # Arguments

View File

@@ -24,6 +24,7 @@ where
}
use cgmath::{Deg, Rad};
use serde::Deserialize;
// Convert a Rad<T> to an ArcDeg<T>
impl<T> From<Rad<T>> for ArcDeg<T>
where
@@ -244,7 +245,7 @@ pub enum SerializeFmt {
DMS,
HMS,
DMM,
DD
DD,
}
use al_api::angle_fmt::AngleSerializeFmt;
@@ -362,7 +363,8 @@ impl FormatType for HMS {
}
}
#[derive(Clone, Copy, Debug, Eq, Hash)]
#[derive(Clone, Copy, Debug, Eq, Hash, Deserialize)]
#[serde(rename_all = "camelCase")]
#[repr(C)]
pub struct Angle<S: BaseFloat>(pub S);
impl<S> Angle<S>
@@ -452,14 +454,14 @@ where
pub trait ToAngle<S>
where
S: BaseFloat
S: BaseFloat,
{
fn to_angle(self) -> Angle<S>;
}
impl<S> ToAngle<S> for S
where
S: BaseFloat
S: BaseFloat,
{
fn to_angle(self) -> Angle<S> {
Angle(self)

View File

@@ -8,9 +8,11 @@ pub trait LonLat<S: BaseFloat> {
fn lonlat(&self) -> LonLatT<S>;
fn from_lonlat(lonlat: &LonLatT<S>) -> Self;
}
use crate::math::angle::Angle;
#[derive(Clone, Copy, Debug)]
use serde::Deserialize;
#[derive(Clone, Copy, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
#[repr(C)]
pub struct LonLatT<S: BaseFloat>(pub Angle<S>, pub Angle<S>);
impl<S> LonLatT<S>
@@ -107,11 +109,10 @@ where
let theta = lonlat.lon();
let delta = lonlat.lat();
Vector3::<S>::new(
delta.cos() * theta.sin(),
delta.sin(),
delta.cos() * theta.cos(),
)
let (dc, ds) = (delta.cos(), delta.sin());
let (tc, ts) = (theta.cos(), theta.sin());
Vector3::<S>::new(dc * ts, ds, dc * tc)
}
}
@@ -180,19 +181,17 @@ pub fn xyzw_to_radec<S: BaseFloat>(v: &Vector4<S>) -> (Angle<S>, Angle<S>) {
#[inline]
pub fn radec_to_xyz<S: BaseFloat>(theta: Angle<S>, delta: Angle<S>) -> Vector3<S> {
Vector3::<S>::new(
delta.cos() * theta.sin(),
delta.sin(),
delta.cos() * theta.cos(),
)
let (dc, ds) = (delta.cos(), delta.sin());
let (tc, ts) = (theta.cos(), theta.sin());
Vector3::<S>::new(dc * ts, ds, dc * tc)
}
#[inline]
pub fn radec_to_xyzw<S: BaseFloat>(theta: Angle<S>, delta: Angle<S>) -> Vector4<S> {
let (dc, ds) = (delta.cos(), delta.sin());
let (tc, ts) = (theta.cos(), theta.sin());
let xyz = radec_to_xyz(theta, delta);
Vector4::<S>::new(dc * ts, ds, dc * tc, S::one())
Vector4::<S>::new(xyz.x, xyz.y, xyz.z, S::one())
}
#[inline]
@@ -223,14 +222,14 @@ pub fn proj(
lonlat: &LonLatT<f64>,
projection: &ProjectionType,
camera: &CameraViewPort,
) -> Option<XYNDC> {
) -> Option<XYNDC<f64>> {
let xyzw = lonlat.vector();
projection.model_to_normalized_device_space(&xyzw, camera)
}
#[inline]
pub fn unproj(
ndc_xy: &XYNDC,
ndc_xy: &XYNDC<f64>,
projection: &ProjectionType,
camera: &CameraViewPort,
) -> Option<LonLatT<f64>> {
@@ -244,14 +243,14 @@ pub fn proj_to_screen(
lonlat: &LonLatT<f64>,
projection: &ProjectionType,
camera: &CameraViewPort,
) -> Option<XYScreen> {
) -> Option<XYScreen<f64>> {
let xyzw = lonlat.vector();
projection.model_to_screen_space(&xyzw, camera)
}
#[inline]
pub fn unproj_from_screen(
xy: &XYScreen,
xy: &XYScreen<f64>,
projection: &ProjectionType,
camera: &CameraViewPort,
) -> Option<LonLatT<f64>> {
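
The two conversion helpers above were deduplicated so that `radec_to_xyzw` reuses `radec_to_xyz`; both follow a convention with +y at the pole and +z toward (lon, lat) = (0, 0). A standalone check of that convention with plain arrays instead of `cgmath` vectors:

```rust
// Sketch of the unit-sphere convention used by radec_to_xyz above:
// x = cos(lat) * sin(lon), y = sin(lat), z = cos(lat) * cos(lon).
fn radec_to_xyz(lon: f64, lat: f64) -> [f64; 3] {
    let (dc, ds) = (lat.cos(), lat.sin());
    let (tc, ts) = (lon.cos(), lon.sin());
    [dc * ts, ds, dc * tc]
}

fn main() {
    // lon = 90°, lat = 0° lands on +x; the pole (lat = 90°) lands on +y.
    let v = radec_to_xyz(std::f64::consts::FRAC_PI_2, 0.0);
    assert!((v[0] - 1.0).abs() < 1e-12 && v[1].abs() < 1e-12 && v[2].abs() < 1e-12);
    let p = radec_to_xyz(0.0, std::f64::consts::FRAC_PI_2);
    assert!((p[1] - 1.0).abs() < 1e-12);
}
```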

View File

@@ -1,13 +1,18 @@
use cgmath::{
Vector2,
Vector3,
Vector4,
};
use cgmath::{Vector2, Vector3, Vector4};
pub type XYScreen = Vector2<f64>;
pub type XYNDC = Vector2<f64>;
pub type XYClip = Vector2<f64>;
pub type XYZWorld = Vector3<f64>;
pub type XYZWWorld = Vector4<f64>;
pub type XYZWModel = Vector4<f64>;
pub type XYZModel = Vector3<f64>;
pub type XYScreen<S> = Vector2<S>;
pub type XYNDC<S> = Vector2<S>;
pub type XYClip<S> = Vector2<S>;
pub type XYZWorld<S> = Vector3<S>;
pub type XYZModel<S> = Vector3<S>;
pub type XYZWWorld<S> = Vector4<S>;
pub type XYZWModel<S> = Vector4<S>;
pub enum CooSpace {
Screen,
NDC,
Clip,
World,
Model,
LonLat,
}

View File

@@ -1,14 +1,14 @@
use crate::math::projection::coo_space::XYClip;
pub struct Disk {
pub radius: f64
pub radius: f64,
}
use cgmath::InnerSpace;
use super::super::sdf::ProjDef;
use cgmath::InnerSpace;
impl ProjDef for Disk {
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
xy.magnitude() - self.radius
}
}
}

View File

@@ -8,13 +8,13 @@ pub struct Ellipse {
pub b: f64,
}
use cgmath::InnerSpace;
use super::super::sdf::ProjDef;
use cgmath::InnerSpace;
impl ProjDef for Ellipse {
fn sdf(&self, xy: &XYClip) -> f64 {
let mut p = Vector2::new( xy.x.abs(), xy.y.abs() );
let mut ab = Vector2::new( self.a, self.b );
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let mut p = Vector2::new(xy.x.abs(), xy.y.abs());
let mut ab = Vector2::new(self.a, self.b);
let sdf = if p.x == 0.0 {
-(self.b - p.y)
@@ -25,44 +25,44 @@ impl ProjDef for Ellipse {
p = Vector2::new(p.y, p.x);
ab = Vector2::new(ab.y, ab.x);
}
let l = ab.y*ab.y - ab.x*ab.x;
let m = ab.x*p.x/l;
let m2 = m*m;
let n = ab.y*p.y/l;
let n2 = n*n;
let c = (m2 + n2 - 1.0)/3.0;
let c3 = c*c*c;
let q = c3 + m2*n2*2.0;
let d = c3 + m2*n2;
let g = m + m*n2;
let l = ab.y * ab.y - ab.x * ab.x;
let m = ab.x * p.x / l;
let m2 = m * m;
let n = ab.y * p.y / l;
let n2 = n * n;
let c = (m2 + n2 - 1.0) / 3.0;
let c3 = c * c * c;
let q = c3 + m2 * n2 * 2.0;
let d = c3 + m2 * n2;
let g = m + m * n2;
let co = if d < 0.0 {
let p = (q/c3).acos()/3.0;
let p = (q / c3).acos() / 3.0;
let s = p.cos();
let t = p.sin()*(3.0_f64).sqrt();
let rx = ( -c*(s + t + 2.0) + m2 ).sqrt();
let ry = ( -c*(s - t + 2.0) + m2 ).sqrt();
( ry + (l).signum()*rx + ((g).abs()/(rx*ry)) - m)/2.0
let t = p.sin() * (3.0_f64).sqrt();
let rx = (-c * (s + t + 2.0) + m2).sqrt();
let ry = (-c * (s - t + 2.0) + m2).sqrt();
(ry + (l).signum() * rx + ((g).abs() / (rx * ry)) - m) / 2.0
} else {
let h = 2.0*m*n*(( d ).sqrt());
let s = (q+h).signum()*( (q+h).abs() ).powf( 1.0/3.0 );
let u = (q-h).signum()*( (q-h).abs() ).powf( 1.0/3.0 );
let rx = -s - u - c*4.0 + 2.0*m2;
let ry = (s - u)*(3.0_f64).sqrt();
let rm = ( rx*rx + ry*ry ).sqrt();
let p = ry/((rm-rx).sqrt());
(p + (2.0*g/rm) - m)/2.0
let h = 2.0 * m * n * ((d).sqrt());
let s = (q + h).signum() * ((q + h).abs()).powf(1.0 / 3.0);
let u = (q - h).signum() * ((q - h).abs()).powf(1.0 / 3.0);
let rx = -s - u - c * 4.0 + 2.0 * m2;
let ry = (s - u) * (3.0_f64).sqrt();
let rm = (rx * rx + ry * ry).sqrt();
let p = ry / ((rm - rx).sqrt());
(p + (2.0 * g / rm) - m) / 2.0
};
let si = ( 1.0 - co*co ).sqrt();
let q = Vector2::new( ab.x*co, ab.y*si );
(q-p).magnitude() * (p.y-q.y).signum()
let si = (1.0 - co * co).sqrt();
let q = Vector2::new(ab.x * co, ab.y * si);
(q - p).magnitude() * (p.y - q.y).signum()
};
sdf
}
}
}

View File

@@ -3,14 +3,14 @@ use crate::math::projection::coo_space::XYClip;
use cgmath::Vector2;
pub struct Parabola {
// Quadratic coefficient
pub k: f64
pub k: f64,
}
use super::super::sdf::ProjDef;
use cgmath::InnerSpace;
impl ProjDef for Parabola {
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let mut xy = *xy;
// There is a singularity around x == 0
@@ -20,21 +20,17 @@ impl ProjDef for Parabola {
xy.x += 1e-4;
}
xy.x = xy.x.abs();
let ik = 1.0/self.k;
let p = ik*(xy.y - 0.5*ik)/3.0;
let q = 0.25*ik*ik*xy.x;
let h = q*q - p*p*p;
let ik = 1.0 / self.k;
let p = ik * (xy.y - 0.5 * ik) / 3.0;
let q = 0.25 * ik * ik * xy.x;
let h = q * q - p * p * p;
let r = h.abs().sqrt();
let x = if h>0.0 {
(q+r).powf(1.0/3.0) - (q-r).abs().powf(1.0/3.0)*(r-q).signum()
let x = if h > 0.0 {
(q + r).powf(1.0 / 3.0) - (q - r).abs().powf(1.0 / 3.0) * (r - q).signum()
} else {
2.0*(r.atan2(q)/3.0).cos()*p.sqrt()
2.0 * (r.atan2(q) / 3.0).cos() * p.sqrt()
};
let a = if xy.x - x < 0.0 {
-1.0
} else {
1.0
};
(xy-Vector2::new(x, self.k*x*x)).magnitude() * a
let a = if xy.x - x < 0.0 { -1.0 } else { 1.0 };
(xy - Vector2::new(x, self.k * x * x)).magnitude() * a
}
}
}

View File

@@ -2,22 +2,19 @@ use crate::math::projection::coo_space::XYClip;
use cgmath::Vector2;
pub struct Rect {
pub dim: Vector2<f64>
pub dim: Vector2<f64>,
}
use super::super::sdf::ProjDef;
use cgmath::InnerSpace;
impl ProjDef for Rect {
fn sdf(&self, xy: &XYClip) -> f64 {
let d = Vector2::new(
xy.x.abs() - self.dim.x,
xy.y.abs() - self.dim.y
);
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let d = Vector2::new(xy.x.abs() - self.dim.x, xy.y.abs() - self.dim.y);
let a = Vector2::new(d.x.max(0.0), d.y.max(0.0));
let b = (d.x.max(d.y)).min(0.0);
a.magnitude() + b
}
}
}

View File

@@ -10,30 +10,27 @@ pub struct Triangle {
use super::super::sdf::ProjDef;
use cgmath::InnerSpace;
impl ProjDef for Triangle {
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let e0 = self.p1 - self.p0;
let e1 = self.p2 - self.p1;
let e2 = self.p0 - self.p2;
let v0 = xy - self.p0;
let v1 = xy - self.p1;
let v2 = xy - self.p2;
let pq0 = v0 - e0 * ( v0.dot(e0) / e0.dot(e0) ).clamp( 0.0, 1.0 );
let pq1 = v1 - e1 * ( v1.dot(e1) / e1.dot(e1) ).clamp( 0.0, 1.0 );
let pq2 = v2 - e2 * ( v2.dot(e2) / e2.dot(e2) ).clamp( 0.0, 1.0 );
let s = e0.x*e2.y - e0.y*e2.x;
let d1 = Vector2::new(pq0.dot( pq0 ), s*(v0.x*e0.y-v0.y*e0.x));
let d2 = Vector2::new(pq1.dot( pq1 ), s*(v1.x*e1.y-v1.y*e1.x));
let d3 = Vector2::new(pq2.dot( pq2 ), s*(v2.x*e2.y-v2.y*e2.x));
let d = Vector2::new(
d1.x.min(d2.x.min(d3.x)),
d1.y.min(d2.y.min(d3.y))
);
-d.x.sqrt()*(d.y.signum())
let pq0 = v0 - e0 * (v0.dot(e0) / e0.dot(e0)).clamp(0.0, 1.0);
let pq1 = v1 - e1 * (v1.dot(e1) / e1.dot(e1)).clamp(0.0, 1.0);
let pq2 = v2 - e2 * (v2.dot(e2) / e2.dot(e2)).clamp(0.0, 1.0);
let s = e0.x * e2.y - e0.y * e2.x;
let d1 = Vector2::new(pq0.dot(pq0), s * (v0.x * e0.y - v0.y * e0.x));
let d2 = Vector2::new(pq1.dot(pq1), s * (v1.x * e1.y - v1.y * e1.x));
let d3 = Vector2::new(pq2.dot(pq2), s * (v2.x * e2.y - v2.y * e2.x));
let d = Vector2::new(d1.x.min(d2.x.min(d3.x)), d1.y.min(d2.y.min(d3.y)));
-d.x.sqrt() * (d.y.signum())
}
}
}

View File

@@ -1,16 +1,13 @@
use crate::math::projection::coo_space::XYClip;
use cgmath::Vector2;
use crate::math::HALF_PI;
use crate::math::angle::PI;
use super::{
sdf::ProjDef,
basic::{ellipse::Ellipse, triangle::Triangle},
op::{Diff, Translate},
basic::{
triangle::Triangle,
ellipse::Ellipse,
}
sdf::ProjDef,
};
use crate::math::angle::PI;
use crate::math::HALF_PI;
pub struct Cod {
pub r_max: f64,
@@ -41,7 +38,7 @@ impl Cod {
}
}
fn to_clip(&self, xy: &Vector2<f64>) -> XYClip {
fn to_clip(&self, xy: &Vector2<f64>) -> XYClip<f64> {
let x = (xy.x - self.x_min) / (self.x_max - self.x_min);
let y = (xy.y - self.y_min) / (self.y_max - self.y_min);
@@ -50,20 +47,26 @@ impl Cod {
}
impl ProjDef for Cod {
fn sdf(&self, xy: &XYClip) -> f64 {
let y_mean = (self.y_min + self.y_max)*0.5;
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let y_mean = (self.y_min + self.y_max) * 0.5;
let center_ellipse = self.to_clip(&Vector2::new(0.0, self.y0 + y_mean));
// Big frontier ellipse
let a = 1.0;
let b = 2.0 * (2.356194490192345 + self.y0) / (2.356194490192345 + 3.0328465566001492);
let e = b / a;
let ext_ellipse = Translate { off: center_ellipse, def: Ellipse { a: a, b: b } };
let ext_ellipse = Translate {
off: center_ellipse,
def: Ellipse { a: a, b: b },
};
// Small ellipse where projection is not defined
let b_int = 2.0 * self.r_min / (2.356194490192345 + 3.0328465566001492);
let a_int = b_int / e;
let int_ellipse = Translate { off: center_ellipse, def: Ellipse { a: a_int, b: b_int } };
let int_ellipse = Translate {
off: center_ellipse,
def: Ellipse { a: a_int, b: b_int },
};
// The top edges
let gamma = PI * self.c - HALF_PI;
@@ -75,9 +78,9 @@ impl ProjDef for Cod {
let tri = Triangle {
p0: center_ellipse,
p1: self.to_clip(&b),
p2: self.to_clip(&c)
p2: self.to_clip(&c),
};
Diff::new(Diff::new(ext_ellipse, int_ellipse), tri).sdf(xy)
}
}
}

View File

@@ -3,12 +3,12 @@ use cgmath::Vector2;
pub struct FullScreen;
use super::{
basic::rect::Rect,
sdf::ProjDef,
};
use super::{basic::rect::Rect, sdf::ProjDef};
impl ProjDef for FullScreen {
fn sdf(&self, xy: &XYClip) -> f64 {
Rect { dim: Vector2::new(1.0, 1.0) }.sdf(xy)
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
Rect {
dim: Vector2::new(1.0, 1.0),
}
.sdf(xy)
}
}

View File

@@ -5,58 +5,57 @@ pub struct Hpx;
use super::sdf::ProjDef;
use super::{
basic::{rect::Rect, triangle::Triangle},
op::Union,
basic::{
triangle::Triangle,
rect::Rect
}
};
impl ProjDef for Hpx {
fn sdf(&self, xy: &XYClip) -> f64 {
let rect = Rect { dim: Vector2::new(1.0, 0.5) };
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let rect = Rect {
dim: Vector2::new(1.0, 0.5),
};
let t1 = Triangle {
p0: Vector2::new(1.0, 0.5),
p1: Vector2::new(0.5, 0.5),
p2: Vector2::new(0.75, 1.0)
p2: Vector2::new(0.75, 1.0),
};
let t2 = Triangle {
p0: Vector2::new(0.5, 0.5),
p1: Vector2::new(0.0, 0.5),
p2: Vector2::new(0.25, 1.0)
p2: Vector2::new(0.25, 1.0),
};
let t3 = Triangle {
p0: Vector2::new(-1.0, 0.5),
p1: Vector2::new(-0.5, 0.5),
p2: Vector2::new(-0.75, 1.0)
p2: Vector2::new(-0.75, 1.0),
};
let t4 = Triangle {
p0: Vector2::new(-0.5, 0.5),
p1: Vector2::new(-0.0, 0.5),
p2: Vector2::new(-0.25, 1.0)
p2: Vector2::new(-0.25, 1.0),
};
let t5 = Triangle {
p0: Vector2::new(-1.0, -0.5),
p1: Vector2::new(-0.5, -0.5),
p2: Vector2::new(-0.75, -1.0)
p2: Vector2::new(-0.75, -1.0),
};
let t6 = Triangle {
p0: Vector2::new(-0.5, -0.5),
p1: Vector2::new(-0.0, -0.5),
p2: Vector2::new(-0.25, -1.0)
p2: Vector2::new(-0.25, -1.0),
};
let t7 = Triangle {
p0: Vector2::new(1.0, -0.5),
p1: Vector2::new(0.5, -0.5),
p2: Vector2::new(0.75, -1.0)
p2: Vector2::new(0.75, -1.0),
};
let t8 = Triangle {
p0: Vector2::new(0.5, -0.5),
p1: Vector2::new(0.0, -0.5),
p2: Vector2::new(0.25, -1.0)
p2: Vector2::new(0.25, -1.0),
};
let t12 = Union::new(t1, t2);

View File

@@ -1,10 +1,10 @@
use super::sdf::ProjDef;
use crate::math::projection::XYClip;
use cgmath::Vector2;
use super::sdf::ProjDef;
pub struct Scale<T>
where
T: ProjDef
T: ProjDef,
{
pub scale: Vector2<f64>,
pub def: T,
@@ -12,17 +12,18 @@ where
impl<T> ProjDef for Scale<T>
where
T: ProjDef
T: ProjDef,
{
/// Signed distance function to the definition domain region
fn sdf(&self, xy: &XYClip) -> f64 {
self.def.sdf(&Vector2::new(xy.x / self.scale.x, xy.y / self.scale.y))
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
self.def
.sdf(&Vector2::new(xy.x / self.scale.x, xy.y / self.scale.y))
}
}
pub struct Translate<T>
where
T: ProjDef
T: ProjDef,
{
pub off: Vector2<f64>,
pub def: T,
@@ -30,10 +31,10 @@ where
impl<T> ProjDef for Translate<T>
where
T: ProjDef
T: ProjDef,
{
/// Signed distance function to the definition domain region
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
self.def.sdf(&(*xy - self.off))
}
}
@@ -42,7 +43,7 @@ where
pub struct Union<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
sdf1: T,
sdf2: U,
@@ -51,23 +52,20 @@ where
impl<T, U> Union<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
pub fn new(sdf1: T, sdf2: U) -> Self {
Self {
sdf1,
sdf2,
}
Self { sdf1, sdf2 }
}
}
impl<T, U> ProjDef for Union<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
/// Signed distance function to the definition domain region
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let s1 = self.sdf1.sdf(xy);
let s2 = self.sdf2.sdf(xy);
@@ -79,7 +77,7 @@ where
pub struct Inter<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
sdf1: T,
sdf2: U,
@@ -88,23 +86,20 @@ where
impl<T, U> Inter<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
pub fn new(sdf1: T, sdf2: U) -> Self {
Self {
sdf1,
sdf2,
}
Self { sdf1, sdf2 }
}
}
impl<T, U> ProjDef for Inter<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
/// Signed distance function to the definition domain region
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let s1 = self.sdf1.sdf(xy);
let s2 = self.sdf2.sdf(xy);
@@ -116,7 +111,7 @@ where
pub struct Diff<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
sdf1: T,
sdf2: U,
@@ -125,27 +120,24 @@ where
impl<T, U> Diff<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
pub fn new(sdf1: T, sdf2: U) -> Self {
Self {
sdf1,
sdf2,
}
Self { sdf1, sdf2 }
}
}
impl<T, U> ProjDef for Diff<T, U>
where
T: ProjDef,
U: ProjDef
U: ProjDef,
{
/// Signed distance function to the definition domain region
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let s1 = self.sdf1.sdf(xy);
let s2 = self.sdf2.sdf(xy);
// intersection
(-s2).max(s1)
}
}
}
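
The Scale/Translate/Union/Inter/Diff combinators above compose signed distance functions by transforming the input point or taking the min/max of two distances. Below is a minimal standalone sketch of the same idea, using illustrative Vec2/Disk types rather than the crate's own:

// Standalone sketch of SDF composition (illustrative types, not the crate's).
#[derive(Clone, Copy)]
struct Vec2 { x: f64, y: f64 }

trait Sdf {
    /// Signed distance: negative inside the region, positive outside.
    fn sdf(&self, p: Vec2) -> f64;
    fn is_in(&self, p: Vec2) -> bool { self.sdf(p) <= 0.0 }
}

#[derive(Clone, Copy)]
struct Disk { center: Vec2, radius: f64 }

impl Sdf for Disk {
    fn sdf(&self, p: Vec2) -> f64 {
        let (dx, dy) = (p.x - self.center.x, p.y - self.center.y);
        (dx * dx + dy * dy).sqrt() - self.radius
    }
}

/// Union of two regions: min of the two distances.
struct Union<A, B>(A, B);
impl<A: Sdf, B: Sdf> Sdf for Union<A, B> {
    fn sdf(&self, p: Vec2) -> f64 { self.0.sdf(p).min(self.1.sdf(p)) }
}

/// Intersection of two regions: max of the two distances.
struct Inter<A, B>(A, B);
impl<A: Sdf, B: Sdf> Sdf for Inter<A, B> {
    fn sdf(&self, p: Vec2) -> f64 { self.0.sdf(p).max(self.1.sdf(p)) }
}

fn main() {
    let a = Disk { center: Vec2 { x: -0.5, y: 0.0 }, radius: 1.0 };
    let b = Disk { center: Vec2 { x: 0.5, y: 0.0 }, radius: 1.0 };
    let p = Vec2 { x: 1.2, y: 0.0 };   // inside b only
    assert!(Union(a, b).is_in(p));     // in the union
    assert!(!Inter(a, b).is_in(p));    // not in the intersection
}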

View File

@@ -4,17 +4,23 @@ use cgmath::Vector2;
pub struct Par;
use super::{
sdf::ProjDef,
basic::parabola::Parabola,
op::{Translate, Inter}
op::{Inter, Translate},
sdf::ProjDef,
};
impl ProjDef for Par {
fn sdf(&self, xy: &XYClip) -> f64 {
fn sdf(&self, xy: &XYClip<f64>) -> f64 {
let xy = Vector2::new(xy.y, xy.x);
let p1 = Translate { off: Vector2::new(0.0, -1.0), def: Parabola { k: 1.0 } };
let p2 = Translate { off: Vector2::new(0.0, 1.0), def: Parabola { k: -1.0 } };
let p1 = Translate {
off: Vector2::new(0.0, -1.0),
def: Parabola { k: 1.0 },
};
let p2 = Translate {
off: Vector2::new(0.0, 1.0),
def: Parabola { k: -1.0 },
};
Inter::new(p1, p2).sdf(&xy)
}
}
}

View File

@@ -3,28 +3,28 @@ use crate::math::projection::coo_space::XYClip;
#[enum_dispatch(ProjDefType)]
pub trait ProjDef {
fn is_in(&self, xy: &XYClip) -> bool {
fn is_in(&self, xy: &XYClip<f64>) -> bool {
self.sdf(xy) <= 0.0
}
/// Signed distance function to the definition domain region
fn sdf(&self, xy: &XYClip) -> f64;
fn sdf(&self, xy: &XYClip<f64>) -> f64;
}
use crate::math::vector::NormedVector2;
/// Project a vertex on a valid region defined by a Signed Distance Function (SDF)
///
///
/// # Arguments
///
/// * `p` - A vertex in the clipping space
/// * `p` - A vertex in the clipping space
/// * `dir` - The direction, given as a normed vector
/// * `valid_reg` - The projection definition region
pub fn ray_marching<P>(p: &XYClip, dir: &NormedVector2, valid_reg: &P) -> Option<XYClip>
pub fn ray_marching<P>(p: &XYClip<f64>, dir: &NormedVector2, valid_reg: &P) -> Option<XYClip<f64>>
where
P: ProjDef
P: ProjDef,
{
// This is done so that we get further a little bit
let in_clip_space = |p: &XYClip| -> bool {
let in_clip_space = |p: &XYClip<f64>| -> bool {
((-1.0)..=1.0).contains(&p.x) && ((-1.0)..=1.0).contains(&p.y)
};
@@ -49,13 +49,7 @@ where
}
}
use super::{
basic::disk::Disk,
full::FullScreen,
hpx::Hpx,
par::Par,
cod::Cod
};
use super::{basic::disk::Disk, cod::Cod, full::FullScreen, hpx::Hpx, par::Par};
// List of all the footprints
// found in Aladin Lite
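
The ray_marching helper above advances a point along a direction, stepping by the SDF value, until it lands on the boundary of the valid projection region or gives up after a fixed number of iterations. Here is a standalone sketch of that stepping loop on a unit disk; the region, the step count and the tolerance are illustrative, not the crate's:

// Standalone sketch of SDF ray marching (sphere tracing), illustrative only.

/// Distance to a unit disk centred at the origin (negative inside).
fn sdf_disk(p: (f64, f64)) -> f64 {
    (p.0 * p.0 + p.1 * p.1).sqrt() - 1.0
}

/// March `p` along the unit direction `dir` until the boundary is reached,
/// or give up after a fixed number of steps.
fn ray_march(mut p: (f64, f64), dir: (f64, f64)) -> Option<(f64, f64)> {
    const EPS: f64 = 1e-6;
    for _ in 0..64 {
        let d = sdf_disk(p);
        if d.abs() < EPS {
            return Some(p); // on the boundary
        }
        // Step by the absolute distance returned by the SDF.
        p = (p.0 + dir.0 * d.abs(), p.1 + dir.1 * d.abs());
    }
    None
}

fn main() {
    // Start outside the disk and march towards its centre.
    let hit = ray_march((2.0, 0.0), (-1.0, 0.0));
    assert!(hit.map(|(x, _)| (x - 1.0).abs() < 1e-3).unwrap_or(false));
}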

View File

@@ -23,9 +23,9 @@ use domain::{basic, full::FullScreen};
/* S <-> NDC space conversion methods */
pub fn screen_to_ndc_space(
pos_screen_space: &Vector2<f64>,
pos_screen_space: &XYScreen<f64>,
camera: &CameraViewPort,
) -> Vector2<f64> {
) -> XYNDC<f64> {
// Screen space in pixels to homogeneous screen space (values between [-1, 1])
let window_size = camera.get_screen_size();
let window_size = Vector2::new(window_size.x as f64, window_size.y as f64);
@@ -42,9 +42,9 @@ pub fn screen_to_ndc_space(
}
pub fn ndc_to_screen_space(
pos_normalized_device: &Vector2<f64>,
pos_normalized_device: &XYNDC<f64>,
camera: &CameraViewPort,
) -> Vector2<f64> {
) -> XYScreen<f64> {
let window_size = camera.get_screen_size();
let dpi = camera.get_dpi() as f64;
@@ -57,7 +57,7 @@ pub fn ndc_to_screen_space(
}
/* NDC <-> CLIP space conversion methods */
pub fn clip_to_ndc_space(pos_clip_space: &Vector2<f64>, camera: &CameraViewPort) -> Vector2<f64> {
pub fn clip_to_ndc_space(pos_clip_space: &XYClip<f64>, camera: &CameraViewPort) -> XYNDC<f64> {
let ndc_to_clip = camera.get_ndc_to_clip();
let clip_zoom_factor = camera.get_clip_zoom_factor();
@@ -68,9 +68,9 @@ pub fn clip_to_ndc_space(pos_clip_space: &Vector2<f64>, camera: &CameraViewPort)
}
pub fn ndc_to_clip_space(
pos_normalized_device: &Vector2<f64>,
pos_normalized_device: &XYNDC<f64>,
camera: &CameraViewPort,
) -> Vector2<f64> {
) -> XYClip<f64> {
let ndc_to_clip = camera.get_ndc_to_clip();
let clip_zoom_factor = camera.get_clip_zoom_factor();
@@ -82,17 +82,17 @@ pub fn ndc_to_clip_space(
/* S <-> CLIP space conversion methods */
pub fn clip_to_screen_space(
pos_clip_space: &Vector2<f64>,
pos_clip_space: &XYClip<f64>,
camera: &CameraViewPort,
) -> Vector2<f64> {
) -> XYScreen<f64> {
let pos_normalized_device = clip_to_ndc_space(pos_clip_space, camera);
ndc_to_screen_space(&pos_normalized_device, camera)
}
pub fn screen_to_clip_space(
pos_screen_space: &Vector2<f64>,
pos_screen_space: &XYScreen<f64>,
camera: &CameraViewPort,
) -> Vector2<f64> {
) -> XYClip<f64> {
let pos_normalized_device = screen_to_ndc_space(pos_screen_space, camera);
ndc_to_clip_space(&pos_normalized_device, camera)
}
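
The conversions above chain screen <-> NDC <-> clip space. A minimal sketch of the screen <-> NDC step under an assumed convention (NDC square in [-1, 1], screen y growing downwards, positions scaled by the device pixel ratio); width, height and dpi here are illustrative parameters, not the crate's camera API:

// Sketch of a screen <-> NDC conversion (assumed convention, standalone).
fn screen_to_ndc(pos: (f64, f64), width: f64, height: f64, dpi: f64) -> (f64, f64) {
    let (x, y) = (pos.0 * dpi, pos.1 * dpi);
    (
        2.0 * x / width - 1.0,
        -(2.0 * y / height - 1.0), // screen y grows downwards, NDC y grows upwards
    )
}

fn ndc_to_screen(ndc: (f64, f64), width: f64, height: f64, dpi: f64) -> (f64, f64) {
    (
        (ndc.0 + 1.0) * 0.5 * width / dpi,
        (1.0 - ndc.1) * 0.5 * height / dpi,
    )
}

fn main() {
    let (w, h, dpi) = (800.0, 600.0, 1.0);
    let p = (200.0, 150.0);
    // Round trip: screen -> NDC -> screen gives the original position back.
    let back = ndc_to_screen(screen_to_ndc(p, w, h, dpi), w, h, dpi);
    assert!((back.0 - p.0).abs() < 1e-9 && (back.1 - p.1).abs() < 1e-9);
}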
@@ -150,7 +150,16 @@ pub enum ProjectionType {
//Hpx(mapproj::hybrid::hpx::Hpx),
}
use crate::math::lonlat::LonLat;
impl ProjectionType {
pub fn north_pole_celestial_space(&self, camera: &CameraViewPort) -> LonLatT<f64> {
// This is always defined
let np_world = self.north_pole_world_space();
let np_celestial = camera.get_w2m() * np_world;
np_celestial.lonlat()
}
/// Screen to world space deprojection
/// Perform a screen to the world space deprojection
@@ -161,9 +170,9 @@ impl ProjectionType {
/// * ``camera`` - The camera object
pub fn screen_to_world_space(
&self,
pos_screen_space: &Vector2<f64>,
pos_screen_space: &XYScreen<f64>,
camera: &CameraViewPort,
) -> Option<Vector4<f64>> {
) -> Option<XYZWWorld<f64>> {
// Change the screen position according to the dpi
//let dpi = camera.get_dpi();
let pos_screen_space = *pos_screen_space;
@@ -171,13 +180,6 @@ impl ProjectionType {
let pos_clip_space = ndc_to_clip_space(&pos_normalized_device, camera);
self.clip_to_world_space(&pos_clip_space)
/*.map(|mut pos_world_space| {
if camera.get_longitude_reversed() {
pos_world_space.x = -pos_world_space.x;
}
pos_world_space.normalize()
})*/
}
/// Screen to model space deprojection
@@ -190,72 +192,59 @@ impl ProjectionType {
/// * ``camera`` - The camera object
pub fn screen_to_model_space(
&self,
pos_screen_space: &Vector2<f64>,
pos_screen_space: &XYScreen<f64>,
camera: &CameraViewPort,
) -> Option<Vector4<f64>> {
) -> Option<XYZWModel<f64>> {
self.screen_to_world_space(pos_screen_space, camera)
.map(|world_pos| camera.get_w2m() * world_pos)
}
pub fn normalized_device_to_model_space(
&self,
ndc_pos: &XYNDC,
ndc_pos: &XYNDC<f64>,
camera: &CameraViewPort,
) -> Option<XYZWModel> {
) -> Option<XYZWModel<f64>> {
self.normalized_device_to_world_space(ndc_pos, camera)
.map(|world_pos| camera.get_w2m() * world_pos)
}
pub fn model_to_screen_space(
&self,
pos_model_space: &Vector4<f64>,
pos_model_space: &XYZWModel<f64>,
camera: &CameraViewPort,
) -> Option<Vector2<f64>> {
) -> Option<XYScreen<f64>> {
let m2w = camera.get_m2w();
let pos_world_space = m2w * pos_model_space;
self.world_to_screen_space(&pos_world_space, camera)
}
pub fn view_to_screen_space(
pub fn icrs_celestial_to_screen_space(
&self,
pos_model_space: &Vector4<f64>,
icrs_celestial_pos: &XYZWModel<f64>,
camera: &CameraViewPort,
) -> Option<Vector2<f64>> {
self.view_to_normalized_device_space(pos_model_space, camera)
) -> Option<XYScreen<f64>> {
self.icrs_celestial_to_normalized_device_space(icrs_celestial_pos, camera)
.map(|ndc_pos| crate::ndc_to_screen_space(&ndc_pos, camera))
}
pub fn view_to_normalized_device_space(
pub fn icrs_celestial_to_normalized_device_space(
&self,
pos_view_space: &Vector4<f64>,
icrs_celestial_pos: &XYZWModel<f64>,
camera: &CameraViewPort,
) -> Option<Vector2<f64>> {
) -> Option<XYNDC<f64>> {
let view_coosys = camera.get_coo_system();
let c = CooSystem::ICRS.to::<f64>(view_coosys);
let m2w = camera.get_m2w();
let pos_world_space = m2w * c * pos_view_space;
let pos_world_space = m2w * c * icrs_celestial_pos;
self.world_to_normalized_device_space(&pos_world_space, camera)
}
/*pub fn view_to_normalized_device_space_unchecked(
&self,
pos_view_space: &Vector4<f64>,
camera: &CameraViewPort,
) -> Vector2<f64> {
let view_coosys = camera.get_coo_system();
let c = CooSystem::ICRS.to::<f64>(view_coosys);
let m2w = camera.get_m2w();
let pos_world_space = m2w * c * pos_view_space;
self.world_to_normalized_device_space_unchecked(&pos_world_space, camera)
}*/
pub fn model_to_normalized_device_space(
&self,
pos_model_space: &XYZWModel,
pos_model_space: &XYZWModel<f64>,
camera: &CameraViewPort,
) -> Option<XYNDC> {
) -> Option<XYNDC<f64>> {
let m2w = camera.get_m2w();
let pos_world_space = m2w * pos_model_space;
self.world_to_normalized_device_space(&pos_world_space, camera)
@@ -263,9 +252,9 @@ impl ProjectionType {
pub fn model_to_clip_space(
&self,
pos_model_space: &XYZWModel,
pos_model_space: &XYZWModel<f64>,
camera: &CameraViewPort,
) -> Option<XYClip> {
) -> Option<XYClip<f64>> {
let m2w = camera.get_m2w();
let pos_world_space = m2w * pos_model_space;
self.world_to_clip_space(&pos_world_space)
@@ -281,39 +270,39 @@ impl ProjectionType {
/// * `y` - Y mouse position in homogeneous screen space (between [-1, 1])
pub fn world_to_normalized_device_space(
&self,
pos_world_space: &Vector4<f64>,
pos_world_space: &XYZWWorld<f64>,
camera: &CameraViewPort,
) -> Option<Vector2<f64>> {
) -> Option<XYNDC<f64>> {
self.world_to_clip_space(pos_world_space)
.map(|pos_clip_space| clip_to_ndc_space(&pos_clip_space, camera))
}
pub fn normalized_device_to_world_space(
&self,
ndc_pos: &XYNDC,
ndc_pos: &XYNDC<f64>,
camera: &CameraViewPort,
) -> Option<XYZWWorld> {
) -> Option<XYZWWorld<f64>> {
let clip_pos = ndc_to_clip_space(ndc_pos, camera);
self.clip_to_world_space(&clip_pos)
}
pub fn world_to_screen_space(
&self,
pos_world_space: &Vector4<f64>,
pos_world_space: &XYZWWorld<f64>,
camera: &CameraViewPort,
) -> Option<Vector2<f64>> {
) -> Option<XYScreen<f64>> {
self.world_to_normalized_device_space(pos_world_space, camera)
.map(|pos_normalized_device| ndc_to_screen_space(&pos_normalized_device, camera))
}
pub(crate) fn is_allsky(&self) -> bool {
/*pub(crate) fn is_allsky(&self) -> bool {
match self {
ProjectionType::Sin(_) | ProjectionType::Tan(_) => false,
//| ProjectionType::Feye(_)
//| ProjectionType::Ncp(_) => false,
_ => true,
}
}
}*/
pub fn bounds_size_ratio(&self) -> f64 {
match self {
@@ -523,7 +512,7 @@ impl ProjectionType {
impl Projection for ProjectionType {
/// Deprojection
fn clip_to_world_space(&self, xy: &XYClip) -> Option<XYZWWorld> {
fn clip_to_world_space(&self, xy: &XYClip<f64>) -> Option<XYZWWorld<f64>> {
match self {
// Zenithal projections
/* TAN, Gnomonic projection */
@@ -579,7 +568,7 @@ impl Projection for ProjectionType {
}
// Projection
fn world_to_clip_space(&self, xyzw: &XYZWWorld) -> Option<XYClip> {
fn world_to_clip_space(&self, xyzw: &XYZWWorld<f64>) -> Option<XYClip<f64>> {
match self {
// Zenithal projections
/* TAN, Gnomonic projection */
@@ -635,6 +624,35 @@ impl Projection for ProjectionType {
}
}
use al_core::shader::UniformType;
use al_core::WebGlContext;
use web_sys::WebGlUniformLocation;
impl UniformType for ProjectionType {
fn uniform(gl: &WebGlContext, location: Option<&WebGlUniformLocation>, value: &Self) {
match value {
// Zenithal projections
/* TAN, Gnomonic projection */
ProjectionType::Tan(_) => gl.uniform1i(location, 0),
/* STG, Stereographic projection */
ProjectionType::Stg(_) => gl.uniform1i(location, 1),
/* SIN, Orthographic */
ProjectionType::Sin(_) => gl.uniform1i(location, 2),
/* ZEA, Equal-area */
ProjectionType::Zea(_) => gl.uniform1i(location, 3),
// Pseudo-cylindrical projections
/* AIT, Aitoff */
ProjectionType::Ait(_) => gl.uniform1i(location, 4),
// MOL, Mollweide */
ProjectionType::Mol(_) => gl.uniform1i(location, 5),
// Cylindrical projections
// MER, Mercator */
ProjectionType::Mer(_) => gl.uniform1i(location, 6),
}
}
}
use cgmath::Vector4;
use mapproj::CanonicalProjection;
@@ -644,19 +662,37 @@ pub trait Projection {
/// # Arguments
///
/// * ``pos_clip_space`` - The position in the clipping space (orthonormalized space)
fn clip_to_world_space(&self, xy_clip: &XYClip) -> Option<XYZWWorld>;
fn clip_to_world_space(&self, xy_clip: &XYClip<f64>) -> Option<XYZWWorld<f64>>;
/// World to the clipping space deprojection
///
/// # Arguments
///
/// * ``pos_world_space`` - The position in the world space
fn world_to_clip_space(&self, pos_world_space: &XYZWWorld) -> Option<XYClip>;
fn world_to_clip_space(&self, pos_world_space: &XYZWWorld<f64>) -> Option<XYClip<f64>>;
/// (`alpha_p`, `delta_p`) in the WCS II paper from Mark Calabretta.
#[inline]
fn north_pole_world_space(&self) -> XYZWWorld<f64> {
// This is always defined
self.clip_to_world_space(&XYClip::new(0.0, 1.0 - 1e-5))
.unwrap()
}
#[inline]
fn south_pole_world_space(&self) -> XYZWWorld<f64> {
// This is always defined
self.clip_to_world_space(&XYClip::new(0.0, -1.0 + 1e-5))
.unwrap()
}
}
use mapproj::ProjXY;
use self::coo_space::XYScreen;
use self::coo_space::XYNDC;
use super::lonlat::LonLatT;
impl<'a, P> Projection for &'a P
where
P: CanonicalProjection,
@@ -666,7 +702,7 @@ where
/// # Arguments
///
/// * ``pos_clip_space`` - The position in the clipping space (orthonormalized space)
fn clip_to_world_space(&self, xy_clip: &XYClip) -> Option<XYZWWorld> {
fn clip_to_world_space(&self, xy_clip: &XYClip<f64>) -> Option<XYZWWorld<f64>> {
let proj_bounds = self.bounds();
// Scale the xy_clip space so that it maps the proj definition domain of mapproj
let xy_mapproj = {
@@ -699,7 +735,7 @@ where
/// # Arguments
///
/// * ``pos_world_space`` - The position in the world space
fn world_to_clip_space(&self, pos_world_space: &XYZWWorld) -> Option<XYClip> {
fn world_to_clip_space(&self, pos_world_space: &XYZWWorld<f64>) -> Option<XYClip<f64>> {
// Xmpp <-> Zal
// -Ympp <-> Xal
// Zmpp <-> Yal

View File

@@ -1,6 +1,6 @@
use crate::math;
use cgmath::Quaternion;
use cgmath::{BaseFloat, InnerSpace};
use cgmath::{Euler, Quaternion};
use cgmath::{Vector3, Vector4};
#[derive(Clone, Copy, Debug)]
@@ -136,6 +136,31 @@ where
m2w * pos_model_space
}
pub fn euler(&self) -> Euler<Rad<S>> {
self.0.into()
}
/// Extract the 3 euler angles from the quaternion
/// Aladin Lite rotation basis is formed by Z, X, Y axis:
/// * Z axis is pointing towards us
/// * Y is pointing upward
/// * X is defined from the right-hand rule to form a basis
///
/// The first euler angle describes the longitude (rotation around the Y axis) <=> pitch
/// The second euler angle describes the latitude (rotation around the X' modified axis) <=> yaw
/// The third euler angle describes a rotation deviation from the north pole (rotation around the Z'' modified axis) <=> roll
///
/// Equations come from this paper (Appendix 6):
/// https://ntrs.nasa.gov/api/citations/19770024290/downloads/19770024290.pdf
pub fn euler_yxz(&self) -> (Angle<S>, Angle<S>, Angle<S>) {
let m: Matrix4<S> = self.0.into();
let a = m.x.z.atan2(m.z.z);
let b = (-m.z.y).atan2((S::one() - m.z.y * m.z.y).sqrt());
let c = m.x.y.atan2(m.y.y);
(Angle(a), Angle(b), Angle(c))
}
}
use std::ops::Mul;
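
euler_yxz above reads the three angles directly out of the rotation matrix, following the formulas of the cited NASA report. Below is a standalone sketch mirroring those formulas for a column-major 3x3 matrix; the matrix type and the sign convention of the recovered angle are illustrative assumptions, not the crate's cgmath types:

// Sketch mirroring the euler_yxz extraction above for a column-major 3x3
// rotation matrix `m` indexed as m[column][row] (illustrative, standalone).
fn euler_yxz(m: [[f64; 3]; 3]) -> (f64, f64, f64) {
    let a = m[0][2].atan2(m[2][2]);                              // angle around Y
    let b = (-m[2][1]).atan2((1.0 - m[2][1] * m[2][1]).sqrt());  // angle around X'
    let c = m[0][1].atan2(m[1][1]);                              // angle around Z''
    (a, b, c)
}

fn main() {
    // A pure rotation of 0.3 rad around the Y axis; columns are the images
    // of the basis vectors under the rotation.
    let (s, c) = (0.3_f64.sin(), 0.3_f64.cos());
    let rot_y = [[c, 0.0, -s], [0.0, 1.0, 0.0], [s, 0.0, c]];
    let (lon, lat, roll) = euler_yxz(rot_y);
    // Up to sign (which depends on the rotation-direction convention),
    // the Y angle is recovered and the two other angles are zero.
    assert!((lon.abs() - 0.3).abs() < 1e-9);
    assert!(lat.abs() < 1e-9 && roll.abs() < 1e-9);
}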

View File

@@ -38,11 +38,11 @@ pub enum Intersection {
// The segment does not intersect the region
Empty,
// The segment does intersect the region
Intersect { vertices: Box<[XYZWModel]> },
Intersect { vertices: Box<[XYZWModel<f64>]> },
}
impl Region {
pub fn from_vertices(vertices: &[XYZWModel], control_point: &XYZWModel) -> Self {
pub fn from_vertices(vertices: &[XYZWModel<f64>], control_point: &XYZWModel<f64>) -> Self {
let (vertices, (lon, lat)): (Vec<_>, (Vec<_>, Vec<_>)) = vertices
.iter()
.map(|v| {

View File

@@ -26,6 +26,7 @@ impl From<Error> for JsValue {
}
}
// Num of shapes
const _NUM_SHAPES: usize = 5;
pub struct Manager {
gl: WebGlContext,
@@ -240,10 +241,7 @@ impl Manager {
}
} else {
let depth = camera.get_texture_depth().min(7);
let cells: Vec<_> = camera
.get_hpx_cells(depth, CooSystem::ICRS)
.cloned()
.collect();
let cells = camera.get_hpx_cells(depth, CooSystem::ICRS);
for catalog in self.catalogs.values_mut() {
catalog.update(&cells);
@@ -457,7 +455,11 @@ impl Catalog {
#[cfg(feature = "webgl2")]
self.vertex_array_object_catalog
.bind_for_update()
.update_instanced_array("center", VecData(&sources));
.update_instanced_array(
"center",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&sources),
);
}
fn draw(

View File

@@ -1,485 +0,0 @@
use al_api::moc::MOC as Cfg;
use crate::camera::CameraViewPort;
use crate::healpix::cell::CellVertices;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::projection::ProjectionType;
use crate::renderable::coverage::Angle;
use crate::renderable::line::PathVertices;
use crate::renderable::line::RasterizedLineRenderer;
use al_api::color::ColorRGBA;
use al_api::coo_system::CooSystem;
use moclib::elem::cell::Cell;
use moclib::moc::range::CellAndEdges;
use moclib::moc::RangeMOCIterator;
use crate::HEALPixCell;
use cgmath::Vector2;
use healpix::compass_point::OrdinalMap;
pub struct MOC {
pub sky_fraction: f32,
pub max_order: u8,
inner: [Option<MOCIntern>; 3],
pub moc: HEALPixCoverage,
}
impl MOC {
pub(super) fn new(moc: HEALPixCoverage, cfg: &Cfg) -> Self {
let sky_fraction = moc.sky_fraction() as f32;
let max_order = moc.depth_max();
let inner = [
if cfg.perimeter {
// draw only perimeter
Some(MOCIntern::new(RenderModeType::Perimeter {
thickness: cfg.line_width,
color: cfg.color,
}))
} else {
None
},
if cfg.filled {
// change color
let fill_color = cfg.fill_color;
// draw the edges
Some(MOCIntern::new(RenderModeType::Filled { color: fill_color }))
} else {
None
},
if cfg.edges {
Some(MOCIntern::new(RenderModeType::Edge {
thickness: cfg.line_width,
color: cfg.color,
}))
} else {
None
},
];
Self {
inner,
max_order,
sky_fraction,
moc,
}
}
/*pub(super) fn cell_indices_in_view(&mut self, camera: &mut CameraViewPort) {
for render in &mut self.inner {
if let Some(render) = render.as_mut() {
render.cell_indices_in_view(camera);
}
}
}*/
/*pub(super) fn num_cells_in_view(&self, camera: &mut CameraViewPort) -> usize {
self.inner
.iter()
.filter_map(|moc| moc.as_ref())
.map(|moc| moc.num_cells_in_view(camera))
.sum()
}*/
/*pub(super) fn num_vertices_in_view(&self, camera: &mut CameraViewPort) -> usize {
let mut num_vertices = 0;
for render in &self.0 {
if let Some(render) = render.as_ref() {
num_vertices += render.num_vertices_in_view(camera);
}
}
num_vertices
}*/
pub fn sky_fraction(&self) -> f32 {
self.sky_fraction
}
pub fn max_order(&self) -> u8 {
self.max_order
}
pub(super) fn draw(
&self,
camera: &mut CameraViewPort,
proj: &ProjectionType,
rasterizer: &mut RasterizedLineRenderer,
) {
let view_depth = camera.get_texture_depth();
let view_moc = HEALPixCoverage::from_fixed_hpx_cells(
view_depth,
camera
.get_hpx_cells(view_depth, CooSystem::ICRS)
.map(|c| c.idx()),
None,
);
for render in &self.inner {
if let Some(render) = render.as_ref() {
render.draw(&view_moc, &self.moc, camera, proj, rasterizer)
}
}
}
}
struct MOCIntern {
// HEALPix index vector
// Used for fast HEALPix cell retrieval
//hpx_idx_vec: IdxVec,
// Node indices in view
//indices: Vec<Range<usize>>,
mode: RenderModeType,
}
#[derive(Clone)]
pub enum RenderModeType {
Perimeter { thickness: f32, color: ColorRGBA },
Edge { thickness: f32, color: ColorRGBA },
Filled { color: ColorRGBA },
}
use healpix::compass_point::Ordinal;
impl MOCIntern {
fn new(mode: RenderModeType) -> Self {
/*let hpx_idx_vec =
IdxVec::from_hpx_cells((&moc.0).into_range_moc_iter().cells().flat_map(|cell| {
let cell = HEALPixCell(cell.depth, cell.idx);
let dd = if 3 >= cell.depth() {
3 - cell.depth()
} else {
0
};
cell.get_tile_cells(dd)
}));
*/
Self {
//nodes,
//moc,
//hpx_idx_vec,
//indices: vec![],
mode,
}
}
/*fn cell_indices_in_view(&mut self, moc: &HEALPixCoverage, camera: &mut CameraViewPort) {
// Cache it for several reuse during the same frame
let view_depth = camera.get_texture_depth();
let cells_iter = camera.get_hpx_cells(view_depth, CooSystem::ICRS);
if moc.is_empty() {
self.indices = vec![0..0];
return;
}
/*let indices: Vec<_> = if view_depth > 7 {
// Binary search version, we are using this alternative for retrieving
// MOC's cells to render for deep fields of view
let first_cell_rng = &self.nodes[0].cell.z_29_rng();
let last_cell_rng = &self.nodes[self.nodes.len() - 1].cell.z_29_rng();
cells_iter
.filter_map(|cell| {
let cell_rng = cell.z_29_rng();
// Quick rejection test
if cell_rng.end <= first_cell_rng.start || cell_rng.start >= last_cell_rng.end {
None
} else {
let contains_val = |hash_z29: u64| -> Result<usize, usize> {
self.nodes.binary_search_by(|node| {
let node_cell_rng = node.cell.z_29_rng();
if hash_z29 < node_cell_rng.start {
// the node cell range contains hash_z29
Ordering::Greater
} else if hash_z29 >= node_cell_rng.end {
Ordering::Less
} else {
Ordering::Equal
}
})
};
let start_idx = contains_val(cell_rng.start);
let end_idx = contains_val(cell_rng.end);
let cell_indices = match (start_idx, end_idx) {
(Ok(l), Ok(r)) => {
if l == r {
l..(r + 1)
} else {
l..r
}
}
(Err(l), Ok(r)) => l..r,
(Ok(l), Err(r)) => l..r,
(Err(l), Err(r)) => l..r,
};
Some(cell_indices)
}
})
.collect()
} else {
// Index Vector 7 order version
cells_iter
.map(|cell| self.hpx_idx_vec.get_item_indices_inside_hpx_cell(&cell))
.collect()
};*/
let indices = cells_iter
.map(|cell| self.hpx_idx_vec.get_item_indices_inside_hpx_cell(&cell))
.collect();
let indices = crate::utils::merge_overlapping_intervals(indices);
self.indices = indices;
}*/
/*fn num_vertices_in_view(&self, camera: &CameraViewPort) -> usize {
self.cells_in_view(camera)
.filter_map(|n| n.vertices.as_ref())
.map(|n_vertices| {
n_vertices
.vertices
.iter()
.map(|edge| edge.len())
.sum::<usize>()
})
.sum()
}*/
/*fn num_cells_in_view(&self, _camera: &CameraViewPort) -> usize {
self.indices
.iter()
.map(|range| range.end - range.start)
.sum()
}*/
/*fn cells_in_view<'a>(&'a self, _camera: &CameraViewPort) -> impl Iterator<Item = Node> {
let nodes = &self.nodes;
self.indices
.iter()
.map(move |indices| nodes[indices.start..indices.end].iter())
.flatten()
}*/
fn vertices_in_view<'a>(
&self,
view_moc: &'a HEALPixCoverage,
moc: &'a HEALPixCoverage,
_camera: &mut CameraViewPort,
) -> impl Iterator<Item = [(f64, f64); 4]> + 'a {
//self.cells_in_view(camera)
// .filter_map(move |node| node.vertices.as_ref())
moc.overlapped_by_iter(&view_moc)
.cells()
.flat_map(|cell| {
let Cell { idx, depth } = cell;
let cell = HEALPixCell(depth, idx);
let dd = if 3 >= cell.depth() {
3 - cell.depth()
} else {
0
};
cell.get_tile_cells(dd)
})
.map(|hpx_cell| hpx_cell.vertices())
//.map(|Cell { idx, depth }| HEALPixCell(depth, idx).vertices())
}
fn draw(
&self,
view_moc: &HEALPixCoverage,
moc: &HEALPixCoverage,
camera: &mut CameraViewPort,
proj: &ProjectionType,
rasterizer: &mut RasterizedLineRenderer,
) {
let _ = crate::Time::measure_perf("rasterize moc", move || {
match self.mode {
RenderModeType::Perimeter { thickness, color } => {
let moc_in_view =
HEALPixCoverage(moc.overlapped_by_iter(view_moc).into_range_moc());
rasterizer.add_stroke_paths(
self.compute_perimeter_paths_iter(&moc_in_view, view_moc, camera, proj),
thickness,
&color,
&super::line::Style::None,
);
}
RenderModeType::Edge { thickness, color } => {
rasterizer.add_stroke_paths(
self.compute_edge_paths_iter(moc, view_moc, camera, proj),
thickness,
&color,
&super::line::Style::None,
);
}
RenderModeType::Filled { color } => {
rasterizer.add_fill_paths(
self.compute_edge_paths_iter(moc, view_moc, camera, proj),
&color,
);
}
}
Ok(())
});
}
fn compute_edge_paths_iter<'a>(
&self,
moc: &'a HEALPixCoverage,
view_moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
proj: &'a ProjectionType,
) -> impl Iterator<Item = PathVertices<[[f32; 2]; 5]>> + 'a {
let camera_coosys = camera.get_coo_system();
// Determine if the view may lead to crossing edges/triangles
// This is dependent on the projection used
let crossing_edges_testing = if proj.is_allsky() {
let sky_percent_covered = camera.get_cov(CooSystem::ICRS).sky_fraction();
//al_core::info!("sky covered: ", sky_percent_covered);
sky_percent_covered > 0.80
} else {
// The projection is not allsky.
false
};
self.vertices_in_view(view_moc, moc, camera)
.filter_map(move |cell_vertices| {
let mut ndc: [[f32; 2]; 5] =
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]];
let vertices = cell_vertices;
for i in 0..4 {
let line_vertices = vertices[i];
//for k in 0..line_vertices.len() {
let (lon, lat) = line_vertices;
let xyzw = crate::math::lonlat::radec_to_xyzw(Angle(lon), Angle(lat));
let xyzw =
crate::coosys::apply_coo_system(CooSystem::ICRS, camera_coosys, &xyzw);
if let Some(p) = proj.model_to_normalized_device_space(&xyzw, camera) {
if i > 0 && crossing_edges_testing {
let mag2 = crate::math::vector::dist2(
crate::math::projection::ndc_to_clip_space(&p, camera).as_ref(),
crate::math::projection::ndc_to_clip_space(
&Vector2::new(ndc[i - 1][0] as f64, ndc[i - 1][1] as f64),
camera,
)
.as_ref(),
);
//al_core::info!("mag", i, mag2);
if mag2 > 0.1 {
return None;
}
}
ndc[i] = [p.x as f32, p.y as f32];
} else {
return None;
}
//ndc[i] = [xyzw.x as f32, xyzw.y as f32];
//ndc[i] = [lon as f32, lat as f32];
}
ndc[4] = ndc[0].clone();
Some(PathVertices { vertices: ndc })
})
}
fn compute_perimeter_paths_iter<'a>(
&self,
moc: &'a HEALPixCoverage,
_view_moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
proj: &'a ProjectionType,
) -> impl Iterator<Item = PathVertices<Vec<[f32; 2]>>> + 'a {
let camera_coosys = camera.get_coo_system();
// Determine if the view may lead to crossing edges/triangles
// This is dependent on the projection used
let crossing_edges_testing = if proj.is_allsky() {
let sky_percent_covered = camera.get_cov(CooSystem::ICRS).sky_fraction();
//al_core::info!("sky covered: ", sky_percent_covered);
sky_percent_covered > 0.80
} else {
// The projection is not allsky.
false
};
moc.border_elementary_edges()
.filter_map(|CellAndEdges { uniq, edges }| {
let c = Cell::from_uniq_hpx(uniq);
let cell = HEALPixCell(c.depth, c.idx);
let mut map = OrdinalMap::new();
if edges.get(moclib::moc::range::Ordinal::SE) {
map.put(Ordinal::SE, 1);
}
if edges.get(moclib::moc::range::Ordinal::SW) {
map.put(Ordinal::SW, 1);
}
if edges.get(moclib::moc::range::Ordinal::NE) {
map.put(Ordinal::NE, 1);
}
if edges.get(moclib::moc::range::Ordinal::NW) {
map.put(Ordinal::NW, 1);
}
cell.path_along_sides(&map)
})
.filter_map(move |CellVertices { vertices }| {
let mut ndc = Vec::<[f32; 2]>::with_capacity(vertices.len() * 2);
for i in 0..vertices.len() {
let line_vertices = &vertices[i];
for k in 0..line_vertices.len() {
let (lon, lat) = line_vertices[k];
let xyzw = crate::math::lonlat::radec_to_xyzw(Angle(lon), Angle(lat));
let xyzw =
crate::coosys::apply_coo_system(CooSystem::ICRS, camera_coosys, &xyzw);
if let Some(p) = proj.model_to_normalized_device_space(&xyzw, camera) {
if ndc.len() > 0 && crossing_edges_testing {
let mag2 = crate::math::vector::dist2(
crate::math::projection::ndc_to_clip_space(&p, camera).as_ref(),
crate::math::projection::ndc_to_clip_space(
&Vector2::new(
ndc[ndc.len() - 1][0] as f64,
ndc[ndc.len() - 1][1] as f64,
),
camera,
)
.as_ref(),
);
//al_core::info!("mag", i, mag2);
if mag2 > 0.1 {
return None;
}
}
ndc.push([p.x as f32, p.y as f32]);
} else {
return None;
}
}
}
Some(PathVertices { vertices: ndc })
})
}
}
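
The (removed) MOC path-building code above rejects any projected segment whose endpoints are far apart in clip space (mag2 > 0.1), which filters out edges that would otherwise be drawn straight across an all-sky projection. A standalone sketch of that rejection test; the types and the threshold are illustrative:

// Sketch of the "crossing edge" rejection: drop a projected segment whose
// endpoints are far apart in clip space, as it most likely wraps around the
// projection discontinuity. Types and the 0.1 threshold are illustrative.
type Clip = (f64, f64);

fn dist2(a: Clip, b: Clip) -> f64 {
    let (dx, dy) = (a.0 - b.0, a.1 - b.1);
    dx * dx + dy * dy
}

/// Keep only the consecutive pairs of a projected path that stay close together.
fn filter_wrapping_segments(path: &[Clip], max_dist2: f64) -> Vec<(Clip, Clip)> {
    path.windows(2)
        .filter(|w| dist2(w[0], w[1]) <= max_dist2)
        .map(|w| (w[0], w[1]))
        .collect()
}

fn main() {
    // The middle segment jumps from one side of the clip space to the other.
    let path = [(-0.95, 0.0), (-0.90, 0.1), (0.92, 0.1), (0.95, 0.0)];
    let kept = filter_wrapping_segments(&path, 0.1);
    assert_eq!(kept.len(), 2); // the wrapping segment was dropped
}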

View File

@@ -1,323 +0,0 @@
use crate::{
healpix::{cell::HEALPixCell, coverage::HEALPixCoverage},
math::angle::Angle,
CameraViewPort, ShaderManager,
};
mod graph;
pub mod mode;
pub mod hierarchy;
pub mod moc;
use crate::renderable::line::RasterizedLineRenderer;
use wasm_bindgen::JsValue;
use hierarchy::MOCHierarchy;
use al_api::coo_system::CooSystem;
use al_api::moc::MOC as Cfg;
pub struct MOCRenderer {
mocs: Vec<MOCHierarchy>,
cfgs: Vec<Cfg>,
}
/*
use cgmath::Vector2;
use super::utils::triangle::Triangle;
fn is_crossing_projection(
cell: &HEALPixCell,
camera: &CameraViewPort,
projection: &ProjectionType,
) -> bool {
let vertices = cell
.vertices()
.iter()
.filter_map(|(lon, lat)| {
let xyzw = crate::math::lonlat::radec_to_xyzw(Angle(*lon), Angle(*lat));
let xyzw =
crate::coosys::apply_coo_system(CooSystem::ICRS, camera.get_coo_system(), &xyzw);
projection
.model_to_normalized_device_space(&xyzw, camera)
.map(|v| [v.x as f32, v.y as f32])
})
.collect::<Vec<_>>();
let cell_inside = vertices.len() == 4;
if cell_inside {
let c0 = &vertices[0];
let c1 = &vertices[1];
let c2 = &vertices[2];
let c3 = &vertices[3];
let t0 = Triangle::new(c0, c1, c2);
let t2 = Triangle::new(c2, c3, c0);
t0.is_invalid(camera) || t2.is_invalid(camera)
} else {
true
}
}
use al_api::cell::HEALPixCellProjeted;
fn rasterize_hpx_cell(
cell: &HEALPixCell,
n_segment_by_side: usize,
camera: &CameraViewPort,
idx_off: &mut u32,
proj: &ProjectionType,
) -> Option<(Vec<f32>, Vec<u32>)> {
let n_vertices_per_segment = n_segment_by_side + 1;
let vertices = cell
.grid(n_segment_by_side as u32)
.iter()
.filter_map(|(lon, lat)| {
let xyzw = crate::math::lonlat::radec_to_xyzw(Angle(*lon), Angle(*lat));
let xyzw =
crate::coosys::apply_coo_system(CooSystem::ICRS, camera.get_coo_system(), &xyzw);
proj.model_to_normalized_device_space(&xyzw, camera)
.map(|v| [v.x as f32, v.y as f32])
})
.flatten()
.collect::<Vec<_>>();
let cell_inside = vertices.len() == 2 * (n_segment_by_side + 1) * (n_segment_by_side + 1);
if cell_inside {
// Generate the iterator: idx_off + 1, idx_off + 1, .., idx_off + 4*n_segment - 1, idx_off + 4*n_segment - 1
let mut indices = Vec::with_capacity(n_segment_by_side * n_segment_by_side * 6);
let num_vertices = (n_segment_by_side + 1) * (n_segment_by_side + 1);
let longitude_reversed = camera.get_longitude_reversed();
let invalid_tri = |tri_ccw: bool, reversed_longitude: bool| -> bool {
(!reversed_longitude && !tri_ccw) || (reversed_longitude && tri_ccw)
};
for i in 0..n_segment_by_side {
for j in 0..n_segment_by_side {
let idx_0 = j + i * n_vertices_per_segment;
let idx_1 = j + 1 + i * n_vertices_per_segment;
let idx_2 = j + (i + 1) * n_vertices_per_segment;
let idx_3 = j + 1 + (i + 1) * n_vertices_per_segment;
let c0 = crate::math::projection::ndc_to_screen_space(
&Vector2::new(vertices[2 * idx_0] as f64, vertices[2 * idx_0 + 1] as f64),
camera,
);
let c1 = crate::math::projection::ndc_to_screen_space(
&Vector2::new(vertices[2 * idx_1] as f64, vertices[2 * idx_1 + 1] as f64),
camera,
);
let c2 = crate::math::projection::ndc_to_screen_space(
&Vector2::new(vertices[2 * idx_2] as f64, vertices[2 * idx_2 + 1] as f64),
camera,
);
let c3 = crate::math::projection::ndc_to_screen_space(
&Vector2::new(vertices[2 * idx_3] as f64, vertices[2 * idx_3 + 1] as f64),
camera,
);
let first_tri_ccw = !crate::math::vector::ccw_tri(&c0, &c1, &c2);
let second_tri_ccw = !crate::math::vector::ccw_tri(&c1, &c3, &c2);
if invalid_tri(first_tri_ccw, longitude_reversed)
|| invalid_tri(second_tri_ccw, longitude_reversed)
{
return None;
}
let vx = [c0.x, c1.x, c2.x, c3.x];
let vy = [c0.y, c1.y, c2.y, c3.y];
let projeted_cell = HEALPixCellProjeted {
ipix: cell.idx(),
vx,
vy,
};
crate::camera::view_hpx_cells::project(projeted_cell, camera, proj)?;
indices.push(*idx_off + idx_0 as u32);
indices.push(*idx_off + idx_1 as u32);
indices.push(*idx_off + idx_2 as u32);
indices.push(*idx_off + idx_1 as u32);
indices.push(*idx_off + idx_3 as u32);
indices.push(*idx_off + idx_2 as u32);
}
}
*idx_off += num_vertices as u32;
Some((vertices, indices))
} else {
None
}
}*/
use crate::ProjectionType;
use super::line;
impl MOCRenderer {
pub fn new() -> Result<Self, JsValue> {
// layout (location = 0) in vec2 ndc_pos;
//let vertices = vec![0.0; MAX_NUM_FLOATS_TO_DRAW];
//let indices = vec![0_u16; MAX_NUM_INDICES_TO_DRAW];
//let vertices = vec![];
/*let position = vec![];
let indices = vec![];
#[cfg(feature = "webgl2")]
vao.bind_for_update()
.add_array_buffer_single(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u32>(&indices),
)
.unbind();
#[cfg(feature = "webgl1")]
vao.bind_for_update()
.add_array_buffer(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u32>(&indices),
)
.unbind();
*/
let mocs = Vec::new();
let cfgs = Vec::new();
Ok(Self { mocs, cfgs })
}
pub fn push_back(
&mut self,
moc: HEALPixCoverage,
cfg: Cfg,
camera: &mut CameraViewPort,
proj: &ProjectionType,
) {
self.mocs.push(MOCHierarchy::from_full_res_moc(moc, &cfg));
self.cfgs.push(cfg);
camera.register_view_frame(CooSystem::ICRS, proj);
//self.layers.push(key);
}
pub fn get_hpx_coverage(&self, cfg: &Cfg) -> Option<&HEALPixCoverage> {
let name = cfg.get_uuid();
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == name) {
Some(&self.mocs[idx].get_full_moc())
} else {
None
}
}
pub fn remove(
&mut self,
cfg: &Cfg,
camera: &mut CameraViewPort,
proj: &ProjectionType,
) -> Option<Cfg> {
let name = cfg.get_uuid();
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == name) {
self.mocs.remove(idx);
camera.unregister_view_frame(CooSystem::ICRS, proj);
Some(self.cfgs.remove(idx))
} else {
None
}
}
pub fn set_cfg(
&mut self,
cfg: Cfg,
camera: &mut CameraViewPort,
projection: &ProjectionType,
line_renderer: &mut RasterizedLineRenderer,
) -> Option<Cfg> {
let name = cfg.get_uuid();
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == name) {
let old_cfg = self.cfgs[idx].clone();
self.cfgs[idx] = cfg;
self.update(camera, projection, line_renderer);
Some(old_cfg)
} else {
// the cfg has not been found
None
}
}
/*pub fn get(&self, cfg: &Cfg) -> Option<&HEALPixCoverage> {
let key = cfg.get_uuid();
self.mocs.get(key).map(|coverage| coverage.get_full_moc())
}*/
fn update(
&mut self,
camera: &mut CameraViewPort,
proj: &ProjectionType,
line_renderer: &mut RasterizedLineRenderer,
) {
for (hmoc, cfg) in self.mocs.iter_mut().zip(self.cfgs.iter()) {
if cfg.show {
let moc = hmoc.select_moc_from_view(camera);
moc.draw(camera, proj, line_renderer);
}
}
/*self.vao.bind_for_update()
.update_array(
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&self.position),
)
.update_element_array(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u32>(&self.indices),
);*/
}
pub fn is_empty(&self) -> bool {
self.cfgs.is_empty()
}
pub fn draw(
&mut self,
_shaders: &mut ShaderManager,
camera: &mut CameraViewPort,
projection: &ProjectionType,
line_renderer: &mut RasterizedLineRenderer,
) {
if self.is_empty() {
return;
}
self.update(camera, projection, line_renderer);
}
}
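
The commented-out rasterize_hpx_cell above triangulates an (n + 1) x (n + 1) grid of projected cell vertices into two triangles per quad. A standalone sketch of the index pattern it pushes (illustrative naming, no projection or validity checks):

// Sketch of the quad -> two-triangle index pattern for a cell sampled on an
// (n + 1) x (n + 1) grid of vertices.
fn grid_triangle_indices(n_segments_by_side: usize) -> Vec<u32> {
    let n_vertices_per_row = n_segments_by_side + 1;
    let mut indices = Vec::with_capacity(n_segments_by_side * n_segments_by_side * 6);
    for i in 0..n_segments_by_side {
        for j in 0..n_segments_by_side {
            let idx_0 = (j + i * n_vertices_per_row) as u32;
            let idx_1 = (j + 1 + i * n_vertices_per_row) as u32;
            let idx_2 = (j + (i + 1) * n_vertices_per_row) as u32;
            let idx_3 = (j + 1 + (i + 1) * n_vertices_per_row) as u32;
            // First triangle of the quad
            indices.extend_from_slice(&[idx_0, idx_1, idx_2]);
            // Second triangle of the quad
            indices.extend_from_slice(&[idx_1, idx_3, idx_2]);
        }
    }
    indices
}

fn main() {
    // One quad (n = 1) gives 2 triangles, i.e. 6 indices over 4 vertices.
    assert_eq!(grid_triangle_indices(1), vec![0, 1, 2, 1, 3, 2]);
}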

View File

@@ -6,9 +6,9 @@ use crate::ProjectionType;
use cgmath::InnerSpace;
use cgmath::Vector3;
use crate::grid::XYScreen;
use crate::math::angle::SerializeFmt;
use crate::math::lonlat::LonLat;
use crate::math::projection::coo_space::XYScreen;
use crate::math::TWICE_PI;
use crate::math::angle::ToAngle;
@@ -26,7 +26,7 @@ pub enum LabelOptions {
#[derive(Debug)]
pub struct Label {
// The position
pub position: XYScreen,
pub position: XYScreen<f64>,
// the string content
pub content: String,
// in radians

View File

@@ -7,7 +7,7 @@ use core::ops::Range;
use crate::math::MINUS_HALF_PI;
use crate::ProjectionType;
use crate::grid::angle::SerializeFmt;
use super::angle::SerializeFmt;
use crate::math::HALF_PI;
pub fn get_intersecting_meridian(

View File

@@ -2,22 +2,22 @@ pub mod label;
pub mod meridian;
pub mod parallel;
use crate::grid::parallel::Parallel;
use crate::math::projection::coo_space::XYScreen;
use crate::shader::ShaderManager;
use crate::Abort;
use al_core::VecData;
use parallel::Parallel;
use crate::camera::CameraViewPort;
use crate::math::angle;
use crate::math::HALF_PI;
use crate::renderable::line;
use crate::renderable::line::PathVertices;
use crate::renderable::Renderer;
use crate::ProjectionType;
use al_api::color::ColorRGBA;
use al_api::grid::GridCfg;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
use web_sys::WebGl2RenderingContext;
use crate::grid::label::Label;
use label::Label;
pub struct ProjetedGrid {
// Properties
pub color: ColorRGBA,
@@ -30,23 +30,22 @@ pub struct ProjetedGrid {
text_renderer: TextRenderManager,
fmt: angle::SerializeFmt,
line_style: line::Style,
//line_style: line::Style,
meridians: Vec<Meridian>,
parallels: Vec<Parallel>,
vao: VertexArrayObject,
gl: WebGlContext,
}
use crate::shader::ShaderManager;
use wasm_bindgen::JsValue;
use crate::renderable::line::RasterizedLineRenderer;
use crate::renderable::text::TextRenderManager;
use crate::renderable::Renderer;
use wasm_bindgen::JsValue;
use web_sys::HtmlElement;
use self::meridian::Meridian;
impl ProjetedGrid {
pub fn new(aladin_div: &HtmlElement) -> Result<ProjetedGrid, JsValue> {
pub fn new(gl: WebGlContext, aladin_div: &HtmlElement) -> Result<ProjetedGrid, JsValue> {
let text_renderer = TextRenderManager::new(aladin_div)?;
let color = ColorRGBA {
@@ -58,15 +57,44 @@ impl ProjetedGrid {
let show_labels = true;
let enabled = false;
let label_scale = 1.0;
let line_style = line::Style::None;
//let line_style = line::Style::None;
let fmt = angle::SerializeFmt::DMS;
let thickness = 2.0;
let meridians = Vec::new();
let parallels = Vec::new();
let mut vao = VertexArrayObject::new(&gl);
vao.bind_for_update()
// Store the cartesian position of the center of the source in an instanced VBO
.add_instanced_array_buffer(
"ndc_pos",
4 * std::mem::size_of::<f32>(),
&[2, 2],
&[0, 2 * std::mem::size_of::<f32>()],
WebGl2RenderingContext::DYNAMIC_DRAW,
&[] as &[f32],
)
.add_array_buffer(
"vertices",
2 * std::mem::size_of::<f32>(),
&[2],
&[0],
WebGl2RenderingContext::STATIC_DRAW,
&[
0_f32, -0.5_f32, 1_f32, -0.5_f32, 1_f32, 0.5_f32, 0_f32, 0.5_f32,
] as &[f32],
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::STATIC_DRAW,
&[0_u16, 1_u16, 2_u16, 0_u16, 2_u16, 3_u16] as &[u16],
)
// Unbind the buffer
.unbind();
let grid = ProjetedGrid {
color,
line_style,
//line_style,
show_labels,
enabled,
label_scale,
@@ -76,6 +104,9 @@ impl ProjetedGrid {
meridians,
parallels,
fmt,
vao,
gl,
};
// Initialize the vertices & labels
//grid.force_update(camera, projection, line_renderer);
@@ -143,85 +174,6 @@ impl ProjetedGrid {
Ok(())
}
// Update the grid whenever the camera moved
fn update(
&mut self,
camera: &CameraViewPort,
projection: &ProjectionType,
rasterizer: &mut RasterizedLineRenderer,
) -> Result<(), JsValue> {
let fov = camera.get_field_of_view();
let bbox = fov.get_bounding_box();
let max_dim_px = camera.get_width().max(camera.get_height()) as f64;
let step_line_px = max_dim_px * 0.2;
// update meridians
self.meridians = {
// Select the good step with a binary search
let step_lon_precised =
(bbox.get_lon_size() as f64) * step_line_px / (camera.get_width() as f64);
let step_lon = select_fixed_step(step_lon_precised);
// Add meridians
let start_lon = bbox.lon_min() - (bbox.lon_min() % step_lon);
let mut stop_lon = bbox.lon_max();
if bbox.all_lon() {
stop_lon -= 1e-3;
}
let mut meridians = vec![];
let mut lon = start_lon;
while lon < stop_lon {
if let Some(p) =
meridian::get_intersecting_meridian(lon, camera, projection, &self.fmt)
{
meridians.push(p);
}
lon += step_lon;
}
meridians
};
self.parallels = {
let step_lat_precised =
(bbox.get_lat_size() as f64) * step_line_px / (camera.get_height() as f64);
let step_lat = select_fixed_step(step_lat_precised);
let mut start_lat = bbox.lat_min() - (bbox.lat_min() % step_lat);
if start_lat == -HALF_PI {
start_lat += step_lat;
}
let stop_lat = bbox.lat_max();
let mut lat = start_lat;
let mut parallels = vec![];
while lat < stop_lat {
if let Some(p) = parallel::get_intersecting_parallel(lat, camera, projection) {
parallels.push(p);
}
lat += step_lat;
}
parallels
};
// update the line buffers
let paths = self
.meridians
.iter()
.map(|meridian| meridian.get_lines_vertices())
.chain(
self.parallels
.iter()
.map(|parallel| parallel.get_lines_vertices()),
)
.flatten()
.map(|vertices| PathVertices { vertices });
rasterizer.add_stroke_paths(paths, self.thickness, &self.color, &self.line_style);
Ok(())
}
pub fn draw_labels(&mut self) -> Result<(), JsValue> {
if self.enabled && self.show_labels {
let labels = self
@@ -251,12 +203,107 @@ impl ProjetedGrid {
pub fn draw(
&mut self,
camera: &CameraViewPort,
_shaders: &mut ShaderManager,
projection: &ProjectionType,
rasterizer: &mut RasterizedLineRenderer,
shaders: &mut ShaderManager,
) -> Result<(), JsValue> {
if self.enabled {
self.update(camera, projection, rasterizer)?;
let fov = camera.get_field_of_view();
let bbox = fov.get_bounding_box();
let max_dim_px = camera.get_width().max(camera.get_height()) as f64;
let step_line_px = max_dim_px * 0.2;
// update meridians
self.meridians = {
// Select the good step with a binary search
let step_lon_precised =
(bbox.get_lon_size() as f64) * step_line_px / (camera.get_width() as f64);
let step_lon = select_fixed_step(step_lon_precised);
// Add meridians
let start_lon = bbox.lon_min() - (bbox.lon_min() % step_lon);
let mut stop_lon = bbox.lon_max();
if bbox.all_lon() {
stop_lon -= 1e-3;
}
let mut meridians = vec![];
let mut lon = start_lon;
while lon < stop_lon {
if let Some(p) =
meridian::get_intersecting_meridian(lon, camera, projection, &self.fmt)
{
meridians.push(p);
}
lon += step_lon;
}
meridians
};
self.parallels = {
let step_lat_precised =
(bbox.get_lat_size() as f64) * step_line_px / (camera.get_height() as f64);
let step_lat = select_fixed_step(step_lat_precised);
let mut start_lat = bbox.lat_min() - (bbox.lat_min() % step_lat);
if start_lat == -HALF_PI {
start_lat += step_lat;
}
let stop_lat = bbox.lat_max();
let mut lat = start_lat;
let mut parallels = vec![];
while lat < stop_lat {
if let Some(p) = parallel::get_intersecting_parallel(lat, camera, projection) {
parallels.push(p);
}
lat += step_lat;
}
parallels
};
// update the line buffers
let paths = self
.meridians
.iter()
.map(|meridian| meridian.get_lines_vertices())
.chain(
self.parallels
.iter()
.map(|parallel| parallel.get_lines_vertices()),
)
.flatten();
let mut buf: Vec<f32> = vec![];
for vertices in paths {
let vertices = vertices.as_ref();
let path_vertices_buf_iter = vertices
.iter()
.zip(vertices.iter().skip(1))
.map(|(a, b)| [a[0], a[1], b[0], b[1]])
.flatten();
buf.extend(path_vertices_buf_iter);
}
self.vao.bind_for_update().update_instanced_array(
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&buf),
);
let num_instances = buf.len() / 4;
crate::shader::get_shader(&self.gl, shaders, "line_inst_ndc.vert", "line_base.frag")?
.bind(&self.gl)
.attach_uniform("u_color", &self.color)
.attach_uniform("u_width", &self.thickness)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_instanced_with_i32(
WebGl2RenderingContext::TRIANGLES,
0,
num_instances as i32,
);
}
Ok(())
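
The grid drawing path above flattens every polyline into one [x0, y0, x1, y1] record per segment and uploads the result as an instanced attribute, so each segment is drawn as one instance of a small quad. A standalone sketch of that flattening step (the VAO/shader plumbing is omitted):

// Sketch of turning NDC polylines into per-segment instance data: every
// consecutive pair of path vertices becomes one [x0, y0, x1, y1] instance.
fn build_segment_instances(paths: &[Vec<[f32; 2]>]) -> Vec<f32> {
    let mut buf = Vec::new();
    for path in paths {
        for (a, b) in path.iter().zip(path.iter().skip(1)) {
            buf.extend_from_slice(&[a[0], a[1], b[0], b[1]]);
        }
    }
    buf
}

fn main() {
    let paths = vec![vec![[0.0, 0.0], [0.5, 0.0], [0.5, 0.5]]];
    let buf = build_segment_instances(&paths);
    let num_instances = buf.len() / 4; // 4 floats per segment instance
    assert_eq!(num_instances, 2);
    assert_eq!(buf, vec![0.0_f32, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.5]);
}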

View File

@@ -27,8 +27,8 @@ use crate::{shader::ShaderManager, survey::config::HiPSConfig};
use crate::downloader::request::allsky::Allsky;
use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
use crate::math::lonlat::LonLat;
use crate::renderable::utils::index_patch::DefaultPatchIndexIter;
use crate::time::Time;
use std::collections::HashSet;
@@ -42,14 +42,12 @@ use crate::survey::texture::Texture;
use raytracing::RayTracer;
use uv::{TileCorner, TileUVW};
use cgmath::{Matrix};
use cgmath::Matrix;
use std::fmt::Debug;
use wasm_bindgen::JsValue;
use web_sys::WebGl2RenderingContext;
use super::utils::index_patch::CCWCheckPatchIndexIter;
const M: f64 = 280.0 * 280.0;
const N: f64 = 150.0 * 150.0;
const RAP: f64 = 0.7;
@@ -60,7 +58,7 @@ fn is_too_large(cell: &HEALPixCell, camera: &CameraViewPort, projection: &Projec
.iter()
.filter_map(|(lon, lat)| {
let vertex = crate::math::lonlat::radec_to_xyzw(Angle(*lon), Angle(*lat));
projection.view_to_screen_space(&vertex, camera)
projection.icrs_celestial_to_screen_space(&vertex, camera)
})
.collect::<Vec<_>>();
@@ -100,7 +98,7 @@ fn num_subdivision(cell: &HEALPixCell, camera: &CameraViewPort, projection: &Pro
let skewed_factor = (center_to_vertex_dist - smallest_center_to_vertex_dist)
/ (largest_center_to_vertex_dist - smallest_center_to_vertex_dist);
if is_too_large(cell, camera, projection) || cell.is_on_pole() || skewed_factor > 0.25 {
if skewed_factor > 0.25 || is_too_large(cell, camera, projection) || cell.is_on_pole() {
num_sub += 1;
}
@@ -286,28 +284,33 @@ pub fn get_raster_shader<'a>(
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
if config.get_format().is_colored() && cmap.label() == "native" {
crate::shader::get_shader(gl, shaders, "RasterizerVS", "RasterizerColorFS")
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color.frag",
)
} else {
if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"RasterizerVS",
"RasterizerGrayscale2ColormapUnsignedFS",
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"RasterizerVS",
"RasterizerGrayscale2ColormapIntegerFS",
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"RasterizerVS",
"RasterizerGrayscale2ColormapFS",
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap.frag",
)
}
}
@@ -321,24 +324,34 @@ pub fn get_raytracer_shader<'a>(
) -> Result<&'a Shader, JsValue> {
//let colored_hips = config.is_colored();
if config.get_format().is_colored() && cmap.label() == "native" {
crate::shader::get_shader(gl, shaders, "RayTracerVS", "RayTracerColorFS")
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color.frag",
)
} else {
if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"RayTracerVS",
"RayTracerGrayscale2ColormapUnsignedFS",
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"RayTracerVS",
"RayTracerGrayscale2ColormapIntegerFS",
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(gl, shaders, "RayTracerVS", "RayTracerGrayscale2ColormapFS")
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap.frag",
)
}
}
}
@@ -380,6 +393,9 @@ pub struct HiPS {
//min_depth_tile: u8,
footprint_moc: Option<HEALPixCoverage>,
// A buffer storing the cells in the view
hpx_cells_in_view: Vec<HEALPixCell>,
}
impl HiPS {
@@ -499,6 +515,7 @@ impl HiPS {
let gl = gl.clone();
let footprint_moc = None;
let hpx_cells_in_view = vec![];
// request the allsky texture
Ok(HiPS {
// The image survey texture buffer
@@ -517,14 +534,15 @@ impl HiPS {
m1,
idx_vertices,
//min_depth_tile,
footprint_moc,
hpx_cells_in_view,
})
}
pub fn look_for_new_tiles<'a>(
&'a mut self,
camera: &'a mut CameraViewPort,
camera: &'a CameraViewPort,
proj: &ProjectionType,
) -> Option<impl Iterator<Item = HEALPixCell> + 'a> {
// do not add tiles if the view is already at depth 0
@@ -559,9 +577,9 @@ impl HiPS {
// let texture_cell = cell.get_texture_cell(delta_depth);
// texture_cell.get_tile_cells(delta_depth)
//})
.into_iter()
.flat_map(move |tile_cell| {
let tex_cell = tile_cell.get_texture_cell(dd);
//console_log(&format!("{:?}, dd:{:?}", tex_cell, dd));
tex_cell.get_tile_cells(dd)
})
.filter(move |tile_cell| {
@@ -607,13 +625,42 @@ impl HiPS {
pub fn update(&mut self, camera: &mut CameraViewPort, projection: &ProjectionType) {
let raytracing = camera.is_raytracing(projection);
let vertices_recomputation_needed =
!raytracing && (self.textures.reset_available_tiles() | camera.has_moved());
if vertices_recomputation_needed {
if raytracing {
return;
}
// rasterizer mode
let available_tiles = self.textures.reset_available_tiles();
let new_cells_in_view = self.retrieve_cells_in_camera(camera);
if new_cells_in_view || available_tiles {
self.recompute_vertices(camera, projection);
}
}
// Returns whether the cells in view have changed with respect to the last frame
pub fn retrieve_cells_in_camera(&mut self, camera: &CameraViewPort) -> bool {
let cfg = self.textures.config();
// Get the coo system transformation matrix
let hips_frame = cfg.get_frame();
let depth = camera.get_texture_depth().min(cfg.get_max_depth_texture());
let hpx_cells_in_view = camera.get_hpx_cells(depth, hips_frame);
let new_cells = if hpx_cells_in_view.len() != self.hpx_cells_in_view.len() {
true
} else {
!self
.hpx_cells_in_view
.iter()
.zip(hpx_cells_in_view.iter())
.all(|(&a, &b)| a == b)
};
self.hpx_cells_in_view = hpx_cells_in_view;
new_cells
}
#[inline]
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
self.footprint_moc = Some(moc);
@@ -695,35 +742,29 @@ impl HiPS {
let cfg = self.textures.config();
// Get the coo system transformation matrix
let selected_frame = camera.get_coo_system();
let channel = cfg.get_format().get_channel();
let hips_frame = cfg.get_frame();
// Retrieve the model and inverse model matrix
let mut off_indices = 0;
let depth = camera.get_texture_depth().min(cfg.get_max_depth_texture());
let view_cells: Vec<_> = camera.get_hpx_cells(depth, hips_frame).cloned().collect();
for cell in &view_cells {
for cell in &self.hpx_cells_in_view {
// filter textures that are not in the moc
let cell = if let Some(moc) = self.footprint_moc.as_ref() {
if moc.intersects_cell(cell) {
Some(cell)
if moc.intersects_cell(&cell) {
Some(&cell)
} else {
if channel == ChannelType::RGB8U {
// Rasterizer does not render tiles that are not in the MOC
// This is not a problem for transparency rendered HiPses (FITS or PNG)
// but JPEG tiles appear black where no pixel data is found
// We therefore must draw in black for the tiles outside the HiPS MOC
Some(cell)
Some(&cell)
} else {
None
}
}
} else {
Some(cell)
Some(&cell)
};
if let Some(cell) = cell {
@@ -811,22 +852,20 @@ impl HiPS {
let n_vertices_per_segment = n_segments_by_side + 1;
let mut pos = vec![];
for (idx, lonlat) in
crate::healpix::utils::grid_lonlat::<f64>(cell, n_segments_by_side as u16)
.iter()
.enumerate()
{
let lon = lonlat.lon();
let lat = lonlat.lat();
let mut pos = Vec::with_capacity((n_segments_by_side + 1) * 4);
let xyzw = crate::math::lonlat::radec_to_xyzw(lon, lat);
let xyzw =
crate::coosys::apply_coo_system(hips_frame, selected_frame, &xyzw);
let grid_lonlat =
healpix::nested::grid(cell.depth(), cell.idx(), n_segments_by_side as u16);
let grid_lonlat_iter = grid_lonlat.into_iter();
let ndc = projection
.model_to_normalized_device_space(&xyzw, camera)
.map(|v| [v.x as f32, v.y as f32]);
for (idx, &(lon, lat)) in grid_lonlat_iter.enumerate() {
//let xyzw = crate::math::lonlat::radec_to_xyzw(lon, lat);
//let xyzw =
// crate::coosys::apply_coo_system(hips_frame, selected_frame, &xyzw);
//let ndc = projection
// .model_to_normalized_device_space(&xyzw, camera)
// .map(|v| [v.x as f32, v.y as f32]);
let i: usize = idx / n_vertices_per_segment;
let j: usize = idx % n_vertices_per_segment;
@@ -857,15 +896,13 @@ impl HiPS {
self.m1.push(miss_1);
self.time_tile_received.push(start_time);
pos.push(ndc);
pos.push([lon as f32, lat as f32]);
}
let patch_indices_iter = CCWCheckPatchIndexIter::new(
let patch_indices_iter = DefaultPatchIndexIter::new(
&(0..=n_segments_by_side),
&(0..=n_segments_by_side),
n_vertices_per_segment,
&pos,
camera,
)
.flatten()
.map(|indices| {
@@ -883,7 +920,7 @@ impl HiPS {
// Replace options with an arbitrary vertex
let position_iter = pos
.into_iter()
.map(|ndc| ndc.unwrap_or([0.0, 0.0]))
//.map(|ndc| ndc.unwrap_or([0.0, 0.0]))
.flatten();
self.position.extend(position_iter);
}
@@ -1017,10 +1054,6 @@ impl HiPS {
let hips_frame = hips_cfg.get_frame();
let c = selected_frame.to(hips_frame);
// Retrieve the model and inverse model matrix
let w2v = c * (*camera.get_w2m());
let v2w = w2v.transpose();
let raytracing = camera.is_raytracing(proj);
let config = self.get_config();
@@ -1041,6 +1074,8 @@ impl HiPS {
blend_cfg.enable(&self.gl, || {
if raytracing {
let w2v = c * (*camera.get_w2m());
let shader = get_raytracer_shader(cmap, &self.gl, shaders, &config)?;
let shader = shader.bind(&self.gl);
@@ -1051,13 +1086,14 @@ impl HiPS {
.attach_uniforms_with_params_from(cmap, colormaps)
.attach_uniforms_from(color)
.attach_uniform("model", &w2v)
.attach_uniform("inv_model", &v2w)
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniforms_from(colormaps);
raytracer.draw(&shader);
} else {
let v2w = (*camera.get_m2w()) * c.transpose();
// The rasterizer has a buffer containing:
// - The vertices of the HEALPix cells for the most refined survey
// - The starting and ending uv for the blending animation
@@ -1073,15 +1109,15 @@ impl HiPS {
let shader = get_raster_shader(cmap, &self.gl, shaders, &config)?.bind(&self.gl);
shader
.attach_uniforms_from(camera)
.attach_uniforms_from(&self.textures)
// send the cmap appart from the color config
.attach_uniforms_with_params_from(cmap, colormaps)
.attach_uniforms_from(color)
.attach_uniform("model", &w2v)
.attach_uniforms_from(camera)
.attach_uniform("inv_model", &v2w)
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniform("u_proj", proj)
.attach_uniforms_from(colormaps)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(

View File

@@ -572,19 +572,30 @@ impl Image {
} = cfg;
let shader = match self.channel {
ChannelType::R32F => crate::shader::get_shader(&self.gl, shaders, "FitsVS", "FitsFS")?,
#[cfg(feature = "webgl2")]
ChannelType::R32I => {
crate::shader::get_shader(&self.gl, shaders, "FitsVS", "FitsFSInteger")?
ChannelType::R32F => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_sampler.frag")?
}
#[cfg(feature = "webgl2")]
ChannelType::R16I => {
crate::shader::get_shader(&self.gl, shaders, "FitsVS", "FitsFSInteger")?
}
ChannelType::R32I => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_isampler.frag",
)?,
#[cfg(feature = "webgl2")]
ChannelType::R8UI => {
crate::shader::get_shader(&self.gl, shaders, "FitsVS", "FitsFSUnsigned")?
}
ChannelType::R16I => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_isampler.frag",
)?,
#[cfg(feature = "webgl2")]
ChannelType::R8UI => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_usampler.frag",
)?,
_ => return Err(JsValue::from_str("Image format type not supported")),
};

View File

@@ -1,17 +1,12 @@
use cgmath::Vector3;
use crate::ProjectionType;
use crate::CameraViewPort;
use crate::ProjectionType;
use cgmath::Vector3;
use cgmath::InnerSpace;
use crate::math::angle::ToAngle;
use cgmath::InnerSpace;
use crate::coo_space::XYNDC;
use crate::coo_space::XYZModel;
use crate::coo_space::XYNDC;
use crate::LonLatT;
const MAX_ITERATION: usize = 5;
@@ -21,7 +16,14 @@ const MAX_ITERATION: usize = 5;
// * Longitudes between [0; 2\pi[
// * (lon1 - lon2).abs() < PI so that it can only cross either the primary meridian or the opposite primary meridian
// (the latter is handled because of the longitude intervals)
pub fn project(lon1: f64, lat1: f64, lon2: f64, lat2: f64, camera: &CameraViewPort, projection: &ProjectionType) -> Vec<XYNDC> {
pub fn project(
lon1: f64,
lat1: f64,
lon2: f64,
lat2: f64,
camera: &CameraViewPort,
projection: &ProjectionType,
) -> Vec<XYNDC<f64>> {
let mut vertices = vec![];
let lonlat1 = LonLatT::new(lon1.to_angle(), lat1.to_angle());
@@ -36,18 +38,16 @@ pub fn project(lon1: f64, lat1: f64, lon2: f64, lat2: f64, camera: &CameraViewPo
match (p1, p2) {
(Some(_), Some(_)) => {
project_line(&mut vertices, &v1, &v2, camera, projection, 0);
},
}
(None, Some(_)) => {
let (v1, v2) = sub_valid_domain(v2, v1, projection, camera);
project_line(&mut vertices, &v1, &v2, camera, projection, 0);
},
}
(Some(_), None) => {
let (v1, v2) = sub_valid_domain(v1, v2, projection, camera);
project_line(&mut vertices, &v1, &v2, camera, projection, 0);
},
(None, None) => {
}
(None, None) => {}
}
vertices
@@ -57,7 +57,12 @@ pub fn project(lon1: f64, lat1: f64, lon2: f64, lat2: f64, camera: &CameraViewPo
// * angular distance between valid_lon and invalid_lon is < PI
// * valid_lon and invalid_lon are well defined, i.e. they can be between [-PI; PI] or [0, 2PI] depending
// on whether or not they cross the zero meridian
fn sub_valid_domain(valid_v: XYZModel, invalid_v: XYZModel, projection: &ProjectionType, camera: &CameraViewPort) -> (XYZModel, XYZModel) {
fn sub_valid_domain(
valid_v: XYZModel<f64>,
invalid_v: XYZModel<f64>,
projection: &ProjectionType,
camera: &CameraViewPort,
) -> (XYZModel<f64>, XYZModel<f64>) {
let d_alpha = camera.get_aperture().to_radians() * 0.02;
let mut vv = valid_v;
@@ -77,9 +82,9 @@ fn sub_valid_domain(valid_v: XYZModel, invalid_v: XYZModel, projection: &Project
}
fn project_line(
vertices: &mut Vec<XYNDC>,
v1: &XYZModel,
v2: &XYZModel,
vertices: &mut Vec<XYNDC<f64>>,
v1: &XYZModel<f64>,
v2: &XYZModel<f64>,
camera: &CameraViewPort,
projection: &ProjectionType,
iter: usize,
@@ -91,25 +96,14 @@ fn project_line(
// Project them. We are always facing the camera
let vm = (v1 + v2).normalize();
let pm = projection.model_to_normalized_device_space(&vm.extend(1.0), camera);
match (p1, pm, p2) {
(Some(p1), Some(pm), Some(p2)) => {
let d12 = crate::math::vector::angle3(v1, v2).to_radians();
// Subdivide when until it is > 30 degrees
if d12 > 30.0_f64.to_radians() {
subdivide(
vertices,
v1,
v2,
&vm,
p1,
p2,
pm,
camera,
projection,
iter
);
subdivide(vertices, v1, v2, &vm, p1, p2, pm, camera, projection, iter);
} else {
// enough to stop the recursion
let ab = pm - p1;
@@ -131,7 +125,7 @@ fn project_line(
// not colinear but enough to stop
vertices.push(p1);
vertices.push(pm);
vertices.push(pm);
vertices.push(p2);
}
@@ -151,65 +145,39 @@ fn project_line(
}
} else {
// Subdivide a->b and b->c
subdivide(
vertices,
v1,
v2,
&vm,
p1,
p2,
pm,
camera,
projection,
iter
);
subdivide(vertices, v1, v2, &vm, p1, p2, pm, camera, projection, iter);
}
}
}
true
},
_ => false
}
_ => false,
}
} else {
false
}
}
fn subdivide(
vertices: &mut Vec<XYNDC>,
v1: &XYZModel,
v2: &XYZModel,
vm: &XYZModel,
p1: XYNDC,
p2: XYNDC,
pm: XYNDC,
vertices: &mut Vec<XYNDC<f64>>,
v1: &XYZModel<f64>,
v2: &XYZModel<f64>,
vm: &XYZModel<f64>,
p1: XYNDC<f64>,
p2: XYNDC<f64>,
pm: XYNDC<f64>,
camera: &CameraViewPort,
projection: &ProjectionType,
iter: usize
iter: usize,
) {
// Subdivide a->b and b->c
if !project_line(
vertices,
v1,
vm,
camera,
projection,
iter + 1
) {
if !project_line(vertices, v1, vm, camera, projection, iter + 1) {
vertices.push(p1);
vertices.push(pm);
}
if !project_line(
vertices,
vm,
v2,
camera,
projection,
iter + 1
) {
if !project_line(vertices, vm, v2, camera, projection, iter + 1) {
vertices.push(pm);
vertices.push(p2);
}

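The file above projects a great-circle arc by splitting it recursively at the geodesic midpoint (the normalized sum of the two endpoint vectors) until the projected segment is short or straight enough, with MAX_ITERATION bounding the recursion. A minimal standalone sketch of that idea, assuming a generic projection closure and a simple angular-length stop test in place of the crate's CameraViewPort/ProjectionType and colinearity checks:

// Minimal sketch of recursive midpoint subdivision of a great-circle arc
// (names and thresholds here are illustrative assumptions, not the crate's API).
type V3 = [f64; 3];
type P2 = [f64; 2];

fn normalize(v: V3) -> V3 {
    let n = (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]).sqrt();
    [v[0] / n, v[1] / n, v[2] / n]
}

fn angle(a: &V3, b: &V3) -> f64 {
    let dot = a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
    dot.clamp(-1.0, 1.0).acos()
}

/// Recursively subdivide the arc v1->v2 at its geodesic midpoint and push the
/// projected segment endpoints, stopping after MAX_ITER levels or once the arc
/// spans less than `max_angle` radians.
fn subdivide_arc<F>(out: &mut Vec<P2>, v1: V3, v2: V3, project: &F, max_angle: f64, iter: usize)
where
    F: Fn(&V3) -> Option<P2>,
{
    const MAX_ITER: usize = 5;
    if iter >= MAX_ITER || angle(&v1, &v2) < max_angle {
        if let (Some(p1), Some(p2)) = (project(&v1), project(&v2)) {
            out.push(p1);
            out.push(p2);
        }
        return;
    }
    // The geodesic midpoint of two unit vectors is their normalized sum.
    let vm = normalize([v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2]]);
    subdivide_arc(out, v1, vm, project, max_angle, iter + 1);
    subdivide_arc(out, vm, v2, project, max_angle, iter + 1);
}

fn main() {
    // Toy projection: drop the z component (orthographic onto the xy plane).
    let ortho = |v: &V3| Some([v[0], v[1]]);
    let mut segments = Vec::new();
    subdivide_arc(&mut segments, [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], &ortho, 0.2, 0);
    assert!(segments.len() >= 2 && segments.len() % 2 == 0);
}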
View File

@@ -2,8 +2,9 @@
pub mod great_circle_arc;
pub mod parallel_arc;
use crate::Abort;
use al_core::shader::Shader;
use crate::math::projection::ProjectionType;
use crate::shader::ShaderManager;
use al_api::coo_system::CooSystem;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
@@ -11,16 +12,12 @@ use super::Renderer;
use al_api::color::ColorRGBA;
use al_core::SliceData;
use lyon::algorithms::{
math::point,
measure::{PathMeasurements, SampleType},
path::Path,
};
struct Meta {
color: ColorRGBA,
thickness: f32,
off_indices: usize,
num_indices: usize,
coo_space: CooSpace,
}
#[derive(Clone)]
@@ -32,12 +29,16 @@ pub enum Style {
pub struct RasterizedLineRenderer {
gl: WebGlContext,
shader: Shader,
vao: VertexArrayObject,
vao_idx: usize,
vertices: Vec<f32>,
indices: Vec<u32>,
meta: Vec<Meta>,
instanced_line_vaos: Vec<VertexArrayObject>,
meta_instanced: Vec<Meta>,
}
use wasm_bindgen::JsValue;
@@ -46,15 +47,14 @@ use web_sys::WebGl2RenderingContext;
use crate::camera::CameraViewPort;
use lyon::tessellation::*;
use crate::coo_space::CooSpace;
#[repr(C)]
pub struct PathVertices<T>
pub struct PathVertices<V>
where
T: AsRef<[[f32; 2]]>,
V: AsRef<[[f32; 2]]>,
{
pub vertices: T,
//pub closed: bool,
pub vertices: V,
}
impl RasterizedLineRenderer {
@@ -63,11 +63,6 @@ impl RasterizedLineRenderer {
let vertices = vec![];
let indices = vec![];
// Create the VAO for the screen
let shader = Shader::new(
&gl,
include_str!("../../../../glsl/webgl2/line/line_vertex.glsl"),
include_str!("../../../../glsl/webgl2/line/line_frag.glsl"),
)?;
let mut vao = VertexArrayObject::new(&gl);
vao.bind_for_update()
@@ -87,10 +82,15 @@ impl RasterizedLineRenderer {
.unbind();
let meta = vec![];
let meta_instanced = vec![];
let gl = gl.clone();
let instanced_line_vaos = vec![];
Ok(Self {
gl,
shader,
vao_idx: 0,
instanced_line_vaos,
meta_instanced,
vao,
meta,
vertices,
@@ -98,12 +98,13 @@ impl RasterizedLineRenderer {
})
}
pub fn add_fill_paths<T>(
/*pub fn add_fill_paths<V>(
&mut self,
paths: impl Iterator<Item = PathVertices<T>>,
paths: impl Iterator<Item = PathVertices<V>>,
color: &ColorRGBA,
coo_space: CooSpace,
) where
T: AsRef<[[f32; 2]]>,
V: AsRef<[[f32; 2]]>,
{
let mut num_indices = 0;
let off_indices = self.indices.len();
@@ -119,7 +120,7 @@ impl RasterizedLineRenderer {
vertices, /*, closed */
} = path;
let line: &[[f32; 2]] = vertices.as_ref();
let line = vertices.as_ref();
if !line.is_empty() {
let v = &line[0];
@@ -167,22 +168,59 @@ impl RasterizedLineRenderer {
self.meta.push(Meta {
off_indices,
num_indices,
thickness: 1.0,
color: color.clone(),
coo_space,
});
}*/
fn create_instanced_vao(&mut self) {
let mut vao = VertexArrayObject::new(&self.gl);
vao.bind_for_update()
// Store the NDC endpoints of each line segment in an instanced VBO
.add_instanced_array_buffer(
"ndc_pos",
4 * std::mem::size_of::<f32>(),
&[2, 2],
&[0, 2 * std::mem::size_of::<f32>()],
WebGl2RenderingContext::DYNAMIC_DRAW,
&[] as &[f32],
)
.add_array_buffer(
"vertices",
2 * std::mem::size_of::<f32>(),
&[2],
&[0],
WebGl2RenderingContext::STATIC_DRAW,
&[
0_f32, -0.5_f32, 1_f32, -0.5_f32, 1_f32, 0.5_f32, 0_f32, 0.5_f32,
] as &[f32],
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::STATIC_DRAW,
&[0_u16, 1_u16, 2_u16, 0_u16, 2_u16, 3_u16] as &[u16],
)
// Unbind the buffer
.unbind();
self.instanced_line_vaos.push(vao);
}
pub fn add_stroke_paths<T>(
pub fn add_stroke_paths<V>(
&mut self,
paths: impl Iterator<Item = PathVertices<T>>,
paths: impl Iterator<Item = PathVertices<V>>,
thickness: f32,
color: &ColorRGBA,
style: &Style,
_style: &Style,
coo_space: CooSpace,
) where
T: AsRef<[[f32; 2]]>,
V: AsRef<[[f32; 2]]>,
{
let num_vertices = (self.vertices.len() / 2) as u32;
//let num_vertices = (self.vertices.len() / 2) as u32;
let mut path_builder = Path::builder();
/*let mut path_builder = Path::builder();
match &style {
Style::None => {
@@ -268,22 +306,51 @@ impl RasterizedLineRenderer {
.unwrap_abort();
}
let VertexBuffers { vertices, indices } = geometry;
let VertexBuffers { vertices, indices } = geometry;*/
if self.vao_idx == self.instanced_line_vaos.len() {
// create a vao
self.create_instanced_vao();
}
let num_indices = indices.len();
let off_indices = self.indices.len();
let vao = &mut self.instanced_line_vaos[self.vao_idx];
self.vao_idx += 1;
self.vertices.extend(vertices.iter().flatten());
self.indices.extend(indices.iter());
let mut buf: Vec<f32> = vec![];
self.meta.push(Meta {
off_indices,
num_indices,
for PathVertices { vertices } in paths {
let vertices = vertices.as_ref();
let path_vertices_buf_iter = vertices
.iter()
.zip(vertices.iter().skip(1))
.map(|(a, b)| [a[0], a[1], b[0], b[1]])
.flatten();
buf.extend(path_vertices_buf_iter);
}
vao.bind_for_update().update_instanced_array(
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&buf),
);
let num_instances = buf.len() / 4;
self.meta_instanced.push(Meta {
off_indices: 0,
thickness,
num_indices: num_instances,
color: color.clone(),
coo_space,
});
}
pub fn draw(&mut self, _camera: &CameraViewPort) -> Result<(), JsValue> {
pub fn draw(
&mut self,
shaders: &mut ShaderManager,
camera: &CameraViewPort,
proj: &ProjectionType,
) -> Result<(), JsValue> {
self.gl.enable(WebGl2RenderingContext::BLEND);
self.gl.blend_func_separate(
WebGl2RenderingContext::SRC_ALPHA,
@@ -293,21 +360,71 @@ impl RasterizedLineRenderer {
);
//self.gl.disable(WebGl2RenderingContext::CULL_FACE);
let shader = self.shader.bind(&self.gl);
for meta in self.meta.iter() {
shader
.attach_uniform("u_color", &meta.color) // color of the paths
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
Some(meta.num_indices as i32),
WebGl2RenderingContext::UNSIGNED_INT,
((meta.off_indices as usize) * std::mem::size_of::<u32>()) as i32,
);
{
let shader =
crate::shader::get_shader(&self.gl, shaders, "line_base.vert", "line_base.frag")?
.bind(&self.gl);
for meta in self.meta.iter() {
shader
.attach_uniform("u_color", &meta.color) // color of the paths
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
Some(meta.num_indices as i32),
WebGl2RenderingContext::UNSIGNED_INT,
((meta.off_indices as usize) * std::mem::size_of::<u32>()) as i32,
);
}
}
//self.gl.enable(WebGl2RenderingContext::CULL_FACE);
// draw the instanced lines
for (idx, meta) in self.meta_instanced.iter().enumerate() {
match meta.coo_space {
CooSpace::NDC => {
crate::shader::get_shader(
&self.gl,
shaders,
"line_inst_ndc.vert",
"line_base.frag",
)?
.bind(&self.gl)
.attach_uniform("u_color", &meta.color)
.attach_uniform("u_width", &meta.thickness)
.bind_vertex_array_object_ref(&self.instanced_line_vaos[idx])
.draw_elements_instanced_with_i32(
WebGl2RenderingContext::TRIANGLES,
0,
meta.num_indices as i32,
);
}
CooSpace::LonLat => {
let icrs2view = CooSystem::ICRS.to(camera.get_coo_system());
let view2world = camera.get_m2w();
let icrs2world = view2world * icrs2view;
crate::shader::get_shader(
&self.gl,
shaders,
"line_inst_lonlat.vert",
"line_base.frag",
)?
.bind(&self.gl)
.attach_uniforms_from(camera)
.attach_uniform("u_2world", &icrs2world)
.attach_uniform("u_color", &meta.color)
.attach_uniform("u_width", &meta.thickness)
.attach_uniform("u_proj", proj)
.bind_vertex_array_object_ref(&self.instanced_line_vaos[idx])
.draw_elements_instanced_with_i32(
WebGl2RenderingContext::TRIANGLES,
0,
meta.num_indices as i32,
);
}
_ => (),
}
}
self.gl.disable(WebGl2RenderingContext::BLEND);
Ok(())
@@ -318,8 +435,10 @@ impl Renderer for RasterizedLineRenderer {
fn begin(&mut self) {
self.vertices.clear();
self.indices.clear();
self.meta.clear();
self.meta_instanced.clear();
self.vao_idx = 0;
}
fn end(&mut self) {

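add_stroke_paths no longer runs lyon's stroke tessellation: every consecutive vertex pair of a path becomes one [x1, y1, x2, y2] instance, and a single unit quad is drawn once per segment through instancing (num_instances = buf.len() / 4). A small self-contained sketch of that flattening step:

// Minimal sketch of turning polylines into per-instance segment data, as
// add_stroke_paths does above: each consecutive vertex pair becomes one
// [x1, y1, x2, y2] instance consumed by the unit-quad instanced draw call.
fn segments_from_paths(paths: &[Vec<[f32; 2]>]) -> Vec<f32> {
    let mut buf = Vec::new();
    for path in paths {
        for (a, b) in path.iter().zip(path.iter().skip(1)) {
            buf.extend_from_slice(&[a[0], a[1], b[0], b[1]]);
        }
    }
    buf
}

fn main() {
    // A triangle drawn as an open polyline of 4 vertices -> 3 segments -> 3 instances.
    let paths = vec![vec![[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]];
    let buf = segments_from_paths(&paths);
    assert_eq!(buf.len(), 3 * 4);
    assert_eq!(buf.len() / 4, 3);
}
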
View File

@@ -1,4 +1,4 @@
use super::moc::MOC;
use super::MOC;
use crate::{camera::CameraViewPort, HEALPixCoverage};
use al_api::moc::MOC as Cfg;
@@ -7,16 +7,16 @@ pub struct MOCHierarchy {
// MOC at different resolution
mocs: Vec<MOC>,
}
use al_core::WebGlContext;
impl MOCHierarchy {
pub fn from_full_res_moc(full_res_moc: HEALPixCoverage, cfg: &Cfg) -> Self {
pub fn from_full_res_moc(gl: WebGlContext, full_res_moc: HEALPixCoverage, cfg: &Cfg) -> Self {
let full_res_depth = full_res_moc.depth();
let mut mocs: Vec<_> = (0..full_res_depth)
.map(|d| MOC::new(HEALPixCoverage(full_res_moc.degraded(d)), cfg))
.map(|d| MOC::new(gl.clone(), HEALPixCoverage(full_res_moc.degraded(d)), cfg))
.collect();
mocs.push(MOC::new(full_res_moc, cfg));
mocs.push(MOC::new(gl, full_res_moc, cfg));
Self {
mocs,

View File

@@ -0,0 +1,654 @@
mod graph;
mod mode;
pub mod hierarchy;
pub mod renderer;
pub use renderer::MOCRenderer;
use crate::camera::CameraViewPort;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::projection::ProjectionType;
use crate::renderable::WebGl2RenderingContext;
use crate::shader::ShaderManager;
use al_api::moc::MOC as Cfg;
use wasm_bindgen::JsValue;
use crate::WebGlContext;
use al_core::VertexArrayObject;
use al_api::color::ColorRGBA;
use al_api::coo_system::CooSystem;
use moclib::elem::cell::Cell;
use moclib::moc::range::CellAndEdges;
use moclib::moc::RangeMOCIterator;
use crate::HEALPixCell;
use al_core::VecData;
pub struct MOC {
pub sky_fraction: f32,
pub max_order: u8,
inner: [Option<MOCIntern>; 3],
pub moc: HEALPixCoverage,
}
impl MOC {
pub(super) fn new(gl: WebGlContext, moc: HEALPixCoverage, cfg: &Cfg) -> Self {
let sky_fraction = moc.sky_fraction() as f32;
let max_order = moc.depth_max();
let inner = [
if cfg.perimeter {
// draw only perimeter
Some(MOCIntern::new(
gl.clone(),
RenderModeType::Perimeter {
thickness: cfg.line_width,
color: cfg.color,
},
))
} else {
None
},
if cfg.filled {
// change color
let fill_color = cfg.fill_color;
// draw the edges
Some(MOCIntern::new(
gl.clone(),
RenderModeType::Filled { color: fill_color },
))
} else {
None
},
if cfg.edges {
Some(MOCIntern::new(
gl,
RenderModeType::Edge {
thickness: cfg.line_width,
color: cfg.color,
},
))
} else {
None
},
];
Self {
inner,
max_order,
sky_fraction,
moc,
}
}
/*pub(super) fn cell_indices_in_view(&mut self, camera: &mut CameraViewPort) {
for render in &mut self.inner {
if let Some(render) = render.as_mut() {
render.cell_indices_in_view(camera);
}
}
}*/
/*pub(super) fn num_cells_in_view(&self, camera: &mut CameraViewPort) -> usize {
self.inner
.iter()
.filter_map(|moc| moc.as_ref())
.map(|moc| moc.num_cells_in_view(camera))
.sum()
}*/
/*pub(super) fn num_vertices_in_view(&self, camera: &mut CameraViewPort) -> usize {
let mut num_vertices = 0;
for render in &self.0 {
if let Some(render) = render.as_ref() {
num_vertices += render.num_vertices_in_view(camera);
}
}
num_vertices
}*/
pub fn sky_fraction(&self) -> f32 {
self.sky_fraction
}
pub fn max_order(&self) -> u8 {
self.max_order
}
pub(super) fn draw(
&mut self,
camera: &mut CameraViewPort,
proj: &ProjectionType,
shaders: &mut ShaderManager,
) -> Result<(), JsValue> {
for render in &mut self.inner {
if let Some(render) = render.as_mut() {
render.draw(&self.moc, camera, proj, shaders)?
}
}
Ok(())
}
}
struct MOCIntern {
// HEALPix index vector
// Used for fast HEALPix cell retrieval
//hpx_idx_vec: IdxVec,
// Node indices in view
//indices: Vec<Range<usize>>,
mode: RenderModeType,
gl: WebGlContext,
vao: VertexArrayObject,
}
#[derive(Clone)]
pub enum RenderModeType {
Perimeter { thickness: f32, color: ColorRGBA },
Edge { thickness: f32, color: ColorRGBA },
Filled { color: ColorRGBA },
}
impl MOCIntern {
fn new(gl: WebGlContext, mode: RenderModeType) -> Self {
let lonlat = vec![];
let vertices = [
0_f32, -0.5_f32, 1_f32, -0.5_f32, 1_f32, 0.5_f32, 0_f32, 0.5_f32,
];
let indices = [0_u16, 1_u16, 2_u16, 0_u16, 2_u16, 3_u16];
let vao = match mode {
RenderModeType::Perimeter { .. } | RenderModeType::Edge { .. } => {
let mut vao = VertexArrayObject::new(&gl);
vao.bind_for_update()
// Store the (lon, lat) endpoints of each edge in an instanced VBO
.add_instanced_array_buffer(
"lonlat",
4 * std::mem::size_of::<f32>(),
&[2, 2],
&[0, 2 * std::mem::size_of::<f32>()],
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&lonlat),
)
.add_array_buffer(
"vertices",
2 * std::mem::size_of::<f32>(),
&[2],
&[0],
WebGl2RenderingContext::STATIC_DRAW,
&vertices as &[f32],
)
// Set the element buffer
.add_element_buffer(WebGl2RenderingContext::STATIC_DRAW, &indices as &[u16])
// Unbind the buffer
.unbind();
vao
}
RenderModeType::Filled { .. } => {
let mut vao = VertexArrayObject::new(&gl);
let indices = vec![];
vao.bind_for_update()
// Store the (lon, lat) cell vertices in a VBO
.add_array_buffer(
"lonlat",
2 * std::mem::size_of::<f32>(),
&[2],
&[0],
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&lonlat),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u32>(&indices),
)
// Unbind the buffer
.unbind();
vao
}
};
/*let hpx_idx_vec =
IdxVec::from_hpx_cells((&moc.0).into_range_moc_iter().cells().flat_map(|cell| {
let cell = HEALPixCell(cell.depth, cell.idx);
let dd = if 3 >= cell.depth() {
3 - cell.depth()
} else {
0
};
cell.get_tile_cells(dd)
}));
*/
Self {
//nodes,
//moc,
//hpx_idx_vec,
//indices: vec![],
vao,
gl,
mode,
}
}
/*fn cell_indices_in_view(&mut self, moc: &HEALPixCoverage, camera: &mut CameraViewPort) {
// Cache it for several reuse during the same frame
let view_depth = camera.get_texture_depth();
let cells_iter = camera.get_hpx_cells(view_depth, CooSystem::ICRS);
if moc.is_empty() {
self.indices = vec![0..0];
return;
}
/*let indices: Vec<_> = if view_depth > 7 {
// Binary search version, we are using this alternative for retrieving
// MOC's cells to render for deep fields of view
let first_cell_rng = &self.nodes[0].cell.z_29_rng();
let last_cell_rng = &self.nodes[self.nodes.len() - 1].cell.z_29_rng();
cells_iter
.filter_map(|cell| {
let cell_rng = cell.z_29_rng();
// Quick rejection test
if cell_rng.end <= first_cell_rng.start || cell_rng.start >= last_cell_rng.end {
None
} else {
let contains_val = |hash_z29: u64| -> Result<usize, usize> {
self.nodes.binary_search_by(|node| {
let node_cell_rng = node.cell.z_29_rng();
if hash_z29 < node_cell_rng.start {
// the node cell range contains hash_z29
Ordering::Greater
} else if hash_z29 >= node_cell_rng.end {
Ordering::Less
} else {
Ordering::Equal
}
})
};
let start_idx = contains_val(cell_rng.start);
let end_idx = contains_val(cell_rng.end);
let cell_indices = match (start_idx, end_idx) {
(Ok(l), Ok(r)) => {
if l == r {
l..(r + 1)
} else {
l..r
}
}
(Err(l), Ok(r)) => l..r,
(Ok(l), Err(r)) => l..r,
(Err(l), Err(r)) => l..r,
};
Some(cell_indices)
}
})
.collect()
} else {
// Index Vector 7 order version
cells_iter
.map(|cell| self.hpx_idx_vec.get_item_indices_inside_hpx_cell(&cell))
.collect()
};*/
let indices = cells_iter
.map(|cell| self.hpx_idx_vec.get_item_indices_inside_hpx_cell(&cell))
.collect();
let indices = crate::utils::merge_overlapping_intervals(indices);
self.indices = indices;
}*/
/*fn num_vertices_in_view(&self, camera: &CameraViewPort) -> usize {
self.cells_in_view(camera)
.filter_map(|n| n.vertices.as_ref())
.map(|n_vertices| {
n_vertices
.vertices
.iter()
.map(|edge| edge.len())
.sum::<usize>()
})
.sum()
}*/
/*fn num_cells_in_view(&self, _camera: &CameraViewPort) -> usize {
self.indices
.iter()
.map(|range| range.end - range.start)
.sum()
}*/
/*fn cells_in_view<'a>(&'a self, _camera: &CameraViewPort) -> impl Iterator<Item = Node> {
let nodes = &self.nodes;
self.indices
.iter()
.map(move |indices| nodes[indices.start..indices.end].iter())
.flatten()
}*/
fn vertices_in_view<'a>(
&self,
moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
) -> impl Iterator<Item = [(f64, f64); 4]> + 'a {
let view_moc = camera.get_cov(CooSystem::ICRS);
//self.cells_in_view(camera)
// .filter_map(move |node| node.vertices.as_ref())
moc.overlapped_by_iter(view_moc)
.cells()
.flat_map(|cell| {
let Cell { idx, depth } = cell;
let cell = HEALPixCell(depth, idx);
let dd = if 3 >= cell.depth() {
3 - cell.depth()
} else {
0
};
cell.get_tile_cells(dd)
})
.map(|hpx_cell| hpx_cell.vertices())
}
fn draw(
&mut self,
moc: &HEALPixCoverage,
camera: &mut CameraViewPort,
proj: &ProjectionType,
shaders: &mut ShaderManager,
) -> Result<(), JsValue> {
//let _ = crate::Time::measure_perf("rasterize moc", move || {
match self.mode {
RenderModeType::Perimeter { thickness, color } => {
let moc_in_view = moc
.overlapped_by_iter(&camera.get_cov(CooSystem::ICRS))
.into_range_moc();
let perimeter_vertices_iter = moc_in_view
.border_elementary_edges()
.filter_map(|CellAndEdges { uniq, edges }| {
if edges.is_empty() {
None
} else {
let mut paths = vec![];
let c = Cell::from_uniq_hpx(uniq);
let cell = HEALPixCell(c.depth, c.idx);
let v = cell.vertices();
if edges.get(moclib::moc::range::Ordinal::SE) {
paths.extend([
v[0].0 as f32,
v[0].1 as f32,
v[1].0 as f32,
v[1].1 as f32,
]);
}
if edges.get(moclib::moc::range::Ordinal::NE) {
paths.extend([
v[1].0 as f32,
v[1].1 as f32,
v[2].0 as f32,
v[2].1 as f32,
]);
}
if edges.get(moclib::moc::range::Ordinal::NW) {
paths.extend([
v[2].0 as f32,
v[2].1 as f32,
v[3].0 as f32,
v[3].1 as f32,
]);
}
if edges.get(moclib::moc::range::Ordinal::SW) {
paths.extend([
v[3].0 as f32,
v[3].1 as f32,
v[0].0 as f32,
v[0].1 as f32,
])
}
Some(paths)
}
})
.flatten();
let mut buf: Vec<_> = vec![];
buf.extend(perimeter_vertices_iter);
self.vao.bind_for_update().update_instanced_array(
"lonlat",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&buf),
);
let num_instances = buf.len() / 4;
let icrs2view = CooSystem::ICRS.to(camera.get_coo_system());
let view2world = camera.get_m2w();
let icrs2world = view2world * icrs2view;
crate::shader::get_shader(
&self.gl,
shaders,
"line_inst_lonlat.vert",
"line_base.frag",
)?
.bind(&self.gl)
.attach_uniforms_from(camera)
.attach_uniform("u_2world", &icrs2world)
.attach_uniform("u_color", &color)
.attach_uniform("u_width", &thickness)
.attach_uniform("u_proj", proj)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_instanced_with_i32(
WebGl2RenderingContext::TRIANGLES,
0,
num_instances as i32,
);
}
RenderModeType::Edge { thickness, color } => {
let mut buf: Vec<_> = vec![];
buf.extend(self.compute_edge_paths_iter(moc, camera));
//let mut buf = self.compute_edge_paths_iter(moc, camera).collect();
self.vao.bind_for_update().update_instanced_array(
"lonlat",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&buf),
);
let num_instances = buf.len() / 4;
let icrs2view = CooSystem::ICRS.to(camera.get_coo_system());
let view2world = camera.get_m2w();
let icrs2world = view2world * icrs2view;
crate::shader::get_shader(
&self.gl,
shaders,
"line_inst_lonlat.vert",
"line_base.frag",
)?
.bind(&self.gl)
.attach_uniforms_from(camera)
.attach_uniform("u_2world", &icrs2world)
.attach_uniform("u_color", &color)
.attach_uniform("u_width", &thickness)
.attach_uniform("u_proj", proj)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_instanced_with_i32(
WebGl2RenderingContext::TRIANGLES,
0,
num_instances as i32,
);
/*rasterizer.add_stroke_paths(
,
thickness,
&color,
&super::line::Style::None,
CooSpace::LonLat,
);*/
}
RenderModeType::Filled { color } => {
let mut off_idx = 0;
let mut indices: Vec<u32> = vec![];
let vertices = self
.vertices_in_view(moc, camera)
.map(|v| {
let vertices = [
v[0].0 as f32,
v[0].1 as f32,
v[1].0 as f32,
v[1].1 as f32,
v[2].0 as f32,
v[2].1 as f32,
v[3].0 as f32,
v[3].1 as f32,
];
indices.extend_from_slice(&[
off_idx + 1,
off_idx + 0,
off_idx + 3,
off_idx + 1,
off_idx + 3,
off_idx + 2,
]);
off_idx += 4;
vertices
})
.flatten()
.collect();
let num_idx = indices.len() as i32;
self.vao
.bind_for_update()
.update_array(
"lonlat",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData(&vertices),
)
.update_element_array(WebGl2RenderingContext::DYNAMIC_DRAW, VecData(&indices));
let icrs2view = CooSystem::ICRS.to(camera.get_coo_system());
let view2world = camera.get_m2w();
let icrs2world = view2world * icrs2view;
self.gl.enable(WebGl2RenderingContext::BLEND);
crate::shader::get_shader(&self.gl, shaders, "moc_base.vert", "moc_base.frag")?
.bind(&self.gl)
.attach_uniforms_from(camera)
.attach_uniform("u_2world", &icrs2world)
.attach_uniform("u_color", &color)
.attach_uniform("u_proj", proj)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
Some(num_idx),
WebGl2RenderingContext::UNSIGNED_INT,
0,
);
self.gl.disable(WebGl2RenderingContext::BLEND);
}
}
Ok(())
//});
}
fn compute_edge_paths_iter<'a>(
&self,
moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
) -> impl Iterator<Item = f32> + 'a {
/*self.vertices_in_view(view_moc, moc, camera)
.filter_map(move |cell_vertices| {
let mut ndc: [[f32; 2]; 5] =
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]];
let vertices = cell_vertices;
for i in 0..4 {
let line_vertices = vertices[i];
//for k in 0..line_vertices.len() {
let (lon, lat) = line_vertices;
let xyzw = crate::math::lonlat::radec_to_xyzw(Angle(lon), Angle(lat));
let xyzw =
crate::coosys::apply_coo_system(CooSystem::ICRS, camera_coosys, &xyzw);
if let Some(p) = proj.model_to_normalized_device_space(&xyzw, camera) {
if i > 0 && crossing_edges_testing {
let mag2 = crate::math::vector::dist2(
crate::math::projection::ndc_to_clip_space(&p, camera).as_ref(),
crate::math::projection::ndc_to_clip_space(
&Vector2::new(ndc[i - 1][0] as f64, ndc[i - 1][1] as f64),
camera,
)
.as_ref(),
);
//al_core::info!("mag", i, mag2);
if mag2 > 0.1 {
return None;
}
}
ndc[i] = [p.x as f32, p.y as f32];
} else {
return None;
}
//ndc[i] = [xyzw.x as f32, xyzw.y as f32];
//ndc[i] = [lon as f32, lat as f32];
}
ndc[4] = ndc[0].clone();
Some(PathVertices { vertices: ndc })
})*/
self.vertices_in_view(moc, camera)
.map(|v| {
let vertices = [
v[0].0 as f32,
v[0].1 as f32,
v[1].0 as f32,
v[1].1 as f32,
v[1].0 as f32,
v[1].1 as f32,
v[2].0 as f32,
v[2].1 as f32,
v[2].0 as f32,
v[2].1 as f32,
v[3].0 as f32,
v[3].1 as f32,
v[3].0 as f32,
v[3].1 as f32,
v[0].0 as f32,
v[0].1 as f32,
];
vertices
})
.flatten()
}
}
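
In Filled mode every HEALPix cell in view contributes four (lon, lat) vertices and the six indices of two triangles, as in the loop above. A standalone sketch of that buffer construction, with plain tuples standing in for the crate's cell vertices:

// Minimal sketch of the Filled-mode buffer construction above: every HEALPix
// cell contributes 4 (lon, lat) vertices and the 6 indices of two triangles.
fn fill_buffers(cells: &[[(f64, f64); 4]]) -> (Vec<f32>, Vec<u32>) {
    let mut vertices = Vec::with_capacity(cells.len() * 8);
    let mut indices = Vec::with_capacity(cells.len() * 6);
    let mut off = 0u32;
    for cell in cells {
        for (lon, lat) in cell {
            vertices.push(*lon as f32);
            vertices.push(*lat as f32);
        }
        // Same winding as the renderer: (1, 0, 3) and (1, 3, 2).
        indices.extend_from_slice(&[off + 1, off, off + 3, off + 1, off + 3, off + 2]);
        off += 4;
    }
    (vertices, indices)
}

fn main() {
    let cell = [(0.0, 0.0), (0.1, 0.0), (0.1, 0.1), (0.0, 0.1)];
    let (v, i) = fill_buffers(&[cell, cell]);
    assert_eq!(v.len(), 16);
    assert_eq!(i.len(), 12);
}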

View File

@@ -1,5 +1,5 @@
use crate::healpix::cell::CellVertices;
use crate::renderable::coverage::HEALPixCell;
use crate::HEALPixCell;
use crate::HEALPixCoverage;
pub mod edge;

View File

@@ -0,0 +1,160 @@
use crate::{healpix::coverage::HEALPixCoverage, CameraViewPort, ShaderManager};
use web_sys::WebGl2RenderingContext;
use al_core::WebGlContext;
use wasm_bindgen::JsValue;
use super::hierarchy::MOCHierarchy;
use al_api::coo_system::CooSystem;
use al_api::moc::MOC as Cfg;
pub struct MOCRenderer {
mocs: Vec<MOCHierarchy>,
cfgs: Vec<Cfg>,
gl: WebGlContext,
}
use crate::ProjectionType;
impl MOCRenderer {
pub fn new(gl: &WebGlContext) -> Result<Self, JsValue> {
// layout (location = 0) in vec2 ndc_pos;
//let vertices = vec![0.0; MAX_NUM_FLOATS_TO_DRAW];
//let indices = vec![0_u16; MAX_NUM_INDICES_TO_DRAW];
//let vertices = vec![];
/*let position = vec![];
let indices = vec![];
#[cfg(feature = "webgl2")]
vao.bind_for_update()
.add_array_buffer_single(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u32>(&indices),
)
.unbind();
#[cfg(feature = "webgl1")]
vao.bind_for_update()
.add_array_buffer(
2,
"ndc_pos",
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<f32>(&position),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::DYNAMIC_DRAW,
VecData::<u32>(&indices),
)
.unbind();
*/
let mocs = Vec::new();
let cfgs = Vec::new();
Ok(Self {
gl: gl.clone(),
mocs,
cfgs,
})
}
pub fn push_back(
&mut self,
moc: HEALPixCoverage,
cfg: Cfg,
camera: &mut CameraViewPort,
proj: &ProjectionType,
) {
self.mocs
.push(MOCHierarchy::from_full_res_moc(self.gl.clone(), moc, &cfg));
self.cfgs.push(cfg);
camera.register_view_frame(CooSystem::ICRS, proj);
//self.layers.push(key);
}
pub fn get_hpx_coverage(&self, cfg: &Cfg) -> Option<&HEALPixCoverage> {
let name = cfg.get_uuid();
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == name) {
Some(&self.mocs[idx].get_full_moc())
} else {
None
}
}
pub fn remove(
&mut self,
cfg: &Cfg,
camera: &mut CameraViewPort,
proj: &ProjectionType,
) -> Option<Cfg> {
let name = cfg.get_uuid();
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == name) {
self.mocs.remove(idx);
camera.unregister_view_frame(CooSystem::ICRS, proj);
Some(self.cfgs.remove(idx))
} else {
None
}
}
pub fn set_cfg(
&mut self,
cfg: Cfg,
camera: &mut CameraViewPort,
projection: &ProjectionType,
shaders: &mut ShaderManager,
) -> Option<Cfg> {
let name = cfg.get_uuid();
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == name) {
let old_cfg = self.cfgs[idx].clone();
self.cfgs[idx] = cfg;
let _ = self.draw(camera, projection, shaders);
Some(old_cfg)
} else {
// the cfg has not been found
None
}
}
pub fn is_empty(&self) -> bool {
self.cfgs.is_empty()
}
pub fn draw(
&mut self,
camera: &mut CameraViewPort,
proj: &ProjectionType,
shaders: &mut ShaderManager,
) -> Result<(), JsValue> {
if !self.is_empty() {
self.gl.enable(WebGl2RenderingContext::CULL_FACE);
for (hmoc, cfg) in self.mocs.iter_mut().zip(self.cfgs.iter()) {
if cfg.show {
let moc = hmoc.select_moc_from_view(camera);
moc.draw(camera, proj, shaders)?;
}
}
self.gl.disable(WebGl2RenderingContext::CULL_FACE);
}
Ok(())
}
}
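
MOCRenderer keeps the MOC hierarchies and their configs in two parallel vectors addressed by the config uuid via position(). A minimal sketch of that bookkeeping, with a simplified Cfg (uuid + show flag) standing in for al_api::moc::MOC and a placeholder payload type for the hierarchy:

// Minimal sketch of the parallel-Vec bookkeeping used by MOCRenderer above.
#[derive(Clone, Debug, PartialEq)]
struct Cfg {
    uuid: String,
    show: bool,
}

struct Renderer<T> {
    mocs: Vec<T>,
    cfgs: Vec<Cfg>,
}

impl<T> Renderer<T> {
    fn new() -> Self {
        Self { mocs: Vec::new(), cfgs: Vec::new() }
    }

    fn push_back(&mut self, moc: T, cfg: Cfg) {
        self.mocs.push(moc);
        self.cfgs.push(cfg);
    }

    /// Remove the MOC whose config has the given uuid, returning its config.
    fn remove(&mut self, uuid: &str) -> Option<Cfg> {
        let idx = self.cfgs.iter().position(|c| c.uuid == uuid)?;
        self.mocs.remove(idx);
        Some(self.cfgs.remove(idx))
    }

    /// Replace the config with the same uuid, returning the previous one.
    fn set_cfg(&mut self, cfg: Cfg) -> Option<Cfg> {
        let idx = self.cfgs.iter().position(|c| c.uuid == cfg.uuid)?;
        Some(std::mem::replace(&mut self.cfgs[idx], cfg))
    }
}

fn main() {
    let mut r: Renderer<&str> = Renderer::new();
    r.push_back("moc-data", Cfg { uuid: "a".into(), show: true });
    assert!(r.set_cfg(Cfg { uuid: "a".into(), show: false }).is_some());
    assert_eq!(r.remove("a").map(|c| c.show), Some(false));
    assert!(r.remove("a").is_none());
}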

View File

@@ -1,9 +1,11 @@
pub mod catalog;
pub mod coverage;
pub mod final_pass;
pub mod grid;
pub mod hips;
pub mod image;
pub mod line;
pub mod moc;
pub mod shape;
pub mod text;
pub mod utils;
@@ -23,7 +25,6 @@ use al_api::image::ImageParams;
use al_core::colormap::Colormaps;
use al_core::shader::Shader;
use al_core::SliceData;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
@@ -38,7 +39,6 @@ use crate::{shader::ShaderManager, survey::config::HiPSConfig};
use hips::raytracing::RayTracer;
use std::borrow::Cow;
use std::collections::HashMap;
use wasm_bindgen::JsValue;
@@ -79,16 +79,19 @@ const DEFAULT_BACKGROUND_COLOR: ColorRGB = ColorRGB {
b: 0.05,
};
fn get_backgroundcolor_shader<'a>(gl: &WebGlContext, shaders: &'a mut ShaderManager) -> &'a Shader {
fn get_backgroundcolor_shader<'a>(
gl: &WebGlContext,
shaders: &'a mut ShaderManager,
) -> Result<&'a Shader, JsValue> {
shaders
.get(
gl,
&ShaderId(
Cow::Borrowed("RayTracerFontVS"),
Cow::Borrowed("RayTracerFontFS"),
ShaderId(
"hips_raytracer_backcolor.vert",
"hips_raytracer_backcolor.frag",
),
)
.unwrap_abort()
.map_err(|e| e.into())
}
pub struct ImageCfg {
@@ -136,29 +139,12 @@ impl Layers {
2,
"pos_clip_space",
WebGl2RenderingContext::STATIC_DRAW,
SliceData::<f32>(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]),
&[-1.0_f32, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0] as &[f32],
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::STATIC_DRAW,
SliceData::<u16>(&[0, 1, 2, 0, 2, 3]),
)
// Unbind the buffer
.unbind();
#[cfg(feature = "webgl1")]
screen_vao
.bind_for_update()
.add_array_buffer(
2,
"pos_clip_space",
WebGl2RenderingContext::STATIC_DRAW,
SliceData::<f32>(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]),
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::STATIC_DRAW,
SliceData::<u16>(&[0, 1, 2, 0, 2, 3]),
&[0_u16, 1, 2, 0, 2, 3] as &[u16],
)
// Unbind the buffer
.unbind();
@@ -258,7 +244,7 @@ impl Layers {
&self.screen_vao
};
get_backgroundcolor_shader(&self.gl, shaders)
get_backgroundcolor_shader(&self.gl, shaders)?
.bind(&self.gl)
.attach_uniforms_from(camera)
.attach_uniform("color", &background_color)

View File

@@ -0,0 +1 @@

View File

View File

@@ -0,0 +1,62 @@
use crate::math::{angle::Angle, lonlat::LonLatT};
use al_api::color::ColorRGBA;
use serde::Deserialize;
mod circle;
mod ellipsis;
mod image;
mod polyline;
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Shape {
Box {
/// Center of the box
c: LonLatT<f32>,
/// Size following the RA axis
ra_w: Angle<f32>,
/// Size following the Dec axis
dec_h: Angle<f32>,
/// Rotation of the box in the RA-Dec space
rot: Angle<f32>,
},
Circle {
/// Center of the circle
c: LonLatT<f32>,
/// Radius of the circle
rad: Angle<f32>,
},
PolyLine(Box<[LonLatT<f32>]>),
Ellipsis {
/// Center of the ellipsis
c: LonLatT<f32>,
/// Semi-major axis
a: Angle<f32>,
/// Semi-minor axis
b: Angle<f32>,
/// Rotation angle of the ellipsis. Origin aligns the ellipsis' major axis with the north pole. Positive angle points towards the east.
rot: Angle<f32>,
},
// TODO
Image,
}
#[derive(Debug, Deserialize)]
pub enum Style {
None,
Dashed,
Dotted,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Footprint {
shapes: Vec<Shape>,
/// Some styling meta data
color: ColorRGBA,
filled: bool,
thickness: f32,
style: Style,
}
pub type Catalog = Footprint;
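
Shape and Footprint derive Deserialize with camelCase renaming, so footprint catalogs can be parsed directly from JSON coming from the JavaScript side. A hedged sketch of what such a payload could look like, with plain [f32; 2] pairs standing in for LonLatT/Angle (the actual wire format depends on how those wrappers implement Deserialize):

// Minimal sketch of deserializing a footprint like the one defined above.
// Requires serde = { version = "1", features = ["derive"] } and serde_json = "1".
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
enum Shape {
    Circle { c: [f32; 2], rad: f32 },
    PolyLine(Vec<[f32; 2]>),
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Footprint {
    shapes: Vec<Shape>,
    filled: bool,
    thickness: f32,
}

fn main() {
    let json = r#"{
        "shapes": [
            { "polyLine": [[0.0, 0.0], [1.0, 0.5], [2.0, 0.0]] },
            { "circle": { "c": [3.0, 3.0], "rad": 0.25 } }
        ],
        "filled": false,
        "thickness": 2.0
    }"#;
    let fp: Footprint = serde_json::from_str(json).expect("valid footprint JSON");
    assert_eq!(fp.shapes.len(), 2);
}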

View File

@@ -0,0 +1,210 @@
use crate::math::projection::ProjectionType;
use crate::shader::ShaderManager;
use al_api::coo_system::CooSystem;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
use al_api::color::ColorRGBA;
pub struct PolylineRenderer {
gl: WebGlContext,
vao: VertexArrayObject,
color: ColorRGBA,
thickness: f32,
num_instances: usize,
}
use wasm_bindgen::JsValue;
use al_core::VecData;
use web_sys::WebGl2RenderingContext;
use crate::camera::CameraViewPort;
use super::Shape;
use super::Catalog;
impl PolylineRenderer {
/// Init the buffers, VAO and shader
pub fn new<'a>(gl: &WebGlContext, catalog: &Catalog) -> Result<Self, JsValue> {
let lines = catalog
.shapes
.iter()
.flat_map(|s| {
let mut v = vec![];
match s {
Shape::PolyLine(vertices) => {
for (v1, v2) in vertices.iter().zip(vertices.iter().skip(1)) {
v.extend_from_slice(&[
v1.lon().to_radians(),
v1.lat().to_radians(),
v2.lon().to_radians(),
v2.lat().to_radians(),
])
}
}
_ => (),
}
v
})
.collect::<Vec<_>>();
let num_instances = lines.len() / 4;
// Create the VAO for the screen
let mut vao = VertexArrayObject::new(&gl);
vao.bind_for_update()
// Store the (lon, lat) endpoints of each polyline segment in an instanced VBO
.add_instanced_array_buffer(
"line",
4 * std::mem::size_of::<f32>(),
&[2, 2],
&[0, 2 * std::mem::size_of::<f32>()],
WebGl2RenderingContext::STATIC_DRAW,
VecData::<f32>(&lines),
)
.add_array_buffer(
"vertices",
2 * std::mem::size_of::<f32>(),
&[2],
&[0],
WebGl2RenderingContext::STATIC_DRAW,
&[
0_f32, -0.5_f32, 1_f32, -0.5_f32, 1_f32, 0.5_f32, 0_f32, 0.5_f32,
] as &[f32],
)
// Set the element buffer
.add_element_buffer(
WebGl2RenderingContext::STATIC_DRAW,
&[0_u16, 1_u16, 2_u16, 0_u16, 2_u16, 3_u16] as &[u16],
)
// Unbind the buffer
.unbind();
let gl = gl.clone();
Ok(Self {
gl,
vao,
color: catalog.color,
thickness: catalog.thickness,
num_instances,
})
}
/*pub fn add_fill_paths<V>(
&mut self,
paths: impl Iterator<Item = PathVertices<V>>,
color: &ColorRGBA,
coo_space: CooSpace,
) where
V: AsRef<[[f32; 2]]>,
{
let mut num_indices = 0;
let off_indices = self.indices.len();
let mut geometry: VertexBuffers<[f32; 2], u32> = VertexBuffers::new();
let mut tessellator = FillTessellator::new();
//let mut num_vertices = 0;
for path in paths {
let mut path_builder = Path::builder();
let PathVertices {
vertices, /*, closed */
} = path;
let line = vertices.as_ref();
if !line.is_empty() {
let v = &line[0];
path_builder.begin(point(v[0], v[1]));
for v in line.iter().skip(1) {
//let v = clamp_ndc_vertex(v);
path_builder.line_to(point(v[0], v[1]));
}
path_builder.end(false);
}
// Create the destination vertex and index buffers.
let p = path_builder.build();
// Let's use our own custom vertex type instead of the default one.
// Will contain the result of the tessellation.
let num_vertices = (self.vertices.len() / 2) as u32;
// Compute the tessellation.
tessellator
.tessellate_with_ids(
p.id_iter(),
&p,
Some(&p),
&FillOptions::default()
.with_intersections(false)
.with_fill_rule(FillRule::NonZero)
.with_tolerance(5e-3),
&mut BuffersBuilder::new(&mut geometry, |vertex: FillVertex| {
vertex.position().to_array()
})
.with_vertex_offset(num_vertices),
)
.unwrap_abort();
}
let VertexBuffers { vertices, indices } = geometry;
num_indices += indices.len();
self.vertices.extend(vertices.iter().flatten());
self.indices.extend(indices.iter());
//al_core::info!("num vertices fill", nv);
self.meta.push(Meta {
off_indices,
num_indices,
thickness: 1.0,
color: color.clone(),
coo_space,
});
}*/
pub fn draw(
&mut self,
shaders: &mut ShaderManager,
camera: &CameraViewPort,
proj: &ProjectionType,
) -> Result<(), JsValue> {
self.gl.enable(WebGl2RenderingContext::BLEND);
self.gl.blend_func_separate(
WebGl2RenderingContext::SRC_ALPHA,
WebGl2RenderingContext::ONE_MINUS_SRC_ALPHA,
WebGl2RenderingContext::ONE,
WebGl2RenderingContext::ONE,
);
// draw the instanced lines
let icrs2view = CooSystem::ICRS.to(camera.get_coo_system());
let view2world = camera.get_m2w();
let icrs2world = view2world * icrs2view;
crate::shader::get_shader(&self.gl, shaders, "line_inst_lonlat.vert", "line_base.frag")?
.bind(&self.gl)
.attach_uniforms_from(camera)
.attach_uniform("u_2world", &icrs2world)
.attach_uniform("u_color", &self.color)
.attach_uniform("u_width", &self.thickness)
.attach_uniform("u_proj", proj)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_instanced_with_i32(
WebGl2RenderingContext::TRIANGLES,
0,
self.num_instances as i32,
);
self.gl.disable(WebGl2RenderingContext::BLEND);
Ok(())
}
}
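
The unit quad [0, -0.5 .. 1, 0.5] bound as the vertices attribute suggests each instance is expanded into a thick segment in the vertex shader: x interpolates along the segment, y offsets across it by the line width. The line_inst_*.vert sources are not part of this diff, so the exact GPU math is an assumption; this CPU-side sketch only illustrates the idea:

// Hypothetical CPU-side illustration of expanding one segment instance into a
// thick quad from the unit-quad vertex, NOT the shader's actual code.
fn expand_segment(p1: [f32; 2], p2: [f32; 2], quad_vertex: [f32; 2], width: f32) -> [f32; 2] {
    let along = [p2[0] - p1[0], p2[1] - p1[1]];
    let len = (along[0] * along[0] + along[1] * along[1]).sqrt();
    // Unit normal, perpendicular to the segment direction.
    let normal = [-along[1] / len, along[0] / len];
    // quad_vertex.x in [0, 1] interpolates along the segment,
    // quad_vertex.y in [-0.5, 0.5] offsets across it by `width`.
    [
        p1[0] + quad_vertex[0] * along[0] + quad_vertex[1] * width * normal[0],
        p1[1] + quad_vertex[0] * along[1] + quad_vertex[1] * width * normal[1],
    ]
}

fn main() {
    let corner = expand_segment([0.0, 0.0], [2.0, 0.0], [1.0, 0.5], 0.1);
    // End of the segment, offset by half the width across it.
    assert!((corner[0] - 2.0).abs() < 1e-6 && (corner[1] - 0.05).abs() < 1e-6);
}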

View File

@@ -1,4 +1,3 @@
use std::ops::RangeInclusive;
use super::triangle::Triangle;

View File

@@ -1,17 +1,15 @@
use al_core::shader::Shader;
use al_core::WebGlContext;
pub type VertId = Cow<'static, str>;
pub type FragId = Cow<'static, str>;
type FileId = Cow<'static, str>;
pub type VertId = &'static str;
pub type FragId = &'static str;
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
pub struct ShaderId(pub VertId, pub FragId);
pub struct ShaderManager {
// Compiled shaders stored in an HashMap
shaders: HashMap<ShaderId, Shader>,
// Shaders sources coming from the javascript
src: HashMap<FileId, String>,
src: HashMap<&'static str, &'static str>,
}
#[derive(Debug)]
@@ -20,6 +18,7 @@ pub enum Error {
ShaderNotFound { message: &'static str },
ShaderCompilingLinking { message: JsValue },
FileNotFound { message: &'static str },
Io { message: String },
}
use wasm_bindgen::JsValue;
@@ -35,7 +34,8 @@ impl From<Error> for JsValue {
Error::FileNotFound { message } => {
JsValue::from_str(&format!("Shader not found: {:?}", message))
}
Error::ShaderCompilingLinking { message } => message
Error::ShaderCompilingLinking { message } => message,
Error::Io { message } => message.into(),
}
}
}
@@ -49,15 +49,40 @@ pub struct FileSrc {
use std::collections::hash_map::Entry;
use std::collections::HashMap;
impl ShaderManager {
pub fn new(_gl: &WebGlContext, files: Vec<FileSrc>) -> Result<ShaderManager, Error> {
let src = files
.into_iter()
.map(|file| {
let FileSrc { id, content } = file;
(Cow::Owned(id), content)
})
.collect::<HashMap<_, _>>();
pub fn new() -> Result<ShaderManager, Error> {
let src = crate::shaders::get_all();
// Loop over the entries in the directory
/*let _src = std::fs::read_dir("./shaders")
.map_err(|e| Error::Io {
message: e.to_string(),
})?
.into_iter()
.filter_map(|entry| {
let entry = entry.ok()?;
let path = entry.path();
console_log(&format!("aaa"));
if path.is_file() {
let file_name = path.to_str()?;
console_log(&format!("{}", file_name));
// read the file into a bufreader
let file = File::open(file_name).ok()?;
let mut reader = std::io::BufReader::new(file);
let mut content = String::new();
reader.read_to_string(&mut content).ok()?;
Some((Cow::Owned(file_name.to_owned()), content))
} else {
None
}
})
.collect::<HashMap<_, _>>();*/
Ok(ShaderManager {
shaders: HashMap::new(),
@@ -65,21 +90,23 @@ impl ShaderManager {
})
}
pub fn get(&mut self, gl: &WebGlContext, id: &ShaderId) -> Result<&Shader, Error> {
pub fn get(&mut self, gl: &WebGlContext, id: ShaderId) -> Result<&Shader, Error> {
let shader = match self.shaders.entry(id.clone()) {
Entry::Occupied(o) => o.into_mut(),
Entry::Vacant(v) => {
let ShaderId(vert_id, frag_id) = id;
let vert_src = self.src.get(vert_id).ok_or(Error::FileNotFound {
message: "Vert not found",
})?;
let frag_src = self.src.get(frag_id).ok_or(Error::FileNotFound {
message: "Frag not found",
})?;
let shader = Shader::new(gl, vert_src, frag_src).map_err(|err| Error::ShaderCompilingLinking {
message: err,
})?;
let &vert_src = self
.src
.get(vert_id)
.ok_or(Error::FileNotFound { message: vert_id })?;
let &frag_src = self
.src
.get(frag_id)
.ok_or(Error::FileNotFound { message: frag_id })?;
let shader = Shader::new(gl, vert_src, frag_src)
.map_err(|err| Error::ShaderCompilingLinking { message: err })?;
v.insert(shader)
}
};
@@ -87,60 +114,14 @@ impl ShaderManager {
Ok(shader)
}
}
use std::borrow::Cow;
/*use paste::paste;
macro_rules! define_shader_getter {
($renderer_type:ident, $shader_type:ident, $vert_key:tt, $frag_key:tt) => {
paste! {
pub fn [< get_ $renderer_type _shader_ $shader_type >]<'a>(
gl: &WebGlContext,
shaders: &'a mut ShaderManager
) -> &'a Shader {
shaders.get(
gl,
&ShaderId(
Cow::Borrowed($vert_key),
Cow::Borrowed($frag_key),
),
)
.unwrap_abort()
}
}
}
pub(crate) fn get_shader<'a>(
gl: &WebGlContext,
shaders: &'a mut ShaderManager,
vert: &'static str,
frag: &'static str,
) -> Result<&'a Shader, JsValue> {
shaders
.get(gl, ShaderId(vert, frag))
.map_err(|err| err.into())
}
/* Raytracer shaders */
define_shader_getter!(raytracer, color, "RayTracerVS", "RayTracerColorFS");
define_shader_getter!(raytracer, gray2colormap, "RayTracerVS", "RayTracerGrayscale2ColormapFS");
define_shader_getter!(raytracer, gray2color, "RayTracerVS", "RayTracerGrayscale2ColorFS");
define_shader_getter!(raytracer, gray2colormap_integer, "RayTracerVS", "RayTracerGrayscale2ColormapIntegerFS");
define_shader_getter!(raytracer, gray2color_integer, "RayTracerVS", "RayTracerGrayscale2ColorIntegerFS");
define_shader_getter!(raytracer, gray2colormap_unsigned, "RayTracerVS", "RayTracerGrayscale2ColormapUnsignedFS");
define_shader_getter!(raytracer, gray2color_unsigned, "RayTracerVS", "RayTracerGrayscale2ColorUnsignedFS");
/* Rasterizer shaders */
define_shader_getter!(raster, color, "RasterizerVS", "RasterizerColorFS");
define_shader_getter!(raster, gray2colormap, "RasterizerVS", "RasterizerGrayscale2ColormapFS");
define_shader_getter!(raster, gray2color, "RasterizerVS", "RasterizerGrayscale2ColorFS");
define_shader_getter!(raster, gray2colormap_integer, "RasterizerVS", "RasterizerGrayscale2ColormapIntegerFS");
define_shader_getter!(raster, gray2color_integer, "RasterizerVS", "RasterizerGrayscale2ColorIntegerFS");
define_shader_getter!(raster, gray2colormap_unsigned, "RasterizerVS", "RasterizerGrayscale2ColormapUnsignedFS");
define_shader_getter!(raster, gray2color_unsigned, "RasterizerVS", "RasterizerGrayscale2ColorUnsignedFS");
/* Pass shaders */
define_shader_getter!(pass, post, "PostVS", "PostFS");
/* Catalog shaders */
define_shader_getter!(catalog, ait, "CatalogAitoffVS", "CatalogFS");
define_shader_getter!(catalog, mol, "CatalogMollVS", "CatalogFS");
define_shader_getter!(catalog, arc, "CatalogArcVS", "CatalogFS");
define_shader_getter!(catalog, hpx, "CatalogHEALPixVS", "CatalogFS");
define_shader_getter!(catalog, mer, "CatalogMercatVS", "CatalogFS");
define_shader_getter!(catalog, ort, "CatalogOrthoVS", "CatalogOrthoFS");
define_shader_getter!(catalog, tan, "CatalogTanVS", "CatalogFS");*/
pub(crate) fn get_shader<'a>(gl: &WebGlContext, shaders: &'a mut ShaderManager, vert: &'static str, frag: &'static str) -> Result<&'a Shader, JsValue> {
shaders.get(
gl,
&ShaderId(Cow::Borrowed(vert), Cow::Borrowed(frag)),
).map_err(|err| err.into())
}
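
ShaderManager now keys shaders by &'static str file names and compiles lazily on first access through the HashMap entry API. The same cache-or-compile pattern, sketched with a String standing in for al_core's Shader and without the GL context:

// Minimal sketch of the lazy compile-and-cache pattern used by ShaderManager::get.
use std::collections::hash_map::Entry;
use std::collections::HashMap;

type ShaderId = (&'static str, &'static str);

struct Manager {
    shaders: HashMap<ShaderId, String>,
    src: HashMap<&'static str, &'static str>,
}

impl Manager {
    fn get(&mut self, id: ShaderId) -> Result<&String, String> {
        let shader = match self.shaders.entry(id) {
            Entry::Occupied(o) => o.into_mut(),
            Entry::Vacant(v) => {
                let (vert_id, frag_id) = id;
                let vert = self.src.get(vert_id).ok_or_else(|| format!("{} not found", vert_id))?;
                let frag = self.src.get(frag_id).ok_or_else(|| format!("{} not found", frag_id))?;
                // Stand-in for Shader::new(gl, vert_src, frag_src).
                v.insert(format!("compiled({} + {})", vert, frag))
            }
        };
        Ok(shader)
    }
}

fn main() {
    let mut m = Manager {
        shaders: HashMap::new(),
        src: HashMap::from([("line_base.vert", "vert src"), ("line_base.frag", "frag src")]),
    };
    assert!(m.get(("line_base.vert", "line_base.frag")).is_ok());
    // The second call hits the cache instead of recompiling.
    assert!(m.get(("line_base.vert", "line_base.frag")).is_ok());
    assert!(m.get(("missing.vert", "line_base.frag")).is_err());
}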

View File

@@ -2,7 +2,6 @@ use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use al_core::image::format::ChannelType;
use cgmath::Vector3;
@@ -134,7 +133,7 @@ pub struct ImageSurveyTextures {
size: usize,
pub textures: HashMap<HEALPixCell, Texture>,
//pub base_textures: [Texture; NUM_HPX_TILES_DEPTH_ZERO],
pub base_textures: [Texture; NUM_HPX_TILES_DEPTH_ZERO],
//pub cutoff_values_tile: Rc<RefCell<HashMap<HEALPixCell, (f32, f32)>>>,
// Array of 2D textures
@@ -169,15 +168,15 @@ fn create_texture_array<F: ImageFormat>(
impl ImageSurveyTextures {
pub fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<ImageSurveyTextures, JsValue> {
let size = config.num_textures();
let size = config.num_textures() - NUM_HPX_TILES_DEPTH_ZERO;
// Ensures there is at least space for the 12
// root textures
debug_assert!(size >= NUM_HPX_TILES_DEPTH_ZERO);
//debug_assert!(size >= NUM_HPX_TILES_DEPTH_ZERO);
let heap = HEALPixCellHeap::with_capacity(size);
let textures = HashMap::with_capacity(size);
let _now = Time::now();
/*let base_textures = [
let now = Time::now();
let base_textures = [
Texture::new(&HEALPixCell(0, 0), 0, now),
Texture::new(&HEALPixCell(0, 1), 1, now),
Texture::new(&HEALPixCell(0, 2), 2, now),
@@ -190,7 +189,7 @@ impl ImageSurveyTextures {
Texture::new(&HEALPixCell(0, 9), 9, now),
Texture::new(&HEALPixCell(0, 10), 10, now),
Texture::new(&HEALPixCell(0, 11), 11, now),
];*/
];
let channel = config.get_format().get_channel();
let texture_2d_array = match channel {
@@ -221,7 +220,7 @@ impl ImageSurveyTextures {
size,
//num_root_textures_available,
textures,
//base_textures,
base_textures,
//num_base_textures,
texture_2d_array,
available_tiles_during_frame,
@@ -252,8 +251,8 @@ impl ImageSurveyTextures {
ChannelType::R64F => create_texture_array::<R64F>(gl, &self.config)?,
};
let _now = Time::now();
/*self.base_textures = [
let now = Time::now();
self.base_textures = [
Texture::new(&HEALPixCell(0, 0), 0, now),
Texture::new(&HEALPixCell(0, 1), 1, now),
Texture::new(&HEALPixCell(0, 2), 2, now),
@@ -266,7 +265,7 @@ impl ImageSurveyTextures {
Texture::new(&HEALPixCell(0, 9), 9, now),
Texture::new(&HEALPixCell(0, 10), 10, now),
Texture::new(&HEALPixCell(0, 11), 11, now),
];*/
];
self.heap.clear();
self.textures.clear();
@@ -315,11 +314,12 @@ impl ImageSurveyTextures {
time_request: Time,
) -> Result<(), JsValue> {
if !self.contains_tile(cell) {
let dd = self.config.delta_depth();
// Get the texture cell in which the tile has to be
let tex_cell = cell.get_texture_cell(self.config.delta_depth());
let tex_cell = cell.get_texture_cell(dd);
let _tex_cell_is_root = tex_cell.is_root(self.config.delta_depth());
if !self.textures.contains_key(&tex_cell) {
let tex_cell_is_root = tex_cell.is_root(dd);
if !tex_cell_is_root && !self.textures.contains_key(&tex_cell) {
// The texture is not among the essential ones
// (i.e. is not a root texture)
let texture = if self.is_heap_full() {
@@ -346,6 +346,7 @@ impl ImageSurveyTextures {
// The heap buffer is not full, let's create a new
// texture with a unique idx
// The idx is computed based on the current size of the buffer
/*let idx = if tex_cell_is_root {
self.num_base_textures += 1;
tex_cell.idx() as usize
@@ -353,7 +354,8 @@ impl ImageSurveyTextures {
//NUM_HPX_TILES_DEPTH_ZERO + (self.heap.len() - self.num_base_textures)
self.heap.len()
};*/
let idx = self.heap.len();
//let idx = NUM_HPX_TILES_DEPTH_ZERO + (self.heap.len() - self.num_base_textures);
let idx = NUM_HPX_TILES_DEPTH_ZERO + self.heap.len();
Texture::new(&tex_cell, idx as i32, time_request)
};
@@ -370,14 +372,14 @@ impl ImageSurveyTextures {
// We can safely push it
// First get the texture
let texture = //if !tex_cell_is_root {
let texture = if !tex_cell_is_root {
self.textures
.get_mut(&tex_cell)
.expect("the cell has to be in the tile buffer");
/* } else {
.expect("the cell has to be in the tile buffer")
} else {
let HEALPixCell(_, idx) = tex_cell;
&mut self.base_textures[idx as usize]
};*/
};
let missing = image.is_none();
send_to_gpu(
@@ -445,22 +447,23 @@ impl ImageSurveyTextures {
// For that purpose, we first need to verify that its
// texture ancestor exists and then, whether it contains the tile
pub fn contains_tile(&self, cell: &HEALPixCell) -> bool {
let texture_cell = cell.get_texture_cell(self.config.delta_depth());
let dd = self.config.delta_depth();
let texture_cell = cell.get_texture_cell(dd);
//let tex_cell_is_root = texture_cell.is_root(self.config.delta_depth());
//if tex_cell_is_root {
// let HEALPixCell(_, idx) = texture_cell;
// self.base_textures[idx as usize].contains(cell)
//} else {
if let Some(texture) = self.get(&texture_cell) {
// The texture is present in the buffer
// We must check whether it contains the tile
texture.contains(cell)
let tex_cell_is_root = texture_cell.is_root(dd);
if tex_cell_is_root {
let HEALPixCell(_, idx) = texture_cell;
self.base_textures[idx as usize].contains(cell)
} else {
// The texture in which cell should be is not present
false
if let Some(texture) = self.get(&texture_cell) {
// The texture is present in the buffer
// We must check whether it contains the tile
texture.contains(cell)
} else {
// The texture in which cell should be is not present
false
}
}
//}
}
// Update the priority of the texture containing the tile
@@ -469,10 +472,11 @@ impl ImageSurveyTextures {
debug_assert!(self.contains_tile(cell));
// Get the texture cell in which the tile has to be
let texture_cell = cell.get_texture_cell(self.config.delta_depth());
//if texture_cell.is_root(self.config().delta_depth()) {
// return;
//}
let dd = self.config.delta_depth();
let texture_cell = cell.get_texture_cell(dd);
if texture_cell.is_root(dd) {
return;
}
let texture = self
.textures
@@ -552,33 +556,33 @@ impl ImageSurveyTextures {
/// Accessors
pub fn get(&self, texture_cell: &HEALPixCell) -> Option<&Texture> {
//if texture_cell.is_root(self.config().delta_depth()) {
// let HEALPixCell(_, idx) = texture_cell;
// Some(&self.base_textures[*idx as usize])
//} else {
self.textures.get(texture_cell)
//}
if texture_cell.is_root(self.config().delta_depth()) {
let HEALPixCell(_, idx) = texture_cell;
Some(&self.base_textures[*idx as usize])
} else {
self.textures.get(texture_cell)
}
}
// Get the nearest parent tile found in the CPU buffer
pub fn get_nearest_parent(&self, cell: &HEALPixCell) -> Option<HEALPixCell> {
let dd = self.config.delta_depth();
/*if cell.is_root(dd) {
if cell.is_root(dd) {
// Root cells are in the buffer by definition
*cell
} else {*/
let mut parent_cell = cell.parent();
while !self.contains(&parent_cell) && !parent_cell.is_root(dd) {
parent_cell = parent_cell.parent();
}
if self.contains(&parent_cell) {
Some(parent_cell)
Some(*cell)
} else {
None
let mut parent_cell = cell.parent();
while !self.contains(&parent_cell) && !parent_cell.is_root(dd) {
parent_cell = parent_cell.parent();
}
if self.contains(&parent_cell) {
Some(parent_cell)
} else {
None
}
}
//}
}
pub fn config(&self) -> &HiPSConfig {
@@ -680,14 +684,14 @@ impl SendUniforms for ImageSurveyTextures {
for idx in 0..NUM_HPX_TILES_DEPTH_ZERO {
let cell = HEALPixCell(0, idx as u64);
if let Some(texture) = self.get(&cell) {
let texture_uniforms = TextureUniforms::new(texture, idx as i32);
shader.attach_uniforms_from(&texture_uniforms);
} else {
let texture = self.get(&cell).unwrap();
let texture_uniforms = TextureUniforms::new(texture, idx as i32);
shader.attach_uniforms_from(&texture_uniforms);
/*else {
let texture = &Texture::new(&cell, idx as i32, Time::now());
let texture_uniforms = TextureUniforms::new(texture, idx as i32);
shader.attach_uniforms_from(&texture_uniforms);
}
}*/
}
//}
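
With this change the 12 depth-0 (root) textures live permanently in base_textures, while every other cached texture cell gets an index offset by NUM_HPX_TILES_DEPTH_ZERO. A minimal sketch of that slot policy (simplified: is_root here ignores the delta_depth handling of the real code):

// Sketch of the texture-slot policy in the diff above: root cells keep fixed
// slots 0..12, other cached cells get NUM_HPX_TILES_DEPTH_ZERO + buffer position.
use std::collections::HashMap;

const NUM_HPX_TILES_DEPTH_ZERO: usize = 12;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct HEALPixCell(u8, u64); // (depth, idx)

impl HEALPixCell {
    fn is_root(&self) -> bool {
        // Simplification: the real is_root also accounts for delta_depth.
        self.0 == 0
    }
}

struct TextureSlots {
    dynamic: HashMap<HEALPixCell, usize>,
}

impl TextureSlots {
    fn slot(&mut self, cell: HEALPixCell) -> usize {
        if cell.is_root() {
            // Root cells use their own index as the slot.
            cell.1 as usize
        } else {
            let next = NUM_HPX_TILES_DEPTH_ZERO + self.dynamic.len();
            *self.dynamic.entry(cell).or_insert(next)
        }
    }
}

fn main() {
    let mut slots = TextureSlots { dynamic: HashMap::new() };
    assert_eq!(slots.slot(HEALPixCell(0, 7)), 7);
    assert_eq!(slots.slot(HEALPixCell(3, 42)), 12);
    assert_eq!(slots.slot(HEALPixCell(3, 43)), 13);
    // Asking again returns the cached slot.
    assert_eq!(slots.slot(HEALPixCell(3, 42)), 12);
}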

View File

@@ -1,31 +0,0 @@
precision lowp float;
attribute vec2 offset;
attribute vec2 uv;
attribute vec3 center;
uniform float current_time;
uniform mat4 model;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
uniform vec2 kernel_size;
varying vec2 out_uv;
varying vec3 out_p;
@import ../hips/projection;
void main() {
vec3 p = vec3(inv_model * vec4(center, 1.0));
//p = check_inversed_longitude(p);
vec2 center_pos_clip_space = world2clip_aitoff(p);
vec2 pos_clip_space = center_pos_clip_space;
gl_Position = vec4((pos_clip_space / (ndc_to_clip * czf)) + offset * kernel_size , 0.0, 1.0);
out_uv = uv;
out_p = p;
}

View File

@@ -1,29 +0,0 @@
precision lowp float;
attribute vec2 offset;
attribute vec2 uv;
attribute vec3 center;
uniform float current_time;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
uniform vec2 kernel_size;
varying vec2 out_uv;
varying vec3 out_p;
@import ../hips/projection;
void main() {
vec3 p = vec3(inv_model * vec4(center, 1.0));
//p = check_inversed_longitude(p);
vec2 center_pos_clip_space = world2clip_arc(p);
vec2 pos_clip_space = center_pos_clip_space;
gl_Position = vec4((pos_clip_space / (ndc_to_clip * czf)) + offset * kernel_size , 0.0, 1.0);
out_uv = uv;
out_p = p;
}

View File

@@ -1,14 +0,0 @@
precision lowp float;
varying vec2 out_uv;
varying vec3 out_p;
uniform sampler2D kernel_texture;
uniform float fov;
uniform float strength;
void main() {
vec4 color = texture2D(kernel_texture, out_uv) / max(log2(fov*100.0), 1.0);
color.r *= strength;
gl_FragColor = color;
}

View File

@@ -1,30 +0,0 @@
precision lowp float;
attribute vec2 offset;
attribute in vec2 uv;
attribute in vec3 center;
uniform float current_time;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
uniform vec2 kernel_size;
varying vec2 out_uv;
varying vec3 out_p;
@import ../hips/projection;
void main() {
vec3 p = vec3(inv_model * vec4(center, 1.0));
//p = check_inversed_longitude(p);
vec2 center_pos_clip_space = world2clip_mercator(p);
vec2 pos_clip_space = center_pos_clip_space;
gl_Position = vec4((pos_clip_space / (ndc_to_clip * czf)) + offset * kernel_size , 0.0, 1.0);
out_uv = uv;
out_p = p;
}

View File

@@ -1,29 +0,0 @@
precision lowp float;
attribute vec2 offset;
attribute vec2 uv;
attribute vec3 center;
uniform float current_time;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
uniform vec2 kernel_size;
out vec2 out_uv;
out vec3 out_p;
@import ../hips/projection;
void main() {
vec3 p = vec3(inv_model * vec4(center, 1.0));
//p = check_inversed_longitude(p);
vec2 center_pos_clip_space = world2clip_mollweide(p);
vec2 pos_clip_space = center_pos_clip_space;
gl_Position = vec4((pos_clip_space / (ndc_to_clip * czf)) + offset * kernel_size , 0.0, 1.0);
out_uv = uv;
out_p = p;
}

View File

@@ -1,18 +0,0 @@
precision lowp float;
varying vec2 out_uv;
varying vec3 out_p;
uniform sampler2D kernel_texture;
uniform float fov;
uniform float strength;
void main() {
if (out_p.z < 0.0) {
discard;
}
vec4 color = texture2D(kernel_texture, out_uv).rgba / max(log2(fov*100.0), 1.0);
color.r *= strength;
gl_FragColor = color;
}

View File

@@ -1,29 +0,0 @@
precision lowp float;
attribute vec3 center;
attribute vec2 offset;
attribute vec2 uv;
uniform float current_time;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
uniform vec2 kernel_size;
varying vec2 out_uv;
varying vec3 out_p;
@import ../hips/projection;
void main() {
vec3 p = vec3(inv_model * vec4(center, 1.0));
//p = check_inversed_longitude(p);
vec2 center_pos_clip_space = world2clip_orthographic(p);
vec2 pos_clip_space = center_pos_clip_space;
gl_Position = vec4((pos_clip_space / (ndc_to_clip * czf)) + offset * kernel_size , 0.0, 1.0);
out_uv = uv;
out_p = p;
}

View File

@@ -1,31 +0,0 @@
precision lowp float;
attribute vec2 offset;
attribute vec2 uv;
attribute vec3 center;
attribute vec2 center_lonlat;
uniform float current_time;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
uniform vec2 kernel_size;
varying vec2 out_uv;
varying vec3 out_p;
@import ../hips/projection;
void main() {
vec3 p = vec3(inv_model * vec4(center, 1.0));
//p = check_inversed_longitude(p);
vec2 center_pos_clip_space = world2clip_gnomonic(p);
vec2 pos_clip_space = center_pos_clip_space;
gl_Position = vec4((pos_clip_space / (ndc_to_clip * czf)) + offset * kernel_size , 0.0, 1.0);
out_uv = uv;
out_p = p;
}

View File

@@ -1,20 +0,0 @@
precision lowp float;
precision lowp sampler2D;
varying vec2 out_uv;
uniform sampler2D texture_fbo;
uniform float alpha;
@import ./colormap;
void main() {
float opacity = texture2D(texture_fbo, out_uv).r;
float o = smoothstep(0.0, 0.1, opacity);
vec4 color = colormap_f(opacity);
color.a = o * alpha;
gl_FragColor = color;
}

View File

@@ -1,12 +0,0 @@
uniform sampler2D colormaps;
uniform float num_colormaps;
uniform float colormap_id;
// can be either 0 or 1
uniform float reversed;
vec4 colormap_f(float x) {
x = mix(x, 1.0 - x, reversed);
float id = (colormap_id + 0.5) / num_colormaps;
return texture2D(colormaps, vec2(x, id));
}

View File

@@ -1,12 +0,0 @@
precision lowp float;
precision lowp sampler2D;
attribute vec2 position;
attribute vec2 uv;
varying vec2 out_uv;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
out_uv = uv;
}

View File

@@ -1,7 +0,0 @@
precision lowp float;
uniform vec4 color;
void main() {
gl_FragColor = color;
}

View File

@@ -1,7 +0,0 @@
precision lowp float;
attribute vec2 ndc_pos;
void main() {
gl_Position = vec4(ndc_pos, 0.0, 1.0);
}

View File

@@ -1,82 +0,0 @@
//const int MAX_NUM_TEX = 3;
uniform sampler2D tex1;
uniform sampler2D tex2;
uniform sampler2D tex3;
uniform int num_tex;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float size_tile_uv;
uniform int tex_storing_fits;
@import ../colormaps/colormap;
@import ./transfer_funcs;
vec4 get_pixels(vec3 uv) {
int idx_texture = int(uv.z);
if (idx_texture == 0) {
return texture2D(tex1, uv.xy);
} else if (idx_texture == 1) {
return texture2D(tex2, uv.xy);
} else if (idx_texture == 2) {
return texture2D(tex3, uv.xy);
} else {
return vec4(0.0, 1.0, 1.0, 1.0);
}
}
vec3 reverse_uv(vec3 uv) {
uv.y = size_tile_uv + 2.0*size_tile_uv*floor(uv.y / size_tile_uv) - uv.y;
return uv;
}
vec4 get_color_from_texture(vec3 UV) {
return get_pixels(UV);
}
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
vec3 uv = UV;
// FITS data pixels are reversed along the y axis
if (tex_storing_fits == 1) {
uv = reverse_uv(uv);
}
float x = get_pixels(uv).r;
//if (x == blank) {
// return blank_color;
//} else {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
return mix(colormap_f(alpha), vec4(0.0), float(alpha == 0.0));
//}
}
uniform vec4 C;
uniform float K;
vec4 get_color_from_grayscale_texture(vec3 UV) {
vec3 uv = UV;
// FITS data pixels are reversed along the y axis
if (tex_storing_fits == 1) {
uv = reverse_uv(uv);
}
float x = get_pixels(uv).r;
//if (x == blank) {
// return blank_color;
//} else {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
return mix(vec4(C.rgb * K * alpha, C.a), vec4(0.0), float(alpha == 0.0));
//}
}

View File

@@ -1,158 +0,0 @@
const float PI = 3.1415926535897932384626433832795;
//uniform int inversed_longitude;
const mat4 GAL2J2000 = mat4(
-0.4448296299195045,
0.7469822444763707,
0.4941094279435681,
0.0,
-0.1980763734646737,
0.4559837762325372,
-0.8676661489811610,
0.0,
-0.873437090247923,
-0.4838350155267381,
-0.0548755604024359,
0.0,
0.0,
0.0,
0.0,
1.0
);
const mat4 J20002GAL = mat4(
-0.4448296299195045,
-0.1980763734646737,
-0.873437090247923,
0.0,
0.7469822444763707,
0.4559837762325372,
-0.4838350155267381,
0.0,
0.4941094279435681,
-0.8676661489811610,
-0.0548755604024359,
0.0,
0.0,
0.0,
0.0,
1.0
);
vec2 world2clip_orthographic(vec3 p) {
return vec2(p.x, p.y);
}
vec2 world2clip_aitoff(vec3 p) {
float delta = asin(p.y);
float theta = atan(p.x, p.z);
float theta_by_two = theta * 0.5;
float alpha = acos(cos(delta)*cos(theta_by_two));
float inv_sinc_alpha = 1.0;
if (alpha > 1e-3) {
inv_sinc_alpha = alpha / sin(alpha);
}
// The minus is an astronomical convention.
// longitudes are increasing from right to left
float x = 2.0 * inv_sinc_alpha * cos(delta) * sin(theta_by_two);
float y = inv_sinc_alpha * sin(delta);
return vec2(x / PI, y / PI);
}
const int max_iter = 10;
vec2 world2clip_mollweide(vec3 p) {
// X in [-1, 1]
// Y in [-1/2; 1/2] and scaled by the screen width/height ratio
float delta = asin(p.y);
float theta = atan(p.x, p.z);
float cst = PI * sin(delta);
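// Newton-Raphson: solve f(phi) = phi + sin(phi) - PI*sin(delta) = 0,
// with f'(phi) = 1 + cos(phi); the auxiliary angle used below is phi/2.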
float phi = delta;
float f = phi + sin(phi) - cst;
int k = 0;
for (int k = 0; k < max_iter; k++) {
phi = phi - f / (1.0 + cos(phi));
f = phi + sin(phi) - cst;
if (abs(f) <= 1e-6) {
break;
}
}
phi = phi * 0.5;
// Astronomical convention: longitudes increase from right to left.
float x = (theta / PI) * cos(phi);
float y = 0.5 * sin(phi);
return vec2(x, y);
}
float asinh(float x) {
return log(x + sqrt(x*x + 1.0));
}
vec2 world2clip_mercator(vec3 p) {
// X in [-1, 1]
// Y in [-1/2; 1/2] and scaled by the screen width/height ratio
float delta = asin(p.y);
float theta = atan(p.x, p.z);
float x = theta / PI;
float y = asinh(tan(delta / PI));
return vec2(x, y);
}
float arc_sinc(float x) {
if (x > 1e-4) {
return asin(x) / x;
} else {
// If x is small, use the Taylor expansion of asin(x) / x:
// asin(x)/x ~ 1 + x^2/6 + 3*x^4/40 (for x = 1e-4, x^4 = 1e-16).
float x2 = x*x;
return 1.0 + x2 * (1.0 + x2 * 9.0 / 20.0) / 6.0;
}
}
vec2 world2clip_arc(vec3 p) {
if (p.z > -1.0) {
// Distance in the Euclidean plane (xy)
// The angular distance is acos(p.z), but for small separations
// asin(r) is more accurate.
float r = length(p.xy);
if (p.z > 0.0) { // Angular distance < PI/2, angular distance = asin(r)
r = arc_sinc(r);
} else { // Angular distance > PI/2, angular distance = acos(x)
r = acos(p.z) / r;
}
float x = p.x * r;
float y = p.y * r;
return vec2(x / PI, y / PI);
} else {
return vec2(1.0, 0.0);
}
}
vec2 world2clip_gnomonic(vec3 p) {
if (p.z <= 1e-2) { // Back hemisphere (p.z <= 0), plus a margin: the projection diverges as p.z approaches 0
return vec2(1.0, 0.0);
} else {
return vec2((p.x/p.z) / PI , (p.y/p.z) / PI);
}
}


@@ -1,23 +0,0 @@
precision mediump float;
precision mediump sampler2D;
precision mediump int;
varying vec3 frag_uv_start;
varying vec3 frag_uv_end;
varying float frag_blending_factor;
varying float m_start;
varying float m_end;
uniform float opacity;
@import ../color;
void main() {
vec4 color_start = get_color_from_texture(frag_uv_start);
vec4 color_end = get_color_from_texture(frag_uv_end);
vec4 out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = opacity * out_frag_color.a;
gl_FragColor = out_frag_color;
}


@@ -1,29 +0,0 @@
precision mediump float;
precision mediump sampler2D;
precision mediump int;
varying vec3 frag_uv_start;
varying vec3 frag_uv_end;
varying float frag_blending_factor;
varying float m_start;
varying float m_end;
@import ../color;
uniform float opacity;
vec4 get_color(vec3 uv, float empty) {
vec4 c = get_colormap_from_grayscale_texture(uv);
vec4 color = mix(c, vec4(0.0), empty);
return color;
}
void main() {
vec4 color_start = get_color(frag_uv_start, m_start);
vec4 color_end = get_color(frag_uv_end, m_end);
vec4 out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;
gl_FragColor = out_frag_color;
}


@@ -1,41 +0,0 @@
precision mediump float;
precision mediump int;
attribute vec3 position;
attribute vec3 uv_start;
attribute vec3 uv_end;
attribute float time_tile_received;
attribute float m0;
attribute float m1;
varying vec3 frag_uv_start;
varying vec3 frag_uv_end;
varying float frag_blending_factor;
varying float m_start;
varying float m_end;
uniform float czf;
uniform mat4 inv_model;
uniform vec2 ndc_to_clip;
// current time in ms
uniform float current_time;
@import ../projection;
void main() {
vec3 world_pos = vec3(inv_model * vec4(position, 1.0));
//world_pos = check_inversed_longitude(world_pos);
vec2 ndc_pos = world2clip_aitoff(world_pos) / (ndc_to_clip * czf);
gl_Position = vec4(ndc_pos, 0.0, 1.0);
frag_uv_start = uv_start;
frag_uv_end = uv_end;
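// Newly received tiles fade in over 200 ms: the fragment shader mixes the color
// sampled at uv_start with the one sampled at uv_end using this factor.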
frag_blending_factor = min((current_time - time_tile_received) / 200.0, 1.0);
m_start = m0;
m_end = m1;
}


@@ -1,106 +0,0 @@
precision highp float;
precision highp sampler2D;
precision highp int;
varying vec3 out_vert_pos;
varying vec2 out_clip_pos;
struct Tile {
int uniq; // Healpix cell
int texture_idx; // Index in the texture buffer
float start_time; // Absolute time that the load has been done in ms
float empty;
};
uniform Tile textures_tiles[192];
uniform int num_tiles;
uniform float current_time; // current time in ms
@import ../color;
@import ./healpix;
uniform float opacity;
Tile get_tile(int idx) {
for(int i = 0; i < 12; i++) {
if( i == idx ) {
return textures_tiles[i];
}
}
}
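// Binary search over the 12 base-cell tiles, which are expected to be sorted by uniq.
// The fixed iteration count (ceil(log2(12)) = 4, so 6 passes are more than enough) and
// the get_tile() helper are likely there because GLSL ES 1.0 fragment shaders only
// guarantee constant loop bounds and constant-index array access.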
Tile binary_search_tile(int uniq) {
int l = 0;
int r = 11;
for (int v = 0; v <= 5; v++) {
int mid = (l + r) / 2;
Tile tile = get_tile(mid);
if(tile.uniq == uniq) {
return tile;
} else if(tile.uniq < uniq) {
l = mid + 1;
} else {
r = mid - 1;
}
// before exiting the loop
if (l >= r) {
return get_tile(l);
}
}
}
vec4 get_tile_color(vec3 pos) {
float delta = asin(pos.y);
float theta = atan(pos.x, pos.z);
HashDxDy result = hash_with_dxdy(vec2(theta, delta));
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
//return vec4(uv, 1.0, 1.0);
int uniq = 16 + idx;
Tile tile = binary_search_tile(uniq);
int idx_texture = tile.texture_idx / 64;
int off = tile.texture_idx - idx_texture * 64;
int idx_row = off / 8; // in [0; 7]
int idx_col = off - idx_row * 8; // in [0; 7]
vec2 offset = (vec2(idx_col, idx_row) + uv) * 0.125;
vec3 UV = vec3(offset.x, offset.y, 0.0);
vec4 color = get_color_from_texture(UV);
color.a *= (1.0 - float(tile.empty));
return color;
}
uniform sampler2D position_tex;
uniform mat4 model;
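// Raytracing approach: position_tex stores, for every screen pixel, the corresponding
// direction on the unit sphere (the unprojected clip-space position); the model matrix
// then rotates that direction before the HEALPix cell lookup.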
void main() {
vec2 uv = out_clip_pos * 0.5 + 0.5;
vec3 n = texture2D(position_tex, uv).rgb;
/* Taylor expansion (approximation, disabled):
float x = out_clip_pos.x;
float y = out_clip_pos.y;
float x2 = x*x;
float y2 = y*y;
float x4 = x2*x2;
float y4 = y2*y2;
n = vec3(
-x,
y,
-0.5*x2 - 0.5*y2 + 1.0
);
*/
vec3 frag_pos = vec3(model * vec4(n, 1.0));
// Get the HEALPix cell idx and the uv in the texture
vec4 c = get_tile_color(frag_pos);
gl_FragColor = vec4(c.rgb, c.a * opacity);
}


@@ -1,92 +0,0 @@
precision mediump float;
precision mediump sampler2D;
precision mediump int;
varying vec3 out_vert_pos;
varying vec2 out_clip_pos;
struct Tile {
int uniq; // Healpix cell
int texture_idx; // Index in the texture buffer
float start_time; // Absolute time that the load has been done in ms
float empty;
};
uniform Tile textures_tiles[192];
uniform int num_tiles;
uniform float current_time; // current time in ms
@import ../color;
@import ./healpix;
uniform float opacity;
Tile get_tile(int idx) {
for(int i = 0; i < 12; i++) {
if( i == idx ) {
return textures_tiles[i];
}
}
}
Tile binary_search_tile(int uniq) {
int l = 0;
int r = 11;
for (int v = 0; v <= 5; v++) {
int mid = (l + r) / 2;
Tile tile = get_tile(mid);
if(tile.uniq == uniq) {
return tile;
} else if(tile.uniq < uniq) {
l = mid + 1;
} else {
r = mid - 1;
}
// before exiting the loop
if (l >= r) {
return get_tile(l);
}
}
}
vec4 get_tile_color(vec3 pos) {
float delta = asin(pos.y);
float theta = atan(pos.x, pos.z);
HashDxDy result = hash_with_dxdy(vec2(theta, delta));
int idx = result.idx;
vec2 uv = vec2(clamp(result.dy, 0.0, 1.0), result.dx);
int uniq = 16 + idx;
Tile tile = binary_search_tile(uniq);
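// Tiles are packed into an 8x8 atlas (64 slots per texture): texture_idx / 64 selects
// the texture, the remainder gives the slot, and each slot spans 1/8 of the atlas in u and v.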
int idx_texture = tile.texture_idx / 64;
int off = tile.texture_idx - idx_texture * 64;
int idx_row = off / 8; // in [0; 7]
int idx_col = off - idx_row * 8; // in [0; 7]
vec2 offset = (vec2(float(idx_col), float(idx_row)) + uv)*0.125;
vec3 UV = vec3(offset, float(idx_texture));
vec4 c = get_colormap_from_grayscale_texture(UV);
// handle empty tiles
vec4 color = mix(c, vec4(0.0), tile.empty);
return color;
}
uniform sampler2D position_tex;
uniform mat4 model;
void main() {
vec2 uv = out_clip_pos * 0.5 + 0.5;
vec3 n = texture2D(position_tex, uv).rgb;
vec3 frag_pos = vec3(model * vec4(n, 1.0));
vec4 c = get_tile_color(frag_pos);
c.a = c.a * opacity;
gl_FragColor = c;
}


@@ -1,20 +0,0 @@
const float TWICE_PI = 6.28318530718;
const float PI = 3.141592653589793;
struct HashDxDy {
int idx;
float dx;
float dy;
};
uniform sampler2D u_ang2pixd;
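// u_ang2pixd is a lookup texture covering the whole sphere in (lon, lat): its RGB
// channels store the HEALPix base-cell index and the (dx, dy) offsets inside that
// cell, so the ang2pix computation reduces to a single texture fetch.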
HashDxDy hash_with_dxdy(vec2 radec) {
vec2 uv = vec2(radec.x/TWICE_PI, radec.y/PI) + 0.5;
vec3 v = texture2D(u_ang2pixd, uv).rgb;
return HashDxDy(
int(v.x),
v.y,
v.z
);
}


@@ -1,13 +0,0 @@
precision highp float;
precision highp int;
attribute vec2 pos_clip_space;
varying vec2 out_clip_pos;
uniform vec2 ndc_to_clip;
uniform float czf;
void main() {
gl_Position = vec4(pos_clip_space / (ndc_to_clip * czf), 0.0, 1.0);
out_clip_pos = pos_clip_space;
}


@@ -1,41 +0,0 @@
float linear_f(float x, float min_value, float max_value) {
return clamp((x - min_value)/(max_value - min_value), 0.0, 1.0);
}
float sqrt_f(float x, float min_value, float max_value) {
float a = linear_f(x, min_value, max_value);
return sqrt(a);
}
float log_f(float x, float min_value, float max_value) {
float y = linear_f(x, min_value, max_value);
float a = 1000.0;
return log(a*y + 1.0)/log(a);
}
float asinh(float x) {
return log(x + sqrt(x*x + 1.0));
}
float asinh_f(float x, float min_value, float max_value) {
float d = linear_f(x, min_value, max_value);
return asinh(10.0*d)/3.0;
}
float pow2_f(float x, float min_value, float max_value) {
float d = linear_f(x, min_value, max_value);
return d*d;
}
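// H selects the transfer function applied to the normalized pixel value:
// 0 = linear, 1 = sqrt, 2 = log, 3 = asinh, anything else = squared.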
float transfer_func(int H, float x, float min_value, float max_value) {
if (H == 0) {
return linear_f(x, min_value, max_value);
} else if (H == 1) {
return sqrt_f(x, min_value, max_value);
} else if (H == 2) {
return log_f(x, min_value, max_value);
} else if (H == 3) {
return asinh_f(x, min_value, max_value);
} else {
return pow2_f(x, min_value, max_value);
}
}


@@ -1,8 +0,0 @@
precision highp float;
varying vec4 v_rgba;
void main() {
// Multiply vertex color with texture color (in linear space).
// Linear color is written and blended in Framebuffer and converted to sRGB later
gl_FragColor = v_rgba;
}


@@ -1,17 +0,0 @@
precision highp float;
attribute vec2 pos;
uniform vec2 u_screen_size;
uniform vec4 u_color;
varying vec4 v_rgba;
void main() {
gl_Position = vec4(
pos,
0.0,
1.0
);
v_rgba = u_color;
}


@@ -1,16 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
uniform vec4 text_color;
uniform sampler2DArray font_textures;
in vec3 out_uv;
out vec4 color;
void main() {
vec3 uv = vec3(out_uv.x, 1.f - out_uv.y, out_uv.z);
vec4 mask = texture(font_textures, uv);
color = text_color * mask;
//color = vec4(1.0, 0.0, 0.0, 1.0);
}


@@ -1,32 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
layout (location = 0) in vec2 pos;
layout (location = 1) in vec2 uv;
// Per instance attributes
layout (location = 2) in vec2 center_letter;
layout (location = 3) in vec2 size_letter;
layout (location = 4) in vec2 pos_uv;
layout (location = 5) in vec2 size_uv;
layout (location = 6) in float idx_page;
out vec3 out_uv;
uniform vec2 window_size;
uniform float scaling;
vec2 screen_to_ndc(vec2 p) {
// Change of origin
vec2 origin = p - window_size/2.0;
// Scale to fit in [-1, 1]
return vec2(2.0 * (origin.x/window_size.x), -2.0 * (origin.y/window_size.y));
}
void main() {
vec2 ndc_pos = screen_to_ndc(center_letter + pos*32.0);
gl_Position = vec4(ndc_pos, 0.f, 1.f);
out_uv = vec3(uv, idx_page);
}


@@ -1,25 +0,0 @@
precision mediump float;
varying vec2 v_tc;
varying vec4 color;
uniform sampler2D fbo_tex;
// 0-255 sRGB from 0-1 linear
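// Piecewise sRGB encoding scaled to 0-255:
// 3294.6 = 255 * 12.92, 269.025 = 255 * 1.055, 14.025 = 255 * 0.055.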
vec3 srgb_from_linear(vec3 rgb) {
bvec3 cutoff = lessThan(rgb, vec3(0.0031308));
vec3 lower = rgb * vec3(3294.6);
vec3 higher = vec3(269.025) * pow(rgb, vec3(1.0 / 2.4)) - vec3(14.025);
return mix(higher, lower, vec3(cutoff));
}
// 0-255 sRGBA from 0-1 linear
vec4 srgba_from_linear(vec4 rgba) {
return vec4(srgb_from_linear(rgba.rgb), 255.0 * rgba.a);
}
void main() {
gl_FragColor = texture2D(fbo_tex, v_tc);
//color = srgba_from_linear(color) / 255.;
}


@@ -1,9 +0,0 @@
precision mediump float;
attribute vec2 a_pos;
varying vec2 v_tc;
void main() {
gl_Position = vec4(a_pos * 2. - 1., 0.0, 1.0);
v_tc = a_pos;
}


@@ -1,17 +0,0 @@
precision highp float;
varying vec4 v_rgba;
varying vec2 v_tc;
varying vec4 color;
uniform sampler2D u_sampler_font;
void main() {
// The texture is set up with `SRGB8_ALPHA8`, so no need to decode here!
float alpha = texture2D(u_sampler_font, v_tc).r;
// Multiply vertex color with texture color (in linear space).
// Linear color is written and blended in Framebuffer and converted to sRGB later
gl_FragColor = v_rgba * alpha;
gl_FragColor.a = alpha;
}


@@ -1,24 +0,0 @@
attribute vec2 pos;
attribute vec2 tx;
varying vec4 v_rgba;
varying vec2 v_tc;
uniform vec2 u_screen_size;
uniform vec4 u_color;
uniform vec2 u_screen_pos;
uniform mat2 u_rot;
uniform float u_scale;
uniform float u_dpi;
void main() {
vec2 p = u_rot * u_scale * u_dpi * pos;
vec2 ndc_pos = vec2(
2.0 * (p.x + u_screen_pos.x * u_dpi) / u_screen_size.x - 1.0,
1.0 - 2.0 * (p.y + u_screen_pos.y * u_dpi) / u_screen_size.y
);
gl_Position = vec4(ndc_pos, 0.0, 1.0);
v_rgba = u_color;
v_tc = tx;
}

Some files were not shown because too many files have changed in this diff.