Compare commits

..

2 Commits

Author            SHA1        Message                    Date
Matthieu Baumann  ac4af8fb18  fix Circle::intersectBbox  2025-08-29 11:54:31 +02:00
Erik Mellegard    279f93c4ba  Fix Circle intersectBBox   2025-06-13 10:14:15 +02:00
108 changed files with 2882 additions and 2678 deletions

View File

@@ -16,7 +16,6 @@
aladin.displayFITS(
//'https://fits.gsfc.nasa.gov/samples/FOCx38i0101t_c0f.fits', // url of the fits file
'data/fits/panstarrs-g-m61.fits',
//'https://almascience.eso.org/dataPortal/member.uid___A001_X88f_X297.calibrated_final_cont_Sgr_B1off.pbcor.fits',
{
name: 'm61',
colormap: 'viridis'

View File

@@ -8,7 +8,7 @@
import A from '../src/js/A.js';
A.init.then(() => {
let aladin = A.aladin('#aladin-lite-div', {fov: 30, target: "280 +0", projection: "AIT", showShareControl:true, showSettingsControl: true, showContextMenu:true});
aladin.setOverlayImageLayer(A.image(
"https://www.virtualastronomy.org/files/avm_examples/spitzer/ssc2005-24a1.jpg",
{

View File

@@ -14,7 +14,7 @@
{
name: "M61",
wcs: {
NAXIS: 2, // Minimal header
NAXIS: 0, // Minimal header
CTYPE1: 'RA---TAN', // TAN (gnomic) projection
CTYPE2: 'DEC--TAN', // TAN (gnomic) projection
EQUINOX: 2000.0, // Equatorial coordinates definition (yr)

View File

@@ -26,7 +26,8 @@ wasm-bindgen = "=0.2.92"
wasm-streams = "0.3.0"
async-channel = "1.8.0"
mapproj = "0.3.0"
fitsrs = "0.3.4"
fitsrs = "0.2.11"
wcs = "0.3.1"
colorgrad = "0.6.2"
[features]
@@ -50,7 +51,7 @@ version = "0.7.3"
[dependencies.moclib]
package = "moc"
version = "0.18.0"
version = "0.17.0"
[dependencies.serde]
version = "^1.0.183"

View File

@@ -92,7 +92,7 @@ impl fmt::Display for BlendFactor {
BlendFactor::OneMinusSrcAlpha => "OneMinusSrcAlpha",
BlendFactor::OneMinusConstantColor => "OneMinusConstantColor",
};
write!(f, "{str}")
write!(f, "{}", str)
}
}
impl fmt::Display for BlendFunc {
@@ -111,6 +111,6 @@ impl fmt::Display for BlendFunc {
#[cfg(feature = "webgl2")]
BlendFunc::Max => "Max",*/
};
write!(f, "{str}")
write!(f, "{}", str)
}
}
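
Both spellings that appear in this hunk are equivalent: since Rust 1.58 a format string can capture a local identifier directly, so write!(f, "{str}") and write!(f, "{}", str) produce the same output. A minimal standalone sketch of the captured-identifier form (the Factor type here is illustrative, not part of the crate):

    use std::fmt;

    struct Factor(&'static str);

    impl fmt::Display for Factor {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            let s = self.0;
            // Captured-identifier form (Rust 1.58+); write!(f, "{}", s) is identical.
            write!(f, "{s}")
        }
    }

    fn main() {
        assert_eq!(Factor("OneMinusSrcAlpha").to_string(), "OneMinusSrcAlpha");
    }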

View File

@@ -48,21 +48,14 @@ pub struct HiPSProperties {
hips_initial_fov: Option<f64>,
hips_initial_ra: Option<f64>,
hips_initial_dec: Option<f64>,
// HiPS cube
hips_cube_depth: Option<u32>,
// HiPS 3D keywords
hips_order_freq: Option<u8>,
hips_tile_depth: Option<u8>,
// Parametrable by the user
#[allow(unused)]
min_cutout: Option<f32>,
#[allow(unused)]
max_cutout: Option<f32>,
dataproduct_type: Option<DataproductType>,
creator_did: String,
request_credentials: String,
@@ -70,20 +63,6 @@ pub struct HiPSProperties {
}
impl HiPSProperties {
#[inline(always)]
pub fn get_hips_order_freq(&self) -> Option<u8> {
self.hips_order_freq
}
#[inline(always)]
pub fn get_hips_tile_depth(&self) -> Option<u8> {
self.hips_tile_depth
}
#[inline(always)]
pub fn get_dataproduct_type(&self) -> Option<DataproductType> {
self.dataproduct_type
}
#[inline(always)]
pub fn get_url(&self) -> &str {
&self.url
@@ -170,15 +149,6 @@ pub enum ImageExt {
Webp,
}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[wasm_bindgen]
#[serde(rename_all = "camelCase")]
pub enum DataproductType {
SpectralCube,
Image,
Cube,
}
impl std::fmt::Display for ImageExt {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
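
The DataproductType enum removed in this hunk relies on serde's camelCase renaming, so a JSON string such as "spectralCube" maps onto the SpectralCube variant. A minimal sketch of that round trip, dropping the wasm_bindgen attribute and using serde_json purely for illustration:

    use serde::Deserialize;

    #[derive(Deserialize, Debug, PartialEq)]
    #[serde(rename_all = "camelCase")]
    enum DataproductType {
        SpectralCube,
        Image,
        Cube,
    }

    fn main() {
        // With rename_all = "camelCase" the variant names are matched in camelCase.
        let v: DataproductType = serde_json::from_str("\"spectralCube\"").unwrap();
        assert_eq!(v, DataproductType::SpectralCube);
    }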

View File

@@ -9,7 +9,7 @@ js-sys = "0.3.47"
cgmath = "*"
jpeg-decoder = "0.3.0"
png = "0.17.6"
fitsrs = "0.3.4"
fitsrs = "0.2.10"
al-api = { path = "../al-api" }
serde = { version = "^1.0.59", features = ["derive"] }
serde_json = "1.0"

View File

@@ -2,11 +2,11 @@ use std::collections::HashMap;
use colorgrad::Color;
use crate::image::format;
use crate::shader::SendUniformsWithParams;
use crate::Texture2D;
use crate::WebGlContext;
use crate::texture::format::RGBA8U;
use crate::webgl_ctx::WebGlRenderingCtx;
use wasm_bindgen::JsValue;
@@ -68,7 +68,7 @@ fn build_cmaps_texture(gl: &WebGlContext, cmaps: &[Colormap]) -> Result<Texture2
),
];
Texture2D::create_from_raw_pixels::<RGBA8U>(
Texture2D::create_from_raw_pixels::<format::RGBA8U>(
gl,
WIDTH_CMAP_TEX as i32,
cmaps.len() as i32,
@@ -134,14 +134,14 @@ impl Colormaps {
Colormap::new("grayscale", {
colorgrad::CustomGradient::new()
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("inferno", colorgrad::inferno()),
Colormap::new("magma", colorgrad::magma()),
Colormap::new("native", {
colorgrad::CustomGradient::new()
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("parula", {
colorgrad::CustomGradient::new()
@@ -155,7 +155,7 @@ impl Colormaps {
Color::from_rgba8(249, 250, 20, 255),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("plasma", colorgrad::plasma()),
Colormap::new("rainbow", {
@@ -173,7 +173,7 @@ impl Colormaps {
Color::from_rgba8(255, 0, 0, 255),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("rdbu", colorgrad::rd_bu()),
Colormap::new("rdylbu", colorgrad::rd_yl_bu()),
@@ -186,7 +186,7 @@ impl Colormaps {
Color::new(1.0, 1.0, 1.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("sinebow", colorgrad::sinebow()),
Colormap::new("spectral", colorgrad::spectral()),
@@ -201,7 +201,7 @@ impl Colormaps {
Color::new(1.0, 0.0, 0.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("green", {
colorgrad::CustomGradient::new()
@@ -210,7 +210,7 @@ impl Colormaps {
Color::new(0.0, 1.0, 0.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("blue", {
colorgrad::CustomGradient::new()
@@ -219,7 +219,7 @@ impl Colormaps {
Color::new(0.0, 0.0, 1.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
];
@@ -246,7 +246,8 @@ impl Colormaps {
&self.cmaps[id as usize]
} else {
crate::log::console_warn(format!(
"{label:?} is not a valid colormap, replaced with 'grayscale'.",
"{:?} is not a valid colormap, replaced with 'grayscale'.",
label
));
let id_greys = self.get_id("grayscale").unwrap_abort();
&self.cmaps[*id_greys as usize]
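
Every custom entry above follows the same colorgrad pattern: collect color stops, build the gradient, and map a build failure into a JsValue. A standalone sketch against colorgrad 0.6 (the version pinned in this compare) that builds a two-stop gradient and samples it, roughly the way a colormap texture row would be filled:

    use colorgrad::{Color, CustomGradient};

    fn main() {
        // Two color stops, black to white, comparable to the "grayscale" entry above.
        let grad = CustomGradient::new()
            .colors(&[
                Color::from_rgba8(0, 0, 0, 255),
                Color::from_rgba8(255, 255, 255, 255),
            ])
            .build()
            .expect("valid gradient");

        // Sample a handful of positions in [0, 1]; each sample is an RGBA color.
        for i in 0..4 {
            let t = i as f64 / 3.0;
            println!("t = {:.2} -> {:?}", t, grad.at(t).to_rgba8());
        }
    }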

View File

@@ -6,11 +6,11 @@ pub struct Bitmap<F> {
format: std::marker::PhantomData<F>,
}
use crate::image::format::ImageFormat;
use crate::image::Image;
use crate::texture::format::TextureFormat;
impl<F> Bitmap<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
pub fn new(image: web_sys::ImageBitmap) -> Self {
Self {
@@ -23,7 +23,7 @@ use crate::texture::Tex3D;
use wasm_bindgen::JsValue;
impl<F> Image for Bitmap<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,

View File

@@ -7,7 +7,7 @@ pub struct Canvas<F> {
impl<F> Canvas<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
pub fn new(canvas: web_sys::HtmlCanvasElement) -> Self {
Self {
@@ -17,14 +17,14 @@ where
}
}
use crate::image::format::ImageFormat;
use crate::image::Image;
use crate::texture::format::TextureFormat;
use crate::texture::Tex3D;
use cgmath::Vector3;
use wasm_bindgen::JsValue;
impl<F> Image for Canvas<F>
where
F: TextureFormat,
F: ImageFormat,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,

View File

@@ -1,113 +1,68 @@
use crate::texture::format::TextureFormat;
use crate::texture::format::R8U;
use cgmath::Vector3;
use fitsrs::card::Value;
use fitsrs::hdu::header::extension::image::Image as XImage;
use fitsrs::hdu::header::Bitpix;
use fitsrs::hdu::header::Header;
use fitsrs::WCS;
use fitsrs::{Fits, HDU};
use std::fmt::Debug;
use std::io::Cursor;
use wasm_bindgen::JsValue;
use cgmath::{Vector2, Vector3};
#[derive(Debug)]
pub struct FitsImage<'a> {
// get a reference to the header
pub header: Header<XImage>,
// image size
pub width: u32,
pub height: u32,
pub depth: u32,
// bitpix
pub bitpix: Bitpix,
// 1.0 by default
pub bscale: f32,
// 0.0 by default
pub bzero: f32,
// blank
pub blank: Option<f32>,
// optional wcs
pub wcs: Option<WCS>,
// raw bytes of the data image (in Big-Endian)
pub raw_bytes: &'a [u8],
pub struct Fits<'a> {
// Tile size
size: Vector2<i32>,
pub data: Data<'a>,
}
impl<'a> FitsImage<'a> {
/// Get all the hdu images from a fits file
pub fn from_raw_bytes(bytes: &'a [u8]) -> Result<Vec<Self>, JsValue> {
let mut fits = Fits::from_reader(Cursor::new(bytes));
let mut images = vec![];
use std::borrow::Cow;
use std::fmt::Debug;
#[derive(Debug)]
pub enum Data<'a> {
U8(Cow<'a, [u8]>),
I16(Cow<'a, [i16]>),
I32(Cow<'a, [i32]>),
F32(Cow<'a, [f32]>),
}
use fitsrs::{fits::Fits as FitsData, hdu::data::InMemData};
use std::io::Cursor;
while let Some(Ok(hdu)) = fits.next() {
match hdu {
HDU::XImage(hdu) | HDU::Primary(hdu) => {
// Prefer getting the dimension directly from NAXIS1/NAXIS2 instead of from the WCS
// because it may not exist in all HDU images
let width = hdu.get_header().get_xtension().get_naxisn(1);
let height = hdu.get_header().get_xtension().get_naxisn(2);
impl<'a> Fits<'a> {
pub fn from_byte_slice(bytes_reader: &'a mut Cursor<&[u8]>) -> Result<Self, JsValue> {
let FitsData { hdu } = FitsData::from_reader(bytes_reader)
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
if let (Some(&width), Some(&height)) = (width, height) {
let depth =
*hdu.get_header().get_xtension().get_naxisn(3).unwrap_or(&1) as u32;
let header = hdu.get_header();
let xtension = header.get_xtension();
let width = xtension
.get_naxisn(1)
.ok_or_else(|| JsValue::from_str("NAXIS1 not found in the fits"))?;
let header = hdu.get_header();
let height = xtension
.get_naxisn(2)
.ok_or_else(|| JsValue::from_str("NAXIS2 not found in the fits"))?;
let bscale = match header.get("BSCALE") {
Some(Value::Integer { value, .. }) => *value as f32,
Some(Value::Float { value, .. }) => *value as f32,
_ => 1.0,
};
let bzero = match header.get("BZERO") {
Some(Value::Integer { value, .. }) => *value as f32,
Some(Value::Float { value, .. }) => *value as f32,
_ => 0.0,
};
let blank = match header.get("BLANK") {
Some(Value::Integer { value, .. }) => Some(*value as f32),
Some(Value::Float { value, .. }) => Some(*value as f32),
_ => None,
};
let off = hdu.get_data_unit_byte_offset() as usize;
let len = hdu.get_data_unit_byte_size() as usize;
let raw_bytes = &bytes[off..(off + len)];
let bitpix = hdu.get_header().get_xtension().get_bitpix();
let wcs = hdu.wcs().ok();
images.push(Self {
header: hdu.get_header().clone(),
width: width as u32,
height: height as u32,
depth,
bitpix,
bscale,
wcs,
bzero,
blank,
raw_bytes,
});
}
}
_ => (),
let data = hdu.get_data();
let data = match *data {
InMemData::U8(slice) => Data::U8(Cow::Borrowed(slice)),
InMemData::I16(slice) => Data::I16(Cow::Borrowed(slice)),
InMemData::I32(slice) => Data::I32(Cow::Borrowed(slice)),
InMemData::I64(slice) => {
let data = slice.iter().map(|v| *v as i32).collect();
Data::I32(Cow::Owned(data))
}
}
InMemData::F32(slice) => Data::F32(Cow::Borrowed(slice)),
InMemData::F64(slice) => {
let data = slice.iter().map(|v| *v as f32).collect();
Data::F32(Cow::Owned(data))
}
};
if !images.is_empty() {
Ok(images)
} else {
Err(JsValue::from_str("Image HDU not found in the FITS"))
}
Ok(Self {
// Tile size
size: Vector2::new(*width as i32, *height as i32),
// Allocation info of the layout
data,
})
}
}
use crate::{image::Image, texture::Tex3D};
impl Image for FitsImage<'_> {
impl Image for Fits<'_> {
fn insert_into_3d_texture<T: Tex3D>(
&self,
// The texture array
@@ -115,21 +70,98 @@ impl Image for FitsImage<'_> {
// An offset to write the image in the texture array
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
let view = unsafe { R8U::view(self.raw_bytes) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.width as i32,
self.height as i32,
self.depth as i32,
Some(view.as_ref()),
);
match &self.data {
Data::U8(data) => {
let view = unsafe { R8UI::view(data) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
Data::I16(data) => {
let view = unsafe { R16I::view(data) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
Data::I32(data) => {
let view = unsafe { R32I::view(data) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
Data::F32(data) => {
let view = unsafe {
R8UI::view(std::slice::from_raw_parts(
data.as_ptr() as *const u8,
data.len() * 4,
))
};
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
}
Ok(())
}
fn get_size(&self) -> (u32, u32) {
(self.width, self.height)
(self.size.x as u32, self.size.y as u32)
}
}
use crate::image::format::ImageFormat;
use wasm_bindgen::JsValue;
pub trait FitsImageFormat: ImageFormat {
const BITPIX: i8;
}
use crate::image::R32F;
impl FitsImageFormat for R32F {
const BITPIX: i8 = -32;
}
#[cfg(feature = "webgl2")]
use crate::image::{R16I, R32I, R64F, R8UI};
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R64F {
const BITPIX: i8 = -64;
}
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R32I {
const BITPIX: i8 = 32;
}
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R16I {
const BITPIX: i8 = 16;
}
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R8UI {
const BITPIX: i8 = 8;
}
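
The FitsImage::from_raw_bytes side of this hunk walks every HDU and keeps only the image ones. A reduced sketch of that loop reusing the fitsrs calls shown above (header keywords, WCS and the data-unit slice are left out, and the exact integer types returned by get_naxisn may differ between fitsrs releases):

    use fitsrs::{Fits, HDU};
    use std::io::Cursor;

    // Collect (width, height, depth) for every image HDU found in a FITS byte buffer.
    fn list_image_hdus(bytes: &[u8]) -> Vec<(u32, u32, u32)> {
        let mut fits = Fits::from_reader(Cursor::new(bytes));
        let mut dims = Vec::new();

        while let Some(Ok(hdu)) = fits.next() {
            match hdu {
                HDU::XImage(hdu) | HDU::Primary(hdu) => {
                    let xtension = hdu.get_header().get_xtension();
                    // NAXIS1/NAXIS2 may be absent, e.g. for a data-less primary HDU.
                    if let (Some(&w), Some(&h)) =
                        (xtension.get_naxisn(1), xtension.get_naxisn(2))
                    {
                        let d = *xtension.get_naxisn(3).unwrap_or(&1);
                        dims.push((w as u32, h as u32, d as u32));
                    }
                }
                _ => (),
            }
        }

        dims
    }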

View File

@@ -1,9 +1,311 @@
use crate::texture::format::PixelType;
use crate::texture::pixel::Pixel;
use al_api::hips::ImageExt;
pub enum Bytes<'a> {
Borrowed(&'a [u8]),
Owned(Vec<u8>),
}
pub trait ImageFormat {
type P: Pixel;
type ArrayBufferView: AsRef<js_sys::Object>;
const NUM_CHANNELS: usize;
const FORMAT: u32;
const INTERNAL_FORMAT: i32;
const TYPE: u32;
const CHANNEL_TYPE: ChannelType;
/// Creates a JS typed array which is a view into wasm's linear memory at the slice specified.
/// This function returns a new typed array which is a view into wasm's memory. This view does not copy the underlying data.
///
/// # Safety
///
/// Views into WebAssembly memory are only valid so long as the backing buffer isn't resized in JS. Once this function is called any future calls to Box::new (or malloc of any form) may cause the returned value here to be invalidated. Use with caution!
///
/// Additionally the returned object can be safely mutated but the input slice isn't guaranteed to be mutable.
///
/// Finally, the returned object is disconnected from the input slice's lifetime, so there's no guarantee that the data is read at the right time.
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str>;
}
use crate::webgl_ctx::WebGlRenderingCtx;
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGB8U;
impl ImageFormat for RGB8U {
type P = [u8; 3];
const NUM_CHANNELS: usize = 3;
const FORMAT: u32 = WebGlRenderingCtx::RGB;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::RGB8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder jpeg. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGBA8U;
#[cfg(feature = "webgl2")]
impl ImageFormat for RGBA8U {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::RGBA8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder png. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGBA32F;
impl ImageFormat for RGBA32F {
type P = [f32; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
#[cfg(feature = "webgl2")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA32F as i32;
#[cfg(feature = "webgl1")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA as i32;
const CHANNEL_TYPE: ChannelType = ChannelType::RGBA32F;
const TYPE: u32 = WebGlRenderingCtx::FLOAT;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Float32Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGB32F;
impl ImageFormat for RGB32F {
type P = [f32; 3];
const NUM_CHANNELS: usize = 3;
const FORMAT: u32 = WebGlRenderingCtx::RGB;
#[cfg(feature = "webgl2")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB32F as i32;
#[cfg(feature = "webgl1")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB as i32;
const CHANNEL_TYPE: ChannelType = ChannelType::RGB32F;
const TYPE: u32 = WebGlRenderingCtx::FLOAT;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Float32Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32F;
impl ImageFormat for R32F {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R64F;
impl ImageFormat for R64F {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R8UI;
#[cfg(feature = "webgl2")]
impl ImageFormat for R8UI {
type P = [u8; 1];
const NUM_CHANNELS: usize = 1;
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R8UI as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::R8UI;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R16I;
#[cfg(feature = "webgl2")]
impl ImageFormat for R16I {
type P = [i16; 1];
const NUM_CHANNELS: usize = 1;
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R16I as i32;
const TYPE: u32 = WebGlRenderingCtx::SHORT;
const CHANNEL_TYPE: ChannelType = ChannelType::R16I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Int16Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32I;
#[cfg(feature = "webgl2")]
impl ImageFormat for R32I {
type P = [i32; 1];
const NUM_CHANNELS: usize = 1;
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R32I as i32;
const TYPE: u32 = WebGlRenderingCtx::INT;
const CHANNEL_TYPE: ChannelType = ChannelType::R32I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Int32Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub enum ChannelType {
RGBA32F,
RGB32F,
RGBA8U,
RGB8U,
R32F,
#[cfg(feature = "webgl2")]
R64F,
#[cfg(feature = "webgl2")]
R8UI,
#[cfg(feature = "webgl2")]
R16I,
#[cfg(feature = "webgl2")]
R32I,
}
impl ChannelType {
pub fn is_colored(&self) -> bool {
matches!(
self,
ChannelType::RGBA32F | ChannelType::RGB32F | ChannelType::RGBA8U | ChannelType::RGB8U
)
}
}
pub const NUM_CHANNELS: usize = 9;
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct ImageFormatType {
pub ext: ImageExt,
pub fmt: PixelType,
pub channel: ChannelType,
}
impl ImageFormatType {
@@ -11,11 +313,11 @@ impl ImageFormatType {
&self.ext
}
pub fn get_pixel_format(&self) -> PixelType {
self.fmt
pub fn get_channel(&self) -> ChannelType {
self.channel
}
pub fn is_colored(&self) -> bool {
!matches!(self.ext, ImageExt::Fits)
self.channel.is_colored()
}
}
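
Each view implementation wraps a js_sys typed-array view over wasm linear memory, which is why the trait spells out the resize hazard. A small sketch of the pattern on a wasm32 target, including the copy that detaches the data when it must outlive the next allocation (the function name is illustrative):

    use js_sys::Uint8Array;

    // A zero-copy view is only valid until wasm memory grows; slicing it copies
    // the bytes into a fresh, independent Uint8Array.
    fn upload_safe_copy(pixels: &[u8]) -> Uint8Array {
        // SAFETY: nothing allocates between creating the view and copying it.
        let view = unsafe { Uint8Array::view(pixels) };
        view.slice(0, view.length())
    }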

View File

@@ -7,7 +7,7 @@ pub struct HTMLImage<F> {
impl<F> HTMLImage<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
pub fn new(image: web_sys::HtmlImageElement) -> Self {
Self {
@@ -17,14 +17,14 @@ where
}
}
use crate::image::format::ImageFormat;
use crate::image::Image;
use crate::texture::format::TextureFormat;
use crate::texture::Tex3D;
use cgmath::Vector3;
use wasm_bindgen::JsValue;
impl<F> Image for HTMLImage<F>
where
F: TextureFormat,
F: ImageFormat,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,

View File

@@ -6,9 +6,9 @@ pub mod html;
pub mod raw;
use crate::image::bitmap::Bitmap;
use crate::image::format::RGB8U;
use crate::image::format::RGBA8U;
use crate::image::raw::ImageBuffer;
use crate::texture::format::RGB8U;
use crate::texture::format::RGBA8U;
pub trait ArrayBuffer: AsRef<js_sys::Object> + std::fmt::Debug {
type Item: std::cmp::PartialOrd + Clone + Copy + std::fmt::Debug + cgmath::Zero;
@@ -179,7 +179,6 @@ impl ArrayBuffer for ArrayF64 {
}
use self::canvas::Canvas;
use self::fits::FitsImage;
use self::html::HTMLImage;
use wasm_bindgen::JsValue;
pub trait Image {
@@ -211,14 +210,13 @@ where
Ok(())
}
#[inline]
fn get_size(&self) -> (u32, u32) {
let image = &**self;
image.get_size()
}
}
use std::rc::Rc;
use std::{io::Cursor, rc::Rc};
impl<I> Image for Rc<I>
where
I: Image,
@@ -236,19 +234,21 @@ where
Ok(())
}
#[inline]
fn get_size(&self) -> (u32, u32) {
let image = &**self;
image.get_size()
}
}
use crate::texture::format::{R16I, R32F, R32I, R8U};
use crate::texture::Tex3D;
#[cfg(feature = "webgl2")]
use crate::image::format::{R16I, R32I, R64F, R8UI};
use crate::{image::format::R32F, texture::Tex3D};
use fits::Fits;
#[derive(Debug)]
#[cfg(feature = "webgl2")]
pub enum ImageType {
FitsRawBytes {
FitsImage {
raw_bytes: js_sys::Uint8Array,
size: (u32, u32),
},
@@ -283,7 +283,7 @@ pub enum ImageType {
image: ImageBuffer<R16I>,
},
RawR8ui {
image: ImageBuffer<R8U>,
image: ImageBuffer<R8UI>,
},
}
@@ -297,16 +297,17 @@ impl Image for ImageType {
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
match self {
ImageType::FitsRawBytes {
ImageType::FitsImage {
raw_bytes: raw_bytes_buf,
..
} => {
let raw_bytes = raw_bytes_buf.to_vec();
let num_bytes = raw_bytes_buf.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
raw_bytes_buf.copy_to(&mut raw_bytes[..]);
let images = FitsImage::from_raw_bytes(&raw_bytes)?;
for image in images {
image.insert_into_3d_texture(textures, offset)?
}
let mut bytes_reader = Cursor::new(raw_bytes.as_slice());
let fits_img = Fits::from_byte_slice(&mut bytes_reader)?;
fits_img.insert_into_3d_texture(textures, offset)?
}
ImageType::Canvas { canvas } => canvas.insert_into_3d_texture(textures, offset)?,
ImageType::ImageRgba8u { image } => image.insert_into_3d_texture(textures, offset)?,
@@ -330,7 +331,7 @@ impl Image for ImageType {
fn get_size(&self) -> (u32, u32) {
match self {
ImageType::FitsRawBytes { size, .. } => *size,
ImageType::FitsImage { size, .. } => *size,
ImageType::Canvas { canvas } => canvas.get_size(),
ImageType::ImageRgba8u { image } => image.get_size(),
ImageType::ImageRgb8u { image } => image.get_size(),
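
Both sides of this hunk move the tile bytes out of JavaScript before handing them to the FITS parser; js_sys offers two equivalent ways to do that copy, sketched here:

    use js_sys::Uint8Array;

    // Copy a JS Uint8Array into Rust-owned memory so a parser can work on &[u8].
    fn copy_tile_bytes(buf: &Uint8Array) -> Vec<u8> {
        // Allocate and fill in place...
        let mut raw_bytes = vec![0u8; buf.length() as usize];
        buf.copy_to(&mut raw_bytes[..]);
        // ...which is equivalent to: let raw_bytes = buf.to_vec();
        raw_bytes
    }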

View File

@@ -1,18 +1,17 @@
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
use crate::texture::pixel::Pixel;
use crate::texture::Tex3D;
#[derive(Debug)]
#[allow(dead_code)]
pub struct ImageBuffer<T>
where
T: TextureFormat,
T: ImageFormat,
{
pub data: Vec<<<T as TextureFormat>::P as Pixel>::Item>,
pub data: Vec<<<T as ImageFormat>::P as Pixel>::Item>,
pub size: Vector2<i32>,
}
use crate::texture::format::Bytes;
use crate::image::format::Bytes;
pub struct ImageBufferView {
pub x: i32,
@@ -23,13 +22,9 @@ pub struct ImageBufferView {
use wasm_bindgen::JsValue;
impl<T> ImageBuffer<T>
where
T: TextureFormat,
T: ImageFormat,
{
pub fn new(
data: Vec<<<T as TextureFormat>::P as Pixel>::Item>,
width: i32,
height: i32,
) -> Self {
pub fn new(data: Vec<<<T as ImageFormat>::P as Pixel>::Item>, width: i32, height: i32) -> Self {
let size_buf = width * height * (T::NUM_CHANNELS as i32);
debug_assert!(size_buf == data.len() as i32);
//let buf = <<T as ImageFormat>::P as Pixel>::Container::new(buf);
@@ -49,10 +44,9 @@ where
let decoded_pixels = unsafe {
decoded_bytes.set_len(
decoded_bytes.len()
/ std::mem::size_of::<<<T as TextureFormat>::P as Pixel>::Item>(),
decoded_bytes.len() / std::mem::size_of::<<<T as ImageFormat>::P as Pixel>::Item>(),
);
std::mem::transmute::<Vec<u8>, Vec<<<T as TextureFormat>::P as Pixel>::Item>>(
std::mem::transmute::<Vec<u8>, Vec<<<T as ImageFormat>::P as Pixel>::Item>>(
decoded_bytes,
)
};
@@ -65,8 +59,10 @@ where
debug_assert!(size_buf == raw_bytes.len() as i32);
let decoded_pixels = unsafe {
raw_bytes.set_len(raw_bytes.len() / std::mem::size_of::<<T::P as Pixel>::Item>());
std::mem::transmute::<Vec<u8>, Vec<<T::P as Pixel>::Item>>(raw_bytes)
raw_bytes.set_len(
raw_bytes.len() / std::mem::size_of::<<<T as ImageFormat>::P as Pixel>::Item>(),
);
std::mem::transmute::<Vec<u8>, Vec<<<T as ImageFormat>::P as Pixel>::Item>>(raw_bytes)
};
Self::new(decoded_pixels, width, height)
@@ -77,7 +73,7 @@ where
Self { data: vec![], size }
}
pub fn allocate(pixel_fill: &T::P, width: i32, height: i32) -> ImageBuffer<T> {
pub fn allocate(pixel_fill: &<T as ImageFormat>::P, width: i32, height: i32) -> ImageBuffer<T> {
let size_buf = ((width * height) as usize) * (T::NUM_CHANNELS);
let data = pixel_fill
@@ -116,11 +112,11 @@ where
}
}
pub fn iter(&self) -> impl Iterator<Item = &<T::P as Pixel>::Item> {
pub fn iter(&self) -> impl Iterator<Item = &<<T as ImageFormat>::P as Pixel>::Item> {
self.data.iter()
}
pub fn get_data(&self) -> &[<T::P as Pixel>::Item] {
pub fn get_data(&self) -> &[<<T as ImageFormat>::P as Pixel>::Item] {
&self.data
}
@@ -133,12 +129,12 @@ where
}
}
use crate::texture::format::{R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
use crate::image::format::{R16I, R32F, R32I, R8UI, RGB8U, RGBA8U};
pub enum ImageBufferType {
JPG(ImageBuffer<RGB8U>),
PNG(ImageBuffer<RGBA8U>),
R32F(ImageBuffer<R32F>),
R8UI(ImageBuffer<R8U>),
R8UI(ImageBuffer<R8UI>),
R16I(ImageBuffer<R16I>),
R32I(ImageBuffer<R32I>),
}
@@ -147,7 +143,7 @@ use crate::image::{ArrayBuffer, Image};
use cgmath::{Vector2, Vector3};
impl<I> Image for ImageBuffer<I>
where
I: TextureFormat,
I: ImageFormat,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,
@@ -156,7 +152,8 @@ where
// An offset to write the image in the texture array
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
let js_array = <<I::P as Pixel>::Container as ArrayBuffer>::new(&self.data);
let js_array =
<<<I as ImageFormat>::P as Pixel>::Container as ArrayBuffer>::new(&self.data);
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
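
The decode paths above reinterpret a Vec<u8> in place with set_len and transmute. For FITS samples, which arrive big-endian, an explicit per-element conversion is a safe alternative; this is a sketch, not the crate's code:

    // Convert big-endian IEEE-754 f32 samples (the FITS byte order) into native
    // f32 values without set_len/transmute.
    fn decode_be_f32(raw: &[u8]) -> Vec<f32> {
        raw.chunks_exact(4)
            .map(|c| f32::from_be_bytes([c[0], c[1], c[2], c[3]]))
            .collect()
    }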

View File

@@ -6,11 +6,6 @@ extern "C" {
pub fn log(s: &str);
}
#[macro_export]
macro_rules! al_print {
($($arg:tt)*) => { al_core::log(&format!("{:?}", $($arg),*)) };
}
// ----------------------------------------------------------------------------
// Helpers to hide some of the verbosity of web_sys

View File

@@ -2,7 +2,7 @@ use {wasm_bindgen::prelude::*, web_sys::WebGlFramebuffer};
use crate::webgl_ctx::WebGlRenderingCtx;
// Internal format used for the framebuffer final texture
use crate::texture::format::RGBA8U;
use crate::image::format::RGBA8U;
pub struct FrameBufferObject {
gl: WebGlContext,

View File

@@ -330,7 +330,6 @@ impl SendUniformsWithParams<Colormaps> for HiPSColor {
let cmap = cmaps.get(self.cmap_name.as_ref());
shader
.attach_uniforms_from(cmaps)
.attach_uniforms_with_params_from(cmap, cmaps)
.attach_uniform("H", &self.stretch)
.attach_uniform("min_value", &self.min_cut.unwrap_or(0.0))

View File

@@ -1,4 +1,4 @@
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
@@ -23,7 +23,7 @@ pub struct Texture3D {
}
impl Texture3D {
pub fn create_empty<F: TextureFormat>(
pub fn create_empty<F: ImageFormat>(
gl: &WebGlContext,
// The weight of the individual textures
width: i32,
@@ -54,9 +54,10 @@ impl Texture3D {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture3D {

View File

@@ -1,9 +1,9 @@
use crate::texture::format::PixelType;
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
use crate::texture::pixel::Pixel;
use crate::texture::ChannelType;
use crate::texture::Texture2DMeta;
use crate::webgl_ctx::WebGlContext;
use crate::webgl_ctx::WebGlRenderingCtx;
@@ -22,7 +22,7 @@ pub struct Texture2DArray {
}
impl Texture2DArray {
pub fn create_empty<F: TextureFormat>(
pub fn create_empty<F: ImageFormat>(
gl: &WebGlContext,
// The weight of the individual textures
width: i32,
@@ -53,9 +53,10 @@ impl Texture2DArray {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
pixel_type: F::PIXEL_TYPE,
ty: F::TYPE,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture2DArray {
@@ -115,31 +116,37 @@ impl Texture2DArray {
self.gl
.viewport(0, 0, metadata.width as i32, metadata.height as i32);
let value = match metadata.pixel_type {
PixelType::R8U => {
#[cfg(feature = "webgl2")]
let value = match metadata.channel_type {
ChannelType::R8UI => {
let p = <[u8; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R16I => {
ChannelType::R16I => {
let p = <[i16; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32I => {
ChannelType::R32I => {
let p = <[i32; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32F => {
ChannelType::R32F => {
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
crate::log(&format!("{:?}", p));
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::RGB8U => {
ChannelType::RGB8U => {
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
PixelType::RGBA8U => {
ChannelType::RGBA8U => {
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
_ => Err(JsValue::from_str(
"Pixel retrieval not implemented for that texture format.",
)),
};
// Unbind the framebuffer

View File

@@ -1,204 +0,0 @@
use crate::texture::pixel::Pixel;
pub type Bytes<'a> = std::borrow::Cow<'a, [u8]>;
pub trait TextureFormat {
type P: Pixel;
type ArrayBufferView: AsRef<js_sys::Object>;
const NUM_CHANNELS: usize;
const FORMAT: u32;
const INTERNAL_FORMAT: i32;
const TYPE: u32;
const PIXEL_TYPE: PixelType;
/// Creates a JS typed array which is a view into wasm's linear memory at the slice specified.
/// This function returns a new typed array which is a view into wasm's memory. This view does not copy the underlying data.
///
/// # Safety
///
/// Views into WebAssembly memory are only valid so long as the backing buffer isn't resized in JS. Once this function is called any future calls to Box::new (or malloc of any form) may cause the returned value here to be invalidated. Use with caution!
///
/// Additionally the returned object can be safely mutated but the input slice isn't guaranteed to be mutable.
///
/// Finally, the returned object is disconnected from the input slice's lifetime, so there's no guarantee that the data is read at the right time.
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str>;
}
use crate::webgl_ctx::WebGlRenderingCtx;
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGB8U;
impl TextureFormat for RGB8U {
type P = [u8; 3];
const NUM_CHANNELS: usize = 3;
const FORMAT: u32 = WebGlRenderingCtx::RGB;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::RGB8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder jpeg. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGBA8U;
impl TextureFormat for RGBA8U {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::RGBA8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder png. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32F;
impl TextureFormat for R32F {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::R32F;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R8U;
impl TextureFormat for R8U {
type P = [u8; 1];
const FORMAT: u32 = WebGlRenderingCtx::RED;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const NUM_CHANNELS: usize = 1;
const PIXEL_TYPE: PixelType = PixelType::R8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R16I;
impl TextureFormat for R16I {
type P = [u8; 2];
const NUM_CHANNELS: usize = 2;
const FORMAT: u32 = WebGlRenderingCtx::RG;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RG8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::R16I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32I;
impl TextureFormat for R32I {
type P = [u8; 4];
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const NUM_CHANNELS: usize = 4;
const PIXEL_TYPE: PixelType = PixelType::R32I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub enum PixelType {
R8U,
R16I,
R32I,
R32F,
RGB8U,
RGBA8U,
}
impl PixelType {
pub const fn num_channels(&self) -> usize {
match self {
Self::RGB8U => 3,
Self::RGBA8U => 4,
_ => 1,
}
}
}
pub const NUM_CHANNELS: usize = 6;

View File

@@ -1,7 +1,6 @@
pub mod array;
pub use array::Texture2DArray;
pub mod format;
pub mod pixel;
pub use pixel::*;
@@ -12,7 +11,7 @@ pub use mod_3d::Texture3D;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
use crate::texture::format::PixelType;
use crate::image::format::ChannelType;
use crate::webgl_ctx::WebGlContext;
use crate::webgl_ctx::WebGlRenderingCtx;
use wasm_bindgen::prelude::*;
@@ -25,8 +24,9 @@ pub static mut CUR_IDX_TEX_UNIT: u8 = 0;
#[allow(dead_code)]
pub struct Texture2DMeta {
pub format: u32,
pub internal_format: i32,
pub ty: u32,
pub pixel_type: PixelType,
pub channel_type: ChannelType,
pub width: u32,
pub height: u32,
@@ -47,13 +47,13 @@ pub enum SamplerType {
Unsigned,
}
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
//use super::pixel::PixelType;
use std::cell::RefCell;
use std::path::Path;
use std::rc::Rc;
impl Texture2D {
pub fn create_from_path<P: AsRef<Path>, F: TextureFormat>(
pub fn create_from_path<P: AsRef<Path>, F: ImageFormat>(
gl: &WebGlContext,
name: &'static str,
src: &P,
@@ -61,11 +61,12 @@ impl Texture2D {
) -> Result<Texture2D, JsValue> {
let image = HtmlImageElement::new().unwrap_abort();
#[cfg(feature = "webgl2")]
let texture = gl.create_texture();
let onerror = {
Closure::wrap(Box::new(move || {
println!("Cannot load texture located at: {name:?}");
println!("Cannot load texture located at: {:?}", name);
}) as Box<dyn Fn()>)
};
@@ -75,11 +76,13 @@ impl Texture2D {
let metadata = Rc::new(RefCell::new(Texture2DMeta {
width,
height,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
}));
#[cfg(feature = "webgl2")]
let onload = {
let image = image.clone();
let gl = gl.clone();
@@ -129,6 +132,7 @@ impl Texture2D {
let gl = gl.clone();
Ok(Texture2D {
#[cfg(feature = "webgl2")]
texture,
gl,
@@ -137,7 +141,7 @@ impl Texture2D {
})
}
pub fn create_from_raw_pixels<F: TextureFormat>(
pub fn create_from_raw_pixels<F: ImageFormat>(
gl: &WebGlContext,
width: i32,
height: i32,
@@ -162,12 +166,12 @@ impl Texture2D {
Ok(texture)
}
pub fn create_from_raw_bytes<F: TextureFormat>(
pub fn create_from_raw_bytes<F: ImageFormat>(
gl: &WebGlContext,
width: i32,
height: i32,
tex_params: &'static [(u32, u32)],
bytes: &[u8],
bytes: Option<&[u8]>,
) -> Result<Texture2D, JsValue> {
let texture = gl.create_texture();
@@ -184,14 +188,7 @@ impl Texture2D {
width,
height,
);
let view = unsafe {
let len = bytes.len() / (std::mem::size_of::<<F::P as Pixel>::Item>());
let pixels =
std::slice::from_raw_parts(bytes.as_ptr() as *const <F::P as Pixel>::Item, len);
F::view(pixels)
};
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_array_buffer_view(
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingCtx::TEXTURE_2D,
0,
0,
@@ -200,7 +197,7 @@ impl Texture2D {
height,
F::FORMAT,
F::TYPE,
Some(view.as_ref()),
bytes,
)
.expect("Texture 2D");
@@ -208,9 +205,10 @@ impl Texture2D {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture2D {
@@ -222,7 +220,7 @@ impl Texture2D {
})
}
pub fn create_empty_with_format<F: TextureFormat>(
pub fn create_empty_with_format<F: ImageFormat>(
gl: &WebGlContext,
width: i32,
height: i32,
@@ -248,14 +246,16 @@ impl Texture2D {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture2D {
texture,
gl,
metadata,
})
}
@@ -335,31 +335,37 @@ impl Texture2D {
self.gl
.viewport(0, 0, metadata.width as i32, metadata.height as i32);
let value = match metadata.pixel_type {
PixelType::R8U => {
#[cfg(feature = "webgl2")]
let value = match metadata.channel_type {
ChannelType::R8UI => {
let p = <[u8; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R16I => {
ChannelType::R16I => {
let p = <[i16; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32I => {
ChannelType::R32I => {
let p = <[i32; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32F => {
ChannelType::R32F => {
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
crate::log(&format!("{:?}", p));
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::RGB8U => {
ChannelType::RGB8U => {
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
PixelType::RGBA8U => {
ChannelType::RGBA8U => {
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
_ => Err(JsValue::from_str(
"Pixel retrieval not implemented for that texture format.",
)),
};
// Unbind the framebuffer

View File

@@ -21,6 +21,70 @@ pub trait Pixel:
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue>;
}
impl Pixel for [f32; 4] {
type Item = f32;
type Container = ArrayF32;
const BLACK: Self = [f32::NAN; 4];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Float32Array::new_with_length(4);
#[cfg(feature = "webgl2")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGBA32F,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
#[cfg(feature = "webgl1")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGBA,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
let pixels = pixels.to_vec();
Ok([pixels[0], pixels[1], pixels[2], pixels[3]])
}
}
impl Pixel for [f32; 3] {
type Item = f32;
type Container = ArrayF32;
const BLACK: Self = [f32::NAN; 3];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Float32Array::new_with_length(3);
#[cfg(feature = "webgl2")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGB32F,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
#[cfg(feature = "webgl1")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGB,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
let pixels = pixels.to_vec();
Ok([pixels[0], pixels[1], pixels[2]])
}
}
impl Pixel for [f32; 1] {
type Item = f32;
type Container = ArrayF32;
@@ -46,7 +110,38 @@ impl Pixel for [f32; 1] {
])])
}
}
/*use crate::image::ArrayF64;
impl Pixel for [f64; 1] {
type Item = f64;
type Container = ArrayF64;
const BLACK: Self = [std::f64::NAN];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Float32Array::new_with_length(1);
#[cfg(feature = "webgl2")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RED,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
#[cfg(feature = "webgl1")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::LUMINANCE_ALPHA,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
Ok([pixels.to_vec()[0] as f64])
}
}*/
impl Pixel for [u8; 4] {
type Item = u8;
type Container = ArrayU8;
@@ -88,27 +183,7 @@ impl Pixel for [u8; 3] {
Ok([pixels[0], pixels[1], pixels[2]])
}
}
impl Pixel for [u8; 2] {
type Item = u8;
type Container = ArrayU8;
const BLACK: Self = [0, 0];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Uint8Array::new_with_length(2);
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RG,
WebGlRenderingCtx::UNSIGNED_BYTE,
Some(&pixels),
)?;
let pixels = pixels.to_vec();
Ok([pixels[0], pixels[1]])
}
}
#[cfg(feature = "webgl2")]
impl Pixel for [u8; 1] {
type Item = u8;
type Container = ArrayU8;
@@ -129,50 +204,45 @@ impl Pixel for [u8; 1] {
Ok([pixels.to_vec()[0]])
}
}
#[cfg(feature = "webgl2")]
impl Pixel for [i16; 1] {
type Item = i16;
type Container = ArrayI16;
const BLACK: Self = [i16::MIN];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let p = js_sys::Uint8Array::new_with_length(2);
let pixels = js_sys::Int16Array::new_with_length(1);
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RG,
WebGlRenderingCtx::UNSIGNED_BYTE,
Some(&p),
WebGlRenderingCtx::RED_INTEGER,
WebGlRenderingCtx::SHORT,
Some(&pixels),
)?;
Ok([i16::from_le_bytes([p.at(0).unwrap(), p.at(1).unwrap()])])
Ok([pixels.to_vec()[0]])
}
}
#[cfg(feature = "webgl2")]
impl Pixel for [i32; 1] {
type Item = i32;
type Container = ArrayI32;
const BLACK: Self = [i32::MIN];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let p = js_sys::Uint8Array::new_with_length(4);
let pixels = js_sys::Int32Array::new_with_length(1);
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGBA,
WebGlRenderingCtx::UNSIGNED_BYTE,
Some(&p),
WebGlRenderingCtx::RED_INTEGER,
WebGlRenderingCtx::INT,
Some(&pixels),
)?;
Ok([i32::from_le_bytes([
p.at(0).unwrap(),
p.at(1).unwrap(),
p.at(2).unwrap(),
p.at(3).unwrap(),
])])
Ok([pixels.to_vec()[0]])
}
}
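
With WebGL2, the integer formats are read back through RED_INTEGER and a typed array matching the component type, as the new i16 and i32 impls above show. A hedged sketch of the single-texel read for R16I, assuming a web_sys::WebGl2RenderingContext handle and a framebuffer already bound to the R16I texture:

    use wasm_bindgen::JsValue;
    use web_sys::WebGl2RenderingContext as Gl;

    // Read one i16 texel at (x, y) from the currently bound R16I framebuffer.
    fn read_r16i(gl: &Gl, x: i32, y: i32) -> Result<i16, JsValue> {
        let pixels = js_sys::Int16Array::new_with_length(1);
        gl.read_pixels_with_opt_array_buffer_view(
            x,
            y,
            1,
            1,
            Gl::RED_INTEGER,
            Gl::SHORT,
            Some(&pixels),
        )?;
        Ok(pixels.to_vec()[0])
    }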

View File

@@ -4,7 +4,10 @@ use wasm_bindgen::JsCast;
use wasm_bindgen::JsValue;
use web_sys::HtmlElement;
#[cfg(feature = "webgl2")]
pub type WebGlRenderingCtx = web_sys::WebGl2RenderingContext;
#[cfg(feature = "webgl1")]
pub type WebGlRenderingCtx = web_sys::WebGlRenderingContext;
#[derive(Clone)]
pub struct WebGlContext {

View File

@@ -31,7 +31,8 @@ fn generate_shaders() -> std::result::Result<(), Box<dyn Error>> {
let src = read_shader(path)?;
shaders.insert(out_file_name, src);
println!("cargo:rerun-if-changed=src/shaders/{file_name}");
//fs::write(&out_name, result)?;
println!("cargo:rerun-if-changed=src/shaders/{}", file_name);
}
}
}
@@ -55,8 +56,6 @@ fn read_shader<P: AsRef<std::path::Path>>(path: P) -> std::io::Result<String> {
let incl_file_name_rel = incl_file_names[1];
let incl_file_name = path.parent().unwrap().join(incl_file_name_rel);
println!("{}", incl_file_name.to_string_lossy());
read_shader(incl_file_name.to_str().unwrap()).unwrap()
} else {
l
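
The build script registers each shader source with cargo:rerun-if-changed so that editing a shader retriggers the build. A minimal standalone build.rs sketch of that convention (the src/shaders path and the .glsl extension are illustrative):

    use std::error::Error;
    use std::fs;

    // Ask Cargo to re-run this build script whenever a shader source changes.
    fn main() -> Result<(), Box<dyn Error>> {
        for entry in fs::read_dir("src/shaders")? {
            let path = entry?.path();
            if path.extension().and_then(|e| e.to_str()) == Some("glsl") {
                println!("cargo:rerun-if-changed={}", path.display());
            }
        }
        Ok(())
    }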

View File

@@ -1,13 +1,12 @@
use crate::math::angle::ToAngle;
use crate::math::spectra::Freq;
use crate::renderable::hips::HiPS;
use crate::renderable::image::Image;
use crate::renderable::ImageLayer;
use crate::tile_fetcher::HiPSLocalFiles;
use crate::math::angle::ToAngle;
use crate::renderable::hips::HiPS;
use crate::{
camera::CameraViewPort,
downloader::Downloader,
healpix::moc::SpaceMoc,
healpix::coverage::HEALPixCoverage,
inertia::Inertia,
math::{
self,
@@ -22,10 +21,7 @@ use crate::{
time::DeltaTime,
};
use al_api::moc::MOCOptions;
use al_core::image::fits::FitsImage;
use al_core::image::ImageType;
use fitsrs::WCS;
use std::io::Cursor;
use wcs::WCS;
use wasm_bindgen::prelude::*;
@@ -38,8 +34,7 @@ use al_api::{
grid::GridCfg,
hips::{HiPSCfg, ImageMetadata},
};
use crate::healpix::moc::Moc;
use fitsrs::{fits::AsyncFits, hdu::extension::AsyncXtensionHDU};
use web_sys::{HtmlElement, WebGl2RenderingContext};
@@ -102,15 +97,17 @@ pub struct App {
pub projection: ProjectionType,
// Async data receivers
//img_send: async_channel::Sender<ImageLayer>,
img_send: async_channel::Sender<ImageLayer>,
img_recv: async_channel::Receiver<ImageLayer>,
ack_img_send: async_channel::Sender<ImageParams>,
//ack_img_recv: async_channel::Receiver<ImageParams>,
ack_img_recv: async_channel::Receiver<ImageParams>,
// callbacks
//callback_position_changed: js_sys::Function,
}
use cgmath::{Vector2, Vector3};
use futures::io::BufReader; // for `next`
use crate::math::projection::*;
pub const BLENDING_ANIM_DURATION: DeltaTime = DeltaTime::from_millis(200.0); // in ms
@@ -192,8 +189,8 @@ impl App {
let moc = MOCRenderer::new(&gl)?;
gl.clear_color(0.1, 0.1, 0.1, 1.0);
let (_, img_recv) = async_channel::unbounded::<ImageLayer>();
let (ack_img_send, _) = async_channel::unbounded::<ImageParams>();
let (img_send, img_recv) = async_channel::unbounded::<ImageLayer>();
let (ack_img_send, ack_img_recv) = async_channel::unbounded::<ImageParams>();
//let line_renderer = RasterizedLineRenderer::new(&gl)?;
@@ -250,10 +247,10 @@ impl App {
colormaps,
projection,
//img_send,
img_send,
img_recv,
ack_img_send,
//ack_img_recv,
ack_img_recv,
})
}
@@ -263,16 +260,13 @@ impl App {
// Loop over the hipss
for hips in self.layers.get_mut_hipses() {
if self.camera.get_tile_depth() == 0 {
match hips {
HiPS::D2(h) => {
let query = query::Allsky::new(h.get_config(), None);
if self.downloader.borrow().is_queried(&query.id) {
// do not ask for tiles if we download the allsky
continue;
}
}
// no Allsky generated for HiPS3D
HiPS::D3(h) => (),
let allsky_query = match hips {
HiPS::D2(h) => query::Allsky::new(h.get_config(), None),
HiPS::D3(h) => query::Allsky::new(h.get_config(), Some(h.get_slice() as u32)),
};
if self.downloader.borrow().is_queried(&allsky_query.id) {
// do not ask for tiles if we download the allsky
continue;
}
}
@@ -306,9 +300,9 @@ impl App {
}
}
HiPS::D3(hips) => {
let freq = hips.get_freq();
let slice = hips.get_slice();
for ancestor in ancestors {
if !hips.contains_tile(&ancestor, freq) {
if !hips.contains_tile(&ancestor, slice) {
self.tile_fetcher.append(hips.get_tile_query(&ancestor));
}
}
@@ -480,11 +474,15 @@ impl App {
self.catalog_loaded
}
pub(crate) fn get_moc(&self, moc_uuid: &str) -> Option<&SpaceMoc> {
pub(crate) fn get_moc(&self, moc_uuid: &str) -> Option<&HEALPixCoverage> {
self.moc.get_hpx_coverage(moc_uuid)
}
pub(crate) fn add_moc(&mut self, moc: SpaceMoc, options: MOCOptions) -> Result<(), JsValue> {
pub(crate) fn add_moc(
&mut self,
moc: HEALPixCoverage,
options: MOCOptions,
) -> Result<(), JsValue> {
self.moc
.push_back(moc, options, &mut self.camera, &self.projection);
self.request_redraw = true;
@@ -610,25 +608,69 @@ impl App {
//let _depth = tile.cell().depth();
// do not perform tex_sub costly GPU calls while the camera is zooming
if tile.cell().is_root() || included_in_coverage {
//let is_missing = tile.missing();
/*self.tile_fetcher.notify_tile(
&tile,
true,
false,
&mut self.downloader,
);*/
/*let image = if is_missing {
// Otherwise we push nothing, it is probably the case where:
// - an request error occured on a valid tile
// - the tile is not present, e.g. chandra HiPS have not the 0, 1 and 2 order tiles
None
} else {
Some(image)
};*/
use al_core::image::ImageType;
use fitsrs::fits::Fits;
use std::io::Cursor;
//if let Some(image) = image.as_ref() {
if let Some(ImageType::FitsRawBytes {
if let Some(ImageType::FitsImage {
raw_bytes: raw_bytes_buf,
..
}) = &*tile.image.borrow()
{
// check if the metadata has not been set
if hips.get_fits_params().is_none() {
let raw_bytes = raw_bytes_buf.to_vec();
if !cfg.fits_metadata {
let num_bytes = raw_bytes_buf.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
raw_bytes_buf.copy_to(&mut raw_bytes[..]);
let FitsImage {
bscale,
bzero,
blank,
..
} = FitsImage::from_raw_bytes(raw_bytes.as_slice())?[0];
hips.set_fits_params(bscale, bzero, blank);
let mut bytes_reader = Cursor::new(raw_bytes.as_slice());
let Fits { hdu } = Fits::from_reader(&mut bytes_reader)
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
let header = hdu.get_header();
let bscale =
if let Some(fitsrs::card::Value::Float(bscale)) =
header.get(b"BSCALE ")
{
*bscale as f32
} else {
1.0
};
let bzero = if let Some(fitsrs::card::Value::Float(bzero)) =
header.get(b"BZERO ")
{
*bzero as f32
} else {
0.0
};
let blank = if let Some(fitsrs::card::Value::Float(blank)) =
header.get(b"BLANK ")
{
*blank as f32
} else {
f32::NAN
};
cfg.set_fits_metadata(bscale, bzero, blank);
}
};
//}
let image = tile.image.clone();
if let Some(img) = &*image.borrow() {
@@ -679,21 +721,25 @@ impl App {
}
}
}
Resource::Moc(fetched_moc) => {
let moc_hips_cdid = fetched_moc.get_hips_cdid();
Resource::PixelMetadata(metadata) => {
if let Some(hips) = self.layers.get_mut_hips_from_cdid(&metadata.hips_cdid) {
let cfg = hips.get_config_mut();
if let Some(metadata) = &*metadata.value.borrow() {
cfg.blank = metadata.blank;
cfg.offset = metadata.offset;
cfg.scale = metadata.scale;
}
}
}
Resource::Moc(moc) => {
let moc_hips_cdid = moc.get_hips_cdid();
//let url = &moc_url[..moc_url.find("/Moc.fits").unwrap_abort()];
if let Some(hips) = self.layers.get_mut_hips_from_cdid(moc_hips_cdid) {
let request::moc::FetchedMoc { moc, .. } = fetched_moc;
let request::moc::Moc { moc, .. } = moc;
if let Some(moc) = &*moc.borrow() {
match (hips, moc) {
(HiPS::D2(hips), Moc::Space(moc)) => {
hips.set_moc(moc.clone());
}
(HiPS::D3(hips), Moc::FreqSpace(moc)) => {
hips.set_moc(moc.clone());
}
_ => (),
}
hips.set_moc(moc.clone());
self.request_for_new_tiles = true;
self.request_redraw = true;
@@ -928,122 +974,255 @@ impl App {
Ok(())
}
pub(crate) fn add_rgba_image(
pub(crate) fn add_image_from_blob_and_wcs(
&mut self,
layer: String,
bytes: &[u8],
stream: web_sys::ReadableStream,
wcs: WCS,
cfg: ImageMetadata,
) -> Result<js_sys::Promise, JsValue> {
let gl = self.gl.clone();
let img_sender = self.img_send.clone();
let ack_img_recv = self.ack_img_recv.clone();
// Stop the current inertia
self.inertia = None;
// And disable it while the fits has not been loaded
let disable_inertia = self.disable_inertia.clone();
*(disable_inertia.borrow_mut()) = true;
let camera_coo_sys = self.camera.get_coo_system();
match Image::from_rgba_bytes(&gl, bytes, wcs, camera_coo_sys) {
Ok(image) => {
let layer = ImageLayer {
images: vec![image],
id: layer.clone(),
layer,
meta: cfg,
};
let fut = async move {
use crate::renderable::image::Image;
use futures::future::Either;
use futures::TryStreamExt;
use js_sys::Uint8Array;
use wasm_streams::ReadableStream;
let params = layer.get_params();
let body = ReadableStream::from_raw(stream.dyn_into()?);
self.layers.add_image(
layer,
&mut self.camera,
&self.projection,
&mut self.tile_fetcher,
)?;
// Convert the JS ReadableStream to a Rust stream
let bytes_reader = match body.try_into_async_read() {
Ok(async_read) => Either::Left(async_read),
Err((_err, body)) => Either::Right(
body.into_stream()
.map_ok(|js_value| {
js_value.dyn_into::<Uint8Array>().unwrap_throw().to_vec()
})
.map_err(|_js_error| std::io::Error::other("failed to read"))
.into_async_read(),
),
};
use al_core::image::format::RGBA8U;
match Image::from_reader_and_wcs::<_, RGBA8U>(
&gl,
bytes_reader,
wcs,
None,
None,
None,
camera_coo_sys,
)
.await
{
Ok(image) => {
let img = ImageLayer {
images: vec![image],
id: layer.clone(),
layer,
meta: cfg,
};
self.request_redraw = true;
img_sender.send(img).await.unwrap();
let promise = js_sys::Promise::resolve(&serde_wasm_bindgen::to_value(&params)?);
Ok(promise)
// Wait for the ack here
let image_params = ack_img_recv
.recv()
.await
.map_err(|_| JsValue::from_str("Problem receiving fits"))?;
serde_wasm_bindgen::to_value(&image_params).map_err(|e| e.into())
}
Err(error) => Err(error),
}
Err(error) => Err(error),
}
};
let reenable_inertia = Closure::new(move || {
// renable inertia again
*(disable_inertia.borrow_mut()) = false;
});
let promise = wasm_bindgen_futures::future_to_promise(fut)
// Reenable inertia independantly from whether the
// fits has been correctly parsed or not
.finally(&reenable_inertia);
// forget the closure; this is not ideal because
// it will never be deallocated
reenable_inertia.forget();
Ok(promise)
}
pub(crate) fn add_fits_image(
pub(crate) fn add_image_fits(
&mut self,
bytes: &[u8],
stream: web_sys::ReadableStream,
meta: ImageMetadata,
layer: String,
) -> Result<js_sys::Promise, JsValue> {
let gl = self.gl.clone();
let fits_sender = self.img_send.clone();
let ack_fits_recv = self.ack_img_recv.clone();
// Stop the current inertia
self.inertia = None;
// And disable it while the fits has not been loaded
let disable_inertia = self.disable_inertia.clone();
*(disable_inertia.borrow_mut()) = true;
let camera_coo_sys = self.camera.get_coo_system();
// FIXME: this is done to prevent the view from keeping its inertia after being unblocked
let fut = async move {
use crate::renderable::image::Image;
use futures::future::Either;
use futures::TryStreamExt;
use js_sys::Uint8Array;
use wasm_streams::ReadableStream;
let gz = fitsrs::gz::GzReader::new(Cursor::new(bytes))
.map_err(|_| JsValue::from_str("Error creating gz wrapper"))?;
// Get the response's body as a JS ReadableStream
let body = ReadableStream::from_raw(stream.dyn_into()?);
let parse_fits_images_from_bytes = |raw_bytes: &[u8]| -> Result<Vec<Image>, JsValue> {
Ok(FitsImage::from_raw_bytes(raw_bytes)?
.into_iter()
.filter_map(
|FitsImage {
bitpix,
bscale,
bzero,
blank,
wcs,
raw_bytes,
..
}| {
if let Some(wcs) = wcs {
let image = Image::from_fits_hdu(
&gl,
wcs,
bitpix,
raw_bytes,
bscale,
bzero,
blank,
camera_coo_sys,
)
.ok()?;
Some(image)
} else {
None
}
},
)
.collect::<Vec<_>>())
};
let images = match gz {
fitsrs::gz::GzReader::GzReader(bytes) => parse_fits_images_from_bytes(bytes.get_ref())?,
fitsrs::gz::GzReader::Reader(bytes) => parse_fits_images_from_bytes(bytes.get_ref())?,
};
if images.is_empty() {
Err(JsValue::from_str("no images have been parsed"))
} else {
let layer = ImageLayer {
images,
id: layer.clone(),
layer,
meta,
// Convert the JS ReadableStream to a Rust stream
let bytes_reader = match body.try_into_async_read() {
Ok(async_read) => Either::Left(async_read),
Err((_err, body)) => Either::Right(
body.into_stream()
.map_ok(|js_value| {
js_value.dyn_into::<Uint8Array>().unwrap_throw().to_vec()
})
.map_err(|_js_error| std::io::Error::other("failed to read"))
.into_async_read(),
),
};
let params = layer.get_params();
self.layers.add_image(
layer,
&mut self.camera,
&self.projection,
&mut self.tile_fetcher,
)?;
self.request_redraw = true;
let mut reader = BufReader::new(bytes_reader);
let promise = js_sys::Promise::resolve(&serde_wasm_bindgen::to_value(&params)?);
Ok(promise)
}
let AsyncFits { mut hdu } = AsyncFits::from_reader(&mut reader)
.await
.map_err(|e| JsValue::from_str(&format!("Fits file parsing: reason: {}", e)))?;
let mut hdu_ext_idx = 0;
let mut images = vec![];
match Image::from_fits_hdu_async(&gl, &mut hdu.0, camera_coo_sys).await {
Ok(image) => {
images.push(image);
let mut hdu_ext = hdu.next().await;
// Continue parsing the file extensions here
while let Ok(Some(mut xhdu)) = hdu_ext {
match &mut xhdu {
AsyncXtensionHDU::Image(xhdu_img) => {
match Image::from_fits_hdu_async(&gl, xhdu_img, camera_coo_sys)
.await
{
Ok(image) => {
images.push(image);
}
Err(error) => {
al_core::log::console_warn(format!("The extension {hdu_ext_idx} has not been parsed, reason:")
);
al_core::log::console_warn(error);
}
}
}
_ => {
al_core::log::console_warn(format!("The extension {hdu_ext_idx} is a BinTable/AsciiTable and is thus discarded")
);
}
}
hdu_ext_idx += 1;
hdu_ext = xhdu.next().await;
}
}
Err(error) => {
al_core::log::console_warn(error);
let mut hdu_ext = hdu.next().await;
while let Ok(Some(mut xhdu)) = hdu_ext {
match &mut xhdu {
AsyncXtensionHDU::Image(xhdu_img) => {
match Image::from_fits_hdu_async(&gl, xhdu_img, camera_coo_sys)
.await
{
Ok(image) => {
images.push(image);
}
Err(error) => {
al_core::log::console_warn(format!("The extension {hdu_ext_idx} has not been parsed, reason:")
);
al_core::log::console_warn(error);
}
}
}
_ => {
al_core::log::console_warn(format!("The extension {hdu_ext_idx} is a BinTable/AsciiTable and is thus discarded")
);
}
}
hdu_ext_idx += 1;
hdu_ext = xhdu.next().await;
}
}
}
if images.is_empty() {
Err(JsValue::from_str("no images have been parsed"))
} else {
let fits = ImageLayer {
images,
id: layer.clone(),
layer,
meta,
};
fits_sender.send(fits).await.unwrap();
// Wait for the ack here
let image_params = ack_fits_recv
.recv()
.await
.map_err(|_| JsValue::from_str("Problem receiving fits"))?;
serde_wasm_bindgen::to_value(&image_params).map_err(|e| e.into())
}
};
let reenable_inertia = Closure::new(move || {
// re-enable inertia again
*(disable_inertia.borrow_mut()) = false;
});
let promise = wasm_bindgen_futures::future_to_promise(fut)
// Re-enable inertia independently of whether the
// fits has been correctly parsed or not
.finally(&reenable_inertia);
// forget the closure; this is not ideal because
// it will never be deallocated
reenable_inertia.forget();
Ok(promise)
}
pub(crate) fn get_layer_cfg(&self, layer: &str) -> Result<ImageMetadata, JsValue> {
@@ -1061,7 +1240,7 @@ impl App {
match hips {
HiPS::D2(_) => Err(JsValue::from_str("layer does not refer to a cube")),
HiPS::D3(hips) => {
hips.set_freq(Freq(slice as f64));
hips.set_slice(slice as u16);
Ok(())
}

View File

@@ -8,8 +8,8 @@ pub use fov::FieldOfView;
pub mod view_hpx_cells;
use crate::CooSystem;
use crate::HEALPixCoverage;
use crate::ProjectionType;
use crate::SpaceMoc;
pub fn build_fov_coverage(
depth: u8,
@@ -18,7 +18,7 @@ pub fn build_fov_coverage(
camera_frame: CooSystem,
frame: CooSystem,
proj: &ProjectionType,
) -> SpaceMoc {
) -> HEALPixCoverage {
if let Some(vertices) = fov.get_vertices() {
// The vertices coming from the camera are in a specific coo sys
// but cdshealpix expects them to be given in the ICRS coo sys
@@ -44,20 +44,20 @@ pub fn build_fov_coverage(
::healpix::nested::hash(depth, lon.to_radians(), lat.to_radians())
});
SpaceMoc::from_fixed_hpx_cells(depth, hpx_idxs_iter, Some(vertices.len()))
HEALPixCoverage::from_fixed_hpx_cells(depth, hpx_idxs_iter, Some(vertices.len()))
} else {
// The polygon is not too small for the requested depth
let inside_vertex = crate::coosys::apply_coo_system(camera_frame, frame, camera_center);
// Prefer to query from_polygon with depth >= 2
SpaceMoc::from_3d_coos(depth, vertices_iter, &inside_vertex)
HEALPixCoverage::from_3d_coos(depth, vertices_iter, &inside_vertex)
}
} else {
let center_xyz = crate::coosys::apply_coo_system(camera_frame, frame, camera_center);
let biggest_fov_rad = proj.aperture_start().to_radians();
let lonlat = center_xyz.lonlat();
SpaceMoc::from_cone(&lonlat, biggest_fov_rad * 0.5, depth)
HEALPixCoverage::from_cone(&lonlat, biggest_fov_rad * 0.5, depth)
}
}

View File

@@ -3,7 +3,7 @@ use crate::healpix::cell::HEALPixCell;
use crate::math::projection::*;
use crate::SpaceMoc;
use crate::HEALPixCoverage;
use moclib::moc::{range::op::degrade::degrade, RangeMOCIterator};
@@ -84,7 +84,7 @@ impl ViewHpxCells {
self.hpx_cells[frame as usize].get_cells(depth)
}
pub(super) fn get_cov(&self, frame: CooSystem) -> &SpaceMoc {
pub(super) fn get_cov(&self, frame: CooSystem) -> &HEALPixCoverage {
self.hpx_cells[frame as usize].get_cov()
}
@@ -109,7 +109,7 @@ pub struct HpxCells {
// An index vector referring to the indices of each depth cells
//idx_rng: [Option<Range<usize>>; MAX_HPX_DEPTH as usize + 1],
// Coverage created in the frame
cov: SpaceMoc,
cov: HEALPixCoverage,
// boolean referring to whether the cells in the view have changed
//new_cells: bool,
}
@@ -127,7 +127,7 @@ use super::FieldOfView;
impl HpxCells {
pub fn new(frame: CooSystem) -> Self {
//let cells = Vec::new();
let cov = SpaceMoc::empty(29);
let cov = HEALPixCoverage::empty(29);
//let idx_rng = Default::default();
@@ -203,7 +203,7 @@ impl HpxCells {
if depth == cov_depth {
self.cov
.flatten_to_fixed_depth_cells()
.map(|idx| HEALPixCell(depth, idx))
.map(move |idx| HEALPixCell(depth, idx))
.collect()
} else if depth > self.cov.depth_max() {
let cov_d = self.cov.depth_max();
@@ -212,7 +212,7 @@ impl HpxCells {
self.cov
.flatten_to_fixed_depth_cells()
.flat_map(|idx| {
.flat_map(move |idx| {
// idx is at depth_max
HEALPixCell(cov_d, idx).get_children_cells(dd)
})
@@ -221,7 +221,7 @@ impl HpxCells {
// compute the cells from the coverage
degrade((&self.cov.0).into_range_moc_iter(), depth)
.flatten_to_fixed_depth_cells()
.map(|idx| HEALPixCell(depth, idx))
.map(move |idx| HEALPixCell(depth, idx))
.collect()
}
}
@@ -257,7 +257,7 @@ impl HpxCells {
}*/
#[inline(always)]
pub fn get_cov(&self) -> &SpaceMoc {
pub fn get_cov(&self) -> &HEALPixCoverage {
&self.cov
}

View File

@@ -12,7 +12,7 @@ const ID_R: &Matrix3<f64> = &Matrix3::new(-1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.
use super::{fov::FieldOfView, view_hpx_cells::ViewHpxCells};
use crate::healpix::cell::HEALPixCell;
use crate::healpix::moc::SpaceMoc;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::angle::ToAngle;
use crate::math::{projection::coo_space::XYZModel, projection::domain::sdf::ProjDef};
use cgmath::{InnerSpace, Vector3};
@@ -216,7 +216,7 @@ impl CameraViewPort {
self.view_hpx_cells.has_changed()
}*/
pub fn get_cov(&self, frame: CooSystem) -> &SpaceMoc {
pub fn get_cov(&self, frame: CooSystem) -> &HEALPixCoverage {
self.view_hpx_cells.get_cov(frame)
}

View File

@@ -9,7 +9,6 @@ pub trait Query: Sized {
pub type QueryId = String;
use al_api::hips::DataproductType;
use al_core::image::format::ImageFormatType;
#[derive(Eq, PartialEq, Clone)]
@@ -27,14 +26,9 @@ pub struct Tile {
pub channel: Option<u32>,
}
/*pub enum {
}*/
use crate::healpix::cell::HEALPixCell;
use crate::renderable::hips::config::HiPSConfig;
use crate::renderable::CreatorDid;
use crate::tile_fetcher::HiPSLocalFiles;
use web_sys::{RequestCredentials, RequestMode};
impl Tile {
pub fn new(cell: &HEALPixCell, channel: Option<u32>, cfg: &HiPSConfig) -> Self {
@@ -50,17 +44,17 @@ impl Tile {
let dir_idx = (idx / 10000) * 10000;
let mut url = format!("{hips_url}/Norder{depth}/Dir{dir_idx}/Npix{idx}");
let mut url = format!("{}/Norder{}/Dir{}/Npix{}", hips_url, depth, dir_idx, idx);
// handle cube case
if let Some(channel) = channel {
if channel > 0 {
url.push_str(&format!("_{channel:?}"));
url.push_str(&format!("_{:?}", channel));
}
}
// add the tile format
url.push_str(&format!(".{ext}"));
url.push_str(&format!(".{}", ext));
let id = format!(
"{}{}{}{}{}",
@@ -71,7 +65,7 @@ impl Tile {
ext
);
let size = cfg.get_tile_size() as u32;
let size = cfg.get_tile_size();
Tile {
hips_cdid: hips_cdid.to_string(),
url,
@@ -81,7 +75,7 @@ impl Tile {
mode,
id,
channel,
size,
size: size as u32,
}
}
}
@@ -127,12 +121,12 @@ impl Allsky {
// handle cube case
if let Some(channel) = channel {
if channel > 0 {
url.push_str(&format!("_{channel:?}"));
url.push_str(&format!("_{:?}", channel));
}
}
// add the tile format
url.push_str(&format!(".{ext}"));
url.push_str(&format!(".{}", ext));
let id = format!(
"{}Allsky{}{}",
@@ -165,8 +159,43 @@ impl Query for Allsky {
}
/* ---------------------------------- */
use al_api::moc::MOCOptions;
pub struct PixelMetadata {
pub format: ImageFormatType,
// The root url of the HiPS
pub hips_cdid: CreatorDid,
// The total url of the query
pub url: Url,
pub id: QueryId,
}
impl PixelMetadata {
pub fn new(cfg: &HiPSConfig) -> Self {
let hips_cdid = cfg.get_creator_did().to_string();
let format = cfg.get_format();
let ext = format.get_ext_file();
let url = format!("{}/Norder3/Allsky.{}", cfg.get_root_url(), ext);
let id = format!("{}Allsky{}", hips_cdid, ext);
PixelMetadata {
hips_cdid,
url,
format,
id,
}
}
}
use super::request::blank::PixelMetadataRequest;
impl Query for PixelMetadata {
type Request = PixelMetadataRequest;
fn id(&self) -> &QueryId {
&self.id
}
}
use al_api::moc::MOCOptions;
/* ---------------------------------- */
pub struct Moc {
// The total url of the query
pub url: Url,
@@ -174,41 +203,21 @@ pub struct Moc {
pub credentials: RequestCredentials,
pub params: MOCOptions,
pub hips_cdid: CreatorDid,
pub dataproduct_type: DataproductType,
}
use std::collections::HashMap;
impl Moc {
pub fn new(
cfg: &HiPSConfig,
hips_local_files: &HashMap<String, HiPSLocalFiles>,
url: String,
mode: RequestMode,
credentials: RequestCredentials,
hips_cdid: CreatorDid,
params: MOCOptions,
) -> Self {
// Try to fetch the MOC
let hips_cdid = cfg.get_creator_did();
let url = if let Some(local_hips) = hips_local_files.get(hips_cdid) {
if let Ok(url) =
web_sys::Url::create_object_url_with_blob(local_hips.get_moc().as_ref())
{
url
} else {
format!("{}/Moc.fits", cfg.get_root_url())
}
} else {
format!("{}/Moc.fits", cfg.get_root_url())
};
let mode = cfg.get_request_mode();
let credentials = cfg.get_request_credentials();
let hips_cdid = cfg.get_creator_did().to_string();
let dataproduct_type = cfg.dataproduct_type;
Moc {
url,
params,
hips_cdid,
mode,
credentials,
dataproduct_type,
}
}
}

View File

@@ -1,9 +1,11 @@
use std::io::Cursor;
use crate::downloader::query;
use crate::renderable::CreatorDid;
use al_core::image::fits::FitsImage;
use al_core::image::format::ChannelType;
use al_core::image::ImageType;
use al_core::texture::format::PixelType;
use fitsrs::hdu::header::Bitpix;
use fitsrs::{fits::Fits, hdu::data::InMemData};
use super::{Request, RequestType};
use crate::downloader::QueryId;
@@ -76,12 +78,12 @@ impl From<query::Allsky> for AllskyRequest {
} = query;
//let depth_tile = crate::math::utils::log_2_unchecked(texture_size / tile_size) as u8;
let channel = format.get_pixel_format();
let channel = format.get_channel();
let url_clone = url.clone();
let request = Request::new(async move {
match channel {
PixelType::RGB8U => {
ChannelType::RGB8U => {
let allsky = query_allsky(&url_clone, credentials).await?;
let allsky_tiles =
@@ -102,7 +104,7 @@ impl From<query::Allsky> for AllskyRequest {
Ok(allsky_tiles)
}
PixelType::RGBA8U => {
ChannelType::RGBA8U => {
let allsky = query_allsky(&url_clone, credentials).await?;
let allsky_tiles = handle_allsky_file(allsky, allsky_tile_size, tile_size)?
@@ -130,66 +132,61 @@ impl From<query::Allsky> for AllskyRequest {
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;*/
let buf = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let bytes_buffer = js_sys::Uint8Array::new(&array_buffer);
let FitsImage {
raw_bytes, bitpix, ..
} = FitsImage::from_raw_bytes(raw_bytes.as_slice())?[0];
match bitpix {
Bitpix::U8 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
let num_bytes = bytes_buffer.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
bytes_buffer.copy_to(&mut raw_bytes[..]);
let mut reader = Cursor::new(&raw_bytes[..]);
let Fits { hdu } = Fits::from_reader(&mut reader)
.map_err(|_| JsValue::from_str("Parsing fits error of allsky"))?;
let data = hdu.get_data();
match data {
InMemData::U8(data) => {
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR8ui { image })
.collect())
}
Bitpix::I16 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
InMemData::I16(data) => {
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR16i { image })
.collect())
}
Bitpix::I32 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
InMemData::I32(data) => {
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR32i { image })
.collect())
}
Bitpix::I64 => {
let data = unsafe {
std::slice::from_raw_parts(
raw_bytes.as_ptr() as *const i64,
raw_bytes.len() / 8,
)
};
InMemData::I64(data) => {
let data = data.iter().map(|v| *v as i32).collect::<Vec<_>>();
let raw_bytes = unsafe {
Ok(handle_allsky_fits(&data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR32i { image })
.collect())
}
InMemData::F32(data) => {
let data = unsafe {
std::slice::from_raw_parts(
data.as_ptr() as *const u8,
data.len() * 4,
)
};
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR32i { image })
.collect())
}
Bitpix::F32 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawRgba8u { image })
.collect())
}
Bitpix::F64 => {
let data = unsafe {
std::slice::from_raw_parts(
raw_bytes.as_ptr() as *const f64,
raw_bytes.len() / 8,
)
};
InMemData::F64(data) => {
let data = data.iter().map(|v| *v as f32).collect::<Vec<_>>();
let raw_bytes = unsafe {
let data = unsafe {
std::slice::from_raw_parts(
data.as_ptr() as *const u8,
data.len() * 4,
)
};
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawRgba8u { image })
.collect())
}
@@ -209,9 +206,9 @@ impl From<query::Allsky> for AllskyRequest {
}
}
use al_core::image::format::ImageFormat;
use al_core::image::raw::ImageBufferView;
use al_core::texture::format::TextureFormat;
fn handle_allsky_file<F: TextureFormat>(
fn handle_allsky_file<F: ImageFormat>(
image: ImageBuffer<F>,
allsky_tile_size: i32,
tile_size: i32,
@@ -220,8 +217,11 @@ fn handle_allsky_file<F: TextureFormat>(
let mut src_idx = 0;
let tiles = (0..12).map(move |_| {
let mut base_tile =
ImageBuffer::<F>::allocate(&F::P::BLACK, allsky_tile_size, allsky_tile_size);
let mut base_tile = ImageBuffer::<F>::allocate(
&<F as ImageFormat>::P::BLACK,
allsky_tile_size,
allsky_tile_size,
);
for idx_tile in 0..64 {
let (x, y) = crate::utils::unmortonize(idx_tile as u64);
let dx = x * (d3_tile_allsky_size as u32);
@@ -253,8 +253,8 @@ fn handle_allsky_file<F: TextureFormat>(
Ok(tiles)
}
fn handle_allsky_fits<F: TextureFormat>(
image: &[<F::P as Pixel>::Item],
fn handle_allsky_fits<F: ImageFormat>(
image: &[<<F as ImageFormat>::P as Pixel>::Item],
tile_size: i32,
allsky_tile_size: i32,
@@ -292,7 +292,7 @@ fn handle_allsky_fits<F: TextureFormat>(
Ok(allsky_tiles_iter)
}
use al_core::texture::format::RGBA8U;
use al_core::image::format::RGBA8U;
use crate::time::Time;
use std::cell::RefCell;

View File

@@ -0,0 +1,161 @@
use al_core::image::format::ChannelType;
use std::io::Cursor;
use crate::downloader::query;
use crate::renderable::CreatorDid;
use fitsrs::fits::Fits;
#[derive(Debug, Clone, Copy)]
pub struct Metadata {
pub blank: f32,
pub scale: f32,
pub offset: f32,
}
impl Default for Metadata {
fn default() -> Self {
Metadata {
blank: -1.0,
scale: 1.0,
offset: 0.0,
}
}
}
use super::{Request, RequestType};
use crate::downloader::QueryId;
pub struct PixelMetadataRequest {
pub id: QueryId,
pub url: Url,
pub hips_cdid: CreatorDid,
request: Request<Metadata>,
}
impl From<PixelMetadataRequest> for RequestType {
fn from(request: PixelMetadataRequest) -> Self {
RequestType::PixelMetadata(request)
}
}
use super::Url;
use wasm_bindgen::JsCast;
use wasm_bindgen::JsValue;
use wasm_bindgen_futures::JsFuture;
use web_sys::{RequestInit, RequestMode, Response};
impl From<query::PixelMetadata> for PixelMetadataRequest {
// Create a tile request associated to a HiPS
fn from(query: query::PixelMetadata) -> Self {
let query::PixelMetadata {
format,
url,
hips_cdid,
id,
} = query;
let url_clone = url.clone();
let channel = format.get_channel();
let window = web_sys::window().unwrap_abort();
let request = match channel {
ChannelType::R32F | ChannelType::R32I | ChannelType::R16I | ChannelType::R8UI => {
Request::new(async move {
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(RequestMode::Cors);
let request =
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
let image = Fits::new(reader).await?;*/
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let bytes_buffer = js_sys::Uint8Array::new(&array_buffer);
let num_bytes = bytes_buffer.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
bytes_buffer.copy_to(&mut raw_bytes[..]);
let mut reader = Cursor::new(&raw_bytes[..]);
let Fits { hdu } = Fits::from_reader(&mut reader)
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
let header = hdu.get_header();
let scale =
if let Some(fitsrs::card::Value::Float(bscale)) = header.get(b"BSCALE ") {
*bscale as f32
} else {
1.0
};
let offset =
if let Some(fitsrs::card::Value::Float(bzero)) = header.get(b"BZERO ") {
*bzero as f32
} else {
0.0
};
let blank =
if let Some(fitsrs::card::Value::Float(blank)) = header.get(b"BLANK ") {
*blank as f32
} else {
f32::NAN
};
Ok(Metadata {
blank,
scale,
offset,
})
})
}
_ => Request::new(async move { Ok(Metadata::default()) }),
};
Self {
id,
url,
hips_cdid,
request,
}
}
}
use std::cell::RefCell;
use std::rc::Rc;
#[derive(Debug)]
pub struct PixelMetadata {
pub value: Rc<RefCell<Option<Metadata>>>,
pub hips_cdid: CreatorDid,
pub url: String,
}
use crate::Abort;
impl<'a> From<&'a PixelMetadataRequest> for Option<PixelMetadata> {
fn from(request: &'a PixelMetadataRequest) -> Self {
let PixelMetadataRequest {
request,
hips_cdid,
url,
..
} = request;
if request.is_resolved() {
let Request::<Metadata> { data, .. } = request;
// It will always be resolved and found as we will request a well-known tile (Norder0/Tile0)
Some(PixelMetadata {
hips_cdid: hips_cdid.clone(),
url: url.to_string(),
value: data.clone(),
})
} else {
None
}
}
}

View File

@@ -3,9 +3,7 @@ use crate::renderable::CreatorDid;
use super::{Request, RequestType};
use crate::healpix::moc::Moc;
use crate::healpix::moc::{FreqSpaceMoc, SpaceMoc};
use al_api::hips::DataproductType;
use crate::healpix::coverage::Smoc;
use moclib::deser::fits::MocType;
use moclib::qty::Hpx;
@@ -13,7 +11,7 @@ pub struct MOCRequest {
//pub id: QueryId,
pub hips_cdid: CreatorDid,
pub params: MOCOptions,
request: Request<Moc>,
request: Request<HEALPixCoverage>,
}
impl From<MOCRequest> for RequestType {
@@ -23,13 +21,31 @@ impl From<MOCRequest> for RequestType {
}
use super::Url;
use moclib::deser::fits;
use wasm_bindgen::JsCast;
use wasm_bindgen_futures::JsFuture;
use web_sys::{RequestInit, Response};
use moclib::moc::range::op::convert::convert_to_u64;
/// Builds a Space-MOC from a FITS-parsed HPX MOC, converting its indices to u64
pub fn from_fits_hpx<T: Idx>(moc: MocType<T, Hpx<T>, Cursor<&[u8]>>) -> Smoc {
match moc {
MocType::Ranges(moc) => convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc).into_range_moc(),
MocType::Cells(moc) => {
convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc.into_cell_moc_iter().ranges())
.into_range_moc()
}
}
}
use crate::healpix::coverage::HEALPixCoverage;
use crate::Abort;
use al_api::moc::MOCOptions;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use moclib::idx::Idx;
use moclib::moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator};
use std::io::Cursor;
use wasm_bindgen::JsValue;
impl From<query::Moc> for MOCRequest {
@@ -41,7 +57,6 @@ impl From<query::Moc> for MOCRequest {
hips_cdid,
credentials,
mode,
dataproduct_type,
} = query;
let url_clone = url.clone();
@@ -60,16 +75,22 @@ impl From<query::Moc> for MOCRequest {
let resp: Response = resp_value.dyn_into()?;
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let buf = js_sys::Uint8Array::new(&array_buffer);
let bytes = buf.to_vec();
let bytes_buf = js_sys::Uint8Array::new(&array_buffer);
let num_bytes = bytes_buf.length() as usize;
let mut bytes = vec![0; num_bytes];
bytes_buf.copy_to(&mut bytes[..]);
// Coosys is permissive because we load a moc
Ok(match dataproduct_type {
DataproductType::SpectralCube => {
Moc::FreqSpace(FreqSpaceMoc::from_fits_raw_bytes(&bytes)?)
}
_ => Moc::Space(SpaceMoc::from_fits_raw_bytes(&bytes)?),
})
let smoc = match fits::from_fits_ivoa_custom(Cursor::new(&bytes[..]), true)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
MocIdxType::U16(MocQtyType::<u16, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U32(MocQtyType::<u32, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
_ => Err(JsValue::from_str("MOC not supported. Must be a HPX MOC")),
}?;
Ok(HEALPixCoverage(smoc))
});
Self {
@@ -84,19 +105,19 @@ impl From<query::Moc> for MOCRequest {
use std::cell::RefCell;
use std::rc::Rc;
pub struct FetchedMoc {
pub moc: Rc<RefCell<Option<Moc>>>,
pub struct Moc {
pub moc: Rc<RefCell<Option<HEALPixCoverage>>>,
pub params: MOCOptions,
pub hips_cdid: Url,
}
impl FetchedMoc {
impl Moc {
pub fn get_hips_cdid(&self) -> &Url {
&self.hips_cdid
}
}
impl<'a> From<&'a MOCRequest> for Option<FetchedMoc> {
impl<'a> From<&'a MOCRequest> for Option<Moc> {
fn from(request: &'a MOCRequest) -> Self {
let MOCRequest {
request,
@@ -105,8 +126,8 @@ impl<'a> From<&'a MOCRequest> for Option<FetchedMoc> {
..
} = request;
if request.is_resolved() {
let Request::<Moc> { data, .. } = request;
Some(FetchedMoc {
let Request::<HEALPixCoverage> { data, .. } = request;
Some(Moc {
// This is a clone on a Arc, it is supposed to be fast
moc: data.clone(),
hips_cdid: hips_cdid.clone(),

View File

@@ -1,6 +1,7 @@
// A request image should not be used outside this module
// but contained inside a more specific type of query (e.g. for a tile or allsky)
pub mod allsky;
pub mod blank;
pub mod moc;
pub mod tile;
@@ -78,11 +79,13 @@ where
}
use allsky::AllskyRequest;
use blank::PixelMetadataRequest;
use moc::MOCRequest;
use tile::TileRequest;
pub enum RequestType {
Tile(TileRequest),
Allsky(AllskyRequest),
PixelMetadata(PixelMetadataRequest),
Moc(MOCRequest), //..
}
@@ -92,6 +95,7 @@ impl RequestType {
match self {
RequestType::Tile(request) => &request.id,
RequestType::Allsky(request) => &request.id,
RequestType::PixelMetadata(request) => &request.id,
RequestType::Moc(request) => &request.hips_cdid,
}
}
@@ -102,23 +106,27 @@ impl<'a> From<&'a RequestType> for Option<Resource> {
match request {
RequestType::Tile(request) => Option::<Tile>::from(request).map(Resource::Tile),
RequestType::Allsky(request) => Option::<Allsky>::from(request).map(Resource::Allsky),
RequestType::Moc(request) => Option::<FetchedMoc>::from(request).map(Resource::Moc),
RequestType::PixelMetadata(request) => {
Option::<PixelMetadata>::from(request).map(Resource::PixelMetadata)
}
RequestType::Moc(request) => Option::<Moc>::from(request).map(Resource::Moc),
}
}
}
use crate::Abort;
use allsky::Allsky;
use blank::PixelMetadata;
use moc::Moc;
use tile::Tile;
pub enum Resource {
Tile(Tile),
Allsky(Allsky),
Moc(FetchedMoc),
PixelMetadata(PixelMetadata),
Moc(Moc),
}
use web_sys::RequestCredentials;
use self::moc::FetchedMoc;
async fn query_html_image(
url: &str,
credentials: RequestCredentials,

View File

@@ -1,7 +1,6 @@
use crate::healpix::cell::HEALPixCell;
use crate::renderable::CreatorDid;
use al_core::image::format::ImageFormatType;
use al_core::texture::format::{PixelType, RGB8U, RGBA8U};
use al_core::image::format::{ChannelType, ImageFormatType, RGB8U, RGBA8U};
use crate::downloader::query;
use al_core::image::ImageType;
@@ -43,16 +42,43 @@ impl From<query::Tile> for TileRequest {
credentials,
mode,
id,
channel,
channel: slice,
size,
} = query;
let url_clone = url.clone();
let pixel_format = format.get_pixel_format();
let channel = format.get_channel();
let window = web_sys::window().unwrap_abort();
let request = match pixel_format {
PixelType::RGB8U => Request::new(async move {
let request = match channel {
ChannelType::RGB8U => Request::new(async move {
/*let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(RequestMode::Cors);
let request = web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;*/
/*/// Bitmap version
let blob = JsFuture::from(resp.blob()?).await?.into();
let image = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?)
.await?
.into();
let image = Bitmap::new(image);
Ok(ImageType::JpgImageRgb8u { image })*/
/*
/// Raw image decoding
let buf = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
let image = ImageBuffer::<RGB8U>::from_raw_bytes(&raw_bytes[..], 512, 512)?;
Ok(ImageType::RawRgb8u { image })
*/
// HTMLImageElement
let image = query_html_image(&url_clone, credentials).await?;
// The image has been resolved
@@ -60,7 +86,34 @@ impl From<query::Tile> for TileRequest {
image: HTMLImage::<RGB8U>::new(image),
})
}),
PixelType::RGBA8U => Request::new(async move {
ChannelType::RGBA8U => Request::new(async move {
/*let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(RequestMode::Cors);
let request = web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;*/
/*/// Bitmap version
let blob = JsFuture::from(resp.blob()?).await?.into();
let image = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?)
.await?
.into();
let image = Bitmap::new(image);
Ok(ImageType::PngImageRgba8u { image })*/
/*
/// Raw image decoding
let buf = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
let image = ImageBuffer::<RGBA8U>::from_raw_bytes(&raw_bytes[..], 512, 512)?;
Ok(ImageType::RawRgba8u { image })
*/
// HTMLImageElement
let image = query_html_image(&url_clone, credentials).await?;
// The image has been resolved
@@ -68,42 +121,45 @@ impl From<query::Tile> for TileRequest {
image: HTMLImage::<RGBA8U>::new(image),
})
}),
PixelType::R32F | PixelType::R32I | PixelType::R16I | PixelType::R8U => {
Request::new(async move {
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(mode);
opts.credentials(credentials);
ChannelType::R32F
| ChannelType::R64F
| ChannelType::R32I
| ChannelType::R16I
| ChannelType::R8UI => Request::new(async move {
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(mode);
opts.credentials(credentials);
let request =
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
let request =
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
let image = Fits::new(reader).await?;
*/
if resp.ok() {
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&array_buffer);
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
let image = Fits::new(reader).await?;
*/
if resp.ok() {
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&array_buffer);
Ok(ImageType::FitsRawBytes {
raw_bytes,
size: (size, size),
})
} else {
Err(JsValue::from_str(
"Response status code not between 200-299.",
))
}
})
}
Ok(ImageType::FitsImage {
raw_bytes,
size: (size, size),
})
} else {
Err(JsValue::from_str(
"Response status code not between 200-299.",
))
}
}),
_ => todo!(),
};
Self {
@@ -113,7 +169,7 @@ impl From<query::Tile> for TileRequest {
hips_cdid,
url,
request,
channel,
channel: slice,
}
}
}

View File

@@ -3,7 +3,6 @@ use crate::math::PI;
use crate::math::{self, lonlat::LonLat};
use cgmath::Vector3;
use moclib::moc::RangeMOCIntoIterator;
use moclib::{
moc::range::{CellSelection, RangeMOC},
qty::Hpx,
@@ -13,65 +12,9 @@ pub type Smoc = RangeMOC<u64, Hpx<u64>>;
use crate::healpix::cell::HEALPixCell;
#[derive(Clone, Debug)]
pub struct SpaceMoc(pub Smoc);
use wasm_bindgen::JsValue;
use moclib::deser::fits;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use moclib::idx::Idx;
use moclib::moc::range::op::convert::convert_to_u64;
use moclib::moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator};
/// Builds a Space-MOC from a FITS-parsed HPX MOC, converting its indices to u64
pub fn from_fits_hpx<T: Idx>(moc: MocType<T, Hpx<T>, Cursor<&[u8]>>) -> Smoc {
match moc {
MocType::Ranges(moc) => convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc).into_range_moc(),
MocType::Cells(moc) => {
convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc.into_cell_moc_iter().ranges())
.into_range_moc()
}
}
}
use moclib::deser::fits::MocType;
use std::io::Cursor;
impl SpaceMoc {
pub fn from_fits_raw_bytes(bytes: &[u8]) -> Result<Self, JsValue> {
let smoc = match fits::from_fits_ivoa_custom(Cursor::new(bytes), true)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
MocIdxType::U16(MocQtyType::<u16, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U32(MocQtyType::<u32, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
_ => Err(JsValue::from_str("MOC not supported. Must be a HPX MOC")),
}?;
Ok(Self(smoc))
}
pub fn from_json(s: &str) -> Result<Self, JsValue> {
let moc = moclib::deser::json::from_json_aladin::<u64, Hpx<u64>>(s)
.map_err(|e| JsValue::from(js_sys::Error::new(&e.to_string())))?
.into_cell_moc_iter()
.ranges()
.into_range_moc();
Ok(Self(moc))
}
pub fn serialize_to_json(&self) -> Result<String, JsValue> {
let mut buf: Vec<u8> = Default::default();
let json = (&self.0)
.into_range_moc_iter()
.cells()
.to_json_aladin(None, &mut buf)
.map(|()| unsafe { String::from_utf8_unchecked(buf) })
.map_err(|err| JsValue::from_str(&format!("{err:?}")));
json
}
pub struct HEALPixCoverage(pub Smoc);
impl HEALPixCoverage {
pub fn from_3d_coos<T: LonLat<f64>>(
// The depth of the smallest HEALPix cells contained in it
depth: u8,
@@ -95,7 +38,7 @@ impl SpaceMoc {
depth,
CellSelection::All,
);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn from_fixed_hpx_cells(
@@ -104,7 +47,7 @@ impl SpaceMoc {
cap: Option<usize>,
) -> Self {
let moc = RangeMOC::from_fixed_depth_cells(depth, hpx_idx, cap);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn from_hpx_cells<'a>(
@@ -115,14 +58,14 @@ impl SpaceMoc {
let cells_it = hpx_cell_it.map(|HEALPixCell(depth, idx)| (*depth, *idx));
let moc = RangeMOC::from_cells(depth, cells_it, cap);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn from_cone(lonlat: &LonLatT<f64>, rad: f64, depth: u8) -> Self {
if rad >= PI {
Self::allsky(depth)
} else {
SpaceMoc(RangeMOC::from_cone(
HEALPixCoverage(RangeMOC::from_cone(
lonlat.lon().to_radians(),
lonlat.lat().to_radians(),
rad,
@@ -135,7 +78,12 @@ impl SpaceMoc {
pub fn allsky(depth_max: u8) -> Self {
let moc = RangeMOC::new_full_domain(depth_max);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn contains_coo(&self, coo: &Vector3<f64>) -> bool {
let (lon, lat) = math::lonlat::xyz_to_radec(coo);
self.0.is_in(lon.to_radians(), lat.to_radians())
}
pub fn contains_lonlat(&self, lonlat: &LonLatT<f64>) -> bool {
@@ -150,9 +98,9 @@ impl SpaceMoc {
self.0.moc_ranges().intersects_range(&z29_rng)
}
/*pub fn is_intersecting(&self, other: &Self) -> bool {
pub fn is_intersecting(&self, other: &Self) -> bool {
!self.0.intersection(&other.0).is_empty()
}*/
}
pub fn depth(&self) -> u8 {
self.0.depth_max()
@@ -163,16 +111,16 @@ impl SpaceMoc {
}
pub fn not(&self) -> Self {
SpaceMoc(self.0.not())
HEALPixCoverage(self.0.not())
}
pub fn empty(depth: u8) -> Self {
SpaceMoc(RangeMOC::new_empty(depth))
HEALPixCoverage(RangeMOC::new_empty(depth))
}
}
use core::ops::Deref;
impl Deref for SpaceMoc {
impl Deref for HEALPixCoverage {
type Target = Smoc;
fn deref(&'_ self) -> &'_ Self::Target {

View File

@@ -1,161 +0,0 @@
use crate::math::lonlat::LonLatT;
use crate::math::PI;
use crate::math::{self, lonlat::LonLat};
use cgmath::Vector3;
use moclib::elemset::range::uniq::HpxUniqRanges;
use moclib::hpxranges2d::HpxRanges2D;
use moclib::moc::RangeMOCIntoIterator;
use moclib::moc2d::{HasTwoMaxDepth, RangeMOC2Iterator};
use moclib::ranges::ranges2d::Ranges2D;
use moclib::{
moc::range::{CellSelection, RangeMOC},
moc2d::range::RangeMOC2,
qty::Hpx,
ranges::SNORanges,
};
use moclib::qty::Frequency;
use crate::healpix::cell::HEALPixCell;
#[derive(Debug)]
pub struct FreqSpaceMoc(pub moclib::hpxranges2d::FreqSpaceMoc<u64, u64>);
impl Clone for FreqSpaceMoc {
fn clone(&self) -> Self {
let HpxRanges2D(Moc2DRanges {
ranges2d: Ranges2D { x, y },
..
}) = &**self;
Self(HpxRanges2D(Moc2DRanges::new(x.clone(), y.clone())))
}
}
use wasm_bindgen::JsValue;
use moclib::deser::fits;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use moclib::deser::fits::RangeMoc2DIterFromFits;
use moclib::idx::Idx;
use moclib::moc::range::op::convert::convert_to_u64;
use moclib::moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator};
use moclib::mocranges2d::Moc2DRanges;
use moclib::deser::fits::MocType;
use std::io::Cursor;
use crate::math::spectra::Freq;
use crate::math::spectra::SpectralUnit;
impl FreqSpaceMoc {
pub fn from_fits_raw_bytes(bytes: &[u8]) -> Result<Self, JsValue> {
let sfmoc = match fits::from_fits_ivoa_custom(Cursor::new(bytes), true)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
//MocIdxType::U16(MocQtyType::<u16, _>::FreqHpx(moc)) => Ok(from_fits_hpx(moc)),
//MocIdxType::U32(MocQtyType::<u32, _>::FreqHpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::FreqHpx(ranges_iter)) => {
let moc_2d_ranges = Moc2DRanges::from_ranges_it(ranges_iter);
let inner = moclib::hpxranges2d::HpxRanges2D(moc_2d_ranges);
Ok(inner)
}
_ => Err(JsValue::from_str(
"MOC not supported. Must be a FREQ|HPX 2DMOC coded on U64 only",
)),
}?;
Ok(Self(sfmoc))
}
/// This method builds a SFMOC made of:
/// * the cells in the spatial viewport at a specific frequency f
/// * the +/- f_window cells containing inside lonlat on the frequency axis
/// This is the method to use when looking for new cube HiPS3D tiles
pub fn from_coos_freq<L: LonLat<f64>, F: SpectralUnit>(
// The depth of the smallest HEALPix cells contained in it
depth: u8,
// The vertices of the polygon delimiting the coverage
vertices_iter: impl Iterator<Item = L>,
// A vertex being inside the coverage,
// typically the center of projection
inside: &L,
// The freq at which we want to compute the sfmoc
f: F,
// Frequency window i.e. the number of cells around f to query
f_window: u8,
) -> Self {
let freq: Freq = f.into();
todo!();
/*let lonlat = vertices_iter
.map(|vertex| {
let LonLatT(lon, lat) = vertex.lonlat();
(lon.to_radians(), lat.to_radians())
})
.collect::<Vec<_>>();
let LonLatT(in_lon, in_lat) = inside.lonlat();
let moc = RangeMOC2::from_freqranges_in_hz_and_coos(
&lonlat[..],
(in_lon.to_radians(), in_lat.to_radians()),
depth,
CellSelection::All,
);
SpaceFreqMoc(moc)*/
}
/*pub fn from_fixed_hpx_cells(
depth: u8,
hpx_idx: impl Iterator<Item = u64>,
cap: Option<usize>,
) -> Self {
let moc = RangeMOC::from_fixed_depth_cells(depth, hpx_idx, cap);
SpaceMoc(moc)
}
pub fn from_hpx_cells<'a>(
depth: u8,
hpx_cell_it: impl Iterator<Item = &'a HEALPixCell>,
cap: Option<usize>,
) -> Self {
let cells_it = hpx_cell_it.map(|HEALPixCell(depth, idx)| (*depth, *idx));
let moc = RangeMOC::from_cells(depth, cells_it, cap);
SpaceMoc(moc)
}*/
pub fn f_max_depth(&self) -> u8 {
self.0.compute_min_depth().0
}
pub fn s_max_depth(&self) -> u8 {
self.0.compute_min_depth().1
}
pub fn sky_fraction(&self) -> f64 {
todo!()
}
pub fn intersects_cell(&self, hpx_cell: &HEALPixCell, f: Freq) -> bool {
let z29_rng = hpx_cell.z_29_rng();
let f_hash = f.hash();
self.0.contains(f_hash, &z29_rng)
}
/*/// provide the list of (hash hpx, hash freq) of the cells contained in the sfmoc
pub fn cells(&self) -> impl Iterator<Item = (u64, u64)> {
todo!()
}*/
}
use core::ops::Deref;
impl Deref for FreqSpaceMoc {
type Target = moclib::hpxranges2d::FreqSpaceMoc<u64, u64>;
fn deref(&'_ self) -> &'_ Self::Target {
&self.0
}
}

View File

@@ -1,10 +0,0 @@
mod freq_space;
mod space;
pub use freq_space::FreqSpaceMoc;
pub use space::SpaceMoc;
pub enum Moc {
FreqSpace(FreqSpaceMoc),
Space(SpaceMoc),
}

View File

@@ -1,4 +1,4 @@
pub mod cell;
pub mod coverage;
pub mod index_vector;
pub mod moc;
pub mod utils;

View File

@@ -92,8 +92,6 @@ use al_api::moc::MOCOptions;
use wasm_bindgen::prelude::*;
use web_sys::HtmlElement;
use fitsrs::{WCSParams, WCS};
use crate::math::angle::ToAngle;
mod app;
@@ -112,10 +110,14 @@ mod shader;
mod tile_fetcher;
mod time;
use crate::downloader::request::moc::from_fits_hpx;
use crate::{
camera::CameraViewPort, healpix::moc::SpaceMoc, math::lonlat::LonLatT, shader::ShaderManager,
time::DeltaTime,
camera::CameraViewPort, healpix::coverage::HEALPixCoverage, math::lonlat::LonLatT,
shader::ShaderManager, time::DeltaTime,
};
use moclib::deser::fits;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use std::io::Cursor;
@@ -132,6 +134,10 @@ use cgmath::{Vector2, Vector3};
use crate::healpix::cell::HEALPixCell;
use math::angle::ArcDeg;
use moclib::{
moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator},
qty::Hpx,
};
#[wasm_bindgen]
pub struct WebClient {
@@ -344,31 +350,33 @@ impl WebClient {
Ok(())
}
#[wasm_bindgen(js_name = addFITSImage)]
pub fn add_fits_image(
#[wasm_bindgen(js_name = addImageFITS)]
pub fn add_image_fits(
&mut self,
bytes: &[u8],
stream: web_sys::ReadableStream,
cfg: JsValue,
layer: String,
) -> Result<js_sys::Promise, JsValue> {
let cfg: ImageMetadata = serde_wasm_bindgen::from_value(cfg)?;
self.app.add_fits_image(bytes, cfg, layer)
self.app.add_image_fits(stream, cfg, layer)
}
#[wasm_bindgen(js_name = addRGBAImage)]
pub fn add_rgba_image(
#[wasm_bindgen(js_name = addImageWithWCS)]
pub fn add_image_with_wcs(
&mut self,
bytes: &[u8],
stream: web_sys::ReadableStream,
wcs: JsValue,
cfg: JsValue,
layer: String,
) -> Result<js_sys::Promise, JsValue> {
use wcs::{WCSParams, WCS};
let cfg: ImageMetadata = serde_wasm_bindgen::from_value(cfg)?;
let wcs_params: WCSParams = serde_wasm_bindgen::from_value(wcs)?;
let wcs = WCS::new(&wcs_params).map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;
let wcs = WCS::new(&wcs_params).map_err(|e| JsValue::from_str(&format!("{e:?}")))?;
self.app.add_rgba_image(layer, bytes, wcs, cfg)
self.app
.add_image_from_blob_and_wcs(layer, stream, wcs, cfg)
}
#[wasm_bindgen(js_name = removeLayer)]
@@ -974,7 +982,7 @@ impl WebClient {
let grad = colorgrad::CustomGradient::new()
.colors(&rgba_colors?)
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?;
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?;
let cmap = Colormap::new(&label, grad);
self.app.add_cmap(label, cmap)?;
@@ -1054,8 +1062,13 @@ impl WebClient {
pub fn add_json_moc(&mut self, options: MOCOptions, data: &JsValue) -> Result<(), JsValue> {
let str: String = js_sys::JSON::stringify(data)?.into();
let smoc = SpaceMoc::from_json(&str)?;
self.app.add_moc(smoc, options)?;
let moc = moclib::deser::json::from_json_aladin::<u64, Hpx<u64>>(&str)
.map_err(|e| JsValue::from(js_sys::Error::new(&e.to_string())))?
.into_cell_moc_iter()
.ranges()
.into_range_moc();
self.app.add_moc(HEALPixCoverage(moc), options)?;
Ok(())
}
@@ -1063,8 +1076,18 @@ impl WebClient {
#[wasm_bindgen(js_name = addFITSMOC)]
pub fn add_fits_moc(&mut self, options: MOCOptions, data: &[u8]) -> Result<(), JsValue> {
//let bytes = js_sys::Uint8Array::new(array_buffer).to_vec();
let smoc = SpaceMoc::from_fits_raw_bytes(data)?;
self.app.add_moc(smoc, options)?;
let moc = match fits::from_fits_ivoa_custom(Cursor::new(data), false)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
MocIdxType::U16(MocQtyType::<u16, _>::Hpx(moc)) => {
Ok(crate::downloader::request::moc::from_fits_hpx(moc))
}
MocIdxType::U32(MocQtyType::<u32, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
_ => Err(JsValue::from_str("MOC not supported. Must be a HPX MOC")),
}?;
self.app.add_moc(HEALPixCoverage(moc), options)?;
Ok(())
}
@@ -1079,7 +1102,7 @@ impl WebClient {
) -> Result<(), JsValue> {
let tile_d = self.app.get_norder();
let pixel_d = tile_d + 9;
let moc = SpaceMoc::from_cone(
let moc = HEALPixCoverage::from_cone(
&LonLatT::new(
ra_deg.to_radians().to_angle(),
dec_deg.to_radians().to_angle(),
@@ -1113,7 +1136,7 @@ impl WebClient {
let v_in = &Vector3::new(1.0, 0.0, 0.0);
let mut moc = SpaceMoc::from_3d_coos(pixel_d as u8 - 1, vertex_it, v_in);
let mut moc = HEALPixCoverage::from_3d_coos(pixel_d as u8 - 1, vertex_it, v_in);
if moc.sky_fraction() > 0.5 {
moc = moc.not();
}
@@ -1159,9 +1182,15 @@ impl WebClient {
.get_moc(&moc_uuid)
.ok_or_else(|| JsValue::from(js_sys::Error::new("MOC not found")))?;
let json = moc.serialize_to_json()?;
let mut buf: Vec<u8> = Default::default();
let json = (&moc.0)
.into_range_moc_iter()
.cells()
.to_json_aladin(None, &mut buf)
.map(|()| unsafe { String::from_utf8_unchecked(buf) })
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?;
serde_wasm_bindgen::to_value(&json).map_err(|err| JsValue::from_str(&format!("{err:?}")))
serde_wasm_bindgen::to_value(&json).map_err(|err| JsValue::from_str(&format!("{:?}", err)))
}
#[wasm_bindgen(js_name = getMOCSkyFraction)]

View File

@@ -49,8 +49,8 @@ where
}
use crate::math::angle::ToAngle;
impl From<fitsrs::wcs::LonLat> for LonLatT<f64> {
fn from(lonlat: fitsrs::wcs::LonLat) -> Self {
impl From<wcs::LonLat> for LonLatT<f64> {
fn from(lonlat: wcs::LonLat) -> Self {
Self(lonlat.lon().to_angle(), lonlat.lat().to_angle())
}
}

View File

@@ -9,8 +9,6 @@ pub const SQRT_TWO: f64 = std::f64::consts::SQRT_2;
pub const ZERO: f64 = 0.0;
pub mod spectra;
pub mod angle;
pub mod lonlat;
pub mod projection;

View File

@@ -1,62 +0,0 @@
pub trait SpectralUnit: Into<Freq> + Clone + Copy {
fn hash(&self) -> u64 {
let f: Freq = (*self).into();
Frequency::freq2hash(f.0)
}
}
use moclib::qty::Frequency;
/// Frequency in Hz unit
#[derive(Clone, Copy)]
pub struct Freq(pub f64);
impl Freq {
fn from_hash(hash: u64) -> Self {
let f = Frequency::hash2freq(hash);
Freq(f)
}
}
/// Wavelength in meter unit
#[derive(Clone, Copy)]
pub struct Wavelength(f64);
/// Velocity in meter/sec unit
#[derive(Clone, Copy)]
pub struct Velocity {
/// A rest frequency to compute the velocity from
/// given by the obs_restfreq HiPS property
rest_freq: Freq,
/// The velocity in m/s
velocity: f64,
}
const SPEED_OF_LIGHT: f64 = 299792458.0;
impl From<Velocity> for Freq {
fn from(v: Velocity) -> Self {
let Velocity {
rest_freq,
velocity,
} = v;
// v = c * (of - f) / of
// v * of = c * (of - f)
// c * f = c * of - v * of = of * (c - v)
// f = of * (c - v) / c = of * (1 - v / c)
Freq(rest_freq.0 * (1.0 - velocity / SPEED_OF_LIGHT))
}
}
impl From<Wavelength> for Freq {
fn from(lambda: Wavelength) -> Self {
Freq(SPEED_OF_LIGHT / lambda.0)
}
}
impl SpectralUnit for Freq {}
impl SpectralUnit for Wavelength {}
impl SpectralUnit for Velocity {}

View File

@@ -223,7 +223,7 @@ impl Manager {
pub fn get_mut_catalog(&mut self, name: &str) -> Result<&mut Catalog, Error> {
self.catalogs.get_mut(name).ok_or(Error::CatalogNotPresent {
message: format!("{name} catalog is not present!"),
message: format!("{} catalog is not present!", name),
})
}

View File

@@ -1,7 +1,6 @@
use al_api::hips::{DataproductType, ImageExt};
use al_api::hips::ImageExt;
use al_core::image::format::ImageFormatType;
use al_core::texture::format::PixelType;
use al_core::image::format::{ChannelType, ImageFormatType};
use web_sys::{RequestCredentials, RequestMode};
#[derive(Debug)]
@@ -10,31 +9,35 @@ pub struct HiPSConfig {
// HiPS image format
// TODO: Make that independent of the HiPS and dependent on the ImageFormat instead
// Size of the tiles
pub tile_size: i32,
// The size of the texture images
tile_size: i32,
// Number of slices for HiPS cubes
min_depth_tile: u8,
// the number of slices for cubes
cube_depth: Option<u32>,
// Max depth of the current HiPS tiles
max_depth_tile: u8,
// Min depth of the current HiPS tiles
min_depth_tile: u8,
// For HiPS3D
max_depth_freq: Option<u8>,
// For HiPS3D
pub tile_depth: Option<u8>,
pub is_allsky: bool,
// TODO: store these values in the ImageSurvey
// These are specific to the survey (the FITS one) and not
// to a specific survey color
pub fits_metadata: bool,
pub scale: f32,
pub offset: f32,
pub blank: f32,
pub tex_storing_integers: bool,
pub tex_storing_fits: bool,
pub tex_storing_unsigned_int: bool,
pub frame: CooSystem,
// For FITS HiPSes
pub bitpix: Option<i32>,
format: ImageFormatType,
pub dataproduct_type: DataproductType,
//dataproduct_subtype: Option<Vec<String>>,
//colored: bool,
pub creator_did: String,
pub request_credentials: RequestCredentials,
@@ -65,7 +68,12 @@ impl HiPSConfig {
// Determine the size of the texture to copy
// it cannot be larger than 512x512 px
let _fmt = properties.get_formats();
let bitpix = properties.get_bitpix();
let mut tex_storing_unsigned_int = false;
let mut tex_storing_integers = false;
let mut tex_storing_fits = false;
if !properties.get_formats().contains(&img_ext) {
return Err(js_sys::Error::new("HiPS format not available").into());
@@ -75,18 +83,45 @@ impl HiPSConfig {
ImageExt::Fits => {
// Check the bitpix to determine the internal format of the tiles
if let Some(bitpix) = bitpix {
let fmt = (match bitpix {
8 => Ok(PixelType::R8U),
16 => Ok(PixelType::R16I),
32 => Ok(PixelType::R32I),
-32 => Ok(PixelType::R32F),
-64 => Ok(PixelType::R32F),
let channel = (match bitpix {
#[cfg(feature = "webgl2")]
8 => {
tex_storing_fits = true;
tex_storing_unsigned_int = true;
Ok(ChannelType::R8UI)
}
#[cfg(feature = "webgl2")]
16 => {
tex_storing_fits = true;
tex_storing_integers = true;
Ok(ChannelType::R16I)
}
#[cfg(feature = "webgl2")]
32 => {
tex_storing_fits = true;
tex_storing_integers = true;
Ok(ChannelType::R32I)
}
-32 => {
tex_storing_fits = true;
tex_storing_integers = false;
Ok(ChannelType::R32F)
}
-64 => {
tex_storing_fits = true;
tex_storing_integers = false;
//Err(JsValue::from_str("f64 FITS files not supported"))
Ok(ChannelType::R64F)
}
_ => Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not correct in the property file",
)),
})?;
Ok(ImageFormatType { ext: img_ext, fmt })
Ok(ImageFormatType {
ext: img_ext,
channel,
})
} else {
Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not found",
@@ -95,11 +130,11 @@ impl HiPSConfig {
}
ImageExt::Png | ImageExt::Webp => Ok(ImageFormatType {
ext: img_ext,
fmt: PixelType::RGBA8U,
channel: ChannelType::RGBA8U,
}),
ImageExt::Jpeg => Ok(ImageFormatType {
ext: img_ext,
fmt: PixelType::RGB8U,
channel: ChannelType::RGB8U,
}),
}?;
@@ -124,12 +159,6 @@ impl HiPSConfig {
_ => RequestMode::Cors,
};
let dataproduct_type = properties.get_dataproduct_type().ok_or(JsValue::from_str(
"dataproduct_type keyword is required in the HiPS properties file",
))?;
let max_depth_freq = properties.get_hips_order_freq();
let tile_depth = properties.get_hips_tile_depth();
let hips_config = HiPSConfig {
creator_did,
// HiPS name
@@ -139,12 +168,17 @@ impl HiPSConfig {
is_allsky,
// HiPSCube
cube_depth,
fits_metadata: false,
scale: 1.0,
offset: 0.0,
blank: -1.0, // by default, set it to -1
// HiPS3D
tile_depth,
max_depth_freq,
tex_storing_fits,
tex_storing_integers,
tex_storing_unsigned_int,
// the number of slices in a cube
cube_depth,
frame,
bitpix,
@@ -152,7 +186,6 @@ impl HiPSConfig {
tile_size,
request_credentials,
request_mode,
dataproduct_type,
};
Ok(hips_config)
@@ -163,32 +196,66 @@ impl HiPSConfig {
ImageExt::Fits => {
// Check the bitpix to determine the internal format of the tiles
if let Some(bitpix) = self.bitpix {
let fmt = (match bitpix {
8 => Ok(PixelType::R8U),
16 => Ok(PixelType::R16I),
32 => Ok(PixelType::R32I),
-32 => Ok(PixelType::R32F),
-64 => Ok(PixelType::R32F),
let channel = (match bitpix {
#[cfg(feature = "webgl2")]
8 => {
self.tex_storing_fits = true;
self.tex_storing_unsigned_int = true;
Ok(ChannelType::R8UI)
}
#[cfg(feature = "webgl2")]
16 => {
self.tex_storing_fits = true;
self.tex_storing_integers = true;
Ok(ChannelType::R16I)
}
#[cfg(feature = "webgl2")]
32 => {
self.tex_storing_fits = true;
self.tex_storing_integers = true;
Ok(ChannelType::R32I)
}
-32 => {
self.tex_storing_fits = true;
self.tex_storing_integers = false;
Ok(ChannelType::R32F)
}
-64 => {
self.tex_storing_fits = true;
self.tex_storing_integers = false;
//Err(JsValue::from_str("f64 FITS files not supported"))
Ok(ChannelType::R64F)
}
_ => Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not correct in the property file",
)),
})?;
Ok(ImageFormatType { ext, fmt })
Ok(ImageFormatType { ext, channel })
} else {
Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not found",
))
}
}
ImageExt::Png | ImageExt::Webp => Ok(ImageFormatType {
ext,
fmt: PixelType::RGBA8U,
}),
ImageExt::Jpeg => Ok(ImageFormatType {
ext,
fmt: PixelType::RGB8U,
}),
ImageExt::Png | ImageExt::Webp => {
self.tex_storing_fits = false;
self.tex_storing_unsigned_int = false;
self.tex_storing_integers = false;
Ok(ImageFormatType {
ext,
channel: ChannelType::RGBA8U,
})
}
ImageExt::Jpeg => {
self.tex_storing_fits = false;
self.tex_storing_unsigned_int = false;
self.tex_storing_integers = false;
Ok(ImageFormatType {
ext,
channel: ChannelType::RGB8U,
})
}
}?;
self.format = format;
@@ -210,6 +277,14 @@ impl HiPSConfig {
self.cube_depth
}
#[inline(always)]
pub fn set_fits_metadata(&mut self, bscale: f32, bzero: f32, blank: f32) {
self.scale = bscale;
self.offset = bzero;
self.blank = blank;
self.fits_metadata = true;
}
#[inline(always)]
pub fn allsky_tile_size(&self) -> i32 {
(self.get_tile_size() << 3).min(512)
@@ -266,7 +341,12 @@ use al_core::shader::{SendUniforms, ShaderBound};
impl SendUniforms for HiPSConfig {
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
// Send max depth
shader.attach_uniform("max_depth", &(self.max_depth_tile as i32));
shader
.attach_uniform("max_depth", &(self.max_depth_tile as i32))
.attach_uniform("tex_storing_fits", &self.tex_storing_fits)
.attach_uniform("scale", &self.scale)
.attach_uniform("offset", &self.offset)
.attach_uniform("blank", &self.blank);
shader
}

View File

@@ -2,7 +2,7 @@ use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use crate::renderable::hips::HpxTile;
use cgmath::Vector3;
@@ -10,9 +10,9 @@ use cgmath::Vector3;
use al_api::hips::ImageExt;
use al_core::webgl_ctx::WebGlRenderingCtx;
use al_core::image::format::{R16I, R32F, R32I, R64F, R8UI, RGB8U, RGBA8U};
use al_core::image::Image;
use al_core::shader::{SendUniforms, ShaderBound};
use al_core::texture::format::{R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
use al_core::Texture2DArray;
use al_core::WebGlContext;
@@ -150,7 +150,7 @@ pub struct HiPS2DBuffer {
fn create_hpx_texture_storage(
gl: &WebGlContext,
// The texture image channel definition
channel: PixelType,
channel: ChannelType,
// 256 is a consensus for targeting the widest range of GPU architectures. We create 128 slices to optimize performance
num_tiles: i32,
// The size of the tile
@@ -182,38 +182,46 @@ fn create_hpx_texture_storage(
),
];
match channel {
PixelType::RGBA8U => Texture2DArray::create_empty::<RGBA8U>(
ChannelType::RGBA8U => Texture2DArray::create_empty::<RGBA8U>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the widest range of GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::RGB8U => Texture2DArray::create_empty::<RGB8U>(
ChannelType::RGB8U => Texture2DArray::create_empty::<RGB8U>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the maximum GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R32F => Texture2DArray::create_empty::<R32F>(
ChannelType::R32F => Texture2DArray::create_empty::<R32F>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the maximum GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R8U => Texture2DArray::create_empty::<R8U>(
#[cfg(feature = "webgl2")]
ChannelType::R8UI => Texture2DArray::create_empty::<R8UI>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the maximum GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R16I => Texture2DArray::create_empty::<R16I>(
#[cfg(feature = "webgl2")]
ChannelType::R16I => Texture2DArray::create_empty::<R16I>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the maximum GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R32I => Texture2DArray::create_empty::<R32I>(
#[cfg(feature = "webgl2")]
ChannelType::R32I => Texture2DArray::create_empty::<R32I>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the maximum GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
#[cfg(feature = "webgl2")]
ChannelType::R64F => Texture2DArray::create_empty::<R64F>(
gl, tile_size, tile_size,
// 256 is a consensus for targeting the maximum GPU architectures. We create 128 slices to optimize performance
num_tiles, tex_params,
),
_ => unimplemented!(),
}
}
@@ -372,8 +380,6 @@ impl HiPS2DBuffer {
cell: &HEALPixCell,
dx: f64,
dy: f64,
scale: f32,
offset: f32,
) -> Result<JsValue, JsValue> {
let value = if let Some(tile) = self.get(cell) {
// Index of the texture in the total set of textures
@@ -389,27 +395,28 @@ impl HiPS2DBuffer {
tile_idx,
);
match self.config.get_format().get_pixel_format() {
PixelType::RGB8U | PixelType::RGBA8U => self
.tile_pixels
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?,
_ => {
let uvy = 1.0 - (pos_tex.y as f32 / tile_size);
pos_tex.y = (uvy * tile_size) as i32;
// Offset in the slice in pixels
if self.config.tex_storing_fits {
let uvy = 1.0 - (pos_tex.y as f32 / tile_size);
let f64_v = self
.tile_pixels
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?
.as_f64()
.ok_or("Error unwraping the pixel read value.")?;
pos_tex.y = (uvy * tile_size) as i32;
}
// 1 channel
// scale the value
let scale = scale as f64;
let offset = offset as f64;
let value = self
.tile_pixels
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?;
JsValue::from_f64(f64_v * scale + offset)
}
if self.config.tex_storing_fits {
// scale the value
let f64_v = value
.as_f64()
.ok_or("Error unwraping the pixel read value.")?;
let scale = self.config.scale as f64;
let offset = self.config.offset as f64;
JsValue::from_f64(f64_v * scale + offset)
} else {
value
}
} else {
JsValue::null()
@@ -450,7 +457,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
HpxTexture2D::new(&HEALPixCell(0, 10), 10, now),
HpxTexture2D::new(&HEALPixCell(0, 11), 11, now),
];
let channel = config.get_format().get_pixel_format();
let channel = config.get_format().get_channel();
let tile_size = config.get_tile_size();
let tile_pixels = create_hpx_texture_storage(gl, channel, 128, tile_size)?;
@@ -483,7 +490,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
fn set_image_ext(&mut self, gl: &WebGlContext, ext: ImageExt) -> Result<(), JsValue> {
self.config.set_image_ext(ext)?;
let channel = self.config.get_format().get_pixel_format();
let channel = self.config.get_format().get_channel();
let tile_size = self.config.get_tile_size();
self.tile_pixels = create_hpx_texture_storage(gl, channel, 128, tile_size)?;
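
For context on read_pixel above: FITS tiles store rows bottom-up, so the texel row is mirrored inside the tile before the GPU read, and the raw value is then mapped to physical units with the configured scale/offset. A hedged sketch of those two steps (helper names are illustrative only):

fn flip_fits_row(y: f32, tile_size: f32) -> i32 {
    // equivalent to the uvy = 1.0 - y / tile_size computation above
    ((1.0 - y / tile_size) * tile_size) as i32
}

fn apply_fits_scaling(raw: f64, scale: f64, offset: f64) -> f64 {
    raw * scale + offset
}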

View File

@@ -9,12 +9,10 @@ use al_api::hips::ImageExt;
use al_api::hips::ImageMetadata;
use al_core::colormap::Colormap;
use al_core::colormap::Colormaps;
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use cgmath::Vector2;
use cgmath::Vector3;
use crate::renderable::hips::FitsParams;
use al_core::image::Image;
use al_core::shader::Shader;
@@ -33,7 +31,7 @@ use crate::shader::ShaderManager;
use crate::utils;
use crate::downloader::request::allsky::Allsky;
use crate::healpix::{cell::HEALPixCell, moc::SpaceMoc};
use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
use crate::time::Time;
use super::config::HiPSConfig;
@@ -99,49 +97,43 @@ pub fn get_raster_shader<'a>(
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
match config.get_format().get_pixel_format() {
PixelType::R8U => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_u8.frag",
),
PixelType::R16I => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_i16.frag",
),
PixelType::R32I => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_i32.frag",
),
PixelType::R32F => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_f32.frag",
),
// color case
_ => {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_rgba.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_rgba2cmap.frag",
)
}
if config.get_format().is_colored() {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color_to_colormap.frag",
)
}
} else if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap.frag",
)
}
}
@@ -151,49 +143,44 @@ pub fn get_raytracer_shader<'a>(
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
match config.get_format().get_pixel_format() {
PixelType::R8U => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_u8.frag",
),
PixelType::R16I => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_i16.frag",
),
PixelType::R32I => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_i32.frag",
),
PixelType::R32F => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_f32.frag",
),
// color case
_ => {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_rgba.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_rgba2cmap.frag",
)
}
//let colored_hips = config.is_colored();
if config.get_format().is_colored() {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color_to_colormap.frag",
)
}
} else if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap.frag",
)
}
}
@@ -226,12 +213,10 @@ pub struct HiPS2D {
vao: VertexArrayObject,
gl: WebGlContext,
moc: Option<SpaceMoc>,
footprint_moc: Option<HEALPixCoverage>,
// A buffer storing the cells in the view
hpx_cells_in_view: Vec<HEALPixCell>,
pub(crate) fits_params: Option<FitsParams>,
}
use super::HpxTileBuffer;
@@ -292,7 +277,7 @@ impl HiPS2D {
let buffer = HiPS2DBuffer::new(gl, config)?;
let gl = gl.clone();
let moc = None;
let footprint_moc = None;
let hpx_cells_in_view = vec![];
// request the allsky texture
Ok(Self {
@@ -304,8 +289,6 @@ impl HiPS2D {
gl,
fits_params: None,
position,
uv_start,
uv_end,
@@ -313,7 +296,7 @@ impl HiPS2D {
idx_vertices,
moc,
footprint_moc,
hpx_cells_in_view,
})
}
@@ -330,12 +313,19 @@ impl HiPS2D {
.max(cfg.get_min_depth_tile());
let survey_frame = cfg.get_frame();
let mut already_considered_tiles = HashSet::new();
let tile_cells_iter = camera
.get_hpx_cells(depth_tile, survey_frame)
.into_iter()
.filter(move |tile_cell| {
if let Some(moc) = self.moc.as_ref() {
if already_considered_tiles.contains(tile_cell) {
return false;
}
already_considered_tiles.insert(*tile_cell);
if let Some(moc) = self.footprint_moc.as_ref() {
moc.intersects_cell(tile_cell) && !self.update_priority_tile(tile_cell)
} else {
!self.update_priority_tile(tile_cell)
@@ -394,13 +384,13 @@ impl HiPS2D {
}
#[inline]
pub fn set_moc(&mut self, moc: SpaceMoc) {
self.moc = Some(moc);
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
self.footprint_moc = Some(moc);
}
#[inline]
pub fn get_moc(&self) -> Option<&SpaceMoc> {
self.moc.as_ref()
pub fn get_moc(&self) -> Option<&HEALPixCoverage> {
self.footprint_moc.as_ref()
}
pub fn set_image_ext(&mut self, ext: ImageExt) -> Result<(), JsValue> {
@@ -433,13 +423,7 @@ impl HiPS2D {
let (pix, dx, dy) = crate::healpix::utils::hash_with_dxdy(depth, &lonlat);
let tile_cell = HEALPixCell(depth, pix);
let (bscale, bzero) = if let Some(FitsParams { bscale, bzero, .. }) = self.fits_params {
(bscale, bzero)
} else {
(1.0, 0.0)
};
self.buffer.read_pixel(&tile_cell, dx, dy, bscale, bzero)
self.buffer.read_pixel(&tile_cell, dx, dy)
} else {
Err(JsValue::from_str("Out of projection"))
}
@@ -454,7 +438,7 @@ impl HiPS2D {
let cfg = self.buffer.config();
// Get the coo system transformation matrix
let channel = cfg.get_format().get_pixel_format();
let channel = cfg.get_format().get_channel();
// Retrieve the model and inverse model matrix
let mut off_indices = 0;
@@ -472,7 +456,7 @@ impl HiPS2D {
// super::subdivide::num_hpx_subdivision(&self.hpx_cells_in_view[0], camera, projection);
for cell in &self.hpx_cells_in_view {
// filter textures that are not in the moc
let cell_in_cov = if let Some(moc) = self.moc.as_ref() {
let cell_in_cov = if let Some(moc) = self.footprint_moc.as_ref() {
if moc.intersects_cell(cell) {
// Rasterizer does not render tiles that are not in the MOC
// This is not a problem for transparency rendered HiPses (FITS or PNG)
@@ -547,7 +531,7 @@ impl HiPS2D {
} else {
// No ancestor has been found in the buffer to draw.
// We might want to check if the HiPS channel is JPEG to mock a cell that will be drawn in black
if channel == PixelType::RGB8U {
if channel == ChannelType::RGB8U {
Some(HpxDrawData::new(cell))
} else {
None
@@ -556,7 +540,7 @@ impl HiPS2D {
} else {
// No ancestor has been found in the buffer to draw.
// We might want to check if the HiPS channel is JPEG to mock a cell that will be drawn in black
if channel == PixelType::RGB8U {
if channel == ChannelType::RGB8U {
Some(HpxDrawData::new(cell))
} else {
None
@@ -770,7 +754,7 @@ impl HiPS2D {
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform(
"no_tile_color",
&(if config.get_format().get_pixel_format() == PixelType::RGB8U {
&(if config.get_format().get_channel() == ChannelType::RGB8U {
Vector4::new(0.0, 0.0, 0.0, 1.0)
} else {
Vector4::new(0.0, 0.0, 0.0, 0.0)
@@ -779,10 +763,6 @@ impl HiPS2D {
.attach_uniform("opacity", opacity)
.attach_uniforms_from(colormaps);
if let Some(fits_params) = self.fits_params.as_ref() {
shader.attach_uniforms_from(fits_params);
}
raytracer.draw(&shader);
} else {
let v2w = (*camera.get_m2w()) * c.transpose();
@@ -811,13 +791,7 @@ impl HiPS2D {
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniform("u_proj", proj)
.attach_uniforms_from(colormaps);
if let Some(fits_params) = self.fits_params.as_ref() {
shader.attach_uniforms_from(fits_params);
}
shader
.attach_uniforms_from(colormaps)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
@@ -832,14 +806,7 @@ impl HiPS2D {
})?;
//self.gl.disable(WebGl2RenderingContext::BLEND);
Ok(())
}
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
self.fits_params = Some(FitsParams {
bscale,
bzero,
blank,
});
}
}
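
The ChannelType::RGB8U special-casing above is worth spelling out: JPEG tiles have no alpha channel, so a cell with no data is mocked and drawn opaque black, while formats with transparency (PNG, FITS) simply fall back to a fully transparent color. A small sketch of that rule (no_tile_color below is an illustrative name; the tuple is RGBA):

fn no_tile_color(jpeg_like: bool) -> [f32; 4] {
    if jpeg_like {
        [0.0, 0.0, 0.0, 1.0] // opaque black, matching what a missing JPEG tile decodes to
    } else {
        [0.0, 0.0, 0.0, 0.0] // fully transparent
    }
}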

View File

@@ -134,7 +134,7 @@ pub struct HpxTexture2DUniforms<'a> {
impl<'a> HpxTexture2DUniforms<'a> {
pub fn new(texture: &'a HpxTexture2D, idx_texture: i32) -> Self {
let name = format!("textures_tiles[{idx_texture}].");
let name = format!("textures_tiles[{}].", idx_texture);
HpxTexture2DUniforms { texture, name }
}
}

View File

@@ -13,7 +13,7 @@ use crate::Abort;
use crate::JsValue;
use al_api::hips::ImageExt;
// Fixed sized binary heap
pub struct HiPSCubeBuffer {
pub struct HiPS3DBuffer {
// Some information about the HiPS
textures: HashMap<HEALPixCell, HpxTexture3D>,
@@ -24,7 +24,7 @@ pub struct HiPSCubeBuffer {
gl: WebGlContext,
}
impl HiPSCubeBuffer {
impl HiPS3DBuffer {
pub fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
let textures = HashMap::new();
@@ -124,7 +124,7 @@ impl HiPSCubeBuffer {
}
}
impl HpxTileBuffer for HiPSCubeBuffer {
impl HpxTileBuffer for HiPS3DBuffer {
type T = HpxTexture3D;
fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
@@ -182,14 +182,14 @@ impl HpxTileBuffer for HiPSCubeBuffer {
use al_core::shader::SendUniforms;
use al_core::shader::ShaderBound;
impl SendUniforms for HiPSCubeBuffer {
impl SendUniforms for HiPS3DBuffer {
// Send only the allsky textures
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader.attach_uniforms_from(&self.config)
}
}
impl Drop for HiPSCubeBuffer {
impl Drop for HiPS3DBuffer {
fn drop(&mut self) {
// drop all the 3D block textures
self.textures.clear();

View File

@@ -1,15 +1,12 @@
pub mod cube;
pub mod buffer;
pub mod texture;
use crate::healpix::moc::FreqSpaceMoc;
use crate::math::spectra::SpectralUnit;
use crate::renderable::hips::HpxTile;
use al_api::hips::DataproductType;
use al_api::hips::ImageExt;
use al_api::hips::ImageMetadata;
use al_core::colormap::Colormap;
use al_core::colormap::Colormaps;
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use al_core::image::Image;
@@ -29,17 +26,16 @@ use crate::downloader::query;
use crate::shader::ShaderManager;
use crate::downloader::request::allsky::Allsky;
use crate::healpix::cell::HEALPixCell;
use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
use crate::time::Time;
use super::config::HiPSConfig;
use super::FitsParams;
use std::collections::HashSet;
// Recursively compute the number of subdivision needed for a cell
// to not be too much skewed
use cube::HiPSCubeBuffer;
use buffer::HiPS3DBuffer;
use super::uv::{TileCorner, TileUVW};
@@ -54,39 +50,50 @@ pub fn get_raster_shader<'a>(
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
match config.get_format().get_pixel_format() {
PixelType::R8U => {
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_u8.frag")
}
PixelType::R16I => {
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_i16.frag")
}
PixelType::R32I => {
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_i32.frag")
}
PixelType::R32F => {
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_f32.frag")
}
// color case
_ => {
if cmap.label() == "native" {
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_rgba.frag")
} else {
crate::shader::get_shader(
gl,
shaders,
"hips3d_raster.vert",
"hips3d_rgba2cmap.frag",
)
}
if config.get_format().is_colored() {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips3d_rasterizer_raster.vert",
"hips3d_rasterizer_color.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips3d_rasterizer_raster.vert",
"hips3d_rasterizer_color_to_colormap.frag",
)
}
} else if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips3d_rasterizer_raster.vert",
"hips3d_rasterizer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips3d_rasterizer_raster.vert",
"hips3d_rasterizer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips3d_rasterizer_raster.vert",
"hips3d_rasterizer_grayscale_to_colormap.frag",
)
}
}
pub struct HiPS3D {
//color: Color,
// The image survey texture buffer
buffer: HiPSCubeBuffer,
buffer: HiPS3DBuffer,
// The projected vertices data
// For WebGL2 wasm, the data are interleaved
@@ -103,30 +110,29 @@ pub struct HiPS3D {
vao: VertexArrayObject,
gl: WebGlContext,
moc: Option<FreqSpaceMoc>,
footprint_moc: Option<HEALPixCoverage>,
// A buffer storing the cells in the view
hpx_cells_in_view: Vec<HEALPixCell>,
pub(crate) fits_params: Option<FitsParams>,
// The current slice index
freq: Freq,
slice: u16,
num_indices: Vec<usize>,
slice_indices: Vec<usize>,
cells: Vec<HEALPixCell>,
}
use super::HpxTileBuffer;
use crate::math::spectra::Freq;
impl HiPS3D {
pub fn new(config: HiPSConfig, gl: &WebGlContext) -> Result<Self, JsValue> {
let mut vao = VertexArrayObject::new(gl);
let freq = Freq(0.0);
let slice = 0;
let num_indices = vec![];
let slice_indices = vec![];
// layout (location = 0) in vec2 lonlat;
// layout (location = 1) in vec3 position;
// layout (location = 2) in vec3 uv_start;
@@ -161,12 +167,12 @@ impl HiPS3D {
)
.unbind();
let buffer = HiPSCubeBuffer::new(gl, config)?;
let buffer = HiPS3DBuffer::new(gl, config)?;
let cells = vec![];
let gl = gl.clone();
let moc = None;
let footprint_moc = None;
let hpx_cells_in_view = vec![];
// request the allsky texture
Ok(Self {
@@ -181,14 +187,13 @@ impl HiPS3D {
uv,
idx_vertices,
fits_params: None,
moc,
footprint_moc,
hpx_cells_in_view,
freq,
slice,
cells,
num_indices,
slice_indices,
})
}
@@ -205,6 +210,8 @@ impl HiPS3D {
.max(cfg.get_min_depth_tile());
let survey_frame = cfg.get_frame();
let mut already_considered_tiles = HashSet::new();
// raytracer is rendering and the shader only renders HPX texture cells of depth 0
/*if camera.is_raytracing(proj) {
depth_tile = 0;
@@ -214,8 +221,14 @@ impl HiPS3D {
.get_hpx_cells(depth_tile, survey_frame)
.into_iter()
.filter(move |tile_cell| {
if let Some(moc) = self.moc.as_ref() {
moc.intersects_cell(tile_cell, self.freq)
if already_considered_tiles.contains(tile_cell) {
return false;
}
already_considered_tiles.insert(*tile_cell);
if let Some(moc) = self.footprint_moc.as_ref() {
moc.intersects_cell(tile_cell)
} else {
true
}
@@ -224,21 +237,17 @@ impl HiPS3D {
Some(tile_cells_iter)
}
pub fn set_freq(&mut self, f: Freq) {
self.freq = f;
pub fn set_slice(&mut self, slice: u16) {
self.slice = slice;
}
pub fn get_tile_query(&self, cell: &HEALPixCell) -> query::Tile {
let cfg = self.get_config();
match cfg.dataproduct_type {
DataproductType::Cube => query::Tile::new(cell, Some(self.freq.0 as u32), cfg),
DataproductType::SpectralCube => todo!(),
_ => unreachable!(),
}
query::Tile::new(cell, Some(self.get_slice() as u32), cfg)
}
pub fn contains_tile(&self, cell: &HEALPixCell, freq: Freq) -> bool {
self.buffer.contains_tile(cell, freq.0 as u16)
pub fn contains_tile(&self, cell: &HEALPixCell, slice: u16) -> bool {
self.buffer.contains_tile(cell, slice)
}
pub fn draw(
@@ -267,12 +276,9 @@ impl HiPS3D {
//}
}
pub fn get_freq(&self) -> Freq {
self.freq
}
fn recompute_vertices(&mut self, camera: &CameraViewPort, proj: &ProjectionType) {
self.cells.clear();
self.slice_indices.clear();
self.position.clear();
self.uv.clear();
@@ -282,7 +288,7 @@ impl HiPS3D {
let mut off_indices = 0;
let channel = self.get_config().get_format().get_pixel_format();
let channel = self.get_config().get_format().get_channel();
// Define a global level of subdivisions for all the healpix tile cells in the view
// This should prevent seeing many holes
@@ -298,10 +304,10 @@ impl HiPS3D {
for cell in &self.hpx_cells_in_view {
// filter textures that are not in the moc
let cell = if let Some(moc) = self.moc.as_ref() {
if moc.intersects_cell(cell, self.freq) {
let cell = if let Some(moc) = self.footprint_moc.as_ref() {
if moc.intersects_cell(cell) {
Some(&cell)
} else if channel == PixelType::RGB8U {
} else if channel == ChannelType::RGB8U {
// Rasterizer does not render tiles that are not in the MOC
// This is not a problem for transparency rendered HiPses (FITS or PNG)
// but JPEG tiles do have black when no pixels data is found
@@ -314,11 +320,13 @@ impl HiPS3D {
Some(&cell)
};
let mut slice_contained = 0;
if let Some(cell) = cell {
let hpx_cell_texture = if self.contains_tile(cell, self.freq) {
let hpx_cell_texture = if self.buffer.contains_tile(cell, self.slice) {
slice_contained = self.slice;
self.buffer.get(cell)
// if the freq is not found we just draw nothing
/*} else if let Some(next_slice) = self.buffer.find_nearest_slice(cell, self.slice) {
} else if let Some(next_slice) = self.buffer.find_nearest_slice(cell, self.slice) {
slice_contained = next_slice;
self.buffer.get(cell)
} else if let Some(parent_cell) = self.buffer.get_nearest_parent(cell) {
@@ -328,17 +336,15 @@ impl HiPS3D {
.find_nearest_slice(&parent_cell, self.slice)
.unwrap();
self.buffer.get(&parent_cell)
*/
} else {
None
};
if let Some(texture) = hpx_cell_texture {
self.slice_indices.push(slice_contained as usize);
self.cells.push(*texture.cell());
// The slice is sure to be contained so we can unwrap
let hpx_slice_tex = texture
.extract_2d_slice_texture(self.freq.0 as u16)
.unwrap();
let hpx_slice_tex = texture.extract_2d_slice_texture(slice_contained).unwrap();
let uv_1 = TileUVW::new(cell, &hpx_slice_tex);
let d01e = uv_1[TileCorner::BottomRight].x - uv_1[TileCorner::BottomLeft].x;
@@ -461,21 +467,13 @@ impl HiPS3D {
}
#[inline]
pub fn set_moc(&mut self, moc: FreqSpaceMoc) {
self.moc = Some(moc);
}
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
self.fits_params = Some(FitsParams {
bscale,
bzero,
blank,
});
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
self.footprint_moc = Some(moc);
}
#[inline]
pub fn get_moc(&self) -> Option<&FreqSpaceMoc> {
self.moc.as_ref()
pub fn get_moc(&self) -> Option<&HEALPixCoverage> {
self.footprint_moc.as_ref()
}
pub fn set_image_ext(&mut self, ext: ImageExt) -> Result<(), JsValue> {
@@ -541,7 +539,11 @@ impl HiPS3D {
let shader = get_raster_shader(cmap, &self.gl, shaders, hips_cfg)?;
for (cell, num_indices) in self.cells.iter().zip(self.num_indices.iter()) {
for (slice_idx, (cell, num_indices)) in self
.slice_indices
.iter()
.zip(self.cells.iter().zip(self.num_indices.iter()))
{
blend_cfg.enable(&self.gl, || {
// Bind the shader at each draw of a cell to not exceed the max number of tex image units bindable
// to a shader. It is 32 in my case
@@ -553,7 +555,7 @@ impl HiPS3D {
self.buffer
.get(cell)
.unwrap()
.get_3d_block_from_slice(self.freq.0 as u16)
.get_3d_block_from_slice(*slice_idx as u16)
.unwrap(),
)
.attach_uniforms_from(&self.buffer)
@@ -563,13 +565,7 @@ impl HiPS3D {
.attach_uniform("inv_model", &v2w)
.attach_uniform("opacity", opacity)
.attach_uniform("u_proj", proj)
.attach_uniforms_from(colormaps);
if let Some(fits_params) = self.fits_params.as_ref() {
shaderbound.attach_uniforms_from(fits_params);
}
shaderbound
.attach_uniforms_from(colormaps)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
@@ -605,6 +601,11 @@ impl HiPS3D {
self.buffer.push_allsky(allsky)
}
#[inline]
pub fn get_slice(&self) -> u16 {
self.slice
}
/* Accessors */
#[inline]
pub fn get_config(&self) -> &HiPSConfig {
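
Both the 2D and 3D code paths above now de-duplicate the HEALPix cells returned by the camera query with a HashSet before requesting tiles. The pattern boils down to the following sketch (the cell type is simplified to u64 for illustration):

use std::collections::HashSet;

fn unique_cells(cells: Vec<u64>) -> impl Iterator<Item = u64> {
    let mut seen = HashSet::new();
    // HashSet::insert returns false when the cell was already seen,
    // so each cell is yielded at most once
    cells.into_iter().filter(move |c| seen.insert(*c))
}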

View File

@@ -1,8 +1,10 @@
use crate::renderable::hips::d2::texture::HpxTexture2D;
use crate::{healpix::cell::HEALPixCell, time::Time};
use al_core::image::format::{
ChannelType, R16I, R32F, R32I, R64F, R8UI, RGB32F, RGB8U, RGBA32F, RGBA8U,
};
use al_core::image::Image;
use al_core::texture::format::{PixelType, R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
use al_core::texture::Texture3D;
use al_core::webgl_ctx::WebGlRenderingCtx;
use cgmath::Vector3;
@@ -253,23 +255,32 @@ impl HpxTexture3D {
),
];
let texture = match cfg.get_format().get_pixel_format() {
PixelType::RGBA8U => {
let texture = match cfg.get_format().get_channel() {
ChannelType::RGBA32F => {
Texture3D::create_empty::<RGBA32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGB32F => {
Texture3D::create_empty::<RGB32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGBA8U => {
Texture3D::create_empty::<RGBA8U>(gl, tile_size, tile_size, 32, params)
}
PixelType::RGB8U => {
ChannelType::RGB8U => {
Texture3D::create_empty::<RGB8U>(gl, tile_size, tile_size, 32, params)
}
PixelType::R32F => {
ChannelType::R32F => {
Texture3D::create_empty::<R32F>(gl, tile_size, tile_size, 32, params)
}
PixelType::R8U => {
Texture3D::create_empty::<R8U>(gl, tile_size, tile_size, 32, params)
ChannelType::R64F => {
Texture3D::create_empty::<R64F>(gl, tile_size, tile_size, 32, params)
}
PixelType::R16I => {
ChannelType::R8UI => {
Texture3D::create_empty::<R8UI>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R16I => {
Texture3D::create_empty::<R16I>(gl, tile_size, tile_size, 32, params)
}
PixelType::R32I => {
ChannelType::R32I => {
Texture3D::create_empty::<R32I>(gl, tile_size, tile_size, 32, params)
}
};

View File

@@ -13,7 +13,7 @@ use crate::renderable::HiPSConfig;
use crate::time::Time;
use crate::CameraViewPort;
use crate::HEALPixCell;
use crate::SpaceMoc;
use crate::HEALPixCoverage;
use crate::WebGlContext;
use al_api::hips::ImageExt;
use wasm_bindgen::JsValue;
@@ -128,13 +128,13 @@ impl HiPS {
}
}
/*#[inline]
pub fn set_moc(&mut self, moc: SpaceMoc) {
#[inline]
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
match self {
D2(hips) => hips.set_moc(moc),
D3(hips) => hips.set_moc(moc),
}
}*/
}
#[inline]
pub fn get_tile_query(&self, cell: &HEALPixCell) -> query::Tile {
@@ -155,40 +155,4 @@ impl HiPS {
pub fn is_allsky(&self) -> bool {
self.get_config().is_allsky
}
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
match self {
HiPS::D2(hips) => hips.set_fits_params(bscale, bzero, blank),
HiPS::D3(hips) => hips.set_fits_params(bscale, bzero, blank),
}
}
pub(crate) fn get_fits_params(&self) -> &Option<FitsParams> {
match self {
HiPS::D2(hips) => &hips.fits_params,
HiPS::D3(hips) => &hips.fits_params,
}
}
}
pub(crate) struct FitsParams {
pub bscale: f32,
pub bzero: f32,
pub blank: Option<f32>,
}
use al_core::shader::{SendUniforms, ShaderBound};
impl SendUniforms for FitsParams {
// Send only the allsky textures
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader
.attach_uniform("scale", &self.bscale)
.attach_uniform("offset", &self.bzero);
if let Some(blank) = &self.blank {
shader.attach_uniform("blank", blank);
}
shader
}
}

View File

@@ -1,11 +1,12 @@
use cgmath::Vector3;
use std::ops::RangeInclusive;
use wcs::ImgXY;
use crate::camera::CameraViewPort;
use crate::math::projection::ProjectionType;
use crate::renderable::utils::index_patch::CCWCheckPatchIndexIter;
use al_api::coo_system::CooSystem;
use fitsrs::wcs::{ImgXY, WCS};
use wcs::WCS;
pub fn get_grid_params(
xy_min: &(f64, f64),
@@ -93,6 +94,7 @@ fn get_coord_uv_it(
let x_it = std::iter::once((xmin, get_uv_in_tex_chunk(xmin)))
.chain(
tex_patch_x
.clone()
.skip(1)
.flat_map(|x1| vec![(x1, 1.0), (x1, 0.0)]),
)
@@ -172,7 +174,6 @@ pub fn vertices(
camera: &CameraViewPort,
wcs: &WCS,
projection: &ProjectionType,
rgba: bool,
) -> (Vec<f32>, Vec<f32>, Vec<u16>, Vec<u32>) {
let (x_it, y_it) = get_grid_params(
xy_min,
@@ -189,15 +190,7 @@ pub fn vertices(
let mut uv = vec![];
let pos = y_it
.flat_map(|(mut y, uvy)| {
// In FITS, the origin is in the lower-left corner whereas in JPEG/PNG it is in the upper-left corner
// the WCS is aligned with the FITS convention so we must invert it for compressed RGBA images
y = if rgba {
wcs.img_dimensions()[1] as u64 - y
} else {
y
};
.flat_map(|(y, uvy)| {
x_it.clone().map(move |(x, uvx)| {
let ndc = if let Some(xyz) = wcs.unproj_xyz(&ImgXY::new(x as f64, y as f64)) {
let xyz = crate::coosys::apply_coo_system(
@@ -218,15 +211,14 @@ pub fn vertices(
})
.map(|(p, uu)| {
uv.extend_from_slice(&uu);
p
})
.collect::<Vec<_>>();
let mut indices = vec![];
let mut num_indices = vec![];
for idx_y_range in &idx_y_ranges {
for idx_x_range in &idx_x_ranges {
for idx_x_range in &idx_x_ranges {
for idx_y_range in &idx_y_ranges {
let build_indices_iter =
CCWCheckPatchIndexIter::new(idx_x_range, idx_y_range, num_x_vertices, &pos, camera);
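
The block removed above handled the row-origin mismatch between FITS (rows counted from the bottom-left) and JPEG/PNG (rows counted from the top-left) by mirroring the row index for RGBA-backed images before unprojection. For reference, a minimal sketch of that convention (img_height stands for wcs.img_dimensions()[1]):

fn flip_row_for_rgba(y: u64, img_height: u64, rgba: bool) -> u64 {
    if rgba { img_height - y } else { y }
}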

View File

@@ -2,25 +2,28 @@ pub mod cuts;
pub mod grid;
pub mod subdivide_texture;
use al_core::texture::format::PixelType;
use al_core::texture::format::RGBA8U;
use al_core::texture::format::{R16I, R32F, R32I, R8U};
use al_core::convert::Cast;
use al_core::webgl_ctx::WebGlRenderingCtx;
use fitsrs::hdu::header::Bitpix;
use std::fmt::Debug;
use std::marker::Unpin;
use std::vec;
use al_api::coo_system::CooSystem;
use cgmath::Vector3;
use futures::stream::TryStreamExt;
use futures::AsyncRead;
use wasm_bindgen::JsValue;
use web_sys::WebGl2RenderingContext;
use fitsrs::wcs::{ImgXY, WCS};
use fitsrs::hdu::data::stream;
use wcs::{ImgXY, WCS};
use al_api::fov::CenteredFoV;
use al_api::hips::ImageMetadata;
use al_core::image::format::*;
use al_core::webgl_ctx::GlWrapper;
use al_core::VecData;
use al_core::WebGlContext;
@@ -33,9 +36,7 @@ use crate::ProjectionType;
use crate::ShaderManager;
use std::ops::Range;
use self::subdivide_texture::crop_image;
use self::subdivide_texture::ImagePatches;
type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;
pub struct Image {
/// A reference to the GL context
@@ -48,82 +49,179 @@ pub struct Image {
pos: Vec<f32>,
uv: Vec<f32>,
/// WCS allowing to locate the image on the sky
/// Parameters extracted from the fits
wcs: WCS,
/// Some parameters, only defined for image coming from FITS files
blank: Option<f32>,
bscale: f32,
bzero: f32,
scale: f32,
offset: f32,
cuts: Range<f32>,
/// The center of the fits
centered_fov: CenteredFoV,
//+ Texture format
pixel_type: PixelType,
channel: ChannelType,
/// Texture chunks objects
textures: Vec<Texture2D>,
/// Texture indices that must be drawn
idx_tex: Vec<usize>,
/// The size of a textured image patch
/// that can be uploaded to the GPU
w_patch: usize,
h_patch: usize,
/// The maximum webgl supported texture size
max_tex_size_x: usize,
max_tex_size_y: usize,
reg: Region,
// The coo system in which the polygonal region has been defined
coo_sys: CooSystem,
}
use al_core::pixel::Pixel;
use fitsrs::hdu::header::extension;
use fitsrs::hdu::AsyncHDU;
use futures::io::BufReader;
use futures::AsyncReadExt;
const TEX_PARAMS: &[(u32, u32)] = &[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
];
impl Image {
#[allow(clippy::too_many_arguments)]
fn init_buffers(
gl: WebGlContext,
patches: ImagePatches,
pub async fn from_reader_and_wcs<R, F>(
gl: &WebGlContext,
mut reader: R,
wcs: WCS,
bscale: f32,
bzero: f32,
scale: Option<f32>,
offset: Option<f32>,
blank: Option<f32>,
// Coo sys of the view
coo_sys: CooSystem,
) -> Result<Self, JsValue> {
let dim = wcs.img_dimensions();
let (width, height) = (dim[0] as u64, dim[1] as u64);
) -> Result<Self, JsValue>
where
F: ImageFormat,
R: AsyncReadExt + Unpin,
{
let (width, height) = wcs.img_dimensions();
let ImagePatches {
pixel_type,
texture_patches: textures,
initial_cuts: cuts,
w_patch,
h_patch,
} = patches;
let max_tex_size =
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
.as_f64()
.unwrap_or(4096.0) as usize;
let mut max_tex_size_x = max_tex_size;
let mut max_tex_size_y = max_tex_size;
// apply bscale to the cuts
let offset = offset.unwrap_or(0.0);
let scale = scale.unwrap_or(1.0);
let (textures, cuts) = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
max_tex_size_x = width as usize;
max_tex_size_y = height as usize;
// can fit in one texture
let num_pixels_to_read = (width as usize) * (height as usize);
let num_bytes_to_read = num_pixels_to_read * std::mem::size_of::<F::P>();
let mut buf = vec![0; num_bytes_to_read];
reader
.read_exact(&mut buf[..num_bytes_to_read])
.await
.map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;
// bytes aligned
unsafe {
let data = std::slice::from_raw_parts_mut(
buf[..].as_mut_ptr() as *mut PixelItem<F>,
num_pixels_to_read * F::NUM_CHANNELS,
);
let texture = Texture2D::create_from_raw_pixels::<F>(
gl,
width as i32,
height as i32,
&[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
],
Some(data),
)?;
let cuts = match F::CHANNEL_TYPE {
ChannelType::R32F | ChannelType::R64F => {
let pixels =
std::slice::from_raw_parts(data.as_ptr() as *const f32, data.len() / 4);
let mut sub_pixels = pixels
.iter()
.step_by(100)
.filter(|pixel| (*pixel).is_finite())
.cloned()
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
}
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
// BLANK is only valid for those channels/BITPIX (> 0)
if let Some(blank) = blank {
let mut sub_pixels = data
.iter()
.step_by(100)
.filter_map(|pixel| {
let pixel = <PixelItem<F> as Cast<f32>>::cast(*pixel);
if pixel != blank {
Some(pixel)
} else {
None
}
})
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
} else {
// No blank value => we consider all the values
let mut sub_pixels = data
.iter()
.step_by(100)
.map(|pixel| <PixelItem<F> as Cast<f32>>::cast(*pixel))
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
}
}
// RGB(A) images
_ => 0.0..1.0,
};
(vec![texture], cuts)
}
} else {
subdivide_texture::crop_image::<F, R>(
gl,
width,
height,
reader,
max_tex_size as u64,
blank,
)
.await?
};
for tex in &textures {
tex.generate_mipmap();
}
let start = cuts.start * bscale + bzero;
let end = cuts.end * bscale + bzero;
let start = cuts.start * scale + offset;
let end = cuts.end * scale + offset;
let cuts = start..end;
@@ -133,7 +231,7 @@ impl Image {
let uv = vec![];
// Define the buffers
let vao = {
let mut vao = VertexArrayObject::new(&gl);
let mut vao = VertexArrayObject::new(gl);
#[cfg(feature = "webgl2")]
vao.bind_for_update()
@@ -159,6 +257,7 @@ impl Image {
vao
};
let gl = gl.clone();
// Compute the fov
let center = wcs
@@ -205,7 +304,7 @@ impl Image {
let idx_tex = (0..textures.len()).collect();
Ok(Self {
Ok(Image {
gl,
// The positions
@@ -218,19 +317,19 @@ impl Image {
// Metadata extracted from the fits
wcs,
// CooSystem of the wcs, this should belong to the WCS
bscale,
bzero,
scale,
offset,
blank,
// Centered field of view allowing to locate the fits
centered_fov,
// Texture parameters
pixel_type,
channel: F::CHANNEL_TYPE,
textures,
cuts,
w_patch,
h_patch,
max_tex_size_x,
max_tex_size_y,
// Indices of textures that must be drawn
idx_tex,
// The polygonal region in the sky
@@ -240,348 +339,127 @@ impl Image {
})
}
#[allow(clippy::too_many_arguments)]
pub fn from_fits_hdu(
gl: &WebGlContext,
// wcs extracted from the image HDU
wcs: fitsrs::WCS,
// bitpix extracted from the image HDU
bitpix: fitsrs::hdu::header::Bitpix,
// bytes slice extracted from the HDU
bytes: &[u8],
// other keywords extracted from the header of the image HDU
bscale: f32,
bzero: f32,
blank: Option<f32>,
// Coo sys of the view
coo_sys: CooSystem,
) -> Result<Self, JsValue> {
let dim = wcs.img_dimensions();
let (width, height) = (dim[0] as u64, dim[1] as u64);
let max_tex_size =
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
.as_f64()
.unwrap_or(4096.0) as usize;
let patches = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
// can fit in one texture
// bytes aligned
match bitpix {
Bitpix::I64 => {
// one must convert the data to i32
let bytes_from_i32 = bytes
.chunks(8)
.flat_map(|bytes| {
let l = i64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
bytes[6], bytes[7],
]);
let i = l as i32;
i32::to_be_bytes(i)
})
.collect::<Vec<_>>();
let texture = Texture2D::create_from_raw_bytes::<R32I>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes_from_i32.as_slice(),
)?;
let mut sub_pixels = bytes_from_i32
.chunks(std::mem::size_of::<i32>())
.step_by(100)
.filter_map(|p| {
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32I,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::F64 => {
// one must convert the data to f32
let bytes_from_f32 = bytes
.chunks(8)
.flat_map(|bytes| {
let d = f64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
bytes[6], bytes[7],
]);
let f = d as f32;
f32::to_be_bytes(f)
})
.collect::<Vec<_>>();
let texture = Texture2D::create_from_raw_bytes::<R32F>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes_from_f32.as_slice(),
)?;
let mut sub_pixels = bytes_from_f32
.chunks(std::mem::size_of::<f32>())
.step_by(100)
.filter_map(|p| {
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
if p.is_finite() {
Some(p)
} else {
None
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32F,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::U8 => {
let texture = Texture2D::create_from_raw_bytes::<R8U>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.iter()
.step_by(100)
.filter_map(|p| {
let p = *p as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R8U,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::I16 => {
let texture = Texture2D::create_from_raw_bytes::<R16I>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.chunks(2)
.step_by(100)
.filter_map(|p| {
let p = i16::from_be_bytes([p[0], p[1]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R16I,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::I32 => {
let texture = Texture2D::create_from_raw_bytes::<R32I>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.chunks(4)
.step_by(100)
.filter_map(|p| {
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32I,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::F32 => {
let texture = Texture2D::create_from_raw_bytes::<R32F>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.chunks(std::mem::size_of::<f32>())
.step_by(100)
.filter_map(|p| {
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
if p.is_finite() {
Some(p)
} else {
None
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32F,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
}
} else {
// We cut the image into 4096x4096 patches. It is already 64 MB to allocate for an f32 image of these dimensions.
match bitpix {
Bitpix::U8 => crop_image::<R8U>(gl, width, height, bytes, 4096, blank)?,
Bitpix::I16 => crop_image::<R16I>(gl, width, height, bytes, 4096, blank)?,
Bitpix::I32 => crop_image::<R32I>(gl, width, height, bytes, 4096, blank)?,
Bitpix::F32 => crop_image::<R32F>(gl, width, height, bytes, 4096, blank)?,
Bitpix::F64 => {
let bytes_from_f32 = bytes
.chunks(8)
.flat_map(|bytes| {
let d = f64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
bytes[6], bytes[7],
]);
let f = d as f32;
f32::to_be_bytes(f)
})
.collect::<Vec<_>>();
crop_image::<R32F>(gl, width, height, &bytes_from_f32, 4096, blank)?
}
_ => {
return Err(JsValue::from_str(
"I64/F64 for big fits images not supported.",
))
}
}
};
Self::init_buffers(gl.clone(), patches, wcs, bscale, bzero, blank, coo_sys)
pub fn get_cuts(&self) -> &Range<f32> {
&self.cuts
}
pub fn from_rgba_bytes(
pub async fn from_fits_hdu_async<'a, R>(
gl: &WebGlContext,
// bytes in TextureFormat
bytes: &[u8],
// wcs extracted from the image HDU
wcs: fitsrs::WCS,
// Coo sys of the view
hdu: &mut AsyncHDU<'a, BufReader<R>, extension::image::Image>,
coo_sys: CooSystem,
) -> Result<Self, JsValue> {
let dim = wcs.img_dimensions();
let (width, height) = (dim[0] as u64, dim[1] as u64);
) -> Result<Self, JsValue>
where
R: AsyncRead + Unpin + Debug + 'a,
{
// Load the FITS file
let header = hdu.get_header();
let max_tex_size =
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
.as_f64()
.unwrap_or(4096.0) as usize;
let scale = header.get_parsed::<f64>(b"BSCALE ").map(|v| v.unwrap());
let offset = header.get_parsed::<f64>(b"BZERO ").map(|v| v.unwrap());
let blank = header.get_parsed::<f64>(b"BLANK ").map(|v| v.unwrap());
let bscale = 1.0;
let bzero = 0.0;
let blank = None;
// Create a WCS from a specific header unit
let wcs = WCS::from_fits_header(header)
.map_err(|e| JsValue::from_str(&format!("WCS parsing error: reason: {}", e)))?;
let image_patches = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
// small image case, can fit into a webgl texture
let textures = vec![Texture2D::create_from_raw_bytes::<RGBA8U>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?];
let pixel_ty = PixelType::RGBA8U;
let cuts = 0.0..1.0;
let data = hdu.get_data_mut();
ImagePatches::new(pixel_ty, textures, cuts, width as usize, height as usize)
} else {
crop_image::<RGBA8U>(gl, width, height, bytes, 4096, None)?
};
match data {
stream::Data::U8(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::init_buffers(
gl.clone(),
image_patches,
wcs,
bscale,
bzero,
blank,
coo_sys,
)
Self::from_reader_and_wcs::<_, R8UI>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::I16(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::from_reader_and_wcs::<_, R16I>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::I32(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::from_reader_and_wcs::<_, R32I>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::I64(data) => {
let reader = data
.map_ok(|v| {
let v = v[0] as i32;
v.to_le_bytes()
})
.into_async_read();
Self::from_reader_and_wcs::<_, R32I>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::F32(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::from_reader_and_wcs::<_, R32F>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::F64(data) => {
let reader = data
.map_ok(|v| {
let v = v[0] as f32;
v.to_le_bytes()
})
.into_async_read();
Self::from_reader_and_wcs::<_, R32F>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
}
}
pub fn recompute_vertices(
@@ -589,8 +467,9 @@ impl Image {
camera: &CameraViewPort,
projection: &ProjectionType,
) -> Result<(), JsValue> {
let dim = self.wcs.img_dimensions();
let (width, height) = (dim[0] as f64, dim[1] as f64);
let (width, height) = self.wcs.img_dimensions();
let width = width as f64;
let height = height as f64;
let (x_mesh_range, y_mesh_range) =
if camera.get_field_of_view().intersects_region(&self.reg) {
@@ -612,13 +491,12 @@ impl Image {
let (pos, uv, indices, num_indices) = grid::vertices(
&(x_mesh_range.start, y_mesh_range.start),
&(x_mesh_range.end.ceil(), y_mesh_range.end.ceil()),
self.w_patch as u64,
self.h_patch as u64,
self.max_tex_size_x as u64,
self.max_tex_size_y as u64,
num_vertices,
camera,
&self.wcs,
projection,
self.pixel_type == PixelType::RGB8U || self.pixel_type == PixelType::RGBA8U,
);
self.pos = pos;
@@ -661,8 +539,7 @@ impl Image {
if self.coo_sys != camera.get_coo_system() {
self.coo_sys = camera.get_coo_system();
let dim = self.wcs.img_dimensions();
let (width, height) = (dim[0] as usize, dim[1] as usize);
let (width, height) = self.wcs.img_dimensions();
// the camera coo system is not sync with the one in which the region
// has been defined
@@ -722,50 +599,59 @@ impl Image {
..
} = cfg;
let shader = match self.pixel_type {
PixelType::RGBA8U => crate::shader::get_shader(
let shader = match self.channel {
ChannelType::RGBA8U => crate::shader::get_shader(
&self.gl,
shaders,
"image_base.vert",
"image_sampler.frag",
)?,
PixelType::RGB8U => crate::shader::get_shader(
ChannelType::R32F => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_sampler.frag")?
}
#[cfg(feature = "webgl2")]
ChannelType::R32I => crate::shader::get_shader(
&self.gl,
shaders,
"image_base.vert",
"image_sampler.frag",
"fits_base.vert",
"fits_isampler.frag",
)?,
PixelType::R32F => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_f32.frag")?
}
PixelType::R32I => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_i32.frag")?
}
PixelType::R16I => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_i16.frag")?
}
PixelType::R8U => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_u8.frag")?
}
#[cfg(feature = "webgl2")]
ChannelType::R16I => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_isampler.frag",
)?,
#[cfg(feature = "webgl2")]
ChannelType::R8UI => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_usampler.frag",
)?,
_ => return Err(JsValue::from_str("Image format type not supported")),
};
//self.gl.disable(WebGl2RenderingContext::CULL_FACE);
// 2. Draw it if its opacity is not null
blend_cfg.enable(&self.gl, || {
let mut off_indices = 0;
for &idx_tex in self.idx_tex.iter() {
for (idx, &idx_tex) in self.idx_tex.iter().enumerate() {
let texture = &self.textures[idx_tex];
let num_indices = self.num_indices[idx_tex] as i32;
let num_indices = self.num_indices[idx] as i32;
let shader_bound = shader.bind(&self.gl);
shader_bound
.attach_uniforms_from(colormaps)
.attach_uniforms_with_params_from(color, colormaps)
.attach_uniform("opacity", opacity)
.attach_uniform("tex", texture)
.attach_uniform("scale", &self.bscale)
.attach_uniform("offset", &self.bzero);
.attach_uniform("scale", &self.scale)
.attach_uniform("offset", &self.offset);
if let Some(blank) = self.blank {
shader_bound.attach_uniform("blank", &blank);
@@ -797,9 +683,4 @@ impl Image {
pub fn get_centered_fov(&self) -> &CenteredFoV {
&self.centered_fov
}
#[inline]
pub fn get_cuts(&self) -> &Range<f32> {
&self.cuts
}
}
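
As the stream::Data::I64 and stream::Data::F64 arms above show, WebGL2 offers no 64-bit texture formats, so 64-bit FITS samples are narrowed to 32 bits on the fly before being handed to the texture reader. A standalone sketch of that narrowing (function names are illustrative; the diff does it inline with map_ok):

fn narrow_i64_sample(v: i64) -> [u8; 4] {
    (v as i32).to_le_bytes()
}

fn narrow_f64_sample(v: f64) -> [u8; 4] {
    (v as f32).to_le_bytes()
}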

View File

@@ -1,238 +1,194 @@
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use wasm_bindgen::JsValue;
use futures::AsyncReadExt;
use super::cuts;
use al_core::texture::format::TextureFormat;
use al_core::image::format::ImageFormat;
use al_core::texture::pixel::Pixel;
use al_core::webgl_ctx::WebGlRenderingCtx;
use al_core::Texture2D;
use al_core::WebGlContext;
use std::ops::Range;
pub fn crop_image<F>(
use al_core::convert::Cast;
type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;
pub async fn crop_image<F, R>(
gl: &WebGlContext,
width: u64,
height: u64,
bytes: &[u8],
mut reader: R,
max_tex_size: u64,
blank: Option<f32>,
) -> Result<ImagePatches, JsValue>
) -> Result<(Vec<Texture2D>, Range<f32>), JsValue>
where
F: TextureFormat,
F: ImageFormat,
R: AsyncReadExt + Unpin,
{
let mut tex_chunks = vec![];
let num_texture_x = ((width / max_tex_size) + 1) as usize;
let num_texture_y = ((height / max_tex_size) + 1) as usize;
// Subdivision
let num_textures = ((width / max_tex_size) + 1) * ((height / max_tex_size) + 1);
let mut w = Vec::with_capacity(num_texture_x);
let mut h = Vec::with_capacity(num_texture_x);
let mut buf = vec![
0;
(max_tex_size as usize)
* std::mem::size_of::<<F::P as Pixel>::Item>()
* F::NUM_CHANNELS
];
for i in 0..num_texture_x {
let w_patch = if i == num_texture_x - 1 {
width % max_tex_size
for _ in 0..num_textures {
let tex_chunk = Texture2D::create_empty_with_format::<F>(
gl,
max_tex_size as i32,
max_tex_size as i32,
&[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
],
)?;
tex_chunk.generate_mipmap();
tex_chunks.push(tex_chunk);
}
let mut pixels_written = 0;
let num_pixels = width * height;
const PIXEL_STEP: u64 = 256;
let step_x_cut = (width / PIXEL_STEP) as usize;
let step_y_cut = (height / PIXEL_STEP) as usize;
let mut sub_pixels = vec![];
let step_cut = step_x_cut.max(step_y_cut) + 1;
let num_texture_x = (width / max_tex_size) + 1;
let num_texture_y = (height / max_tex_size) + 1;
while pixels_written < num_pixels {
// Get the id of the texture to fill
let id_tx = (pixels_written % width) / max_tex_size;
let id_ty = (pixels_written / width) / max_tex_size;
let id_t = id_ty + id_tx * num_texture_y;
// For textures along the right-x border
let num_pixels_to_read = if id_tx == num_texture_x - 1 {
width - (pixels_written % width)
} else {
max_tex_size
};
let h_patch = max_tex_size;
w.push(w_patch as usize);
h.push(h_patch as usize);
}
let num_bytes_to_read = (num_pixels_to_read as usize)
* std::mem::size_of::<<F::P as Pixel>::Item>()
* F::NUM_CHANNELS;
let create_next_patches = |num_patches_per_row: usize| -> Vec<Vec<u8>> {
(0..num_patches_per_row)
.map(|_| {
vec![0_u8; (max_tex_size as usize) * (max_tex_size as usize) * F::NUM_CHANNELS]
})
.collect::<Vec<_>>()
};
if let Ok(()) = reader.read_exact(&mut buf[..num_bytes_to_read]).await {
// Tell where the data must go inside the texture
let off_y_px = id_ty * max_tex_size;
let mut buf = create_next_patches(num_texture_x);
let dy = (pixels_written / width) - off_y_px;
let view = unsafe {
let data = std::slice::from_raw_parts(
buf[..num_bytes_to_read].as_ptr() as *const <F::P as Pixel>::Item,
(num_pixels_to_read as usize) * F::NUM_CHANNELS,
);
let mut pixels_written = 0_usize;
let num_pixels = (width * height) as usize;
// compute the cuts if the pixel is grayscale
if (pixels_written / width) % (step_cut as u64) == 0 {
// We are in a good line
let xmin = pixels_written % width;
// Sampled pixels for computing automatic min/max cut values
const PIXEL_STEP: usize = 256;
let mut sub_pixels = vec![];
match F::CHANNEL_TYPE {
ChannelType::R32F | ChannelType::R64F => {
let pixels = std::slice::from_raw_parts(
data.as_ptr() as *const f32,
data.len() / 4,
);
let step_x_cut = (width as usize) / PIXEL_STEP;
let step_y_cut = (height as usize) / PIXEL_STEP;
let step_cut = step_x_cut.max(step_y_cut) + 1_usize;
for i in (0..width).step_by(step_cut) {
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
let j = (i - xmin) as usize;
let mut id_tx = 0;
let mut id_ty = 0;
while pixels_written < num_pixels {
let bytes_written = pixels_written * F::NUM_CHANNELS;
// For textures along the right-x border
let w_patch = w[id_tx];
let h_patch = h[id_tx];
let num_pixels_to_read = w_patch;
let num_bytes_to_read = num_pixels_to_read * F::NUM_CHANNELS;
// Tell where the data must go inside the texture
let off_y_px = id_ty * h_patch;
// line index
let y = pixels_written / (width as usize);
let dy = y - off_y_px;
let off_bytes_src = bytes_written;
let off_bytes_dst = dy * (max_tex_size as usize) * F::NUM_CHANNELS;
buf[id_tx][off_bytes_dst..(off_bytes_dst + num_bytes_to_read)]
.copy_from_slice(&bytes[off_bytes_src..(off_bytes_src + num_bytes_to_read)]);
pixels_written += num_pixels_to_read;
if F::PIXEL_TYPE.num_channels() == 1 && y % step_cut == 0 {
// on a good line
let bytes_line = &buf[id_tx][off_bytes_dst..(off_bytes_dst + num_bytes_to_read)];
for x_in_patch in (0..w_patch).step_by(step_cut) {
let x_byte_off = x_in_patch * F::NUM_CHANNELS;
let p = &bytes_line[x_byte_off..(x_byte_off + F::NUM_CHANNELS)];
let v = match F::PIXEL_TYPE {
PixelType::R8U => {
let p = p[0] as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
if pixels[j].is_finite() {
sub_pixels.push(pixels[j]);
}
}
}
} else {
Some(p)
}
}
PixelType::R16I => {
let p = i16::from_be_bytes([p[0], p[1]]) as f32;
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
if let Some(blank) = blank {
for i in (0..width).step_by(step_cut) {
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
let j = (i - xmin) as usize;
if let Some(blank) = blank {
if p != blank {
Some(p)
let pixel = <PixelItem<F> as Cast<f32>>::cast(data[j]);
if pixel != blank {
sub_pixels.push(pixel);
}
}
}
} else {
None
for i in (0..width).step_by(step_cut) {
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
let j = (i - xmin) as usize;
let pixel = <PixelItem<F> as Cast<f32>>::cast(data[j]);
sub_pixels.push(pixel);
}
}
}
} else {
Some(p)
}
// colored pixels
_ => (),
}
PixelType::R32I => {
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
}
PixelType::R32F => {
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
if p.is_finite() {
Some(p)
} else {
None
}
}
_ => unreachable!(),
};
if let Some(v) = v {
sub_pixels.push(v);
}
}
F::view(data)
};
tex_chunks[id_t as usize]
.bind()
.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_array_buffer_view(
0,
dy as i32,
num_pixels_to_read as i32,
1,
Some(view.as_ref()),
);
pixels_written += num_pixels_to_read;
} else {
return Err(JsValue::from_str(
"invalid data with respect to the NAXIS given in the WCS",
));
}
if (((dy + 1) % (max_tex_size as usize) == 0) && id_tx == buf.len() - 1)
|| pixels_written >= num_pixels
{
// we can create new textures of size max_tex_size
for patch_buf in &buf {
let tex_chunk = Texture2D::create_from_raw_bytes::<F>(
gl,
max_tex_size as i32,
max_tex_size as i32,
&[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
],
patch_buf,
)?;
tex_chunks.push(tex_chunk);
}
//buf.clear();
//buf = create_next_patches(num_texture_x);
id_ty = (id_ty + 1) % num_texture_y;
}
id_tx = (id_tx + 1) % num_texture_x;
}
let cuts = if F::PIXEL_TYPE.num_channels() == 1 {
let cuts = if F::CHANNEL_TYPE.is_colored() {
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
} else {
0.0..1.0
};
Ok(ImagePatches {
pixel_type: F::PIXEL_TYPE,
texture_patches: tex_chunks,
initial_cuts: cuts,
w_patch: max_tex_size as usize,
h_patch: max_tex_size as usize,
})
}
pub struct ImagePatches {
pub pixel_type: PixelType,
pub texture_patches: Vec<Texture2D>,
pub initial_cuts: Range<f32>,
pub w_patch: usize,
pub h_patch: usize,
}
impl ImagePatches {
pub fn new(
pixel_type: PixelType,
texture_patches: Vec<Texture2D>,
initial_cuts: Range<f32>,
w_patch: usize,
h_patch: usize,
) -> Self {
Self {
pixel_type,
texture_patches,
initial_cuts,
w_patch,
h_patch,
}
}
Ok((tex_chunks, cuts))
}
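
crop_image and from_reader_and_wcs both derive the initial display cuts from a sparse sample of finite, non-BLANK pixels via cuts::first_and_last_percent(&mut sub_pixels, 1, 99). The body of that helper is not part of this diff; one plausible sketch of such a 1%-99% percentile cut looks like this:

use std::ops::Range;

fn first_and_last_percent(samples: &mut [f32], lo: usize, hi: usize) -> Range<f32> {
    if samples.is_empty() {
        return 0.0..1.0;
    }
    samples.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let last = samples.len() - 1;
    // value sitting at the p-th percentile of the sorted sample
    let at = |p: usize| samples[(p * last) / 100];
    at(lo)..at(hi)
}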

View File

@@ -1,5 +1,5 @@
use super::MOC;
use crate::{camera::CameraViewPort, SpaceMoc};
use crate::{camera::CameraViewPort, HEALPixCoverage};
use al_api::moc::MOCOptions;
pub struct MOCHierarchy {
@@ -12,13 +12,19 @@ use al_core::WebGlContext;
impl MOCHierarchy {
pub fn from_full_res_moc(
gl: WebGlContext,
full_res_moc: SpaceMoc,
full_res_moc: HEALPixCoverage,
options: &MOCOptions,
) -> Self {
let full_res_depth = full_res_moc.depth();
let mut mocs: Vec<_> = (0..full_res_depth)
.map(|d| MOC::new(gl.clone(), SpaceMoc(full_res_moc.degraded(d)), options))
.map(|d| {
MOC::new(
gl.clone(),
HEALPixCoverage(full_res_moc.degraded(d)),
options,
)
})
.collect();
mocs.push(MOC::new(gl.clone(), full_res_moc, options));
@@ -74,7 +80,7 @@ impl MOCHierarchy {
&mut self.mocs[d]
}
pub fn get_full_moc(&self) -> &SpaceMoc {
pub fn get_full_moc(&self) -> &HEALPixCoverage {
&self.mocs.last().unwrap().moc
}
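
from_full_res_moc above builds one coverage per depth by degrading the full-resolution MOC, then appends the full-resolution MOC itself as the last level so get_full_moc can return it. A generic sketch of that pyramid construction (build_pyramid is an illustrative helper; degrade stands in for the degraded(d) call shown in the hunk):

/// Sketch: build a multi-resolution pyramid, coarsest level first, full resolution last.
fn build_pyramid<T: Clone>(full: &T, full_depth: u8, degrade: impl Fn(&T, u8) -> T) -> Vec<T> {
    let mut levels: Vec<T> = (0..full_depth).map(|d| degrade(full, d)).collect();
    levels.push(full.clone()); // last entry is the full-resolution MOC itself
    levels
}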

View File

@@ -3,7 +3,7 @@ pub mod renderer;
pub use renderer::MOCRenderer;
use crate::camera::CameraViewPort;
use crate::healpix::moc::SpaceMoc;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::projection::ProjectionType;
use crate::renderable::WebGl2RenderingContext;
use crate::shader::ShaderManager;
@@ -32,11 +32,11 @@ pub struct MOC {
inner: [Option<MOCIntern>; 3],
pub moc: SpaceMoc,
pub moc: HEALPixCoverage,
}
impl MOC {
pub(super) fn new(gl: WebGlContext, moc: SpaceMoc, cfg: &MOCOptions) -> Self {
pub(super) fn new(gl: WebGlContext, moc: HEALPixCoverage, cfg: &MOCOptions) -> Self {
let sky_fraction = moc.sky_fraction() as f32;
let max_order = moc.depth_max();
@@ -228,7 +228,7 @@ impl MOCIntern {
fn vertices_in_view<'a>(
&self,
moc: &'a SpaceMoc,
moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
) -> impl Iterator<Item = [(f64, f64); 4]> + 'a {
let view_moc = camera.get_cov(CooSystem::ICRS);
@@ -250,7 +250,7 @@ impl MOCIntern {
fn draw(
&mut self,
moc: &SpaceMoc,
moc: &HEALPixCoverage,
camera: &mut CameraViewPort,
proj: &ProjectionType,
shaders: &mut ShaderManager,
@@ -457,7 +457,7 @@ impl MOCIntern {
fn compute_edge_paths_iter<'a>(
&self,
moc: &'a SpaceMoc,
moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
) -> impl Iterator<Item = f32> + 'a {
self.vertices_in_view(moc, camera).flat_map(|v| {

View File

@@ -1,4 +1,4 @@
use crate::{healpix::moc::SpaceMoc, CameraViewPort, ShaderManager};
use crate::{healpix::coverage::HEALPixCoverage, CameraViewPort, ShaderManager};
use al_core::WebGlContext;
use wasm_bindgen::JsValue;
@@ -67,7 +67,7 @@ impl MOCRenderer {
pub fn push_back(
&mut self,
moc: SpaceMoc,
moc: HEALPixCoverage,
cfg: MOCOptions,
camera: &mut CameraViewPort,
proj: &ProjectionType,
@@ -80,7 +80,7 @@ impl MOCRenderer {
//self.layers.push(key);
}
pub fn get_hpx_coverage(&self, moc_uuid: &str) -> Option<&SpaceMoc> {
pub fn get_hpx_coverage(&self, moc_uuid: &str) -> Option<&HEALPixCoverage> {
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == moc_uuid) {
Some(self.mocs[idx].get_full_moc())
} else {

View File

@@ -12,8 +12,9 @@ pub mod utils;
use crate::renderable::image::Image;
use crate::tile_fetcher::TileFetcherQueue;
use al_core::image::format::ChannelType;
use al_api::color::ColorRGB;
use al_api::hips::DataproductType;
use al_api::hips::HiPSCfg;
use al_api::hips::ImageMetadata;
use al_api::image::ImageParams;
@@ -21,7 +22,6 @@ use al_api::image::ImageParams;
use al_core::colormap::Colormaps;
use al_core::shader::Shader;
use al_core::texture::format::PixelType;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
@@ -226,8 +226,8 @@ impl Layers {
if let Some(hips) = self.hipses.get(cdid) {
// Check if a HiPS is fully opaque so that we cannot see the background
// In that case, no need to draw a background because a HiPS will fully cover it
let full_covering_hips = (hips.get_config().get_format().get_pixel_format()
== PixelType::RGB8U
let full_covering_hips = (hips.get_config().get_format().get_channel()
== ChannelType::RGB8U
|| hips.is_allsky())
&& meta.opacity == 1.0;
if full_covering_hips {
@@ -296,8 +296,10 @@ impl Layers {
proj: &ProjectionType,
tile_fetcher: &mut TileFetcherQueue,
) -> Result<usize, JsValue> {
let err_layer_not_found =
JsValue::from_str(&format!("Layer {layer:?} not found, so cannot be removed."));
let err_layer_not_found = JsValue::from_str(&format!(
"Layer {:?} not found, so cannot be removed.",
layer
));
// Color configs, and urls are indexed by layer
self.meta.remove(layer).ok_or(err_layer_not_found.clone())?;
let id = self.ids.remove(layer).ok_or(err_layer_not_found.clone())?;
@@ -331,15 +333,18 @@ impl Layers {
Ok(id_layer)
} else {
Err(JsValue::from_str(&format!(
"Url found {id:?} is associated to no 2D HiPSes."
"Url found {:?} is associated to no 2D HiPSes.",
id
)))
}
}
}
pub fn rename_layer(&mut self, layer: &str, new_layer: &str) -> Result<(), JsValue> {
let err_layer_not_found =
JsValue::from_str(&format!("Layer {layer:?} not found, so cannot be removed."));
let err_layer_not_found = JsValue::from_str(&format!(
"Layer {:?} not found, so cannot be removed.",
layer
));
// layer from layers does also need to be removed
let id_layer = self
@@ -366,14 +371,16 @@ impl Layers {
.iter()
.position(|l| l == first_layer)
.ok_or(JsValue::from_str(&format!(
"Layer {first_layer:?} not found, so cannot be removed."
"Layer {:?} not found, so cannot be removed.",
first_layer
)))?;
let id_second_layer =
self.layers
.iter()
.position(|l| l == second_layer)
.ok_or(JsValue::from_str(&format!(
"Layer {second_layer:?} not found, so cannot be removed.",
"Layer {:?} not found, so cannot be removed.",
second_layer
)))?;
self.layers.swap(id_first_layer, id_second_layer);
@@ -436,13 +443,11 @@ impl Layers {
}*/
camera.register_view_frame(cfg.get_frame(), proj);
let hips = match &cfg.dataproduct_type {
let hips = if cfg.get_cube_depth().is_some() {
// HiPS cube
DataproductType::Cube => HiPS::D3(HiPS3D::new(cfg, gl)?),
// HiPS 3D
DataproductType::SpectralCube => HiPS::D3(HiPS3D::new(cfg, gl)?),
// Typical HiPS image
_ => HiPS::D2(HiPS2D::new(cfg, gl)?),
HiPS::D3(HiPS3D::new(cfg, gl)?)
} else {
HiPS::D2(HiPS2D::new(cfg, gl)?)
};
// add the frame to the camera
@@ -493,6 +498,17 @@ impl Layers {
let fits_already_found = self.images.keys().any(|image_id| image_id == &id);
if !fits_already_found {
// The fits has not been loaded yet
/*if let Some(initial_ra) = properties.get_initial_ra() {
if let Some(initial_dec) = properties.get_initial_dec() {
camera.set_center::<P>(&LonLatT::new(Angle((initial_ra).to_radians()), Angle((initial_dec).to_radians())), &properties.get_frame());
}
}
if let Some(initial_fov) = properties.get_initial_fov() {
camera.set_aperture::<P>(Angle((initial_fov).to_radians()));
}*/
self.images.insert(id.clone(), images);
}
@@ -515,7 +531,7 @@ impl Layers {
pub fn set_layer_cfg(&mut self, layer: String, meta: ImageMetadata) -> Result<(), JsValue> {
// Expect the image hips to be found in the hash map
self.meta.insert(layer.clone(), meta).ok_or_else(|| {
JsValue::from(js_sys::Error::new(&format!("{layer:?} layer not found")))
JsValue::from(js_sys::Error::new(&format!("{:?} layer not found", layer)))
})?;
Ok(())

View File

@@ -26,13 +26,13 @@ impl From<Error> for JsValue {
fn from(e: Error) -> Self {
match e {
Error::ShaderAlreadyInserted { message } => {
JsValue::from_str(&format!("Shader already inserted: {message:?}"))
JsValue::from_str(&format!("Shader already inserted: {:?}", message))
}
Error::ShaderNotFound { message } => {
JsValue::from_str(&format!("Shader not found: {message:?}"))
JsValue::from_str(&format!("Shader not found: {:?}", message))
}
Error::FileNotFound { message } => {
JsValue::from_str(&format!("Shader not found: {message:?}"))
JsValue::from_str(&format!("Shader not found: {:?}", message))
}
Error::ShaderCompilingLinking { message } => message,
Error::Io { message } => message.into(),

View File

@@ -72,10 +72,8 @@ impl HiPSLocalFiles {
tiles_per_fmt[d].get(&i)
}
}
impl HiPSLocalFiles {
pub fn get_moc(&self) -> &web_sys::File {
fn get_moc(&self) -> &web_sys::File {
&self.moc
}
}
@@ -191,10 +189,28 @@ impl TileFetcherQueue {
downloader: Rc<RefCell<Downloader>>,
) {
let cfg = hips.get_config();
// Request the allsky first
// The allsky is not necessarily present in a HiPS service, but it is better to try to fetch it first
//downloader.fetch(query::PixelMetadata::new(cfg));
// Try to fetch the MOC
let hips_cdid = cfg.get_creator_did();
let moc_url = if let Some(local_hips) = self.hips_local_files.get(hips_cdid) {
if let Ok(url) =
web_sys::Url::create_object_url_with_blob(local_hips.get_moc().as_ref())
{
url
} else {
format!("{}/Moc.fits", cfg.get_root_url())
}
} else {
format!("{}/Moc.fits", cfg.get_root_url())
};
downloader.borrow_mut().fetch(query::Moc::new(
cfg,
&self.hips_local_files,
moc_url,
cfg.get_request_mode(),
cfg.get_request_credentials(),
cfg.get_creator_did().to_string(),
MOCOptions::default(),
));
@@ -203,21 +219,20 @@ impl TileFetcherQueue {
// Request the allsky
let dl = downloader.clone();
// Allsky query
match hips {
HiPS::D2(_) => {
let allsky_query = query::Allsky::new(cfg, None);
let allsky_query = query::Allsky::new(
cfg,
match hips {
HiPS::D2(_) => None,
HiPS::D3(h) => Some(h.get_slice() as u32),
},
);
crate::utils::set_timeout(
move || {
dl.borrow_mut().fetch(allsky_query);
},
100,
);
}
// Do not ask for allsky for HiPS3D
HiPS::D3(_) => (),
}
crate::utils::set_timeout(
move || {
dl.borrow_mut().fetch(allsky_query);
},
100,
);
if cfg.get_min_depth_tile() == 0 {
for tile_cell in crate::healpix::cell::ALLSKY_HPX_CELLS_D0 {

View File

@@ -12,7 +12,7 @@ impl Time {
let r = f()?;
let duration = Time::now() - start_time;
// print the duration in the console
al_core::log(&format!("{label:?} time: {duration:?}"));
al_core::log(&format!("{:?} time: {:?}", label, duration));
Ok(r)
}

View File

@@ -1,45 +0,0 @@
// Utils methods for decoding texture bytes to f32, i32, i16, u8
highp float decode_f32(highp vec4 rgba) {
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
if (abs(Exponent + 127.0) < 1e-3) {
return 0.0;
}
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
return Result;
}
int decode_i32(vec4 rgba) {
int r = int(rgba.r * 255.0 + 0.5);
int g = int(rgba.g * 255.0 + 0.5);
int b = int(rgba.b * 255.0 + 0.5);
int a = int(rgba.a * 255.0 + 0.5);
// GLSL int automatically handles the top-most sign bit (two's complement behaviour)
int value = (r << 24) | (g << 16) | (b << 8) | a; // Combine into a 32-bit integer
return value;
}
int decode_i16(vec2 rg) {
int r = int(rg.r * 255.0 + 0.5);
int g = int(rg.g * 255.0 + 0.5);
int value = (r << 8) | g; // Combine into a 16-bit integer
// Convert from unsigned to signed 16-bit
if (value >= 32768) {
value -= 65536;
}
return value;
}
uint decode_u8(float r) {
uint value = uint(r * 255.0 + 0.5);
return value;
}
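
These removed helpers rebuild typed values from the raw RGBA bytes of a normalized texture fetch, emulating the IEEE 754 float layout and two's complement by hand because the data was uploaded as plain RGBA8. For comparison, the equivalent CPU-side decoding in Rust is a one-liner per type (std only; FITS stores samples big-endian):

/// Sketch: decode big-endian FITS sample bytes, mirroring decode_f32/decode_i32/decode_i16/decode_u8.
fn decode_f32(b: [u8; 4]) -> f32 {
    f32::from_be_bytes(b) // sign/exponent/mantissa reassembly done by the hardware
}

fn decode_i32(b: [u8; 4]) -> i32 {
    i32::from_be_bytes(b)
}

fn decode_i16(b: [u8; 2]) -> i16 {
    i16::from_be_bytes(b) // two's complement handled natively, no manual 65536 wrap
}

fn decode_u8(b: u8) -> u8 {
    b
}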

View File

@@ -1,57 +0,0 @@
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ../colormaps/colormap.glsl;
#include ../transfer_funcs.glsl;
#include ../tonal_corrections.glsl;
#include ../decode.glsl;
/////////////////////////////////////////////
/// FITS sampler
vec4 val2c_f32(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
return apply_tonal(new_color);
}
vec4 val2c(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
}
vec4 uv2c_f32(vec2 uv) {
float val = decode_f32(texture(tex, uv).rgba*255.0);
return val2c_f32(val);
}
vec4 uv2c_i32(vec2 uv) {
float val = float(decode_i32(texture(tex, uv).rgba));
return val2c(val);
}
vec4 uv2c_i16(vec2 uv) {
float val = float(decode_i16(texture(tex, uv).rg));
return val2c(val);
}
vec4 uv2c_u8(vec2 uv) {
float val = float(decode_u8(texture(tex, uv).r));
return val2c(val);
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision highp float;
precision highp sampler2D;
precision highp int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_f32(uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_i16(uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_i32(uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -0,0 +1,43 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform isampler2D tex;
uniform float opacity;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ./../colormaps/colormap.glsl;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;
vec4 apply_colormap_to_grayscale(float x, float a) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
}
void main() {
ivec4 color = texture(tex, frag_uv);
out_frag_color = apply_colormap_to_grayscale(float(color.r), float(color.a));
out_frag_color.a = out_frag_color.a * opacity;
}
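
This new integer-sampler shader applies the same grayscale pipeline as the other variants in this diff: a linear scale/offset rescaling, a transfer function, optional reversal, a colormap lookup, and BLANK/NaN masking. A hedged CPU-side sketch of that pipeline (transfer and colormap are placeholders for the shader's transfer_func(H, ...) and colormap_f(...)):

/// Sketch of the grayscale-to-color pipeline used by the samplers in this diff.
fn grayscale_to_rgba(
    x: f32,
    scale: f32,
    offset: f32,
    blank: f32,
    min_value: f32,
    max_value: f32,
    reversed: bool,
    transfer: impl Fn(f32, f32, f32) -> f32,
    colormap: impl Fn(f32) -> [f32; 4],
) -> [f32; 4] {
    if x == blank || x.is_nan() {
        return [0.0; 4]; // BLANK/NaN pixels become fully transparent black
    }
    let mut alpha = x * scale + offset; // raw sample -> physical value
    alpha = transfer(alpha, min_value, max_value); // stretch to [0, 1] between the cuts
    if reversed {
        alpha = 1.0 - alpha; // mirrors mix(alpha, 1.0 - alpha, reversed)
    }
    colormap(alpha) // RGBA lookup in the active colormap
}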

View File

@@ -0,0 +1,55 @@
#version 300 es
precision highp float;
precision highp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
precision highp int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ./../colormaps/colormap.glsl;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;
vec4 apply_colormap_to_grayscale(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
return apply_tonal(new_color);
}
highp float decode32(highp vec4 rgba) {
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
if (abs(Exponent + 127.0) < 1e-3) {
return 0.0;
}
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
return Result;
}
void main() {
highp float value = decode32(texture(tex, frag_uv).abgr*255.0);
// reconstruct the float value
out_frag_color = apply_colormap_to_grayscale(value);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_u8(uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -0,0 +1,43 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform usampler2D tex;
uniform float opacity;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ./../colormaps/colormap.glsl;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;
vec4 apply_colormap_to_grayscale(float x, float a) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
}
void main() {
uvec4 color = texture(tex, frag_uv);
out_frag_color = apply_colormap_to_grayscale(float(color.r), float(color.a));
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -5,82 +5,67 @@ uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
uniform int tex_storing_fits;
#include ../colormaps/colormap.glsl;
#include ../transfer_funcs.glsl;
#include ../tonal_corrections.glsl;
#include ../hsv.glsl;
#include ../decode.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;
#include ./hsv.glsl;
/////////////////////////////////////////////
/// RGBA sampler
vec4 uvw2c_rgba(vec3 uv) {
vec4 c = texture(tex, uv).rgba;
c.r = transfer_func(H, c.r, min_value, max_value);
c.g = transfer_func(H, c.g, min_value, max_value);
c.b = transfer_func(H, c.b, min_value, max_value);
// apply reversed
c.rgb = mix(c.rgb, 1.0 - c.rgb, reversed);
return apply_tonal(c);
vec4 get_pixels(vec3 uv) {
return texture(tex, uv);
}
vec4 uvw2cmap_rgba(vec3 uv) {
float v = texture(tex, uv).r;
// apply transfer f
v = transfer_func(H, v, min_value, max_value);
// apply cmap
vec4 c = colormap_f(v);
// apply reversed
c.rgb = mix(c.rgb, 1.0 - c.rgb, reversed);
return apply_tonal(c);
vec3 reverse_uv(vec3 uv) {
uv.y = 1.0 - uv.y;
return uv;
}
/////////////////////////////////////////////
/// FITS sampler
vec4 apply_color_settings(vec4 color) {
color.r = transfer_func(H, color.r, min_value, max_value);
color.g = transfer_func(H, color.g, min_value, max_value);
color.b = transfer_func(H, color.b, min_value, max_value);
vec4 val2c_f32(float x) {
// apply reversed
color.rgb = mix(color.rgb, 1.0 - color.rgb, reversed);
return apply_tonal(color);
}
vec4 get_color_from_texture(vec3 UV) {
vec4 color = get_pixels(UV);
return apply_color_settings(color);
}
vec4 apply_colormap_to_grayscale(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x) || isnan(x)));
return apply_tonal(new_color);
}
vec4 val2c(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
highp float decode32(highp vec4 rgba) {
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
return Result;
}
vec4 uvw2c_f32(vec3 uv) {
float val = decode_f32(texture(tex, uv).rgba*255.0);
return val2c_f32(val);
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
// FITS data pixels are reversed along the y axis
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
float value = decode32(get_pixels(uv).abgr*255.0);
return apply_colormap_to_grayscale(value);
}
vec4 uvw2c_i32(vec3 uv) {
float val = float(decode_i32(texture(tex, uv).rgba));
return val2c(val);
}
vec4 uvw2c_i16(vec3 uv) {
float val = float(decode_i16(texture(tex, uv).rg));
return val2c(val);
}
vec4 uvw2c_u8(vec3 uv) {
float val = float(decode_u8(texture(tex, uv).r));
return val2c(val);
}
vec4 get_colormap_from_color_texture(vec3 uv) {
float value = get_pixels(uv).r;
return apply_colormap_to_grayscale(value);
}

View File

@@ -0,0 +1,39 @@
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
uniform int tex_storing_fits;
#include ../colormaps/colormap.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;
ivec4 get_pixels(vec3 uv) {
return ivec4(texture(tex, uv));
}
vec3 reverse_uv(vec3 uv) {
uv.y = 1.0 - uv.y;
return uv;
}
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
// FITS data pixels are reversed along the y axis
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
float x = float(get_pixels(uv).r);
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank));
return apply_tonal(new_color);
}

View File

@@ -0,0 +1,38 @@
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
uniform int tex_storing_fits;
#include ../colormaps/colormap.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;
uvec4 get_pixels(vec3 uv) {
return uvec4(texture(tex, uv));
}
vec3 reverse_uv(vec3 uv) {
uv.y = 1.0 - uv.y;
return uv;
}
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
// FITS data pixels are reversed along the y axis
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
float x = float(get_pixels(uv).r);
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank));
return apply_tonal(new_color);
}

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
@@ -14,8 +16,8 @@ uniform float opacity;
#include ../color.glsl;
void main() {
vec4 color_start = uvw2cmap_rgba(frag_uv_start);
vec4 color_end = uvw2cmap_rgba(frag_uv_end);
vec4 color_start = get_color_from_texture(frag_uv_start);
vec4 color_end = get_color_from_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = opacity * out_frag_color.a;

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
@@ -14,8 +16,8 @@ uniform float opacity;
#include ../color.glsl;
void main() {
vec4 color_start = uvw2c_rgba(frag_uv_start);
vec4 color_end = uvw2c_rgba(frag_uv_end);
vec4 color_start = get_colormap_from_color_texture(frag_uv_start);
vec4 color_end = get_colormap_from_color_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = opacity * out_frag_color.a;

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
@@ -15,14 +17,8 @@ out vec4 out_frag_color;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_u8(uv0);
vec4 color_end = uvw2c_u8(uv1);
vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -1,8 +1,10 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
uniform isampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
@@ -10,19 +12,13 @@ in float frag_blending_factor;
out vec4 out_frag_color;
#include ../color.glsl;
#include ../color_i.glsl;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_f32(uv0);
vec4 color_end = uvw2c_f32(uv1);
vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -1,8 +1,10 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
uniform usampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
@@ -10,19 +12,13 @@ in float frag_blending_factor;
out vec4 out_frag_color;
#include ../color.glsl;
#include ../color_u.glsl;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_i16(uv0);
vec4 color_end = uvw2c_i16(uv1);
vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -1,29 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
uniform sampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
in float frag_blending_factor;
out vec4 out_frag_color;
#include ../color.glsl;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_i32(uv0);
vec4 color_end = uvw2c_i32(uv1);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -2,6 +2,7 @@
precision highp float;
layout (location = 0) in vec3 xyz;
//layout (location = 0) in vec2 lonlat;
layout (location = 1) in vec3 uv_start;
layout (location = 2) in vec3 uv_end;
layout (location = 3) in float time_tile_received;

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp usampler2DArray;
precision lowp isampler2DArray;
precision mediump int;
uniform sampler2DArray tex;
@@ -20,16 +22,26 @@ uniform Tile textures_tiles[12];
#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;
uniform float opacity;
uniform vec4 no_tile_color;
void main() {
vec3 uv = xyz2uv(normalize(frag_pos));
vec4 c = uvw2c_rgba(uv);
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);
//c = mix(c, no_tile_color, tile.empty);
out_frag_color = c;
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
vec2 offset = uv;
vec3 UV = vec3(offset, float(tile.texture_idx));
vec4 color = mix(get_pixels(UV), no_tile_color, tile.empty);
return apply_color_settings(color);
}
void main() {
// Get the HEALPix cell idx and the uv in the texture
vec4 c = get_tile_color(normalize(frag_pos));
out_frag_color = vec4(c.rgb, opacity * c.a);
}
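
get_tile_color resolves the view direction into a depth-0 HEALPix cell plus fractional (dx, dy) offsets inside that cell, then samples the texture-array layer assigned to that cell. A rough CPU-side sketch of the same lookup, assuming the cdshealpix crate's nested::hash_with_dxdy(depth, lon, lat) -> (u64, f64, f64) (signature to be checked against the pinned version; texture_idx_of stands in for the textures_tiles uniform):

use cdshealpix::nested::hash_with_dxdy;

/// Sketch: map a direction (lon, lat in radians) to (u, v, layer) in the tile texture array.
fn tile_uv(lon: f64, lat: f64, texture_idx_of: impl Fn(u64) -> f32) -> (f32, f32, f32) {
    let (cell, dx, dy) = hash_with_dxdy(0, lon, lat); // base cell + offsets inside it
    // The shader builds the uv as (dy, dx); the third coordinate selects the array layer.
    (dy as f32, dx as f32, texture_idx_of(cell))
}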

View File

@@ -23,12 +23,23 @@ uniform sampler2DArray tex;
#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
vec2 offset = uv;
vec3 UV = vec3(offset, float(tile.texture_idx));
float value = mix(get_pixels(UV).r, 0.0, tile.empty);
return apply_colormap_to_grayscale(value);
}
void main() {
vec3 uv = xyz2uv(normalize(frag_pos));
vec4 c = uvw2cmap_rgba(uv);
vec4 c = get_tile_color(normalize(frag_pos));
out_frag_color = c;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision mediump int;
in vec3 frag_pos;
@@ -27,16 +29,24 @@ struct TileColor {
#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
vec2 offset = uv;
vec3 UV = vec3(offset, float(tile.texture_idx));
vec4 color = get_colormap_from_grayscale_texture(UV);
color.a *= (1.0 - tile.empty);
return color;
}
void main() {
vec3 uv = xyz2uv(normalize(frag_pos));
uv.y = 1.0 - uv.y;
vec4 c = uvw2c_f32(uv);
//c.a *= (1.0 - tile.empty);
vec4 c = get_tile_color(normalize(frag_pos));
out_frag_color = c;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -5,7 +5,7 @@ precision lowp usampler2DArray;
precision lowp isampler2DArray;
precision mediump int;
uniform sampler2DArray tex;
uniform isampler2DArray tex;
in vec3 frag_pos;
in vec2 out_clip_pos;
@@ -22,18 +22,26 @@ uniform Tile textures_tiles[12];
uniform float opacity;
#include ../color.glsl;
#include ../color_i.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
vec2 offset = uv;
vec3 UV = vec3(offset, float(tile.texture_idx));
vec4 color = get_colormap_from_grayscale_texture(UV);
color.a *= (1.0 - tile.empty);
return color;
}
void main() {
vec3 uv = xyz2uv(normalize(frag_pos));
uv.y = 1.0 - uv.y;
vec4 c = uvw2c_u8(uv);
//c.a *= (1.0 - tile.empty);
vec4 c = get_tile_color(normalize(frag_pos));
out_frag_color = c;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -5,7 +5,7 @@ precision lowp usampler2DArray;
precision lowp isampler2DArray;
precision mediump int;
uniform sampler2DArray tex;
uniform usampler2DArray tex;
in vec3 frag_pos;
in vec2 out_clip_pos;
@@ -22,17 +22,26 @@ uniform Tile textures_tiles[12];
uniform float opacity;
#include ../color_u.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;
#include ./../color.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
vec2 offset = uv;
vec3 UV = vec3(offset, float(tile.texture_idx));
vec4 color = get_colormap_from_grayscale_texture(UV);
color.a *= (1.0 - tile.empty);
return color;
}
void main() {
vec3 uv = xyz2uv(normalize(frag_pos));
uv.y = 1.0 - uv.y;
vec4 c = uvw2c_i32(uv);
//c.a *= (1.0 - tile.empty);
vec4 c = get_tile_color(normalize(frag_pos));
out_frag_color = c;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,37 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision mediump int;
uniform sampler2DArray tex;
in vec3 frag_pos;
in vec2 out_clip_pos;
out vec4 out_frag_color;
struct Tile {
int uniq; // Healpix cell
int texture_idx; // Index in the texture buffer
float start_time; // Absolute time that the load has been done in ms
float empty;
};
uniform Tile textures_tiles[12];
uniform float opacity;
#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;
void main() {
vec3 uv = xyz2uv(normalize(frag_pos));
uv.y = 1.0 - uv.y;
vec4 c = uvw2c_i16(uv);
//c.a *= (1.0 - tile.empty);
out_frag_color = c;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,9 +0,0 @@
vec3 xyz2uv(vec3 xyz) {
HashDxDy result = hash_with_dxdy(0, xyz.zxy);
int idx = result.idx;
vec2 offset = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
return vec3(offset, float(tile.texture_idx));
}

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;
uniform sampler3D tex;
@@ -9,11 +11,10 @@ in vec3 frag_uv;
out vec4 out_frag_color;
uniform float opacity;
#include ../hips/color.glsl;
#include ../../hips/color.glsl;
void main() {
vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
vec4 color = uvw2c_rgba(uv);
vec4 color = get_color_from_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
out_frag_color = color;
out_frag_color.a = opacity * out_frag_color.a;
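
The cube samplers address the 3D texture with mod(frag_uv.z, 32.0) / 32.0, i.e. cube slices are packed in blocks of 32 along the texture's depth axis. A small sketch of that index arithmetic (the helper name is illustrative; the block size of 32 comes straight from the shader):

/// Sketch: normalized depth coordinate of a cube slice inside a 32-slice texture block.
fn slice_w(slice: u32) -> f32 {
    const SLICES_PER_BLOCK: u32 = 32;
    (slice % SLICES_PER_BLOCK) as f32 / SLICES_PER_BLOCK as f32
}
// e.g. slice 37 falls in the second block, at w = 5.0 / 32.0 within it.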

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;
uniform sampler3D tex;
@@ -9,11 +11,10 @@ in vec3 frag_uv;
out vec4 out_frag_color;
uniform float opacity;
#include ../hips/color.glsl;
#include ../../hips/color.glsl;
void main() {
vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
vec4 color = uvw2cmap_rgba(uv);
vec4 color = get_colormap_from_color_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
out_frag_color = color;
out_frag_color.a = opacity * out_frag_color.a;

View File

@@ -10,13 +10,12 @@ in vec3 frag_uv;
out vec4 out_frag_color;
#include ../hips/color.glsl;
#include ../../hips/color.glsl;
uniform float opacity;
void main() {
vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
vec4 color = uvw2c_i16(uv);
vec4 color = get_colormap_from_grayscale_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
out_frag_color = color;
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -4,19 +4,18 @@ precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;
uniform sampler3D tex;
uniform isampler3D tex;
in vec3 frag_uv;
out vec4 out_frag_color;
#include ../hips/color.glsl;
#include ../../hips/color_i.glsl;
uniform float opacity;
void main() {
vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
vec4 color = uvw2c_i32(uv);
vec4 color = get_colormap_from_grayscale_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
out_frag_color = color;
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -1,20 +1,21 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;
uniform sampler3D tex;
uniform usampler3D tex;
in vec3 frag_uv;
out vec4 out_frag_color;
#include ../hips/color.glsl;
#include ../../hips/color_u.glsl;
uniform float opacity;
void main() {
vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
vec4 color = uvw2c_f32(uv);
vec4 color = get_colormap_from_grayscale_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
out_frag_color = color;
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -11,7 +11,7 @@ uniform mat3 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;
#include ../projection/projection.glsl;
#include ../../projection/projection.glsl;
void main() {
vec3 p_xyz = lonlat2xyz(lonlat);

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
uniform sampler3D tex;
in vec3 frag_uv;
out vec4 out_frag_color;
#include ../hips/color.glsl;
uniform float opacity;
void main() {
vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
vec4 color = uvw2c_u8(uv);
out_frag_color = color;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -9,6 +9,6 @@ uniform sampler2D tex;
uniform float opacity;
void main() {
out_frag_color = texture(tex, frag_uv);
out_frag_color = texture(tex, vec2(frag_uv.x, 1.0 - frag_uv.y));
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -45,7 +45,6 @@ export let DefaultActionsForContextMenu = (function () {
const a = aladinInstance;
const selectObjects = (selection) => {
console.log(selection)
a.view.selectObjects(selection);
};
return [

Some files were not shown because too many files have changed in this diff.