Mirror of https://github.com/cds-astro/aladin-lite.git (synced 2025-12-12 15:49:18 -08:00)
Update to the new version of fitsrs
* Async code is replaced by sync code. Converting the JS blob to wasm memory does not take much time, even for 1-2 GB FITS files, and the code is much simpler to write.
* A GLSL/Rust refactoring has been done so that fitsrs is called from only one place in the code.
* The raw FITS data unit is given directly to the GPU, which then performs the big-endian to little-endian conversion on the fly.
* Implement GZIP FITS support (#241)
* Fix #293
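As a rough sketch of the third point above (illustration only: the RG8 texel layout, the function name, and where BSCALE/BZERO are applied are assumptions, not the shader actually shipped in this commit), a big-endian 16-bit FITS sample uploaded untouched as two raw bytes per texel can be reassembled directly in the fragment shader:

// Hypothetical GLSL helper, shown as a Rust string constant for illustration only.
const DECODE_I16_BE_GLSL: &str = r#"
float decode_i16_be(vec2 texel, float bscale, float bzero) {
    float hi = floor(texel.r * 255.0 + 0.5);    // most significant byte
    float lo = floor(texel.g * 255.0 + 0.5);    // least significant byte
    float u  = hi * 256.0 + lo;                 // unsigned 16-bit value
    float v  = u >= 32768.0 ? u - 65536.0 : u;  // two's complement sign
    return bscale * v + bzero;                  // FITS linear scaling
}
"#;

With this approach the wasm side copies the raw data unit into the texture exactly once; no per-pixel byte swapping is needed on the CPU.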
Committed by: Matthieu Baumann
Parent: 5d6e113c19
Commit: e1f85bab97
@@ -8,7 +8,7 @@
|
||||
import A from '../src/js/A.js';
|
||||
A.init.then(() => {
|
||||
let aladin = A.aladin('#aladin-lite-div', {fov: 30, target: "280 +0", projection: "AIT", showShareControl:true, showSettingsControl: true, showContextMenu:true});
|
||||
|
||||
|
||||
aladin.setOverlayImageLayer(A.image(
|
||||
"https://www.virtualastronomy.org/files/avm_examples/spitzer/ssc2005-24a1.jpg",
|
||||
{
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
{
|
||||
name: "M61",
|
||||
wcs: {
|
||||
NAXIS: 0, // Minimal header
|
||||
NAXIS: 2, // Minimal header
|
||||
CTYPE1: 'RA---TAN', // TAN (gnomic) projection
|
||||
CTYPE2: 'DEC--TAN', // TAN (gnomic) projection
|
||||
EQUINOX: 2000.0, // Equatorial coordinates definition (yr)
|
||||
|
||||
@@ -26,8 +26,7 @@ wasm-bindgen = "=0.2.92"
|
||||
wasm-streams = "0.3.0"
|
||||
async-channel = "1.8.0"
|
||||
mapproj = "0.3.0"
|
||||
fitsrs = "0.2.11"
|
||||
wcs = "0.3.1"
|
||||
fitsrs = { git = "https://github.com/cds-astro/fitsrs", branch = "master" }
|
||||
colorgrad = "0.6.2"
|
||||
|
||||
[features]
|
||||
|
||||
@@ -9,7 +9,7 @@ js-sys = "0.3.47"
|
||||
cgmath = "*"
|
||||
jpeg-decoder = "0.3.0"
|
||||
png = "0.17.6"
|
||||
fitsrs = "0.2.10"
|
||||
fitsrs = { git = "https://github.com/cds-astro/fitsrs", branch = "master" }
|
||||
al-api = { path = "../al-api" }
|
||||
serde = { version = "^1.0.59", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
|
||||
@@ -2,11 +2,11 @@ use std::collections::HashMap;
|
||||
|
||||
use colorgrad::Color;
|
||||
|
||||
use crate::image::format;
|
||||
use crate::shader::SendUniformsWithParams;
|
||||
use crate::Texture2D;
|
||||
use crate::WebGlContext;
|
||||
|
||||
use crate::texture::format::RGBA8U;
|
||||
use crate::webgl_ctx::WebGlRenderingCtx;
|
||||
use wasm_bindgen::JsValue;
|
||||
|
||||
@@ -68,7 +68,7 @@ fn build_cmaps_texture(gl: &WebGlContext, cmaps: &[Colormap]) -> Result<Texture2
|
||||
),
|
||||
];
|
||||
|
||||
Texture2D::create_from_raw_pixels::<format::RGBA8U>(
|
||||
Texture2D::create_from_raw_pixels::<RGBA8U>(
|
||||
gl,
|
||||
WIDTH_CMAP_TEX as i32,
|
||||
cmaps.len() as i32,
|
||||
|
||||
@@ -6,11 +6,11 @@ pub struct Bitmap<F> {
|
||||
format: std::marker::PhantomData<F>,
|
||||
}
|
||||
|
||||
use crate::image::format::ImageFormat;
|
||||
use crate::image::Image;
|
||||
use crate::texture::format::TextureFormat;
|
||||
impl<F> Bitmap<F>
|
||||
where
|
||||
F: ImageFormat + Clone,
|
||||
F: TextureFormat + Clone,
|
||||
{
|
||||
pub fn new(image: web_sys::ImageBitmap) -> Self {
|
||||
Self {
|
||||
@@ -23,7 +23,7 @@ use crate::texture::Tex3D;
|
||||
use wasm_bindgen::JsValue;
|
||||
impl<F> Image for Bitmap<F>
|
||||
where
|
||||
F: ImageFormat + Clone,
|
||||
F: TextureFormat + Clone,
|
||||
{
|
||||
fn insert_into_3d_texture<T: Tex3D>(
|
||||
&self,
|
||||
|
||||
@@ -7,7 +7,7 @@ pub struct Canvas<F> {
|
||||
|
||||
impl<F> Canvas<F>
|
||||
where
|
||||
F: ImageFormat + Clone,
|
||||
F: TextureFormat + Clone,
|
||||
{
|
||||
pub fn new(canvas: web_sys::HtmlCanvasElement) -> Self {
|
||||
Self {
|
||||
@@ -17,14 +17,14 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
use crate::image::format::ImageFormat;
|
||||
use crate::image::Image;
|
||||
use crate::texture::format::TextureFormat;
|
||||
use crate::texture::Tex3D;
|
||||
use cgmath::Vector3;
|
||||
use wasm_bindgen::JsValue;
|
||||
impl<F> Image for Canvas<F>
|
||||
where
|
||||
F: ImageFormat,
|
||||
F: TextureFormat,
|
||||
{
|
||||
fn insert_into_3d_texture<T: Tex3D>(
|
||||
&self,
|
||||
|
||||
@@ -1,68 +1,112 @@
|
||||
use cgmath::{Vector2, Vector3};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Fits<'a> {
|
||||
// Tile size
|
||||
size: Vector2<i32>,
|
||||
|
||||
pub data: Data<'a>,
|
||||
}
|
||||
|
||||
use std::borrow::Cow;
|
||||
use crate::texture::format::TextureFormat;
|
||||
use crate::texture::format::R8U;
|
||||
use cgmath::Vector3;
|
||||
use fitsrs::card::Value;
|
||||
use fitsrs::gz::GzReader;
|
||||
use fitsrs::hdu::header::Bitpix;
|
||||
use fitsrs::WCS;
|
||||
use fitsrs::{Fits, HDU};
|
||||
use std::fmt::Debug;
|
||||
#[derive(Debug)]
|
||||
pub enum Data<'a> {
|
||||
U8(Cow<'a, [u8]>),
|
||||
I16(Cow<'a, [i16]>),
|
||||
I32(Cow<'a, [i32]>),
|
||||
F32(Cow<'a, [f32]>),
|
||||
}
|
||||
use fitsrs::{fits::Fits as FitsData, hdu::data::InMemData};
|
||||
use std::io::Cursor;
|
||||
use wasm_bindgen::JsValue;
|
||||
|
||||
impl<'a> Fits<'a> {
|
||||
pub fn from_byte_slice(bytes_reader: &'a mut Cursor<&[u8]>) -> Result<Self, JsValue> {
|
||||
let FitsData { hdu } = FitsData::from_reader(bytes_reader)
|
||||
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
|
||||
#[derive(Debug)]
|
||||
pub struct FitsImage<'a> {
|
||||
// image size
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
pub depth: u32,
|
||||
// bitpix
|
||||
pub bitpix: Bitpix,
|
||||
// 1.0 by default
|
||||
pub bscale: f32,
|
||||
// 0.0 by default
|
||||
pub bzero: f32,
|
||||
// blank
|
||||
pub blank: Option<f32>,
|
||||
// optional wcs
|
||||
pub wcs: Option<WCS>,
|
||||
// raw bytes of the data image (in Big-Endian)
|
||||
pub raw_bytes: &'a [u8],
|
||||
}
|
||||
|
||||
let header = hdu.get_header();
|
||||
let xtension = header.get_xtension();
|
||||
let width = xtension
|
||||
.get_naxisn(1)
|
||||
.ok_or_else(|| JsValue::from_str("NAXIS1 not found in the fits"))?;
|
||||
impl<'a> FitsImage<'a> {
|
||||
/// Get all the hdu images from a fits file
|
||||
pub fn from_raw_bytes(bytes: &'a [u8]) -> Result<Vec<Self>, JsValue> {
|
||||
let mut fits = Fits::from_reader(Cursor::new(bytes));
|
||||
let mut images = vec![];
|
||||
|
||||
let height = xtension
|
||||
.get_naxisn(2)
|
||||
.ok_or_else(|| JsValue::from_str("NAXIS2 not found in the fits"))?;
|
||||
while let Some(Ok(hdu)) = fits.next() {
|
||||
match hdu {
|
||||
HDU::XImage(hdu) | HDU::Primary(hdu) => {
|
||||
// Prefer getting the dimension directly from NAXIS1/NAXIS2 instead of from the WCS
|
||||
// because it may not exist in all HDU images
|
||||
let width = *hdu
|
||||
.get_header()
|
||||
.get_xtension()
|
||||
.get_naxisn(1)
|
||||
.ok_or(JsValue::from_str("NAXIS1 not found"))?
|
||||
as u32;
|
||||
let height = *hdu
|
||||
.get_header()
|
||||
.get_xtension()
|
||||
.get_naxisn(2)
|
||||
.ok_or(JsValue::from_str("NAXIS2 not found"))?
|
||||
as u32;
|
||||
let depth = *hdu.get_header().get_xtension().get_naxisn(3).unwrap_or(&1) as u32;
|
||||
|
||||
let data = hdu.get_data();
|
||||
let data = match *data {
|
||||
InMemData::U8(slice) => Data::U8(Cow::Borrowed(slice)),
|
||||
InMemData::I16(slice) => Data::I16(Cow::Borrowed(slice)),
|
||||
InMemData::I32(slice) => Data::I32(Cow::Borrowed(slice)),
|
||||
InMemData::I64(slice) => {
|
||||
let data = slice.iter().map(|v| *v as i32).collect();
|
||||
Data::I32(Cow::Owned(data))
|
||||
let header = hdu.get_header();
|
||||
|
||||
let bscale = match header.get("BSCALE") {
|
||||
Some(Value::Integer { value, .. }) => *value as f32,
|
||||
Some(Value::Float { value, .. }) => *value as f32,
|
||||
_ => 1.0,
|
||||
};
|
||||
let bzero = match header.get("BZERO") {
|
||||
Some(Value::Integer { value, .. }) => *value as f32,
|
||||
Some(Value::Float { value, .. }) => *value as f32,
|
||||
_ => 0.0,
|
||||
};
|
||||
let blank = match header.get("BLANK") {
|
||||
Some(Value::Integer { value, .. }) => Some(*value as f32),
|
||||
Some(Value::Float { value, .. }) => Some(*value as f32),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let off = hdu.get_data_unit_byte_offset() as usize;
|
||||
let len = hdu.get_data_unit_byte_size() as usize;
|
||||
|
||||
let raw_bytes = &bytes[off..(off + len)];
|
||||
|
||||
let bitpix = hdu.get_header().get_xtension().get_bitpix();
|
||||
let wcs = hdu.wcs().ok();
|
||||
|
||||
images.push(Self {
|
||||
width,
|
||||
height,
|
||||
depth,
|
||||
bitpix,
|
||||
bscale,
|
||||
wcs,
|
||||
bzero,
|
||||
blank,
|
||||
raw_bytes,
|
||||
});
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
InMemData::F32(slice) => Data::F32(Cow::Borrowed(slice)),
|
||||
InMemData::F64(slice) => {
|
||||
let data = slice.iter().map(|v| *v as f32).collect();
|
||||
Data::F32(Cow::Owned(data))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
// Tile size
|
||||
size: Vector2::new(*width as i32, *height as i32),
|
||||
|
||||
// Allocation info of the layout
|
||||
data,
|
||||
})
|
||||
if !images.is_empty() {
|
||||
Ok(images)
|
||||
} else {
|
||||
Err(JsValue::from_str("Image HDU not found in the FITS"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use crate::{image::Image, texture::Tex3D};
|
||||
impl Image for Fits<'_> {
|
||||
impl Image for FitsImage<'_> {
|
||||
fn insert_into_3d_texture<T: Tex3D>(
|
||||
&self,
|
||||
// The texture array
|
||||
@@ -70,98 +114,21 @@ impl Image for Fits<'_> {
|
||||
// An offset to write the image in the texture array
|
||||
offset: &Vector3<i32>,
|
||||
) -> Result<(), JsValue> {
|
||||
match &self.data {
|
||||
Data::U8(data) => {
|
||||
let view = unsafe { R8UI::view(data) };
|
||||
textures.tex_sub_image_3d_with_opt_array_buffer_view(
|
||||
offset.x,
|
||||
offset.y,
|
||||
offset.z,
|
||||
self.size.x,
|
||||
self.size.y,
|
||||
1,
|
||||
Some(view.as_ref()),
|
||||
);
|
||||
}
|
||||
Data::I16(data) => {
|
||||
let view = unsafe { R16I::view(data) };
|
||||
textures.tex_sub_image_3d_with_opt_array_buffer_view(
|
||||
offset.x,
|
||||
offset.y,
|
||||
offset.z,
|
||||
self.size.x,
|
||||
self.size.y,
|
||||
1,
|
||||
Some(view.as_ref()),
|
||||
);
|
||||
}
|
||||
Data::I32(data) => {
|
||||
let view = unsafe { R32I::view(data) };
|
||||
textures.tex_sub_image_3d_with_opt_array_buffer_view(
|
||||
offset.x,
|
||||
offset.y,
|
||||
offset.z,
|
||||
self.size.x,
|
||||
self.size.y,
|
||||
1,
|
||||
Some(view.as_ref()),
|
||||
);
|
||||
}
|
||||
Data::F32(data) => {
|
||||
let view = unsafe {
|
||||
R8UI::view(std::slice::from_raw_parts(
|
||||
data.as_ptr() as *const u8,
|
||||
data.len() * 4,
|
||||
))
|
||||
};
|
||||
textures.tex_sub_image_3d_with_opt_array_buffer_view(
|
||||
offset.x,
|
||||
offset.y,
|
||||
offset.z,
|
||||
self.size.x,
|
||||
self.size.y,
|
||||
1,
|
||||
Some(view.as_ref()),
|
||||
);
|
||||
}
|
||||
}
|
||||
let view = unsafe { R8U::view(self.raw_bytes) };
|
||||
textures.tex_sub_image_3d_with_opt_array_buffer_view(
|
||||
offset.x,
|
||||
offset.y,
|
||||
offset.z,
|
||||
self.width as i32,
|
||||
self.height as i32,
|
||||
self.depth as i32,
|
||||
Some(view.as_ref()),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_size(&self) -> (u32, u32) {
|
||||
(self.size.x as u32, self.size.y as u32)
|
||||
(self.width, self.height)
|
||||
}
|
||||
}
|
||||
|
||||
use crate::image::format::ImageFormat;
|
||||
use wasm_bindgen::JsValue;
|
||||
|
||||
pub trait FitsImageFormat: ImageFormat {
|
||||
const BITPIX: i8;
|
||||
}
|
||||
|
||||
use crate::image::R32F;
|
||||
impl FitsImageFormat for R32F {
|
||||
const BITPIX: i8 = -32;
|
||||
}
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
use crate::image::{R16I, R32I, R64F, R8UI};
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl FitsImageFormat for R64F {
|
||||
const BITPIX: i8 = -64;
|
||||
}
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl FitsImageFormat for R32I {
|
||||
const BITPIX: i8 = 32;
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl FitsImageFormat for R16I {
|
||||
const BITPIX: i8 = 16;
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl FitsImageFormat for R8UI {
|
||||
const BITPIX: i8 = 8;
|
||||
}
|
||||
|
||||
@@ -1,311 +1,9 @@
|
||||
use crate::texture::pixel::Pixel;
|
||||
use crate::texture::format::PixelType;
|
||||
use al_api::hips::ImageExt;
|
||||
|
||||
pub enum Bytes<'a> {
|
||||
Borrowed(&'a [u8]),
|
||||
Owned(Vec<u8>),
|
||||
}
|
||||
|
||||
pub trait ImageFormat {
|
||||
type P: Pixel;
|
||||
type ArrayBufferView: AsRef<js_sys::Object>;
|
||||
|
||||
const NUM_CHANNELS: usize;
|
||||
|
||||
const FORMAT: u32;
|
||||
const INTERNAL_FORMAT: i32;
|
||||
const TYPE: u32;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType;
|
||||
|
||||
/// Creates a JS typed array which is a view into wasm's linear memory at the slice specified.
|
||||
/// This function returns a new typed array which is a view into wasm's memory. This view does not copy the underlying data.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Views into WebAssembly memory are only valid so long as the backing buffer isn't resized in JS. Once this function is called any future calls to Box::new (or malloc of any form) may cause the returned value here to be invalidated. Use with caution!
|
||||
///
|
||||
/// Additionally the returned object can be safely mutated but the input slice isn't guaranteed to be mutable.
|
||||
///
|
||||
/// Finally, the returned object is disconnected from the input slice's lifetime, so there's no guarantee that the data is read at the right time.
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str>;
|
||||
}
|
||||
use crate::webgl_ctx::WebGlRenderingCtx;
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct RGB8U;
|
||||
impl ImageFormat for RGB8U {
|
||||
type P = [u8; 3];
|
||||
|
||||
const NUM_CHANNELS: usize = 3;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGB;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::RGB8U;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
let mut decoder = jpeg::Decoder::new(raw_bytes);
|
||||
let bytes = decoder
|
||||
.decode()
|
||||
.map_err(|_| "Cannot decoder jpeg. This image may not be compressed.")?;
|
||||
|
||||
Ok(Bytes::Owned(bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct RGBA8U;
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl ImageFormat for RGBA8U {
|
||||
type P = [u8; 4];
|
||||
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::RGBA8U;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
let mut decoder = jpeg::Decoder::new(raw_bytes);
|
||||
let bytes = decoder
|
||||
.decode()
|
||||
.map_err(|_| "Cannot decoder png. This image may not be compressed.")?;
|
||||
|
||||
Ok(Bytes::Owned(bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct RGBA32F;
|
||||
impl ImageFormat for RGBA32F {
|
||||
type P = [f32; 4];
|
||||
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA32F as i32;
|
||||
#[cfg(feature = "webgl1")]
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA as i32;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::RGBA32F;
|
||||
|
||||
const TYPE: u32 = WebGlRenderingCtx::FLOAT;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Float32Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct RGB32F;
|
||||
impl ImageFormat for RGB32F {
|
||||
type P = [f32; 3];
|
||||
|
||||
const NUM_CHANNELS: usize = 3;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGB;
|
||||
#[cfg(feature = "webgl2")]
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB32F as i32;
|
||||
#[cfg(feature = "webgl1")]
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB as i32;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::RGB32F;
|
||||
|
||||
const TYPE: u32 = WebGlRenderingCtx::FLOAT;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Float32Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R32F;
|
||||
impl ImageFormat for R32F {
|
||||
type P = [u8; 4];
|
||||
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R64F;
|
||||
impl ImageFormat for R64F {
|
||||
type P = [u8; 4];
|
||||
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R8UI;
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl ImageFormat for R8UI {
|
||||
type P = [u8; 1];
|
||||
|
||||
const NUM_CHANNELS: usize = 1;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R8UI as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::R8UI;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R16I;
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl ImageFormat for R16I {
|
||||
type P = [i16; 1];
|
||||
|
||||
const NUM_CHANNELS: usize = 1;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R16I as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::SHORT;
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::R16I;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Int16Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R32I;
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl ImageFormat for R32I {
|
||||
type P = [i32; 1];
|
||||
|
||||
const NUM_CHANNELS: usize = 1;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R32I as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::INT;
|
||||
|
||||
const CHANNEL_TYPE: ChannelType = ChannelType::R32I;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Int32Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
|
||||
pub enum ChannelType {
|
||||
RGBA32F,
|
||||
RGB32F,
|
||||
RGBA8U,
|
||||
RGB8U,
|
||||
R32F,
|
||||
#[cfg(feature = "webgl2")]
|
||||
R64F,
|
||||
#[cfg(feature = "webgl2")]
|
||||
R8UI,
|
||||
#[cfg(feature = "webgl2")]
|
||||
R16I,
|
||||
#[cfg(feature = "webgl2")]
|
||||
R32I,
|
||||
}
|
||||
|
||||
impl ChannelType {
|
||||
pub fn is_colored(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
ChannelType::RGBA32F | ChannelType::RGB32F | ChannelType::RGBA8U | ChannelType::RGB8U
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub const NUM_CHANNELS: usize = 9;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
|
||||
pub struct ImageFormatType {
|
||||
pub ext: ImageExt,
|
||||
pub channel: ChannelType,
|
||||
pub fmt: PixelType,
|
||||
}
|
||||
|
||||
impl ImageFormatType {
|
||||
@@ -313,11 +11,14 @@ impl ImageFormatType {
|
||||
&self.ext
|
||||
}
|
||||
|
||||
pub fn get_channel(&self) -> ChannelType {
|
||||
self.channel
|
||||
pub fn get_pixel_format(&self) -> PixelType {
|
||||
self.fmt
|
||||
}
|
||||
|
||||
pub fn is_colored(&self) -> bool {
|
||||
self.channel.is_colored()
|
||||
match self.ext {
|
||||
ImageExt::Fits => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ pub struct HTMLImage<F> {
|
||||
|
||||
impl<F> HTMLImage<F>
|
||||
where
|
||||
F: ImageFormat + Clone,
|
||||
F: TextureFormat + Clone,
|
||||
{
|
||||
pub fn new(image: web_sys::HtmlImageElement) -> Self {
|
||||
Self {
|
||||
@@ -17,14 +17,14 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
use crate::image::format::ImageFormat;
|
||||
use crate::image::Image;
|
||||
use crate::texture::format::TextureFormat;
|
||||
use crate::texture::Tex3D;
|
||||
use cgmath::Vector3;
|
||||
use wasm_bindgen::JsValue;
|
||||
impl<F> Image for HTMLImage<F>
|
||||
where
|
||||
F: ImageFormat,
|
||||
F: TextureFormat,
|
||||
{
|
||||
fn insert_into_3d_texture<T: Tex3D>(
|
||||
&self,
|
||||
|
||||
@@ -6,9 +6,9 @@ pub mod html;
|
||||
pub mod raw;
|
||||
|
||||
use crate::image::bitmap::Bitmap;
|
||||
use crate::image::format::RGB8U;
|
||||
use crate::image::format::RGBA8U;
|
||||
use crate::image::raw::ImageBuffer;
|
||||
use crate::texture::format::RGB8U;
|
||||
use crate::texture::format::RGBA8U;
|
||||
pub trait ArrayBuffer: AsRef<js_sys::Object> + std::fmt::Debug {
|
||||
type Item: std::cmp::PartialOrd + Clone + Copy + std::fmt::Debug + cgmath::Zero;
|
||||
|
||||
@@ -179,6 +179,7 @@ impl ArrayBuffer for ArrayF64 {
|
||||
}
|
||||
|
||||
use self::canvas::Canvas;
|
||||
use self::fits::FitsImage;
|
||||
use self::html::HTMLImage;
|
||||
use wasm_bindgen::JsValue;
|
||||
pub trait Image {
|
||||
@@ -210,13 +211,14 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_size(&self) -> (u32, u32) {
|
||||
let image = &**self;
|
||||
image.get_size()
|
||||
}
|
||||
}
|
||||
|
||||
use std::{io::Cursor, rc::Rc};
|
||||
use std::rc::Rc;
|
||||
impl<I> Image for Rc<I>
|
||||
where
|
||||
I: Image,
|
||||
@@ -234,21 +236,19 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_size(&self) -> (u32, u32) {
|
||||
let image = &**self;
|
||||
image.get_size()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
use crate::image::format::{R16I, R32I, R64F, R8UI};
|
||||
use crate::{image::format::R32F, texture::Tex3D};
|
||||
use crate::texture::format::{R16I, R32F, R32I, R8U};
|
||||
use crate::texture::Tex3D;
|
||||
|
||||
use fits::Fits;
|
||||
#[derive(Debug)]
|
||||
#[cfg(feature = "webgl2")]
|
||||
pub enum ImageType {
|
||||
FitsImage {
|
||||
FitsRawBytes {
|
||||
raw_bytes: js_sys::Uint8Array,
|
||||
size: (u32, u32),
|
||||
},
|
||||
@@ -283,7 +283,7 @@ pub enum ImageType {
|
||||
image: ImageBuffer<R16I>,
|
||||
},
|
||||
RawR8ui {
|
||||
image: ImageBuffer<R8UI>,
|
||||
image: ImageBuffer<R8U>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -297,17 +297,16 @@ impl Image for ImageType {
|
||||
offset: &Vector3<i32>,
|
||||
) -> Result<(), JsValue> {
|
||||
match self {
|
||||
ImageType::FitsImage {
|
||||
ImageType::FitsRawBytes {
|
||||
raw_bytes: raw_bytes_buf,
|
||||
..
|
||||
} => {
|
||||
let num_bytes = raw_bytes_buf.length() as usize;
|
||||
let mut raw_bytes = vec![0; num_bytes];
|
||||
raw_bytes_buf.copy_to(&mut raw_bytes[..]);
|
||||
let raw_bytes = raw_bytes_buf.to_vec();
|
||||
|
||||
let mut bytes_reader = Cursor::new(raw_bytes.as_slice());
|
||||
let fits_img = Fits::from_byte_slice(&mut bytes_reader)?;
|
||||
fits_img.insert_into_3d_texture(textures, offset)?
|
||||
let images = FitsImage::from_raw_bytes(&raw_bytes)?;
|
||||
for image in images {
|
||||
image.insert_into_3d_texture(textures, offset)?
|
||||
}
|
||||
}
|
||||
ImageType::Canvas { canvas } => canvas.insert_into_3d_texture(textures, offset)?,
|
||||
ImageType::ImageRgba8u { image } => image.insert_into_3d_texture(textures, offset)?,
|
||||
@@ -331,7 +330,7 @@ impl Image for ImageType {
|
||||
|
||||
fn get_size(&self) -> (u32, u32) {
|
||||
match self {
|
||||
ImageType::FitsImage { size, .. } => *size,
|
||||
ImageType::FitsRawBytes { size, .. } => *size,
|
||||
ImageType::Canvas { canvas } => canvas.get_size(),
|
||||
ImageType::ImageRgba8u { image } => image.get_size(),
|
||||
ImageType::ImageRgb8u { image } => image.get_size(),
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
use crate::image::format::ImageFormat;
|
||||
use crate::texture::format::TextureFormat;
|
||||
|
||||
use crate::texture::pixel::Pixel;
|
||||
use crate::texture::Tex3D;
|
||||
#[derive(Debug)]
|
||||
#[allow(dead_code)]
|
||||
pub struct ImageBuffer<T>
|
||||
where
|
||||
T: ImageFormat,
|
||||
T: TextureFormat,
|
||||
{
|
||||
pub data: Vec<<<T as ImageFormat>::P as Pixel>::Item>,
|
||||
pub data: Vec<<<T as TextureFormat>::P as Pixel>::Item>,
|
||||
pub size: Vector2<i32>,
|
||||
}
|
||||
|
||||
use crate::image::format::Bytes;
|
||||
use crate::texture::format::Bytes;
|
||||
|
||||
pub struct ImageBufferView {
|
||||
pub x: i32,
|
||||
@@ -22,9 +23,13 @@ pub struct ImageBufferView {
|
||||
use wasm_bindgen::JsValue;
|
||||
impl<T> ImageBuffer<T>
|
||||
where
|
||||
T: ImageFormat,
|
||||
T: TextureFormat,
|
||||
{
|
||||
pub fn new(data: Vec<<<T as ImageFormat>::P as Pixel>::Item>, width: i32, height: i32) -> Self {
|
||||
pub fn new(
|
||||
data: Vec<<<T as TextureFormat>::P as Pixel>::Item>,
|
||||
width: i32,
|
||||
height: i32,
|
||||
) -> Self {
|
||||
let size_buf = width * height * (T::NUM_CHANNELS as i32);
|
||||
debug_assert!(size_buf == data.len() as i32);
|
||||
//let buf = <<T as ImageFormat>::P as Pixel>::Container::new(buf);
|
||||
@@ -44,9 +49,10 @@ where
|
||||
|
||||
let decoded_pixels = unsafe {
|
||||
decoded_bytes.set_len(
|
||||
decoded_bytes.len() / std::mem::size_of::<<<T as ImageFormat>::P as Pixel>::Item>(),
|
||||
decoded_bytes.len()
|
||||
/ std::mem::size_of::<<<T as TextureFormat>::P as Pixel>::Item>(),
|
||||
);
|
||||
std::mem::transmute::<Vec<u8>, Vec<<<T as ImageFormat>::P as Pixel>::Item>>(
|
||||
std::mem::transmute::<Vec<u8>, Vec<<<T as TextureFormat>::P as Pixel>::Item>>(
|
||||
decoded_bytes,
|
||||
)
|
||||
};
|
||||
@@ -59,10 +65,8 @@ where
|
||||
debug_assert!(size_buf == raw_bytes.len() as i32);
|
||||
|
||||
let decoded_pixels = unsafe {
|
||||
raw_bytes.set_len(
|
||||
raw_bytes.len() / std::mem::size_of::<<<T as ImageFormat>::P as Pixel>::Item>(),
|
||||
);
|
||||
std::mem::transmute::<Vec<u8>, Vec<<<T as ImageFormat>::P as Pixel>::Item>>(raw_bytes)
|
||||
raw_bytes.set_len(raw_bytes.len() / std::mem::size_of::<<T::P as Pixel>::Item>());
|
||||
std::mem::transmute::<Vec<u8>, Vec<<T::P as Pixel>::Item>>(raw_bytes)
|
||||
};
|
||||
|
||||
Self::new(decoded_pixels, width, height)
|
||||
@@ -73,7 +77,7 @@ where
|
||||
Self { data: vec![], size }
|
||||
}
|
||||
|
||||
pub fn allocate(pixel_fill: &<T as ImageFormat>::P, width: i32, height: i32) -> ImageBuffer<T> {
|
||||
pub fn allocate(pixel_fill: &T::P, width: i32, height: i32) -> ImageBuffer<T> {
|
||||
let size_buf = ((width * height) as usize) * (T::NUM_CHANNELS);
|
||||
|
||||
let data = pixel_fill
|
||||
@@ -112,11 +116,11 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> impl Iterator<Item = &<<T as ImageFormat>::P as Pixel>::Item> {
|
||||
pub fn iter(&self) -> impl Iterator<Item = &<T::P as Pixel>::Item> {
|
||||
self.data.iter()
|
||||
}
|
||||
|
||||
pub fn get_data(&self) -> &[<<T as ImageFormat>::P as Pixel>::Item] {
|
||||
pub fn get_data(&self) -> &[<T::P as Pixel>::Item] {
|
||||
&self.data
|
||||
}
|
||||
|
||||
@@ -129,12 +133,12 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
use crate::image::format::{R16I, R32F, R32I, R8UI, RGB8U, RGBA8U};
|
||||
use crate::texture::format::{R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
|
||||
pub enum ImageBufferType {
|
||||
JPG(ImageBuffer<RGB8U>),
|
||||
PNG(ImageBuffer<RGBA8U>),
|
||||
R32F(ImageBuffer<R32F>),
|
||||
R8UI(ImageBuffer<R8UI>),
|
||||
R8UI(ImageBuffer<R8U>),
|
||||
R16I(ImageBuffer<R16I>),
|
||||
R32I(ImageBuffer<R32I>),
|
||||
}
|
||||
@@ -143,7 +147,7 @@ use crate::image::{ArrayBuffer, Image};
|
||||
use cgmath::{Vector2, Vector3};
|
||||
impl<I> Image for ImageBuffer<I>
|
||||
where
|
||||
I: ImageFormat,
|
||||
I: TextureFormat,
|
||||
{
|
||||
fn insert_into_3d_texture<T: Tex3D>(
|
||||
&self,
|
||||
@@ -152,8 +156,7 @@ where
|
||||
// An offset to write the image in the texture array
|
||||
offset: &Vector3<i32>,
|
||||
) -> Result<(), JsValue> {
|
||||
let js_array =
|
||||
<<<I as ImageFormat>::P as Pixel>::Container as ArrayBuffer>::new(&self.data);
|
||||
let js_array = <<I::P as Pixel>::Container as ArrayBuffer>::new(&self.data);
|
||||
textures.tex_sub_image_3d_with_opt_array_buffer_view(
|
||||
offset.x,
|
||||
offset.y,
|
||||
|
||||
@@ -2,7 +2,7 @@ use {wasm_bindgen::prelude::*, web_sys::WebGlFramebuffer};
|
||||
|
||||
use crate::webgl_ctx::WebGlRenderingCtx;
|
||||
// Internal format used for the framebuffer final texture
|
||||
use crate::image::format::RGBA8U;
|
||||
use crate::texture::format::RGBA8U;
|
||||
|
||||
pub struct FrameBufferObject {
|
||||
gl: WebGlContext,
|
||||
|
||||
@@ -330,6 +330,7 @@ impl SendUniformsWithParams<Colormaps> for HiPSColor {
|
||||
|
||||
let cmap = cmaps.get(self.cmap_name.as_ref());
|
||||
shader
|
||||
.attach_uniforms_from(cmaps)
|
||||
.attach_uniforms_with_params_from(cmap, cmaps)
|
||||
.attach_uniform("H", &self.stretch)
|
||||
.attach_uniform("min_value", &self.min_cut.unwrap_or(0.0))
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use crate::image::format::ImageFormat;
|
||||
use crate::texture::format::TextureFormat;
|
||||
use web_sys::HtmlCanvasElement;
|
||||
use web_sys::WebGlTexture;
|
||||
|
||||
@@ -23,7 +23,7 @@ pub struct Texture3D {
|
||||
}
|
||||
|
||||
impl Texture3D {
|
||||
pub fn create_empty<F: ImageFormat>(
|
||||
pub fn create_empty<F: TextureFormat>(
|
||||
gl: &WebGlContext,
|
||||
// The weight of the individual textures
|
||||
width: i32,
|
||||
@@ -54,10 +54,9 @@ impl Texture3D {
|
||||
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
|
||||
width: width as u32,
|
||||
height: height as u32,
|
||||
internal_format: F::INTERNAL_FORMAT,
|
||||
format: F::FORMAT,
|
||||
ty: F::TYPE,
|
||||
channel_type: F::CHANNEL_TYPE,
|
||||
pixel_type: F::PIXEL_TYPE,
|
||||
})));
|
||||
|
||||
Ok(Texture3D {
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use crate::image::format::ImageFormat;
|
||||
use crate::texture::format::PixelType;
|
||||
use crate::texture::format::TextureFormat;
|
||||
use web_sys::HtmlCanvasElement;
|
||||
use web_sys::WebGlTexture;
|
||||
|
||||
use crate::texture::pixel::Pixel;
|
||||
use crate::texture::ChannelType;
|
||||
use crate::texture::Texture2DMeta;
|
||||
use crate::webgl_ctx::WebGlContext;
|
||||
use crate::webgl_ctx::WebGlRenderingCtx;
|
||||
@@ -22,7 +22,7 @@ pub struct Texture2DArray {
|
||||
}
|
||||
|
||||
impl Texture2DArray {
|
||||
pub fn create_empty<F: ImageFormat>(
|
||||
pub fn create_empty<F: TextureFormat>(
|
||||
gl: &WebGlContext,
|
||||
// The weight of the individual textures
|
||||
width: i32,
|
||||
@@ -53,10 +53,9 @@ impl Texture2DArray {
|
||||
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
|
||||
width: width as u32,
|
||||
height: height as u32,
|
||||
internal_format: F::INTERNAL_FORMAT,
|
||||
format: F::FORMAT,
|
||||
pixel_type: F::PIXEL_TYPE,
|
||||
ty: F::TYPE,
|
||||
channel_type: F::CHANNEL_TYPE,
|
||||
format: F::FORMAT,
|
||||
})));
|
||||
|
||||
Ok(Texture2DArray {
|
||||
@@ -116,37 +115,31 @@ impl Texture2DArray {
|
||||
self.gl
|
||||
.viewport(0, 0, metadata.width as i32, metadata.height as i32);
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
let value = match metadata.channel_type {
|
||||
ChannelType::R8UI => {
|
||||
let value = match metadata.pixel_type {
|
||||
PixelType::R8U => {
|
||||
let p = <[u8; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::R16I => {
|
||||
PixelType::R16I => {
|
||||
let p = <[i16; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::R32I => {
|
||||
PixelType::R32I => {
|
||||
let p = <[i32; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::R32F => {
|
||||
PixelType::R32F => {
|
||||
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
crate::log(&format!("{:?}", p));
|
||||
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::RGB8U => {
|
||||
PixelType::RGB8U => {
|
||||
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p)?)
|
||||
}
|
||||
ChannelType::RGBA8U => {
|
||||
PixelType::RGBA8U => {
|
||||
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p)?)
|
||||
}
|
||||
_ => Err(JsValue::from_str(
|
||||
"Pixel retrieval not implemented for that texture format.",
|
||||
)),
|
||||
};
|
||||
|
||||
// Unbind the framebuffer
|
||||
|
||||
src/core/al-core/src/texture/format.rs (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
use crate::texture::pixel::Pixel;
|
||||
|
||||
pub type Bytes<'a> = std::borrow::Cow<'a, [u8]>;
|
||||
|
||||
pub trait TextureFormat {
|
||||
type P: Pixel;
|
||||
type ArrayBufferView: AsRef<js_sys::Object>;
|
||||
|
||||
const NUM_CHANNELS: usize;
|
||||
|
||||
const FORMAT: u32;
|
||||
const INTERNAL_FORMAT: i32;
|
||||
const TYPE: u32;
|
||||
|
||||
const PIXEL_TYPE: PixelType;
|
||||
|
||||
/// Creates a JS typed array which is a view into wasm's linear memory at the slice specified.
|
||||
/// This function returns a new typed array which is a view into wasm's memory. This view does not copy the underlying data.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Views into WebAssembly memory are only valid so long as the backing buffer isn't resized in JS. Once this function is called any future calls to Box::new (or malloc of any form) may cause the returned value here to be invalidated. Use with caution!
|
||||
///
|
||||
/// Additionally the returned object can be safely mutated but the input slice isn't guaranteed to be mutable.
|
||||
///
|
||||
/// Finally, the returned object is disconnected from the input slice's lifetime, so there's no guarantee that the data is read at the right time.
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str>;
|
||||
}
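As a concrete companion to the safety note on `view` above (hypothetical, not part of this diff: the helper name and the RG8 upload path are assumptions), a caller is expected to create the view and hand it to WebGL immediately, with no allocation in between that could grow and relocate the wasm linear memory:

use wasm_bindgen::JsValue;

/// Hypothetical helper: upload one tile of raw big-endian i16 samples kept
/// as two bytes per texel in an RG8 texture.
fn upload_raw_tile(
    gl: &web_sys::WebGl2RenderingContext,
    raw_be_bytes: &[u8],
    width: i32,
    height: i32,
) -> Result<(), JsValue> {
    // SAFETY: the view borrows wasm linear memory; nothing below allocates
    // before the GL call consumes it, so the backing buffer cannot move.
    let view = unsafe { js_sys::Uint8Array::view(raw_be_bytes) };
    gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_array_buffer_view(
        web_sys::WebGl2RenderingContext::TEXTURE_2D,
        0,      // mipmap level
        0,      // x offset
        0,      // y offset
        width,
        height,
        web_sys::WebGl2RenderingContext::RG,
        web_sys::WebGl2RenderingContext::UNSIGNED_BYTE,
        Some(view.as_ref()),
    )
}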
|
||||
|
||||
use crate::webgl_ctx::WebGlRenderingCtx;
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct RGB8U;
|
||||
impl TextureFormat for RGB8U {
|
||||
type P = [u8; 3];
|
||||
|
||||
const NUM_CHANNELS: usize = 3;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGB;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const PIXEL_TYPE: PixelType = PixelType::RGB8U;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
let mut decoder = jpeg::Decoder::new(raw_bytes);
|
||||
let bytes = decoder
|
||||
.decode()
|
||||
.map_err(|_| "Cannot decoder jpeg. This image may not be compressed.")?;
|
||||
|
||||
Ok(Bytes::Owned(bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct RGBA8U;
|
||||
impl TextureFormat for RGBA8U {
|
||||
type P = [u8; 4];
|
||||
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const PIXEL_TYPE: PixelType = PixelType::RGBA8U;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
let mut decoder = jpeg::Decoder::new(raw_bytes);
|
||||
let bytes = decoder
|
||||
.decode()
|
||||
.map_err(|_| "Cannot decoder png. This image may not be compressed.")?;
|
||||
|
||||
Ok(Bytes::Owned(bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R32F;
|
||||
impl TextureFormat for R32F {
|
||||
type P = [u8; 4];
|
||||
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const PIXEL_TYPE: PixelType = PixelType::R32F;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R8U;
|
||||
impl TextureFormat for R8U {
|
||||
type P = [u8; 1];
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RED;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const NUM_CHANNELS: usize = 1;
|
||||
const PIXEL_TYPE: PixelType = PixelType::R8U;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R16I;
|
||||
impl TextureFormat for R16I {
|
||||
type P = [u8; 2];
|
||||
|
||||
const NUM_CHANNELS: usize = 2;
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RG;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RG8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
|
||||
const PIXEL_TYPE: PixelType = PixelType::R16I;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
|
||||
pub struct R32I;
|
||||
impl TextureFormat for R32I {
|
||||
type P = [u8; 4];
|
||||
|
||||
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
|
||||
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
|
||||
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
|
||||
const NUM_CHANNELS: usize = 4;
|
||||
|
||||
const PIXEL_TYPE: PixelType = PixelType::R32I;
|
||||
|
||||
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
|
||||
Ok(Bytes::Borrowed(raw_bytes))
|
||||
}
|
||||
|
||||
type ArrayBufferView = js_sys::Uint8Array;
|
||||
|
||||
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
|
||||
Self::ArrayBufferView::view(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
|
||||
pub enum PixelType {
|
||||
R8U,
|
||||
R16I,
|
||||
R32I,
|
||||
R32F,
|
||||
RGB8U,
|
||||
RGBA8U,
|
||||
}
|
||||
|
||||
impl PixelType {
|
||||
pub const fn num_channels(&self) -> usize {
|
||||
match self {
|
||||
Self::RGB8U => 3,
|
||||
Self::RGBA8U => 4,
|
||||
_ => 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const NUM_CHANNELS: usize = 6;
|
||||
@@ -1,6 +1,7 @@
|
||||
pub mod array;
|
||||
pub use array::Texture2DArray;
|
||||
|
||||
pub mod format;
|
||||
pub mod pixel;
|
||||
pub use pixel::*;
|
||||
|
||||
@@ -11,7 +12,7 @@ pub use mod_3d::Texture3D;
|
||||
use web_sys::HtmlCanvasElement;
|
||||
use web_sys::WebGlTexture;
|
||||
|
||||
use crate::image::format::ChannelType;
|
||||
use crate::texture::format::PixelType;
|
||||
use crate::webgl_ctx::WebGlContext;
|
||||
use crate::webgl_ctx::WebGlRenderingCtx;
|
||||
use wasm_bindgen::prelude::*;
|
||||
@@ -24,9 +25,8 @@ pub static mut CUR_IDX_TEX_UNIT: u8 = 0;
|
||||
#[allow(dead_code)]
|
||||
pub struct Texture2DMeta {
|
||||
pub format: u32,
|
||||
pub internal_format: i32,
|
||||
pub ty: u32,
|
||||
pub channel_type: ChannelType,
|
||||
pub pixel_type: PixelType,
|
||||
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
@@ -47,13 +47,13 @@ pub enum SamplerType {
|
||||
Unsigned,
|
||||
}
|
||||
|
||||
use crate::image::format::ImageFormat;
|
||||
//use super::pixel::PixelType;
|
||||
use crate::texture::format::TextureFormat;
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::path::Path;
|
||||
use std::rc::Rc;
|
||||
impl Texture2D {
|
||||
pub fn create_from_path<P: AsRef<Path>, F: ImageFormat>(
|
||||
pub fn create_from_path<P: AsRef<Path>, F: TextureFormat>(
|
||||
gl: &WebGlContext,
|
||||
name: &'static str,
|
||||
src: &P,
|
||||
@@ -61,7 +61,6 @@ impl Texture2D {
|
||||
) -> Result<Texture2D, JsValue> {
|
||||
let image = HtmlImageElement::new().unwrap_abort();
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
let texture = gl.create_texture();
|
||||
|
||||
let onerror = {
|
||||
@@ -76,13 +75,11 @@ impl Texture2D {
|
||||
let metadata = Rc::new(RefCell::new(Texture2DMeta {
|
||||
width,
|
||||
height,
|
||||
internal_format: F::INTERNAL_FORMAT,
|
||||
format: F::FORMAT,
|
||||
ty: F::TYPE,
|
||||
channel_type: F::CHANNEL_TYPE,
|
||||
pixel_type: F::PIXEL_TYPE,
|
||||
}));
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
let onload = {
|
||||
let image = image.clone();
|
||||
let gl = gl.clone();
|
||||
@@ -132,7 +129,6 @@ impl Texture2D {
|
||||
|
||||
let gl = gl.clone();
|
||||
Ok(Texture2D {
|
||||
#[cfg(feature = "webgl2")]
|
||||
texture,
|
||||
|
||||
gl,
|
||||
@@ -141,7 +137,7 @@ impl Texture2D {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn create_from_raw_pixels<F: ImageFormat>(
|
||||
pub fn create_from_raw_pixels<F: TextureFormat>(
|
||||
gl: &WebGlContext,
|
||||
width: i32,
|
||||
height: i32,
|
||||
@@ -166,12 +162,12 @@ impl Texture2D {
|
||||
Ok(texture)
|
||||
}
|
||||
|
||||
pub fn create_from_raw_bytes<F: ImageFormat>(
|
||||
pub fn create_from_raw_bytes<F: TextureFormat>(
|
||||
gl: &WebGlContext,
|
||||
width: i32,
|
||||
height: i32,
|
||||
tex_params: &'static [(u32, u32)],
|
||||
bytes: Option<&[u8]>,
|
||||
bytes: &[u8],
|
||||
) -> Result<Texture2D, JsValue> {
|
||||
let texture = gl.create_texture();
|
||||
|
||||
@@ -188,7 +184,14 @@ impl Texture2D {
|
||||
width,
|
||||
height,
|
||||
);
|
||||
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
|
||||
let view = unsafe {
|
||||
let len = bytes.len() / (std::mem::size_of::<<F::P as Pixel>::Item>());
|
||||
let pixels =
|
||||
std::slice::from_raw_parts(bytes.as_ptr() as *const <F::P as Pixel>::Item, len);
|
||||
F::view(pixels)
|
||||
};
|
||||
|
||||
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_array_buffer_view(
|
||||
WebGlRenderingCtx::TEXTURE_2D,
|
||||
0,
|
||||
0,
|
||||
@@ -197,7 +200,7 @@ impl Texture2D {
|
||||
height,
|
||||
F::FORMAT,
|
||||
F::TYPE,
|
||||
bytes,
|
||||
Some(view.as_ref()),
|
||||
)
|
||||
.expect("Texture 2D");
|
||||
|
||||
@@ -205,10 +208,9 @@ impl Texture2D {
|
||||
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
|
||||
width: width as u32,
|
||||
height: height as u32,
|
||||
internal_format: F::INTERNAL_FORMAT,
|
||||
format: F::FORMAT,
|
||||
ty: F::TYPE,
|
||||
channel_type: F::CHANNEL_TYPE,
|
||||
pixel_type: F::PIXEL_TYPE,
|
||||
})));
|
||||
|
||||
Ok(Texture2D {
|
||||
@@ -220,7 +222,7 @@ impl Texture2D {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn create_empty_with_format<F: ImageFormat>(
|
||||
pub fn create_empty_with_format<F: TextureFormat>(
|
||||
gl: &WebGlContext,
|
||||
width: i32,
|
||||
height: i32,
|
||||
@@ -246,16 +248,14 @@ impl Texture2D {
|
||||
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
|
||||
width: width as u32,
|
||||
height: height as u32,
|
||||
internal_format: F::INTERNAL_FORMAT,
|
||||
format: F::FORMAT,
|
||||
ty: F::TYPE,
|
||||
channel_type: F::CHANNEL_TYPE,
|
||||
pixel_type: F::PIXEL_TYPE,
|
||||
})));
|
||||
|
||||
Ok(Texture2D {
|
||||
texture,
|
||||
|
||||
gl,
|
||||
|
||||
metadata,
|
||||
})
|
||||
}
|
||||
@@ -335,31 +335,28 @@ impl Texture2D {
|
||||
self.gl
|
||||
.viewport(0, 0, metadata.width as i32, metadata.height as i32);
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
let value = match metadata.channel_type {
|
||||
ChannelType::R8UI => {
|
||||
let value = match metadata.pixel_type {
|
||||
PixelType::R8U => {
|
||||
let p = <[u8; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::R16I => {
|
||||
PixelType::R16I => {
|
||||
let p = <[i16; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::R32I => {
|
||||
PixelType::R32I => {
|
||||
let p = <[i32; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::R32F => {
|
||||
PixelType::R32F => {
|
||||
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
|
||||
crate::log(&format!("{:?}", p));
|
||||
|
||||
Ok(serde_wasm_bindgen::to_value(&p[0])?)
|
||||
}
|
||||
ChannelType::RGB8U => {
|
||||
PixelType::RGB8U => {
|
||||
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p)?)
|
||||
}
|
||||
ChannelType::RGBA8U => {
|
||||
PixelType::RGBA8U => {
|
||||
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
|
||||
Ok(serde_wasm_bindgen::to_value(&p)?)
|
||||
}
|
||||
|
||||
@@ -21,70 +21,6 @@ pub trait Pixel:
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue>;
|
||||
}
|
||||
|
||||
impl Pixel for [f32; 4] {
|
||||
type Item = f32;
|
||||
type Container = ArrayF32;
|
||||
const BLACK: Self = [f32::NAN; 4];
|
||||
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
|
||||
let pixels = js_sys::Float32Array::new_with_length(4);
|
||||
#[cfg(feature = "webgl2")]
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RGBA32F,
|
||||
WebGlRenderingCtx::FLOAT,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
#[cfg(feature = "webgl1")]
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RGBA,
|
||||
WebGlRenderingCtx::FLOAT,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
|
||||
let pixels = pixels.to_vec();
|
||||
Ok([pixels[0], pixels[1], pixels[2], pixels[3]])
|
||||
}
|
||||
}
|
||||
impl Pixel for [f32; 3] {
|
||||
type Item = f32;
|
||||
type Container = ArrayF32;
|
||||
const BLACK: Self = [f32::NAN; 3];
|
||||
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
|
||||
let pixels = js_sys::Float32Array::new_with_length(3);
|
||||
#[cfg(feature = "webgl2")]
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RGB32F,
|
||||
WebGlRenderingCtx::FLOAT,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
#[cfg(feature = "webgl1")]
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RGB,
|
||||
WebGlRenderingCtx::FLOAT,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
|
||||
let pixels = pixels.to_vec();
|
||||
Ok([pixels[0], pixels[1], pixels[2]])
|
||||
}
|
||||
}
|
||||
impl Pixel for [f32; 1] {
|
||||
type Item = f32;
|
||||
type Container = ArrayF32;
|
||||
@@ -110,38 +46,7 @@ impl Pixel for [f32; 1] {
|
||||
])])
|
||||
}
|
||||
}
|
||||
/*use crate::image::ArrayF64;
|
||||
impl Pixel for [f64; 1] {
|
||||
type Item = f64;
|
||||
type Container = ArrayF64;
|
||||
const BLACK: Self = [std::f64::NAN];
|
||||
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
|
||||
let pixels = js_sys::Float32Array::new_with_length(1);
|
||||
#[cfg(feature = "webgl2")]
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RED,
|
||||
WebGlRenderingCtx::FLOAT,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
#[cfg(feature = "webgl1")]
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::LUMINANCE_ALPHA,
|
||||
WebGlRenderingCtx::FLOAT,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
|
||||
Ok([pixels.to_vec()[0] as f64])
|
||||
}
|
||||
}*/
|
||||
impl Pixel for [u8; 4] {
|
||||
type Item = u8;
|
||||
type Container = ArrayU8;
|
||||
@@ -183,7 +88,27 @@ impl Pixel for [u8; 3] {
|
||||
Ok([pixels[0], pixels[1], pixels[2]])
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
impl Pixel for [u8; 2] {
|
||||
type Item = u8;
|
||||
type Container = ArrayU8;
|
||||
const BLACK: Self = [0, 0];
|
||||
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
|
||||
let pixels = js_sys::Uint8Array::new_with_length(2);
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RG,
|
||||
WebGlRenderingCtx::UNSIGNED_BYTE,
|
||||
Some(&pixels),
|
||||
)?;
|
||||
let pixels = pixels.to_vec();
|
||||
Ok([pixels[0], pixels[1]])
|
||||
}
|
||||
}
|
||||
|
||||
impl Pixel for [u8; 1] {
|
||||
type Item = u8;
|
||||
type Container = ArrayU8;
|
||||
@@ -204,45 +129,50 @@ impl Pixel for [u8; 1] {
|
||||
Ok([pixels.to_vec()[0]])
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
|
||||
impl Pixel for [i16; 1] {
|
||||
type Item = i16;
|
||||
type Container = ArrayI16;
|
||||
const BLACK: Self = [i16::MIN];
|
||||
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
|
||||
let pixels = js_sys::Int16Array::new_with_length(1);
|
||||
let p = js_sys::Uint8Array::new_with_length(2);
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RED_INTEGER,
|
||||
WebGlRenderingCtx::SHORT,
|
||||
Some(&pixels),
|
||||
WebGlRenderingCtx::RG,
|
||||
WebGlRenderingCtx::UNSIGNED_BYTE,
|
||||
Some(&p),
|
||||
)?;
|
||||
|
||||
Ok([pixels.to_vec()[0]])
|
||||
Ok([i16::from_le_bytes([p.at(0).unwrap(), p.at(1).unwrap()])])
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
|
||||
impl Pixel for [i32; 1] {
|
||||
type Item = i32;
|
||||
type Container = ArrayI32;
|
||||
const BLACK: Self = [i32::MIN];
|
||||
|
||||
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
|
||||
let pixels = js_sys::Int32Array::new_with_length(1);
|
||||
let p = js_sys::Uint8Array::new_with_length(4);
|
||||
gl.read_pixels_with_opt_array_buffer_view(
|
||||
x,
|
||||
y,
|
||||
1,
|
||||
1,
|
||||
WebGlRenderingCtx::RED_INTEGER,
|
||||
WebGlRenderingCtx::INT,
|
||||
Some(&pixels),
|
||||
WebGlRenderingCtx::RGBA,
|
||||
WebGlRenderingCtx::UNSIGNED_BYTE,
|
||||
Some(&p),
|
||||
)?;
|
||||
|
||||
Ok([pixels.to_vec()[0]])
|
||||
Ok([i32::from_le_bytes([
|
||||
p.at(0).unwrap(),
|
||||
p.at(1).unwrap(),
|
||||
p.at(2).unwrap(),
|
||||
p.at(3).unwrap(),
|
||||
])])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,10 +4,7 @@ use wasm_bindgen::JsCast;
|
||||
use wasm_bindgen::JsValue;
|
||||
use web_sys::HtmlElement;
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
pub type WebGlRenderingCtx = web_sys::WebGl2RenderingContext;
|
||||
#[cfg(feature = "webgl1")]
|
||||
pub type WebGlRenderingCtx = web_sys::WebGlRenderingContext;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct WebGlContext {
|
||||
|
||||
@@ -56,6 +56,8 @@ fn read_shader<P: AsRef<std::path::Path>>(path: P) -> std::io::Result<String> {
|
||||
let incl_file_name_rel = incl_file_names[1];
|
||||
let incl_file_name = path.parent().unwrap().join(incl_file_name_rel);
|
||||
|
||||
println!("{}", incl_file_name.to_string_lossy());
|
||||
|
||||
read_shader(incl_file_name.to_str().unwrap()).unwrap()
|
||||
} else {
|
||||
l
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
use crate::renderable::image::Image;
|
||||
use crate::renderable::ImageLayer;
|
||||
use crate::tile_fetcher::HiPSLocalFiles;
|
||||
use al_core::image::fits::FitsImage;
|
||||
use al_core::image::ImageType;
|
||||
use al_core::texture::format::{R16I, R32F, R32I, R8U, RGBA8U};
|
||||
use fitsrs::WCS;
|
||||
use std::io::Cursor;
|
||||
|
||||
use crate::math::angle::ToAngle;
|
||||
use crate::renderable::hips::HiPS;
|
||||
@@ -21,7 +27,6 @@ use crate::{
|
||||
time::DeltaTime,
|
||||
};
|
||||
use al_api::moc::MOCOptions;
|
||||
use wcs::WCS;
|
||||
|
||||
use wasm_bindgen::prelude::*;
|
||||
|
||||
@@ -34,7 +39,6 @@ use al_api::{
|
||||
grid::GridCfg,
|
||||
hips::{HiPSCfg, ImageMetadata},
|
||||
};
|
||||
use fitsrs::{fits::AsyncFits, hdu::extension::AsyncXtensionHDU};
|
||||
|
||||
use web_sys::{HtmlElement, WebGl2RenderingContext};
|
||||
|
||||
@@ -608,69 +612,25 @@ impl App {
    //let _depth = tile.cell().depth();
    // do not perform tex_sub costly GPU calls while the camera is zooming
    if tile.cell().is_root() || included_in_coverage {
        //let is_missing = tile.missing();
        /*self.tile_fetcher.notify_tile(
            &tile,
            true,
            false,
            &mut self.downloader,
        );*/

        /*let image = if is_missing {
            // Otherwise we push nothing, it is probably the case where:
            // - a request error occurred on a valid tile
            // - the tile is not present, e.g. chandra HiPS have not the 0, 1 and 2 order tiles
            None
        } else {
            Some(image)
        };*/
        use al_core::image::ImageType;
        use fitsrs::fits::Fits;
        use std::io::Cursor;
        //if let Some(image) = image.as_ref() {
        if let Some(ImageType::FitsImage {
        if let Some(ImageType::FitsRawBytes {
            raw_bytes: raw_bytes_buf,
            ..
        }) = &*tile.image.borrow()
        {
            // check if the metadata has not been set
            if !cfg.fits_metadata {
                let num_bytes = raw_bytes_buf.length() as usize;
                let mut raw_bytes = vec![0; num_bytes];
                raw_bytes_buf.copy_to(&mut raw_bytes[..]);
            if hips.get_fits_params().is_none() {
                let raw_bytes = raw_bytes_buf.to_vec();

                let mut bytes_reader = Cursor::new(raw_bytes.as_slice());
                let Fits { hdu } = Fits::from_reader(&mut bytes_reader)
                    .map_err(|_| JsValue::from_str("Parsing fits error"))?;

                let header = hdu.get_header();
                let bscale =
                    if let Some(fitsrs::card::Value::Float(bscale)) =
                        header.get(b"BSCALE ")
                    {
                        *bscale as f32
                    } else {
                        1.0
                    };
                let bzero = if let Some(fitsrs::card::Value::Float(bzero)) =
                    header.get(b"BZERO ")
                {
                    *bzero as f32
                } else {
                    0.0
                };
                let blank = if let Some(fitsrs::card::Value::Float(blank)) =
                    header.get(b"BLANK ")
                {
                    *blank as f32
                } else {
                    f32::NAN
                };

                cfg.set_fits_metadata(bscale, bzero, blank);
                let FitsImage {
                    bscale,
                    bzero,
                    blank,
                    ..
                } = FitsImage::from_raw_bytes(raw_bytes.as_slice())?[0];
                hips.set_fits_params(bscale, bzero, blank);
            }
        };
        //}

        let image = tile.image.clone();
        if let Some(img) = &*image.borrow() {
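For reference, the keyword defaulting used above (BSCALE falls back to 1.0, BZERO to 0.0, BLANK to NaN) can be sketched on its own. The helper below is hypothetical: it works on a plain map of already-parsed float cards and only stands in for the fitsrs header lookups shown above.

use std::collections::HashMap;

// Return (bscale, bzero, blank) with the same defaults as the code above.
// `float_cards` is an assumed, pre-parsed view of the FITS header, not a fitsrs type.
fn fits_scaling_params(float_cards: &HashMap<&str, f64>) -> (f32, f32, f32) {
    let get = |key: &str, default: f32| float_cards.get(key).map(|v| *v as f32).unwrap_or(default);
    (get("BSCALE", 1.0), get("BZERO", 0.0), get("BLANK", f32::NAN))
}
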
@@ -721,17 +681,6 @@ impl App {
                }
            }
        }
        Resource::PixelMetadata(metadata) => {
            if let Some(hips) = self.layers.get_mut_hips_from_cdid(&metadata.hips_cdid) {
                let cfg = hips.get_config_mut();

                if let Some(metadata) = &*metadata.value.borrow() {
                    cfg.blank = metadata.blank;
                    cfg.offset = metadata.offset;
                    cfg.scale = metadata.scale;
                }
            }
        }
        Resource::Moc(moc) => {
            let moc_hips_cdid = moc.get_hips_cdid();
            //let url = &moc_url[..moc_url.find("/Moc.fits").unwrap_abort()];

@@ -974,255 +923,123 @@ impl App {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn add_image_from_blob_and_wcs(
|
||||
pub(crate) fn add_rgba_image(
|
||||
&mut self,
|
||||
layer: String,
|
||||
stream: web_sys::ReadableStream,
|
||||
bytes: &[u8],
|
||||
wcs: WCS,
|
||||
cfg: ImageMetadata,
|
||||
) -> Result<js_sys::Promise, JsValue> {
|
||||
let gl = self.gl.clone();
|
||||
|
||||
let img_sender = self.img_send.clone();
|
||||
let ack_img_recv = self.ack_img_recv.clone();
|
||||
// Stop the current inertia
|
||||
self.inertia = None;
|
||||
// And disable it while the fits has not been loaded
|
||||
let disable_inertia = self.disable_inertia.clone();
|
||||
*(disable_inertia.borrow_mut()) = true;
|
||||
|
||||
let camera_coo_sys = self.camera.get_coo_system();
|
||||
|
||||
let fut = async move {
|
||||
use crate::renderable::image::Image;
|
||||
use futures::future::Either;
|
||||
use futures::TryStreamExt;
|
||||
use js_sys::Uint8Array;
|
||||
use wasm_streams::ReadableStream;
|
||||
match Image::from_rgba_bytes(&gl, bytes, wcs, camera_coo_sys) {
|
||||
Ok(image) => {
|
||||
let layer = ImageLayer {
|
||||
images: vec![image],
|
||||
id: layer.clone(),
|
||||
layer,
|
||||
meta: cfg,
|
||||
};
|
||||
|
||||
let body = ReadableStream::from_raw(stream.dyn_into()?);
|
||||
let params = layer.get_params();
|
||||
|
||||
// Convert the JS ReadableStream to a Rust stream
|
||||
let bytes_reader = match body.try_into_async_read() {
|
||||
Ok(async_read) => Either::Left(async_read),
|
||||
Err((_err, body)) => Either::Right(
|
||||
body.into_stream()
|
||||
.map_ok(|js_value| {
|
||||
js_value.dyn_into::<Uint8Array>().unwrap_throw().to_vec()
|
||||
})
|
||||
.map_err(|_js_error| std::io::Error::other("failed to read"))
|
||||
.into_async_read(),
|
||||
),
|
||||
};
|
||||
use al_core::image::format::RGBA8U;
|
||||
match Image::from_reader_and_wcs::<_, RGBA8U>(
|
||||
&gl,
|
||||
bytes_reader,
|
||||
wcs,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
camera_coo_sys,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(image) => {
|
||||
let img = ImageLayer {
|
||||
images: vec![image],
|
||||
id: layer.clone(),
|
||||
layer,
|
||||
meta: cfg,
|
||||
};
|
||||
self.layers.add_image(
|
||||
layer,
|
||||
&mut self.camera,
|
||||
&self.projection,
|
||||
&mut self.tile_fetcher,
|
||||
)?;
|
||||
|
||||
img_sender.send(img).await.unwrap();
|
||||
self.request_redraw = true;
|
||||
|
||||
// Wait for the ack here
|
||||
let image_params = ack_img_recv
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| JsValue::from_str("Problem receiving fits"))?;
|
||||
|
||||
serde_wasm_bindgen::to_value(&image_params).map_err(|e| e.into())
|
||||
}
|
||||
Err(error) => Err(error),
|
||||
let promise = js_sys::Promise::resolve(&serde_wasm_bindgen::to_value(¶ms)?);
|
||||
Ok(promise)
|
||||
}
|
||||
};
|
||||
|
||||
let reenable_inertia = Closure::new(move || {
|
||||
// renable inertia again
|
||||
*(disable_inertia.borrow_mut()) = false;
|
||||
});
|
||||
|
||||
let promise = wasm_bindgen_futures::future_to_promise(fut)
|
||||
// Reenable inertia independantly from whether the
|
||||
// fits has been correctly parsed or not
|
||||
.finally(&reenable_inertia);
|
||||
|
||||
// forget the closure, it is not very proper to do this as
|
||||
// it won't be deallocated
|
||||
reenable_inertia.forget();
|
||||
|
||||
Ok(promise)
|
||||
Err(error) => Err(error),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn add_image_fits(
|
||||
pub(crate) fn add_fits_image(
|
||||
&mut self,
|
||||
stream: web_sys::ReadableStream,
|
||||
mut bytes: &[u8],
|
||||
meta: ImageMetadata,
|
||||
layer: String,
|
||||
) -> Result<js_sys::Promise, JsValue> {
|
||||
let gl = self.gl.clone();
|
||||
|
||||
let fits_sender = self.img_send.clone();
|
||||
let ack_fits_recv = self.ack_img_recv.clone();
|
||||
// Stop the current inertia
|
||||
self.inertia = None;
|
||||
// And disable it while the fits has not been loaded
|
||||
let disable_inertia = self.disable_inertia.clone();
|
||||
*(disable_inertia.borrow_mut()) = true;
|
||||
|
||||
let camera_coo_sys = self.camera.get_coo_system();
|
||||
|
||||
let fut = async move {
|
||||
use crate::renderable::image::Image;
|
||||
use futures::future::Either;
|
||||
use futures::TryStreamExt;
|
||||
use js_sys::Uint8Array;
|
||||
use wasm_streams::ReadableStream;
|
||||
// FIXME: this is done to prevent the view inerting after being unblocked
|
||||
self.set_inertia(false);
|
||||
|
||||
// Get the response's body as a JS ReadableStream
|
||||
let body = ReadableStream::from_raw(stream.dyn_into()?);
|
||||
let gz = fitsrs::gz::GzReader::new(Cursor::new(bytes))
|
||||
.map_err(|_| JsValue::from_str("Error creating gz wrapper"))?;
|
||||
|
||||
// Convert the JS ReadableStream to a Rust stream
|
||||
let bytes_reader = match body.try_into_async_read() {
|
||||
Ok(async_read) => Either::Left(async_read),
|
||||
Err((_err, body)) => Either::Right(
|
||||
body.into_stream()
|
||||
.map_ok(|js_value| {
|
||||
js_value.dyn_into::<Uint8Array>().unwrap_throw().to_vec()
|
||||
})
|
||||
.map_err(|_js_error| std::io::Error::other("failed to read"))
|
||||
.into_async_read(),
|
||||
),
|
||||
};
|
||||
|
||||
let mut reader = BufReader::new(bytes_reader);
|
||||
|
||||
let AsyncFits { mut hdu } = AsyncFits::from_reader(&mut reader)
|
||||
.await
|
||||
.map_err(|e| JsValue::from_str(&format!("Fits file parsing: reason: {}", e)))?;
|
||||
|
||||
let mut hdu_ext_idx = 0;
|
||||
let mut images = vec![];
|
||||
|
||||
match Image::from_fits_hdu_async(&gl, &mut hdu.0, camera_coo_sys).await {
|
||||
Ok(image) => {
|
||||
images.push(image);
|
||||
|
||||
let mut hdu_ext = hdu.next().await;
|
||||
|
||||
// Continue parsing the file extensions here
|
||||
while let Ok(Some(mut xhdu)) = hdu_ext {
|
||||
match &mut xhdu {
|
||||
AsyncXtensionHDU::Image(xhdu_img) => {
|
||||
match Image::from_fits_hdu_async(&gl, xhdu_img, camera_coo_sys)
|
||||
.await
|
||||
{
|
||||
Ok(image) => {
|
||||
images.push(image);
|
||||
}
|
||||
Err(error) => {
|
||||
al_core::log::console_warn(format!("The extension {hdu_ext_idx} has not been parsed, reason:")
|
||||
);
|
||||
|
||||
al_core::log::console_warn(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
al_core::log::console_warn(format!("The extension {hdu_ext_idx} is a BinTable/AsciiTable and is thus discarded")
|
||||
);
|
||||
}
|
||||
let parse_fits_images_from_bytes = |raw_bytes: &[u8]| -> Result<Vec<Image>, JsValue> {
|
||||
Ok(FitsImage::from_raw_bytes(raw_bytes)?
|
||||
.into_iter()
|
||||
.filter_map(
|
||||
|FitsImage {
|
||||
bitpix,
|
||||
bscale,
|
||||
bzero,
|
||||
blank,
|
||||
wcs,
|
||||
raw_bytes,
|
||||
..
|
||||
}| {
|
||||
if let Some(wcs) = wcs {
|
||||
let image = Image::from_fits_hdu(
|
||||
&gl,
|
||||
wcs,
|
||||
bitpix,
|
||||
raw_bytes,
|
||||
bscale,
|
||||
bzero,
|
||||
blank,
|
||||
camera_coo_sys,
|
||||
)
|
||||
.ok()?;
|
||||
Some(image)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
||||
hdu_ext_idx += 1;
|
||||
|
||||
hdu_ext = xhdu.next().await;
|
||||
}
|
||||
}
|
||||
Err(error) => {
|
||||
al_core::log::console_warn(error);
|
||||
|
||||
let mut hdu_ext = hdu.next().await;
|
||||
|
||||
while let Ok(Some(mut xhdu)) = hdu_ext {
|
||||
match &mut xhdu {
|
||||
AsyncXtensionHDU::Image(xhdu_img) => {
|
||||
match Image::from_fits_hdu_async(&gl, xhdu_img, camera_coo_sys)
|
||||
.await
|
||||
{
|
||||
Ok(image) => {
|
||||
images.push(image);
|
||||
}
|
||||
Err(error) => {
|
||||
al_core::log::console_warn(format!("The extension {hdu_ext_idx} has not been parsed, reason:")
|
||||
);
|
||||
|
||||
al_core::log::console_warn(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
al_core::log::console_warn(format!("The extension {hdu_ext_idx} is a BinTable/AsciiTable and is thus discarded")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
hdu_ext_idx += 1;
|
||||
|
||||
hdu_ext = xhdu.next().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if images.is_empty() {
|
||||
Err(JsValue::from_str("no images have been parsed"))
|
||||
} else {
|
||||
let fits = ImageLayer {
|
||||
images,
|
||||
id: layer.clone(),
|
||||
|
||||
layer,
|
||||
meta,
|
||||
};
|
||||
|
||||
fits_sender.send(fits).await.unwrap();
|
||||
|
||||
// Wait for the ack here
|
||||
let image_params = ack_fits_recv
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| JsValue::from_str("Problem receiving fits"))?;
|
||||
|
||||
serde_wasm_bindgen::to_value(&image_params).map_err(|e| e.into())
|
||||
}
|
||||
},
|
||||
)
|
||||
.collect::<Vec<_>>())
|
||||
};
|
||||
|
||||
let reenable_inertia = Closure::new(move || {
|
||||
// renable inertia again
|
||||
*(disable_inertia.borrow_mut()) = false;
|
||||
});
|
||||
let images = match gz {
|
||||
fitsrs::gz::GzReader::GzReader(bytes) => parse_fits_images_from_bytes(bytes.get_ref())?,
|
||||
fitsrs::gz::GzReader::Reader(bytes) => parse_fits_images_from_bytes(bytes.get_ref())?,
|
||||
};
|
||||
|
||||
let promise = wasm_bindgen_futures::future_to_promise(fut)
|
||||
// Reenable inertia independantly from whether the
|
||||
// fits has been correctly parsed or not
|
||||
.finally(&reenable_inertia);
|
||||
if images.is_empty() {
|
||||
Err(JsValue::from_str("no images have been parsed"))
|
||||
} else {
|
||||
let layer = ImageLayer {
|
||||
images,
|
||||
id: layer.clone(),
|
||||
|
||||
// forget the closure, it is not very proper to do this as
|
||||
// it won't be deallocated
|
||||
reenable_inertia.forget();
|
||||
layer,
|
||||
meta,
|
||||
};
|
||||
|
||||
Ok(promise)
|
||||
let params = layer.get_params();
|
||||
self.layers.add_image(
|
||||
layer,
|
||||
&mut self.camera,
|
||||
&self.projection,
|
||||
&mut self.tile_fetcher,
|
||||
)?;
|
||||
self.request_redraw = true;
|
||||
|
||||
let promise = js_sys::Promise::resolve(&serde_wasm_bindgen::to_value(¶ms)?);
|
||||
Ok(promise)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_layer_cfg(&self, layer: &str) -> Result<ImageMetadata, JsValue> {
|
||||
|
||||
@@ -159,43 +159,8 @@ impl Query for Allsky {
}

/* ---------------------------------- */
pub struct PixelMetadata {
    pub format: ImageFormatType,
    // The root url of the HiPS
    pub hips_cdid: CreatorDid,
    // The total url of the query
    pub url: Url,
    pub id: QueryId,
}

impl PixelMetadata {
    pub fn new(cfg: &HiPSConfig) -> Self {
        let hips_cdid = cfg.get_creator_did().to_string();
        let format = cfg.get_format();
        let ext = format.get_ext_file();

        let url = format!("{}/Norder3/Allsky.{}", cfg.get_root_url(), ext);

        let id = format!("{}Allsky{}", hips_cdid, ext);
        PixelMetadata {
            hips_cdid,
            url,
            format,
            id,
        }
    }
}

use super::request::blank::PixelMetadataRequest;
impl Query for PixelMetadata {
    type Request = PixelMetadataRequest;

    fn id(&self) -> &QueryId {
        &self.id
    }
}
use al_api::moc::MOCOptions;
/* ---------------------------------- */

pub struct Moc {
    // The total url of the query
    pub url: Url,

@@ -2,10 +2,10 @@ use std::io::Cursor;
|
||||
|
||||
use crate::downloader::query;
|
||||
use crate::renderable::CreatorDid;
|
||||
use al_core::image::format::ChannelType;
|
||||
use al_core::image::fits::FitsImage;
|
||||
use al_core::image::ImageType;
|
||||
|
||||
use fitsrs::{fits::Fits, hdu::data::InMemData};
|
||||
use al_core::texture::format::PixelType;
|
||||
use fitsrs::hdu::header::Bitpix;
|
||||
|
||||
use super::{Request, RequestType};
|
||||
use crate::downloader::QueryId;
|
||||
@@ -78,12 +78,12 @@ impl From<query::Allsky> for AllskyRequest {
|
||||
} = query;
|
||||
|
||||
//let depth_tile = crate::math::utils::log_2_unchecked(texture_size / tile_size) as u8;
|
||||
let channel = format.get_channel();
|
||||
let channel = format.get_pixel_format();
|
||||
let url_clone = url.clone();
|
||||
|
||||
let request = Request::new(async move {
|
||||
match channel {
|
||||
ChannelType::RGB8U => {
|
||||
PixelType::RGB8U => {
|
||||
let allsky = query_allsky(&url_clone, credentials).await?;
|
||||
|
||||
let allsky_tiles =
|
||||
@@ -104,7 +104,7 @@ impl From<query::Allsky> for AllskyRequest {
|
||||
|
||||
Ok(allsky_tiles)
|
||||
}
|
||||
ChannelType::RGBA8U => {
|
||||
PixelType::RGBA8U => {
|
||||
let allsky = query_allsky(&url_clone, credentials).await?;
|
||||
|
||||
let allsky_tiles = handle_allsky_file(allsky, allsky_tile_size, tile_size)?
|
||||
@@ -132,61 +132,66 @@ impl From<query::Allsky> for AllskyRequest {
|
||||
// Convert the JS ReadableStream to a Rust stream
|
||||
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;*/
|
||||
|
||||
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let bytes_buffer = js_sys::Uint8Array::new(&array_buffer);
|
||||
let buf = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
|
||||
|
||||
let num_bytes = bytes_buffer.length() as usize;
|
||||
let mut raw_bytes = vec![0; num_bytes];
|
||||
bytes_buffer.copy_to(&mut raw_bytes[..]);
|
||||
let mut reader = Cursor::new(&raw_bytes[..]);
|
||||
let Fits { hdu } = Fits::from_reader(&mut reader)
|
||||
.map_err(|_| JsValue::from_str("Parsing fits error of allsky"))?;
|
||||
|
||||
let data = hdu.get_data();
|
||||
|
||||
match data {
|
||||
InMemData::U8(data) => {
|
||||
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
|
||||
let FitsImage {
|
||||
raw_bytes, bitpix, ..
|
||||
} = FitsImage::from_raw_bytes(raw_bytes.as_slice())?[0];
|
||||
match bitpix {
|
||||
Bitpix::U8 => {
|
||||
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
|
||||
.map(|image| ImageType::RawR8ui { image })
|
||||
.collect())
|
||||
}
|
||||
InMemData::I16(data) => {
|
||||
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
|
||||
Bitpix::I16 => {
|
||||
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
|
||||
.map(|image| ImageType::RawR16i { image })
|
||||
.collect())
|
||||
}
|
||||
InMemData::I32(data) => {
|
||||
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
|
||||
Bitpix::I32 => {
|
||||
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
|
||||
.map(|image| ImageType::RawR32i { image })
|
||||
.collect())
|
||||
}
|
||||
                InMemData::I64(data) => {
                    let data = data.iter().map(|v| *v as i32).collect::<Vec<_>>();
                    Ok(handle_allsky_fits(&data, tile_size, allsky_tile_size)?
                        .map(|image| ImageType::RawR32i { image })
                        .collect())
                }
                InMemData::F32(data) => {
                Bitpix::I64 => {
                    let data = unsafe {
                        std::slice::from_raw_parts(
                            raw_bytes.as_ptr() as *const i64,
                            raw_bytes.len() / 8,
                        )
                    };
                    let data = data.iter().map(|v| *v as i32).collect::<Vec<_>>();
                    let raw_bytes = unsafe {
                        std::slice::from_raw_parts(
                            data.as_ptr() as *const u8,
                            data.len() * 4,
                        )
                    };
                    Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
                    Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
                        .map(|image| ImageType::RawR32i { image })
                        .collect())
                }
                Bitpix::F32 => {
                    Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
                        .map(|image| ImageType::RawRgba8u { image })
                        .collect())
                }
                InMemData::F64(data) => {
                    let data = data.iter().map(|v| *v as f32).collect::<Vec<_>>();
                Bitpix::F64 => {
                    let data = unsafe {
                        std::slice::from_raw_parts(
                            raw_bytes.as_ptr() as *const f64,
                            raw_bytes.len() / 8,
                        )
                    };
                    let data = data.iter().map(|v| *v as f32).collect::<Vec<_>>();
                    let raw_bytes = unsafe {
                        std::slice::from_raw_parts(
                            data.as_ptr() as *const u8,
                            data.len() * 4,
                        )
                    };

                    Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
                    Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
                        .map(|image| ImageType::RawRgba8u { image })
                        .collect())
                }

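The Bitpix::I64 and Bitpix::F64 branches above narrow 8-byte samples to 4-byte ones through unsafe pointer casts. A possible safe equivalent is sketched below for illustration only (the helper is not part of the codebase); from_ne_bytes and to_ne_bytes mirror the native-endian reinterpretation that the raw-pointer slices perform.

// Narrow native-endian i64 samples to i32 and re-emit them as raw bytes,
// without unsafe code.
fn narrow_i64_bytes_to_i32_bytes(raw_bytes: &[u8]) -> Vec<u8> {
    raw_bytes
        .chunks_exact(8)
        .flat_map(|chunk| {
            let v = i64::from_ne_bytes(chunk.try_into().unwrap());
            (v as i32).to_ne_bytes()
        })
        .collect()
}
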
@@ -206,9 +211,9 @@ impl From<query::Allsky> for AllskyRequest {
|
||||
}
|
||||
}
|
||||
|
||||
use al_core::image::format::ImageFormat;
|
||||
use al_core::image::raw::ImageBufferView;
|
||||
fn handle_allsky_file<F: ImageFormat>(
|
||||
use al_core::texture::format::TextureFormat;
|
||||
fn handle_allsky_file<F: TextureFormat>(
|
||||
image: ImageBuffer<F>,
|
||||
allsky_tile_size: i32,
|
||||
tile_size: i32,
|
||||
@@ -217,11 +222,8 @@ fn handle_allsky_file<F: ImageFormat>(
|
||||
|
||||
let mut src_idx = 0;
|
||||
let tiles = (0..12).map(move |_| {
|
||||
let mut base_tile = ImageBuffer::<F>::allocate(
|
||||
&<F as ImageFormat>::P::BLACK,
|
||||
allsky_tile_size,
|
||||
allsky_tile_size,
|
||||
);
|
||||
let mut base_tile =
|
||||
ImageBuffer::<F>::allocate(&F::P::BLACK, allsky_tile_size, allsky_tile_size);
|
||||
for idx_tile in 0..64 {
|
||||
let (x, y) = crate::utils::unmortonize(idx_tile as u64);
|
||||
let dx = x * (d3_tile_allsky_size as u32);
|
||||
@@ -253,8 +255,8 @@ fn handle_allsky_file<F: ImageFormat>(
|
||||
Ok(tiles)
|
||||
}
|
||||
|
||||
fn handle_allsky_fits<F: ImageFormat>(
|
||||
image: &[<<F as ImageFormat>::P as Pixel>::Item],
|
||||
fn handle_allsky_fits<F: TextureFormat>(
|
||||
image: &[<F::P as Pixel>::Item],
|
||||
|
||||
tile_size: i32,
|
||||
allsky_tile_size: i32,
|
||||
@@ -292,7 +294,7 @@ fn handle_allsky_fits<F: ImageFormat>(
|
||||
Ok(allsky_tiles_iter)
|
||||
}
|
||||
|
||||
use al_core::image::format::RGBA8U;
|
||||
use al_core::texture::format::RGBA8U;
|
||||
|
||||
use crate::time::Time;
|
||||
use std::cell::RefCell;
|
||||
|
||||
@@ -1,161 +0,0 @@
|
||||
use al_core::image::format::ChannelType;
|
||||
use std::io::Cursor;
|
||||
|
||||
use crate::downloader::query;
|
||||
use crate::renderable::CreatorDid;
|
||||
use fitsrs::fits::Fits;
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Metadata {
|
||||
pub blank: f32,
|
||||
pub scale: f32,
|
||||
pub offset: f32,
|
||||
}
|
||||
|
||||
impl Default for Metadata {
|
||||
fn default() -> Self {
|
||||
Metadata {
|
||||
blank: -1.0,
|
||||
scale: 1.0,
|
||||
offset: 0.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use super::{Request, RequestType};
|
||||
use crate::downloader::QueryId;
|
||||
|
||||
pub struct PixelMetadataRequest {
|
||||
pub id: QueryId,
|
||||
pub url: Url,
|
||||
pub hips_cdid: CreatorDid,
|
||||
request: Request<Metadata>,
|
||||
}
|
||||
|
||||
impl From<PixelMetadataRequest> for RequestType {
|
||||
fn from(request: PixelMetadataRequest) -> Self {
|
||||
RequestType::PixelMetadata(request)
|
||||
}
|
||||
}
|
||||
|
||||
use super::Url;
|
||||
use wasm_bindgen::JsCast;
|
||||
use wasm_bindgen::JsValue;
|
||||
use wasm_bindgen_futures::JsFuture;
|
||||
use web_sys::{RequestInit, RequestMode, Response};
|
||||
impl From<query::PixelMetadata> for PixelMetadataRequest {
|
||||
// Create a tile request associated to a HiPS
|
||||
fn from(query: query::PixelMetadata) -> Self {
|
||||
let query::PixelMetadata {
|
||||
format,
|
||||
url,
|
||||
hips_cdid,
|
||||
id,
|
||||
} = query;
|
||||
|
||||
let url_clone = url.clone();
|
||||
|
||||
let channel = format.get_channel();
|
||||
|
||||
let window = web_sys::window().unwrap_abort();
|
||||
let request = match channel {
|
||||
ChannelType::R32F | ChannelType::R32I | ChannelType::R16I | ChannelType::R8UI => {
|
||||
Request::new(async move {
|
||||
let mut opts = RequestInit::new();
|
||||
opts.method("GET");
|
||||
opts.mode(RequestMode::Cors);
|
||||
|
||||
let request =
|
||||
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
|
||||
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
|
||||
// `resp_value` is a `Response` object.
|
||||
debug_assert!(resp_value.is_instance_of::<Response>());
|
||||
let resp: Response = resp_value.dyn_into()?;
|
||||
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
|
||||
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
|
||||
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
|
||||
|
||||
// Convert the JS ReadableStream to a Rust stream
|
||||
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
|
||||
let image = Fits::new(reader).await?;*/
|
||||
|
||||
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let bytes_buffer = js_sys::Uint8Array::new(&array_buffer);
|
||||
|
||||
let num_bytes = bytes_buffer.length() as usize;
|
||||
let mut raw_bytes = vec![0; num_bytes];
|
||||
bytes_buffer.copy_to(&mut raw_bytes[..]);
|
||||
|
||||
let mut reader = Cursor::new(&raw_bytes[..]);
|
||||
let Fits { hdu } = Fits::from_reader(&mut reader)
|
||||
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
|
||||
|
||||
let header = hdu.get_header();
|
||||
let scale =
|
||||
if let Some(fitsrs::card::Value::Float(bscale)) = header.get(b"BSCALE ") {
|
||||
*bscale as f32
|
||||
} else {
|
||||
1.0
|
||||
};
|
||||
let offset =
|
||||
if let Some(fitsrs::card::Value::Float(bzero)) = header.get(b"BZERO ") {
|
||||
*bzero as f32
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
let blank =
|
||||
if let Some(fitsrs::card::Value::Float(blank)) = header.get(b"BLANK ") {
|
||||
*blank as f32
|
||||
} else {
|
||||
f32::NAN
|
||||
};
|
||||
|
||||
Ok(Metadata {
|
||||
blank,
|
||||
scale,
|
||||
offset,
|
||||
})
|
||||
})
|
||||
}
|
||||
_ => Request::new(async move { Ok(Metadata::default()) }),
|
||||
};
|
||||
|
||||
Self {
|
||||
id,
|
||||
url,
|
||||
hips_cdid,
|
||||
request,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::rc::Rc;
|
||||
#[derive(Debug)]
|
||||
pub struct PixelMetadata {
|
||||
pub value: Rc<RefCell<Option<Metadata>>>,
|
||||
pub hips_cdid: CreatorDid,
|
||||
pub url: String,
|
||||
}
|
||||
use crate::Abort;
|
||||
impl<'a> From<&'a PixelMetadataRequest> for Option<PixelMetadata> {
|
||||
fn from(request: &'a PixelMetadataRequest) -> Self {
|
||||
let PixelMetadataRequest {
|
||||
request,
|
||||
hips_cdid,
|
||||
url,
|
||||
..
|
||||
} = request;
|
||||
if request.is_resolved() {
|
||||
let Request::<Metadata> { data, .. } = request;
|
||||
// It will always be resolved and found as we will request a well know tile (Norder0/Tile0)
|
||||
Some(PixelMetadata {
|
||||
hips_cdid: hips_cdid.clone(),
|
||||
url: url.to_string(),
|
||||
value: data.clone(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
// A request image should not be used outside this module
|
||||
// but contained inside a more specific type of query (e.g. for a tile or allsky)
|
||||
pub mod allsky;
|
||||
pub mod blank;
|
||||
pub mod moc;
|
||||
pub mod tile;
|
||||
|
||||
@@ -79,13 +78,11 @@ where
|
||||
}
|
||||
|
||||
use allsky::AllskyRequest;
|
||||
use blank::PixelMetadataRequest;
|
||||
use moc::MOCRequest;
|
||||
use tile::TileRequest;
|
||||
pub enum RequestType {
|
||||
Tile(TileRequest),
|
||||
Allsky(AllskyRequest),
|
||||
PixelMetadata(PixelMetadataRequest),
|
||||
Moc(MOCRequest), //..
|
||||
}
|
||||
|
||||
@@ -95,7 +92,6 @@ impl RequestType {
|
||||
match self {
|
||||
RequestType::Tile(request) => &request.id,
|
||||
RequestType::Allsky(request) => &request.id,
|
||||
RequestType::PixelMetadata(request) => &request.id,
|
||||
RequestType::Moc(request) => &request.hips_cdid,
|
||||
}
|
||||
}
|
||||
@@ -106,9 +102,6 @@ impl<'a> From<&'a RequestType> for Option<Resource> {
|
||||
match request {
|
||||
RequestType::Tile(request) => Option::<Tile>::from(request).map(Resource::Tile),
|
||||
RequestType::Allsky(request) => Option::<Allsky>::from(request).map(Resource::Allsky),
|
||||
RequestType::PixelMetadata(request) => {
|
||||
Option::<PixelMetadata>::from(request).map(Resource::PixelMetadata)
|
||||
}
|
||||
RequestType::Moc(request) => Option::<Moc>::from(request).map(Resource::Moc),
|
||||
}
|
||||
}
|
||||
@@ -116,13 +109,11 @@ impl<'a> From<&'a RequestType> for Option<Resource> {
|
||||
|
||||
use crate::Abort;
|
||||
use allsky::Allsky;
|
||||
use blank::PixelMetadata;
|
||||
use moc::Moc;
|
||||
use tile::Tile;
|
||||
pub enum Resource {
|
||||
Tile(Tile),
|
||||
Allsky(Allsky),
|
||||
PixelMetadata(PixelMetadata),
|
||||
Moc(Moc),
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use crate::healpix::cell::HEALPixCell;
|
||||
use crate::renderable::CreatorDid;
|
||||
use al_core::image::format::{ChannelType, ImageFormatType, RGB8U, RGBA8U};
|
||||
use al_core::image::format::ImageFormatType;
|
||||
use al_core::texture::format::{PixelType, RGB8U, RGBA8U};
|
||||
|
||||
use crate::downloader::query;
|
||||
use al_core::image::ImageType;
|
||||
@@ -47,38 +48,11 @@ impl From<query::Tile> for TileRequest {
|
||||
} = query;
|
||||
|
||||
let url_clone = url.clone();
|
||||
let channel = format.get_channel();
|
||||
let channel = format.get_pixel_format();
|
||||
|
||||
let window = web_sys::window().unwrap_abort();
|
||||
let request = match channel {
|
||||
ChannelType::RGB8U => Request::new(async move {
|
||||
/*let mut opts = RequestInit::new();
|
||||
opts.method("GET");
|
||||
opts.mode(RequestMode::Cors);
|
||||
|
||||
let request = web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
|
||||
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
|
||||
// `resp_value` is a `Response` object.
|
||||
debug_assert!(resp_value.is_instance_of::<Response>());
|
||||
let resp: Response = resp_value.dyn_into()?;*/
|
||||
|
||||
/*/// Bitmap version
|
||||
let blob = JsFuture::from(resp.blob()?).await?.into();
|
||||
let image = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?)
|
||||
.await?
|
||||
.into();
|
||||
|
||||
let image = Bitmap::new(image);
|
||||
Ok(ImageType::JpgImageRgb8u { image })*/
|
||||
/*
|
||||
/// Raw image decoding
|
||||
|
||||
let buf = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
|
||||
let image = ImageBuffer::<RGB8U>::from_raw_bytes(&raw_bytes[..], 512, 512)?;
|
||||
|
||||
Ok(ImageType::RawRgb8u { image })
|
||||
*/
|
||||
PixelType::RGB8U => Request::new(async move {
|
||||
// HTMLImageElement
|
||||
let image = query_html_image(&url_clone, credentials).await?;
|
||||
// The image has been resolved
|
||||
@@ -86,34 +60,7 @@ impl From<query::Tile> for TileRequest {
|
||||
image: HTMLImage::<RGB8U>::new(image),
|
||||
})
|
||||
}),
|
||||
ChannelType::RGBA8U => Request::new(async move {
|
||||
/*let mut opts = RequestInit::new();
|
||||
opts.method("GET");
|
||||
opts.mode(RequestMode::Cors);
|
||||
|
||||
let request = web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
|
||||
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
|
||||
// `resp_value` is a `Response` object.
|
||||
debug_assert!(resp_value.is_instance_of::<Response>());
|
||||
let resp: Response = resp_value.dyn_into()?;*/
|
||||
|
||||
/*/// Bitmap version
|
||||
let blob = JsFuture::from(resp.blob()?).await?.into();
|
||||
let image = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?)
|
||||
.await?
|
||||
.into();
|
||||
|
||||
let image = Bitmap::new(image);
|
||||
Ok(ImageType::PngImageRgba8u { image })*/
|
||||
|
||||
/*
|
||||
/// Raw image decoding
|
||||
let buf = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
|
||||
let image = ImageBuffer::<RGBA8U>::from_raw_bytes(&raw_bytes[..], 512, 512)?;
|
||||
|
||||
Ok(ImageType::RawRgba8u { image })
|
||||
*/
|
||||
PixelType::RGBA8U => Request::new(async move {
|
||||
// HTMLImageElement
|
||||
let image = query_html_image(&url_clone, credentials).await?;
|
||||
// The image has been resolved
|
||||
@@ -121,45 +68,42 @@ impl From<query::Tile> for TileRequest {
|
||||
image: HTMLImage::<RGBA8U>::new(image),
|
||||
})
|
||||
}),
|
||||
ChannelType::R32F
|
||||
| ChannelType::R64F
|
||||
| ChannelType::R32I
|
||||
| ChannelType::R16I
|
||||
| ChannelType::R8UI => Request::new(async move {
|
||||
let mut opts = RequestInit::new();
|
||||
opts.method("GET");
|
||||
opts.mode(mode);
|
||||
opts.credentials(credentials);
|
||||
PixelType::R32F | PixelType::R32I | PixelType::R16I | PixelType::R8U => {
|
||||
Request::new(async move {
|
||||
let mut opts = RequestInit::new();
|
||||
opts.method("GET");
|
||||
opts.mode(mode);
|
||||
opts.credentials(credentials);
|
||||
|
||||
let request =
|
||||
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
|
||||
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
|
||||
// `resp_value` is a `Response` object.
|
||||
debug_assert!(resp_value.is_instance_of::<Response>());
|
||||
let resp: Response = resp_value.dyn_into()?;
|
||||
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
|
||||
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
|
||||
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
|
||||
let request =
|
||||
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
|
||||
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
|
||||
// `resp_value` is a `Response` object.
|
||||
debug_assert!(resp_value.is_instance_of::<Response>());
|
||||
let resp: Response = resp_value.dyn_into()?;
|
||||
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
|
||||
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
|
||||
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
|
||||
|
||||
// Convert the JS ReadableStream to a Rust stream
|
||||
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
|
||||
let image = Fits::new(reader).await?;
|
||||
*/
|
||||
if resp.ok() {
|
||||
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let raw_bytes = js_sys::Uint8Array::new(&array_buffer);
|
||||
// Convert the JS ReadableStream to a Rust stream
|
||||
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
|
||||
let image = Fits::new(reader).await?;
|
||||
*/
|
||||
if resp.ok() {
|
||||
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
|
||||
let raw_bytes = js_sys::Uint8Array::new(&array_buffer);
|
||||
|
||||
Ok(ImageType::FitsImage {
|
||||
raw_bytes,
|
||||
size: (size, size),
|
||||
})
|
||||
} else {
|
||||
Err(JsValue::from_str(
|
||||
"Response status code not between 200-299.",
|
||||
))
|
||||
}
|
||||
}),
|
||||
_ => todo!(),
|
||||
Ok(ImageType::FitsRawBytes {
|
||||
raw_bytes,
|
||||
size: (size, size),
|
||||
})
|
||||
} else {
|
||||
Err(JsValue::from_str(
|
||||
"Response status code not between 200-299.",
|
||||
))
|
||||
}
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
Self {
|
||||
|
||||
@@ -92,6 +92,8 @@ use al_api::moc::MOCOptions;
use wasm_bindgen::prelude::*;
use web_sys::HtmlElement;

use fitsrs::{WCSParams, WCS};

use crate::math::angle::ToAngle;

mod app;
@@ -350,33 +352,31 @@ impl WebClient {
        Ok(())
    }

    #[wasm_bindgen(js_name = addImageFITS)]
    pub fn add_image_fits(
    #[wasm_bindgen(js_name = addFITSImage)]
    pub fn add_fits_image(
        &mut self,
        stream: web_sys::ReadableStream,
        bytes: &[u8],
        cfg: JsValue,
        layer: String,
    ) -> Result<js_sys::Promise, JsValue> {
        let cfg: ImageMetadata = serde_wasm_bindgen::from_value(cfg)?;

        self.app.add_image_fits(stream, cfg, layer)
        self.app.add_fits_image(bytes, cfg, layer)
    }

    #[wasm_bindgen(js_name = addImageWithWCS)]
    pub fn add_image_with_wcs(
    #[wasm_bindgen(js_name = addRGBAImage)]
    pub fn add_rgba_image(
        &mut self,
        stream: web_sys::ReadableStream,
        bytes: &[u8],
        wcs: JsValue,
        cfg: JsValue,
        layer: String,
    ) -> Result<js_sys::Promise, JsValue> {
        use wcs::{WCSParams, WCS};
        let cfg: ImageMetadata = serde_wasm_bindgen::from_value(cfg)?;
        let wcs_params: WCSParams = serde_wasm_bindgen::from_value(wcs)?;

        let wcs = WCS::new(&wcs_params).map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;

        self.app
            .add_image_from_blob_and_wcs(layer, stream, wcs, cfg)
        self.app.add_rgba_image(layer, bytes, wcs, cfg)
    }

    #[wasm_bindgen(js_name = removeLayer)]

@@ -49,8 +49,8 @@
}

use crate::math::angle::ToAngle;
impl From<wcs::LonLat> for LonLatT<f64> {
    fn from(lonlat: wcs::LonLat) -> Self {
impl From<fitsrs::wcs::LonLat> for LonLatT<f64> {
    fn from(lonlat: fitsrs::wcs::LonLat) -> Self {
        Self(lonlat.lon().to_angle(), lonlat.lat().to_angle())
    }
}

@@ -1,6 +1,7 @@
|
||||
use al_api::hips::ImageExt;
|
||||
|
||||
use al_core::image::format::{ChannelType, ImageFormatType};
|
||||
use al_core::image::format::ImageFormatType;
|
||||
use al_core::texture::format::PixelType;
|
||||
use web_sys::{RequestCredentials, RequestMode};
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -21,18 +22,6 @@ pub struct HiPSConfig {
|
||||
|
||||
pub is_allsky: bool,
|
||||
|
||||
// TODO: store this values in the ImageSurvey
|
||||
// These are proper to the survey (FITS one) and not
|
||||
// to a specific survey color
|
||||
pub fits_metadata: bool,
|
||||
pub scale: f32,
|
||||
pub offset: f32,
|
||||
pub blank: f32,
|
||||
|
||||
pub tex_storing_integers: bool,
|
||||
pub tex_storing_fits: bool,
|
||||
pub tex_storing_unsigned_int: bool,
|
||||
|
||||
pub frame: CooSystem,
|
||||
pub bitpix: Option<i32>,
|
||||
format: ImageFormatType,
|
||||
@@ -68,12 +57,7 @@ impl HiPSConfig {
|
||||
// Determine the size of the texture to copy
|
||||
// it cannot be > to 512x512px
|
||||
|
||||
let _fmt = properties.get_formats();
|
||||
let bitpix = properties.get_bitpix();
|
||||
let mut tex_storing_unsigned_int = false;
|
||||
let mut tex_storing_integers = false;
|
||||
|
||||
let mut tex_storing_fits = false;
|
||||
|
||||
if !properties.get_formats().contains(&img_ext) {
|
||||
return Err(js_sys::Error::new("HiPS format not available").into());
|
||||
@@ -83,45 +67,18 @@ impl HiPSConfig {
|
||||
ImageExt::Fits => {
|
||||
// Check the bitpix to determine the internal format of the tiles
|
||||
if let Some(bitpix) = bitpix {
|
||||
let channel = (match bitpix {
|
||||
#[cfg(feature = "webgl2")]
|
||||
8 => {
|
||||
tex_storing_fits = true;
|
||||
tex_storing_unsigned_int = true;
|
||||
Ok(ChannelType::R8UI)
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
16 => {
|
||||
tex_storing_fits = true;
|
||||
tex_storing_integers = true;
|
||||
Ok(ChannelType::R16I)
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
32 => {
|
||||
tex_storing_fits = true;
|
||||
tex_storing_integers = true;
|
||||
Ok(ChannelType::R32I)
|
||||
}
|
||||
-32 => {
|
||||
tex_storing_fits = true;
|
||||
tex_storing_integers = false;
|
||||
Ok(ChannelType::R32F)
|
||||
}
|
||||
-64 => {
|
||||
tex_storing_fits = true;
|
||||
tex_storing_integers = false;
|
||||
//Err(JsValue::from_str("f64 FITS files not supported"))
|
||||
Ok(ChannelType::R64F)
|
||||
}
|
||||
let fmt = (match bitpix {
|
||||
8 => Ok(PixelType::R8U),
|
||||
16 => Ok(PixelType::R16I),
|
||||
32 => Ok(PixelType::R32I),
|
||||
-32 => Ok(PixelType::R32F),
|
||||
-64 => Ok(PixelType::R32F),
|
||||
_ => Err(JsValue::from_str(
|
||||
"Fits tiles exists but the BITPIX is not correct in the property file",
|
||||
)),
|
||||
})?;
|
||||
|
||||
Ok(ImageFormatType {
|
||||
ext: img_ext,
|
||||
channel,
|
||||
})
|
||||
Ok(ImageFormatType { ext: img_ext, fmt })
|
||||
} else {
|
||||
Err(JsValue::from_str(
|
||||
"Fits tiles exists but the BITPIX is not found",
|
||||
@@ -130,11 +87,11 @@ impl HiPSConfig {
|
||||
}
|
||||
ImageExt::Png | ImageExt::Webp => Ok(ImageFormatType {
|
||||
ext: img_ext,
|
||||
channel: ChannelType::RGBA8U,
|
||||
fmt: PixelType::RGBA8U,
|
||||
}),
|
||||
ImageExt::Jpeg => Ok(ImageFormatType {
|
||||
ext: img_ext,
|
||||
channel: ChannelType::RGB8U,
|
||||
fmt: PixelType::RGB8U,
|
||||
}),
|
||||
}?;
|
||||
|
||||
@@ -168,15 +125,6 @@ impl HiPSConfig {
|
||||
|
||||
is_allsky,
|
||||
|
||||
fits_metadata: false,
|
||||
scale: 1.0,
|
||||
offset: 0.0,
|
||||
blank: -1.0, // by default, set it to -1
|
||||
|
||||
tex_storing_fits,
|
||||
tex_storing_integers,
|
||||
tex_storing_unsigned_int,
|
||||
|
||||
// the number of slices in a cube
|
||||
cube_depth,
|
||||
|
||||
@@ -196,66 +144,32 @@ impl HiPSConfig {
|
||||
ImageExt::Fits => {
|
||||
// Check the bitpix to determine the internal format of the tiles
|
||||
if let Some(bitpix) = self.bitpix {
|
||||
let channel = (match bitpix {
|
||||
#[cfg(feature = "webgl2")]
|
||||
8 => {
|
||||
self.tex_storing_fits = true;
|
||||
self.tex_storing_unsigned_int = true;
|
||||
Ok(ChannelType::R8UI)
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
16 => {
|
||||
self.tex_storing_fits = true;
|
||||
self.tex_storing_integers = true;
|
||||
Ok(ChannelType::R16I)
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
32 => {
|
||||
self.tex_storing_fits = true;
|
||||
self.tex_storing_integers = true;
|
||||
Ok(ChannelType::R32I)
|
||||
}
|
||||
-32 => {
|
||||
self.tex_storing_fits = true;
|
||||
self.tex_storing_integers = false;
|
||||
Ok(ChannelType::R32F)
|
||||
}
|
||||
-64 => {
|
||||
self.tex_storing_fits = true;
|
||||
self.tex_storing_integers = false;
|
||||
//Err(JsValue::from_str("f64 FITS files not supported"))
|
||||
Ok(ChannelType::R64F)
|
||||
}
|
||||
let fmt = (match bitpix {
|
||||
8 => Ok(PixelType::R8U),
|
||||
16 => Ok(PixelType::R16I),
|
||||
32 => Ok(PixelType::R32I),
|
||||
-32 => Ok(PixelType::R32F),
|
||||
-64 => Ok(PixelType::R32F),
|
||||
_ => Err(JsValue::from_str(
|
||||
"Fits tiles exists but the BITPIX is not correct in the property file",
|
||||
)),
|
||||
})?;
|
||||
|
||||
Ok(ImageFormatType { ext, channel })
|
||||
Ok(ImageFormatType { ext, fmt })
|
||||
} else {
|
||||
Err(JsValue::from_str(
|
||||
"Fits tiles exists but the BITPIX is not found",
|
||||
))
|
||||
}
|
||||
}
|
||||
ImageExt::Png | ImageExt::Webp => {
|
||||
self.tex_storing_fits = false;
|
||||
self.tex_storing_unsigned_int = false;
|
||||
self.tex_storing_integers = false;
|
||||
Ok(ImageFormatType {
|
||||
ext,
|
||||
channel: ChannelType::RGBA8U,
|
||||
})
|
||||
}
|
||||
ImageExt::Jpeg => {
|
||||
self.tex_storing_fits = false;
|
||||
self.tex_storing_unsigned_int = false;
|
||||
self.tex_storing_integers = false;
|
||||
Ok(ImageFormatType {
|
||||
ext,
|
||||
channel: ChannelType::RGB8U,
|
||||
})
|
||||
}
|
||||
ImageExt::Png | ImageExt::Webp => Ok(ImageFormatType {
|
||||
ext,
|
||||
fmt: PixelType::RGBA8U,
|
||||
}),
|
||||
ImageExt::Jpeg => Ok(ImageFormatType {
|
||||
ext,
|
||||
fmt: PixelType::RGB8U,
|
||||
}),
|
||||
}?;
|
||||
|
||||
self.format = format;
|
||||
@@ -277,14 +191,6 @@ impl HiPSConfig {
|
||||
self.cube_depth
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn set_fits_metadata(&mut self, bscale: f32, bzero: f32, blank: f32) {
|
||||
self.scale = bscale;
|
||||
self.offset = bzero;
|
||||
self.blank = blank;
|
||||
self.fits_metadata = true;
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn allsky_tile_size(&self) -> i32 {
|
||||
(self.get_tile_size() << 3).min(512)
|
||||
@@ -341,12 +247,7 @@ use al_core::shader::{SendUniforms, ShaderBound};
|
||||
impl SendUniforms for HiPSConfig {
|
||||
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
|
||||
// Send max depth
|
||||
shader
|
||||
.attach_uniform("max_depth", &(self.max_depth_tile as i32))
|
||||
.attach_uniform("tex_storing_fits", &self.tex_storing_fits)
|
||||
.attach_uniform("scale", &self.scale)
|
||||
.attach_uniform("offset", &self.offset)
|
||||
.attach_uniform("blank", &self.blank);
|
||||
shader.attach_uniform("max_depth", &(self.max_depth_tile as i32));
|
||||
|
||||
shader
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use al_core::image::format::ChannelType;
|
||||
use al_core::texture::format::PixelType;
|
||||
|
||||
use crate::renderable::hips::HpxTile;
|
||||
use cgmath::Vector3;
|
||||
@@ -10,9 +10,9 @@ use cgmath::Vector3;
|
||||
use al_api::hips::ImageExt;
|
||||
use al_core::webgl_ctx::WebGlRenderingCtx;
|
||||
|
||||
use al_core::image::format::{R16I, R32F, R32I, R64F, R8UI, RGB8U, RGBA8U};
|
||||
use al_core::image::Image;
|
||||
use al_core::shader::{SendUniforms, ShaderBound};
|
||||
use al_core::texture::format::{R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
|
||||
use al_core::Texture2DArray;
|
||||
use al_core::WebGlContext;
|
||||
|
||||
@@ -150,7 +150,7 @@ pub struct HiPS2DBuffer {
|
||||
fn create_hpx_texture_storage(
|
||||
gl: &WebGlContext,
|
||||
// The texture image channel definition
|
||||
channel: ChannelType,
|
||||
channel: PixelType,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles: i32,
|
||||
// The size of the tile
|
||||
@@ -182,41 +182,34 @@ fn create_hpx_texture_storage(
|
||||
),
|
||||
];
|
||||
match channel {
|
||||
ChannelType::RGBA8U => Texture2DArray::create_empty::<RGBA8U>(
|
||||
PixelType::RGBA8U => Texture2DArray::create_empty::<RGBA8U>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
),
|
||||
ChannelType::RGB8U => Texture2DArray::create_empty::<RGB8U>(
|
||||
PixelType::RGB8U => Texture2DArray::create_empty::<RGB8U>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
),
|
||||
ChannelType::R32F => Texture2DArray::create_empty::<R32F>(
|
||||
PixelType::R32F => Texture2DArray::create_empty::<R32F>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
),
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R8UI => Texture2DArray::create_empty::<R8UI>(
|
||||
|
||||
PixelType::R8U => Texture2DArray::create_empty::<R8U>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
),
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R16I => Texture2DArray::create_empty::<R16I>(
|
||||
|
||||
PixelType::R16I => Texture2DArray::create_empty::<R16I>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
),
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R32I => Texture2DArray::create_empty::<R32I>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
),
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R64F => Texture2DArray::create_empty::<R64F>(
|
||||
PixelType::R32I => Texture2DArray::create_empty::<R32I>(
|
||||
gl, tile_size, tile_size,
|
||||
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
|
||||
num_tiles, tex_params,
|
||||
@@ -380,6 +373,8 @@ impl HiPS2DBuffer {
|
||||
cell: &HEALPixCell,
|
||||
dx: f64,
|
||||
dy: f64,
|
||||
scale: f32,
|
||||
offset: f32,
|
||||
) -> Result<JsValue, JsValue> {
|
||||
let value = if let Some(tile) = self.get(cell) {
|
||||
// Index of the texture in the total set of textures
|
||||
@@ -395,28 +390,27 @@ impl HiPS2DBuffer {
|
||||
tile_idx,
|
||||
);
|
||||
|
||||
// Offset in the slice in pixels
|
||||
if self.config.tex_storing_fits {
|
||||
let uvy = 1.0 - (pos_tex.y as f32 / tile_size);
|
||||
match self.config.get_format().get_pixel_format() {
|
||||
PixelType::RGB8U | PixelType::RGBA8U => self
|
||||
.tile_pixels
|
||||
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?,
|
||||
_ => {
|
||||
let uvy = 1.0 - (pos_tex.y as f32 / tile_size);
|
||||
pos_tex.y = (uvy * tile_size) as i32;
|
||||
|
||||
pos_tex.y = (uvy * tile_size) as i32;
|
||||
}
|
||||
let f64_v = self
|
||||
.tile_pixels
|
||||
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?
|
||||
.as_f64()
|
||||
.ok_or("Error unwraping the pixel read value.")?;
|
||||
|
||||
let value = self
|
||||
.tile_pixels
|
||||
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?;
|
||||
// 1 channel
|
||||
// scale the value
|
||||
let scale = scale as f64;
|
||||
let offset = offset as f64;
|
||||
|
||||
if self.config.tex_storing_fits {
|
||||
// scale the value
|
||||
let f64_v = value
|
||||
.as_f64()
|
||||
.ok_or("Error unwraping the pixel read value.")?;
|
||||
let scale = self.config.scale as f64;
|
||||
let offset = self.config.offset as f64;
|
||||
|
||||
JsValue::from_f64(f64_v * scale + offset)
|
||||
} else {
|
||||
value
|
||||
JsValue::from_f64(f64_v * scale + offset)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
JsValue::null()
|
||||
@@ -457,7 +451,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
|
||||
HpxTexture2D::new(&HEALPixCell(0, 10), 10, now),
|
||||
HpxTexture2D::new(&HEALPixCell(0, 11), 11, now),
|
||||
];
|
||||
let channel = config.get_format().get_channel();
|
||||
let channel = config.get_format().get_pixel_format();
|
||||
let tile_size = config.get_tile_size();
|
||||
let tile_pixels = create_hpx_texture_storage(gl, channel, 128, tile_size)?;
|
||||
|
||||
@@ -490,7 +484,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
|
||||
fn set_image_ext(&mut self, gl: &WebGlContext, ext: ImageExt) -> Result<(), JsValue> {
|
||||
self.config.set_image_ext(ext)?;
|
||||
|
||||
let channel = self.config.get_format().get_channel();
|
||||
let channel = self.config.get_format().get_pixel_format();
|
||||
let tile_size = self.config.get_tile_size();
|
||||
self.tile_pixels = create_hpx_texture_storage(gl, channel, 128, tile_size)?;
|
||||
|
||||
|
||||
@@ -9,10 +9,12 @@ use al_api::hips::ImageExt;
|
||||
use al_api::hips::ImageMetadata;
|
||||
use al_core::colormap::Colormap;
|
||||
use al_core::colormap::Colormaps;
|
||||
use al_core::image::format::ChannelType;
|
||||
use al_core::texture::format::PixelType;
|
||||
use cgmath::Vector2;
|
||||
use cgmath::Vector3;
|
||||
|
||||
use crate::renderable::hips::FitsParams;
|
||||
|
||||
use al_core::image::Image;
|
||||
|
||||
use al_core::shader::Shader;
|
||||
@@ -97,43 +99,49 @@ pub fn get_raster_shader<'a>(
|
||||
shaders: &'a mut ShaderManager,
|
||||
config: &HiPSConfig,
|
||||
) -> Result<&'a Shader, JsValue> {
|
||||
if config.get_format().is_colored() {
|
||||
if cmap.label() == "native" {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_color.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_color_to_colormap.frag",
|
||||
)
|
||||
match config.get_format().get_pixel_format() {
|
||||
PixelType::R8U => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_u8.frag",
|
||||
),
|
||||
PixelType::R16I => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_i16.frag",
|
||||
),
|
||||
PixelType::R32I => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_i32.frag",
|
||||
),
|
||||
PixelType::R32F => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_f32.frag",
|
||||
),
|
||||
// color case
|
||||
_ => {
|
||||
if cmap.label() == "native" {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_rgba.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_rgba2cmap.frag",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if config.tex_storing_unsigned_int {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_grayscale_to_colormap_u.frag",
|
||||
)
|
||||
} else if config.tex_storing_integers {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_grayscale_to_colormap_i.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_rasterizer_raster.vert",
|
||||
"hips_rasterizer_grayscale_to_colormap.frag",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,44 +151,49 @@ pub fn get_raytracer_shader<'a>(
|
||||
shaders: &'a mut ShaderManager,
|
||||
config: &HiPSConfig,
|
||||
) -> Result<&'a Shader, JsValue> {
|
||||
//let colored_hips = config.is_colored();
|
||||
if config.get_format().is_colored() {
|
||||
if cmap.label() == "native" {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_color.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_color_to_colormap.frag",
|
||||
)
|
||||
match config.get_format().get_pixel_format() {
|
||||
PixelType::R8U => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_u8.frag",
|
||||
),
|
||||
PixelType::R16I => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_i16.frag",
|
||||
),
|
||||
PixelType::R32I => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_i32.frag",
|
||||
),
|
||||
PixelType::R32F => crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_f32.frag",
|
||||
),
|
||||
// color case
|
||||
_ => {
|
||||
if cmap.label() == "native" {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_rgba.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_rgba2cmap.frag",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if config.tex_storing_unsigned_int {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_grayscale_to_colormap_u.frag",
|
||||
)
|
||||
} else if config.tex_storing_integers {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_grayscale_to_colormap_i.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips_raytracer_raytracer.vert",
|
||||
"hips_raytracer_grayscale_to_colormap.frag",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -217,6 +230,8 @@ pub struct HiPS2D {
|
||||
|
||||
// A buffer storing the cells in the view
|
||||
hpx_cells_in_view: Vec<HEALPixCell>,
|
||||
|
||||
pub fits_params: Option<FitsParams>,
|
||||
}
|
||||
|
||||
use super::HpxTileBuffer;
|
||||
@@ -289,6 +304,8 @@ impl HiPS2D {
|
||||
|
||||
gl,
|
||||
|
||||
fits_params: None,
|
||||
|
||||
position,
|
||||
uv_start,
|
||||
uv_end,
|
||||
@@ -423,7 +440,13 @@ impl HiPS2D {
|
||||
let (pix, dx, dy) = crate::healpix::utils::hash_with_dxdy(depth, &lonlat);
|
||||
let tile_cell = HEALPixCell(depth, pix);
|
||||
|
||||
self.buffer.read_pixel(&tile_cell, dx, dy)
|
||||
let (bscale, bzero) = if let Some(FitsParams { bscale, bzero, .. }) = self.fits_params {
|
||||
(bscale, bzero)
|
||||
} else {
|
||||
(1.0, 0.0)
|
||||
};
|
||||
|
||||
self.buffer.read_pixel(&tile_cell, dx, dy, bscale, bzero)
|
||||
} else {
|
||||
Err(JsValue::from_str("Out of projection"))
|
||||
}
|
||||
@@ -438,7 +461,7 @@ impl HiPS2D {
|
||||
|
||||
let cfg = self.buffer.config();
|
||||
// Get the coo system transformation matrix
|
||||
let channel = cfg.get_format().get_channel();
|
||||
let channel = cfg.get_format().get_pixel_format();
|
||||
|
||||
// Retrieve the model and inverse model matrix
|
||||
let mut off_indices = 0;
|
||||
@@ -531,7 +554,7 @@ impl HiPS2D {
|
||||
} else {
|
||||
// No ancestor has been found in the buffer to draw.
|
||||
// We might want to check if the HiPS channel is JPEG to mock a cell that will be drawn in black
|
||||
if channel == ChannelType::RGB8U {
|
||||
if channel == PixelType::RGB8U {
|
||||
Some(HpxDrawData::new(cell))
|
||||
} else {
|
||||
None
|
||||
@@ -540,7 +563,7 @@ impl HiPS2D {
|
||||
} else {
|
||||
// No ancestor has been found in the buffer to draw.
|
||||
// We might want to check if the HiPS channel is JPEG to mock a cell that will be drawn in black
|
||||
if channel == ChannelType::RGB8U {
|
||||
if channel == PixelType::RGB8U {
|
||||
Some(HpxDrawData::new(cell))
|
||||
} else {
|
||||
None
|
||||
@@ -754,7 +777,7 @@ impl HiPS2D {
|
||||
.attach_uniform("current_time", &utils::get_current_time())
|
||||
.attach_uniform(
|
||||
"no_tile_color",
|
||||
&(if config.get_format().get_channel() == ChannelType::RGB8U {
|
||||
&(if config.get_format().get_pixel_format() == PixelType::RGB8U {
|
||||
Vector4::new(0.0, 0.0, 0.0, 1.0)
|
||||
} else {
|
||||
Vector4::new(0.0, 0.0, 0.0, 0.0)
|
||||
@@ -763,6 +786,10 @@ impl HiPS2D {
|
||||
.attach_uniform("opacity", opacity)
|
||||
.attach_uniforms_from(colormaps);
|
||||
|
||||
if let Some(fits_params) = self.fits_params.as_ref() {
|
||||
shader.attach_uniforms_from(fits_params);
|
||||
}
|
||||
|
||||
raytracer.draw(&shader);
|
||||
} else {
|
||||
let v2w = (*camera.get_m2w()) * c.transpose();
|
||||
@@ -791,7 +818,13 @@ impl HiPS2D {
|
||||
.attach_uniform("current_time", &utils::get_current_time())
|
||||
.attach_uniform("opacity", opacity)
|
||||
.attach_uniform("u_proj", proj)
|
||||
.attach_uniforms_from(colormaps)
|
||||
.attach_uniforms_from(colormaps);
|
||||
|
||||
if let Some(fits_params) = self.fits_params.as_ref() {
|
||||
shader.attach_uniforms_from(fits_params);
|
||||
}
|
||||
|
||||
shader
|
||||
.bind_vertex_array_object_ref(&self.vao)
|
||||
.draw_elements_with_i32(
|
||||
WebGl2RenderingContext::TRIANGLES,
|
||||
@@ -806,7 +839,14 @@ impl HiPS2D {
|
||||
})?;
|
||||
|
||||
//self.gl.disable(WebGl2RenderingContext::BLEND);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
|
||||
self.fits_params = Some(FitsParams {
|
||||
bscale,
|
||||
bzero,
|
||||
blank,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ use al_api::hips::ImageExt;
|
||||
use al_api::hips::ImageMetadata;
|
||||
use al_core::colormap::Colormap;
|
||||
use al_core::colormap::Colormaps;
|
||||
use al_core::image::format::ChannelType;
|
||||
use al_core::texture::format::PixelType;
|
||||
|
||||
use al_core::image::Image;
|
||||
|
||||
@@ -30,6 +30,7 @@ use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
|
||||
use crate::time::Time;
|
||||
|
||||
use super::config::HiPSConfig;
|
||||
use super::FitsParams;
|
||||
use std::collections::HashSet;
|
||||
|
||||
// Recursively compute the number of subdivision needed for a cell
|
||||
@@ -50,43 +51,32 @@ pub fn get_raster_shader<'a>(
|
||||
shaders: &'a mut ShaderManager,
|
||||
config: &HiPSConfig,
|
||||
) -> Result<&'a Shader, JsValue> {
|
||||
if config.get_format().is_colored() {
|
||||
if cmap.label() == "native" {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips3d_rasterizer_raster.vert",
|
||||
"hips3d_rasterizer_color.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips3d_rasterizer_raster.vert",
|
||||
"hips3d_rasterizer_color_to_colormap.frag",
|
||||
)
|
||||
match config.get_format().get_pixel_format() {
|
||||
PixelType::R8U => {
|
||||
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_u8.frag")
|
||||
}
|
||||
PixelType::R16I => {
|
||||
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_i16.frag")
|
||||
}
|
||||
PixelType::R32I => {
|
||||
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_i32.frag")
|
||||
}
|
||||
PixelType::R32F => {
|
||||
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_f32.frag")
|
||||
}
|
||||
// color case
|
||||
_ => {
|
||||
if cmap.label() == "native" {
|
||||
crate::shader::get_shader(gl, shaders, "hips3d_raster.vert", "hips3d_rgba.frag")
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips3d_raster.vert",
|
||||
"hips3d_rgba2cmap.frag",
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if config.tex_storing_unsigned_int {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips3d_rasterizer_raster.vert",
|
||||
"hips3d_rasterizer_grayscale_to_colormap_u.frag",
|
||||
)
|
||||
} else if config.tex_storing_integers {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips3d_rasterizer_raster.vert",
|
||||
"hips3d_rasterizer_grayscale_to_colormap_i.frag",
|
||||
)
|
||||
} else {
|
||||
crate::shader::get_shader(
|
||||
gl,
|
||||
shaders,
|
||||
"hips3d_rasterizer_raster.vert",
|
||||
"hips3d_rasterizer_grayscale_to_colormap.frag",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,6 +105,8 @@ pub struct HiPS3D {
|
||||
// A buffer storing the cells in the view
|
||||
hpx_cells_in_view: Vec<HEALPixCell>,
|
||||
|
||||
pub fits_params: Option<FitsParams>,
|
||||
|
||||
// The current slice index
|
||||
slice: u16,
|
||||
|
||||
@@ -187,6 +179,8 @@ impl HiPS3D {
|
||||
uv,
|
||||
idx_vertices,
|
||||
|
||||
fits_params: None,
|
||||
|
||||
footprint_moc,
|
||||
hpx_cells_in_view,
|
||||
|
||||
@@ -288,7 +282,7 @@ impl HiPS3D {
|
||||
|
||||
let mut off_indices = 0;
|
||||
|
||||
let channel = self.get_config().get_format().get_channel();
|
||||
let channel = self.get_config().get_format().get_pixel_format();
|
||||
|
||||
// Define a global level of subdivisions for all the healpix tile cells in the view
|
||||
// This should prevent seeing many holes
|
||||
@@ -307,7 +301,7 @@ impl HiPS3D {
|
||||
let cell = if let Some(moc) = self.footprint_moc.as_ref() {
|
||||
if moc.intersects_cell(cell) {
|
||||
Some(&cell)
|
||||
} else if channel == ChannelType::RGB8U {
|
||||
} else if channel == PixelType::RGB8U {
|
||||
// Rasterizer does not render tiles that are not in the MOC
// This is not a problem for HiPSes rendered with transparency (FITS or PNG)
// but JPEG tiles are drawn black where no pixel data is found
|
||||
@@ -471,6 +465,14 @@ impl HiPS3D {
|
||||
self.footprint_moc = Some(moc);
|
||||
}
|
||||
|
||||
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
|
||||
self.fits_params = Some(FitsParams {
|
||||
bscale,
|
||||
bzero,
|
||||
blank,
|
||||
});
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_moc(&self) -> Option<&HEALPixCoverage> {
|
||||
self.footprint_moc.as_ref()
|
||||
@@ -565,7 +567,13 @@ impl HiPS3D {
|
||||
.attach_uniform("inv_model", &v2w)
|
||||
.attach_uniform("opacity", opacity)
|
||||
.attach_uniform("u_proj", proj)
|
||||
.attach_uniforms_from(colormaps)
|
||||
.attach_uniforms_from(colormaps);
|
||||
|
||||
if let Some(fits_params) = self.fits_params.as_ref() {
|
||||
shaderbound.attach_uniforms_from(fits_params);
|
||||
}
|
||||
|
||||
shaderbound
|
||||
.bind_vertex_array_object_ref(&self.vao)
|
||||
.draw_elements_with_i32(
|
||||
WebGl2RenderingContext::TRIANGLES,
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
use crate::renderable::hips::d2::texture::HpxTexture2D;
|
||||
use crate::{healpix::cell::HEALPixCell, time::Time};
|
||||
|
||||
use al_core::image::format::{
|
||||
ChannelType, R16I, R32F, R32I, R64F, R8UI, RGB32F, RGB8U, RGBA32F, RGBA8U,
|
||||
};
|
||||
use al_core::image::Image;
|
||||
use al_core::texture::format::{PixelType, R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
|
||||
use al_core::texture::Texture3D;
|
||||
use al_core::webgl_ctx::WebGlRenderingCtx;
|
||||
use cgmath::Vector3;
|
||||
@@ -255,32 +253,23 @@ impl HpxTexture3D {
|
||||
),
|
||||
];
|
||||
|
||||
let texture = match cfg.get_format().get_channel() {
|
||||
ChannelType::RGBA32F => {
|
||||
Texture3D::create_empty::<RGBA32F>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::RGB32F => {
|
||||
Texture3D::create_empty::<RGB32F>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::RGBA8U => {
|
||||
let texture = match cfg.get_format().get_pixel_format() {
|
||||
PixelType::RGBA8U => {
|
||||
Texture3D::create_empty::<RGBA8U>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::RGB8U => {
|
||||
PixelType::RGB8U => {
|
||||
Texture3D::create_empty::<RGB8U>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::R32F => {
|
||||
PixelType::R32F => {
|
||||
Texture3D::create_empty::<R32F>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::R64F => {
|
||||
Texture3D::create_empty::<R64F>(gl, tile_size, tile_size, 32, params)
|
||||
PixelType::R8U => {
|
||||
Texture3D::create_empty::<R8U>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::R8UI => {
|
||||
Texture3D::create_empty::<R8UI>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::R16I => {
|
||||
PixelType::R16I => {
|
||||
Texture3D::create_empty::<R16I>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
ChannelType::R32I => {
|
||||
PixelType::R32I => {
|
||||
Texture3D::create_empty::<R32I>(gl, tile_size, tile_size, 32, params)
|
||||
}
|
||||
};
|
||||
|
||||
@@ -155,4 +155,40 @@ impl HiPS {
|
||||
pub fn is_allsky(&self) -> bool {
|
||||
self.get_config().is_allsky
|
||||
}
|
||||
|
||||
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
|
||||
match self {
|
||||
HiPS::D2(hips) => hips.set_fits_params(bscale, bzero, blank),
|
||||
HiPS::D3(hips) => hips.set_fits_params(bscale, bzero, blank),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_fits_params(&self) -> &Option<FitsParams> {
|
||||
match self {
|
||||
HiPS::D2(hips) => &hips.fits_params,
|
||||
HiPS::D3(hips) => &hips.fits_params,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct FitsParams {
|
||||
pub bscale: f32,
|
||||
pub bzero: f32,
|
||||
pub blank: Option<f32>,
|
||||
}
|
||||
|
||||
use al_core::shader::{SendUniforms, ShaderBound};
|
||||
impl SendUniforms for FitsParams {
|
||||
// Send the FITS scaling parameters (bscale, bzero and the optional blank value)
|
||||
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
|
||||
shader
|
||||
.attach_uniform("scale", &self.bscale)
|
||||
.attach_uniform("offset", &self.bzero);
|
||||
|
||||
if let Some(blank) = &self.blank {
|
||||
shader.attach_uniform("blank", blank);
|
||||
}
|
||||
|
||||
shader
|
||||
}
|
||||
}
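As a usage sketch, FitsParams would typically be filled from the BSCALE/BZERO/BLANK header keywords; the keyword extraction itself is not shown in this hunk, so the variable names below are hypothetical:

// Hypothetical construction from FITS header keywords (not part of this diff):
// BSCALE defaults to 1.0 and BZERO to 0.0 when absent; BLANK only applies to
// integer BITPIX images.
let fits_params = FitsParams {
    bscale: header_bscale.unwrap_or(1.0),
    bzero: header_bzero.unwrap_or(0.0),
    blank: header_blank,
};
hips.set_fits_params(fits_params.bscale, fits_params.bzero, fits_params.blank);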
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
use cgmath::Vector3;
|
||||
use std::ops::RangeInclusive;
|
||||
use wcs::ImgXY;
|
||||
|
||||
use crate::camera::CameraViewPort;
|
||||
use crate::math::projection::ProjectionType;
|
||||
use crate::renderable::utils::index_patch::CCWCheckPatchIndexIter;
|
||||
use al_api::coo_system::CooSystem;
|
||||
use wcs::WCS;
|
||||
use fitsrs::wcs::{ImgXY, WCS};
|
||||
|
||||
pub fn get_grid_params(
|
||||
xy_min: &(f64, f64),
|
||||
@@ -240,6 +239,82 @@ pub fn vertices(
|
||||
(pos, uv, indices, num_indices)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn vertices2(
|
||||
xy_min: &(f64, f64),
|
||||
xy_max: &(f64, f64),
|
||||
max_tex_size_x: u64,
|
||||
max_tex_size_y: u64,
|
||||
num_tri_per_tex_patch: u64,
|
||||
camera: &CameraViewPort,
|
||||
wcs: &WCS,
|
||||
projection: &ProjectionType,
|
||||
) -> (Vec<f32>, Vec<f32>, Vec<u16>, Vec<u32>) {
|
||||
let (x_it, y_it) = get_grid_params(
|
||||
xy_min,
|
||||
xy_max,
|
||||
max_tex_size_x,
|
||||
max_tex_size_y,
|
||||
num_tri_per_tex_patch,
|
||||
);
|
||||
|
||||
let idx_x_ranges = build_range_indices(x_it.clone());
|
||||
let idx_y_ranges = build_range_indices(y_it.clone());
|
||||
|
||||
let num_x_vertices = idx_x_ranges.last().unwrap().end() + 1;
|
||||
|
||||
let mut uv = vec![];
|
||||
let pos = y_it
|
||||
.flat_map(|(y, uvy)| {
|
||||
x_it.clone().map(move |(x, uvx)| {
|
||||
let ndc = if let Some(xyz) = wcs.unproj_xyz(&ImgXY::new(x as f64, y as f64)) {
|
||||
let xyz = crate::coosys::apply_coo_system(
|
||||
CooSystem::ICRS,
|
||||
camera.get_coo_system(),
|
||||
&Vector3::new(xyz.y(), xyz.z(), xyz.x()),
|
||||
);
|
||||
|
||||
projection
|
||||
.model_to_normalized_device_space(&xyz, camera)
|
||||
.map(|v| [v.x as f32, v.y as f32])
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
(ndc, [uvx, uvy])
|
||||
})
|
||||
})
|
||||
.map(|(p, uu)| {
|
||||
uv.extend_from_slice(&uu);
|
||||
p
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut indices = vec![];
|
||||
let mut num_indices = vec![];
|
||||
for idx_x_range in &idx_x_ranges {
|
||||
for idx_y_range in &idx_y_ranges {
|
||||
let build_indices_iter =
|
||||
CCWCheckPatchIndexIter::new(idx_x_range, idx_y_range, num_x_vertices, &pos, camera);
|
||||
|
||||
let patch_indices = build_indices_iter
|
||||
.flatten()
|
||||
.flat_map(|indices| [indices.0, indices.1, indices.2])
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
num_indices.push(patch_indices.len() as u32);
|
||||
indices.extend(patch_indices);
|
||||
}
|
||||
}
|
||||
|
||||
let pos = pos
|
||||
.into_iter()
|
||||
.flat_map(|ndc| ndc.unwrap_or([0.0, 0.0]))
|
||||
.collect();
|
||||
|
||||
(pos, uv, indices, num_indices)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
|
||||
@@ -3,7 +3,12 @@ pub mod grid;
|
||||
pub mod subdivide_texture;
|
||||
|
||||
use al_core::convert::Cast;
|
||||
use al_core::texture::format::PixelType;
|
||||
use al_core::texture::format::TextureFormat;
|
||||
use al_core::texture::format::RGBA8U;
|
||||
use al_core::texture::format::{R16I, R32F, R32I, R8U};
|
||||
use al_core::webgl_ctx::WebGlRenderingCtx;
|
||||
use fitsrs::hdu::header::Bitpix;
|
||||
use std::fmt::Debug;
|
||||
use std::marker::Unpin;
|
||||
use std::vec;
|
||||
@@ -17,8 +22,7 @@ use wasm_bindgen::JsValue;
|
||||
|
||||
use web_sys::WebGl2RenderingContext;
|
||||
|
||||
use fitsrs::hdu::data::stream;
|
||||
use wcs::{ImgXY, WCS};
|
||||
use fitsrs::wcs::{ImgXY, WCS};
|
||||
|
||||
use al_api::fov::CenteredFoV;
|
||||
use al_api::hips::ImageMetadata;
|
||||
@@ -36,7 +40,10 @@ use crate::ProjectionType;
|
||||
use crate::ShaderManager;
|
||||
|
||||
use std::ops::Range;
|
||||
type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;
|
||||
type PixelItem<F> = <<F as TextureFormat>::P as Pixel>::Item;
|
||||
use al_core::pixel::Pixel;
|
||||
use futures::io::BufReader;
|
||||
use futures::AsyncReadExt;
|
||||
|
||||
pub struct Image {
|
||||
/// A reference to the GL context
|
||||
@@ -49,17 +56,20 @@ pub struct Image {
|
||||
pos: Vec<f32>,
|
||||
uv: Vec<f32>,
|
||||
|
||||
/// Parameters extracted from the fits
|
||||
/// WCS allowing to locate the image on the sky
|
||||
wcs: WCS,
|
||||
|
||||
/// Some parameters, only defined for image coming from FITS files
|
||||
blank: Option<f32>,
|
||||
scale: f32,
|
||||
offset: f32,
|
||||
bscale: f32,
|
||||
bzero: f32,
|
||||
|
||||
cuts: Range<f32>,
|
||||
/// The center of the fits
|
||||
centered_fov: CenteredFoV,
|
||||
|
||||
//+ Texture format
|
||||
channel: ChannelType,
|
||||
channel: PixelType,
|
||||
/// Texture chunks objects
|
||||
textures: Vec<Texture2D>,
|
||||
/// Texture indices that must be drawn
|
||||
@@ -72,28 +82,49 @@ pub struct Image {
|
||||
// The coo system in which the polygonal region has been defined
|
||||
coo_sys: CooSystem,
|
||||
}
|
||||
use al_core::pixel::Pixel;
|
||||
use fitsrs::hdu::header::extension;
|
||||
use fitsrs::hdu::AsyncHDU;
|
||||
use futures::io::BufReader;
|
||||
use futures::AsyncReadExt;
|
||||
|
||||
const TEX_PARAMS: &'static [(u32, u32)] = &[
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
|
||||
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
|
||||
),
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
|
||||
WebGlRenderingCtx::NEAREST,
|
||||
),
|
||||
// Prevents s-coordinate wrapping (repeating)
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_WRAP_S,
|
||||
WebGlRenderingCtx::CLAMP_TO_EDGE,
|
||||
),
|
||||
// Prevents t-coordinate wrapping (repeating)
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_WRAP_T,
|
||||
WebGlRenderingCtx::CLAMP_TO_EDGE,
|
||||
),
|
||||
];
|
||||
impl Image {
|
||||
pub async fn from_reader_and_wcs<R, F>(
|
||||
pub fn get_cuts(&self) -> &Range<f32> {
|
||||
&self.cuts
|
||||
}
|
||||
|
||||
pub fn from_fits_hdu(
|
||||
gl: &WebGlContext,
|
||||
mut reader: R,
|
||||
wcs: WCS,
|
||||
scale: Option<f32>,
|
||||
offset: Option<f32>,
|
||||
// wcs extracted from the image HDU
|
||||
wcs: fitsrs::WCS,
|
||||
// bitpix extracted from the image HDU
|
||||
bitpix: fitsrs::hdu::header::Bitpix,
|
||||
// bytes slice extracted from the HDU
|
||||
bytes: &[u8],
|
||||
// other keywords extracted from the header of the image HDU
|
||||
bscale: f32,
|
||||
bzero: f32,
|
||||
blank: Option<f32>,
|
||||
// Coo sys of the view
|
||||
coo_sys: CooSystem,
|
||||
) -> Result<Self, JsValue>
|
||||
where
|
||||
F: ImageFormat,
|
||||
R: AsyncReadExt + Unpin,
|
||||
{
|
||||
let (width, height) = wcs.img_dimensions();
|
||||
) -> Result<Self, JsValue> {
|
||||
let dim = wcs.img_dimensions();
|
||||
let (width, height) = (dim[0] as u64, dim[1] as u64);
|
||||
|
||||
let max_tex_size =
|
||||
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
|
||||
@@ -103,125 +134,234 @@ impl Image {
|
||||
let mut max_tex_size_x = max_tex_size;
|
||||
let mut max_tex_size_y = max_tex_size;
|
||||
|
||||
// apply bscale to the cuts
|
||||
let offset = offset.unwrap_or(0.0);
|
||||
let scale = scale.unwrap_or(1.0);
|
||||
let (channel, textures, cuts) =
|
||||
if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
|
||||
// small image case, can fit into a webgl texture
|
||||
|
||||
let (textures, cuts) = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
|
||||
max_tex_size_x = width as usize;
|
||||
max_tex_size_y = height as usize;
|
||||
// can fit in one texture
|
||||
max_tex_size_x = width as usize;
|
||||
max_tex_size_y = height as usize;
|
||||
// can fit in one texture
|
||||
|
||||
let num_pixels_to_read = (width as usize) * (height as usize);
|
||||
let num_bytes_to_read = num_pixels_to_read * std::mem::size_of::<F::P>();
|
||||
let mut buf = vec![0; num_bytes_to_read];
|
||||
// bytes aligned
|
||||
match bitpix {
|
||||
Bitpix::I64 => {
|
||||
// one must convert the data to i32
|
||||
let bytes_from_i32 = bytes
|
||||
.chunks(8)
|
||||
.flat_map(|bytes| {
|
||||
let l = i64::from_be_bytes([
|
||||
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
|
||||
bytes[6], bytes[7],
|
||||
]);
|
||||
let i = l as i32;
|
||||
|
||||
reader
|
||||
.read_exact(&mut buf[..num_bytes_to_read])
|
||||
.await
|
||||
.map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;
|
||||
|
||||
// bytes aligned
|
||||
unsafe {
|
||||
let data = std::slice::from_raw_parts_mut(
|
||||
buf[..].as_mut_ptr() as *mut PixelItem<F>,
|
||||
num_pixels_to_read * F::NUM_CHANNELS,
|
||||
);
|
||||
|
||||
let texture = Texture2D::create_from_raw_pixels::<F>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
&[
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
|
||||
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
|
||||
),
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
|
||||
WebGlRenderingCtx::NEAREST,
|
||||
),
|
||||
// Prevents s-coordinate wrapping (repeating)
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_WRAP_S,
|
||||
WebGlRenderingCtx::CLAMP_TO_EDGE,
|
||||
),
|
||||
// Prevents t-coordinate wrapping (repeating)
|
||||
(
|
||||
WebGlRenderingCtx::TEXTURE_WRAP_T,
|
||||
WebGlRenderingCtx::CLAMP_TO_EDGE,
|
||||
),
|
||||
],
|
||||
Some(data),
|
||||
)?;
|
||||
|
||||
let cuts = match F::CHANNEL_TYPE {
|
||||
ChannelType::R32F | ChannelType::R64F => {
|
||||
let pixels =
|
||||
std::slice::from_raw_parts(data.as_ptr() as *const f32, data.len() / 4);
|
||||
|
||||
let mut sub_pixels = pixels
|
||||
.iter()
|
||||
.step_by(100)
|
||||
.filter(|pixel| (*pixel).is_finite())
|
||||
.cloned()
|
||||
i32::to_be_bytes(i)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
|
||||
}
|
||||
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
|
||||
// BLANK is only valid for those channels/BITPIX (> 0)
|
||||
if let Some(blank) = blank {
|
||||
let mut sub_pixels = data
|
||||
.iter()
|
||||
.step_by(100)
|
||||
.filter_map(|pixel| {
|
||||
let pixel = <PixelItem<F> as Cast<f32>>::cast(*pixel);
|
||||
let texture = Texture2D::create_from_raw_bytes::<R32I>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes_from_i32.as_slice(),
|
||||
)?;
|
||||
|
||||
if pixel != blank {
|
||||
Some(pixel)
|
||||
let mut sub_pixels = bytes_from_i32
|
||||
.chunks(std::mem::size_of::<i32>())
|
||||
.step_by(100)
|
||||
.filter_map(|p| {
|
||||
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]);
|
||||
if let Some(blank) = blank {
|
||||
if p as f32 != blank {
|
||||
Some(p)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
} else {
|
||||
Some(p)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
|
||||
} else {
|
||||
// No blank value => we consider all the values
|
||||
let mut sub_pixels = data
|
||||
.iter()
|
||||
.step_by(100)
|
||||
.map(|pixel| <PixelItem<F> as Cast<f32>>::cast(*pixel))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
|
||||
}
|
||||
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
|
||||
(
|
||||
PixelType::R32I,
|
||||
vec![texture],
|
||||
(cuts.start as f32)..(cuts.end as f32),
|
||||
)
|
||||
}
|
||||
// RGB(A) images
|
||||
_ => 0.0..1.0,
|
||||
};
|
||||
Bitpix::F64 => {
|
||||
// one must convert the data to f32
|
||||
let bytes_from_f32 = bytes
|
||||
.chunks(8)
|
||||
.flat_map(|bytes| {
|
||||
let d = f64::from_be_bytes([
|
||||
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
|
||||
bytes[6], bytes[7],
|
||||
]);
|
||||
let f = d as f32;
|
||||
|
||||
(vec![texture], cuts)
|
||||
}
|
||||
} else {
|
||||
subdivide_texture::crop_image::<F, R>(
|
||||
gl,
|
||||
width,
|
||||
height,
|
||||
reader,
|
||||
max_tex_size as u64,
|
||||
blank,
|
||||
)
|
||||
.await?
|
||||
};
|
||||
f32::to_be_bytes(f)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for tex in &textures {
|
||||
tex.generate_mipmap();
|
||||
}
|
||||
let texture = Texture2D::create_from_raw_bytes::<R32F>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes_from_f32.as_slice(),
|
||||
)?;
|
||||
|
||||
let start = cuts.start * scale + offset;
|
||||
let end = cuts.end * scale + offset;
|
||||
let mut sub_pixels = bytes_from_f32
|
||||
.chunks(std::mem::size_of::<f32>())
|
||||
.step_by(100)
|
||||
.filter_map(|p| {
|
||||
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
|
||||
if p.is_finite() {
|
||||
Some(p)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
|
||||
(PixelType::R32F, vec![texture], cuts)
|
||||
}
|
||||
Bitpix::U8 => {
|
||||
let texture = Texture2D::create_from_raw_bytes::<R8U>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes,
|
||||
)?;
|
||||
|
||||
let mut sub_pixels = bytes
|
||||
.iter()
|
||||
.step_by(100)
|
||||
.filter_map(|p| {
|
||||
if let Some(blank) = blank {
|
||||
if *p as f32 != blank {
|
||||
Some(*p)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
Some(*p)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
|
||||
(
|
||||
PixelType::R8U,
|
||||
vec![texture],
|
||||
(cuts.start as f32)..(cuts.end as f32),
|
||||
)
|
||||
}
|
||||
Bitpix::I16 => {
|
||||
let texture = Texture2D::create_from_raw_bytes::<R16I>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes,
|
||||
)?;
|
||||
|
||||
let mut sub_pixels = bytes
|
||||
.chunks(2)
|
||||
.step_by(100)
|
||||
.filter_map(|p| {
|
||||
let p = i16::from_be_bytes([p[0], p[1]]);
|
||||
|
||||
if let Some(blank) = blank {
|
||||
if p as f32 != blank {
|
||||
Some(p)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
Some(p)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
|
||||
(
|
||||
PixelType::R16I,
|
||||
vec![texture],
|
||||
(cuts.start as f32)..(cuts.end as f32),
|
||||
)
|
||||
}
|
||||
Bitpix::I32 => {
|
||||
let texture = Texture2D::create_from_raw_bytes::<R32I>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes,
|
||||
)?;
|
||||
|
||||
let mut sub_pixels = bytes
|
||||
.chunks(4)
|
||||
.step_by(100)
|
||||
.filter_map(|p| {
|
||||
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]);
|
||||
|
||||
if let Some(blank) = blank {
|
||||
if p as f32 != blank {
|
||||
Some(p)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
Some(p)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
|
||||
(
|
||||
PixelType::R32I,
|
||||
vec![texture],
|
||||
(cuts.start as f32)..(cuts.end as f32),
|
||||
)
|
||||
}
|
||||
Bitpix::F32 => {
|
||||
let texture = Texture2D::create_from_raw_bytes::<R32F>(
|
||||
gl,
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes,
|
||||
)?;
|
||||
|
||||
let mut sub_pixels = bytes
|
||||
.chunks(std::mem::size_of::<f32>())
|
||||
.step_by(100)
|
||||
.filter_map(|p| {
|
||||
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
|
||||
if p.is_finite() {
|
||||
Some(p)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
|
||||
(PixelType::R32F, vec![texture], cuts)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return Err(JsValue::from_str("too big image"));
|
||||
};
|
||||
|
||||
let start = cuts.start * bscale + bzero;
|
||||
let end = cuts.end * bscale + bzero;
|
||||
|
||||
let cuts = start..end;
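The 1%-99% cut estimation above relies on cuts::first_and_last_percent, whose implementation is not part of this diff; a minimal sketch of the idea, on an already sub-sampled f32 slice:

// Sketch of a lo%-hi% percentile cut (illustrative only, the real
// cuts::first_and_last_percent is defined elsewhere in the crate).
fn percent_cut(values: &mut [f32], lo: usize, hi: usize) -> std::ops::Range<f32> {
    if values.is_empty() {
        return 0.0..1.0;
    }
    values.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    let last = values.len() - 1;
    (values[last * lo / 100])..(values[last * hi / 100])
}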
|
||||
|
||||
@@ -304,7 +444,7 @@ impl Image {
|
||||
|
||||
let idx_tex = (0..textures.len()).collect();
|
||||
|
||||
Ok(Image {
|
||||
Ok(Self {
|
||||
gl,
|
||||
|
||||
// The positions
|
||||
@@ -317,15 +457,15 @@ impl Image {
|
||||
// Metadata extracted from the fits
|
||||
wcs,
|
||||
// CooSystem of the wcs, this should belong to the WCS
|
||||
scale,
|
||||
offset,
|
||||
bscale,
|
||||
bzero,
|
||||
blank,
|
||||
|
||||
// Centered field of view allowing to locate the fits
|
||||
centered_fov,
|
||||
|
||||
// Texture parameters
|
||||
channel: F::CHANNEL_TYPE,
|
||||
channel,
|
||||
textures,
|
||||
cuts,
|
||||
max_tex_size_x,
|
||||
@@ -339,127 +479,173 @@ impl Image {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_cuts(&self) -> &Range<f32> {
|
||||
&self.cuts
|
||||
}
|
||||
|
||||
pub async fn from_fits_hdu_async<'a, R>(
|
||||
pub fn from_rgba_bytes(
|
||||
gl: &WebGlContext,
|
||||
hdu: &mut AsyncHDU<'a, BufReader<R>, extension::image::Image>,
|
||||
// bytes in TextureFormat
|
||||
bytes: &[u8],
|
||||
// wcs extracted from the image HDU
|
||||
wcs: fitsrs::WCS,
|
||||
// Coo sys of the view
|
||||
coo_sys: CooSystem,
|
||||
) -> Result<Self, JsValue>
|
||||
where
|
||||
R: AsyncRead + Unpin + Debug + 'a,
|
||||
{
|
||||
// Load the FITS file
|
||||
let header = hdu.get_header();
|
||||
) -> Result<Self, JsValue> {
|
||||
let dim = wcs.img_dimensions();
|
||||
let (width, height) = (dim[0] as u64, dim[1] as u64);
|
||||
|
||||
let scale = header.get_parsed::<f64>(b"BSCALE ").map(|v| v.unwrap());
|
||||
let offset = header.get_parsed::<f64>(b"BZERO ").map(|v| v.unwrap());
|
||||
let blank = header.get_parsed::<f64>(b"BLANK ").map(|v| v.unwrap());
|
||||
let max_tex_size =
|
||||
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
|
||||
.as_f64()
|
||||
.unwrap_or(4096.0) as usize;
|
||||
|
||||
// Create a WCS from a specific header unit
|
||||
let wcs = WCS::from_fits_header(header)
|
||||
.map_err(|e| JsValue::from_str(&format!("WCS parsing error: reason: {}", e)))?;
|
||||
let bscale = 1.0;
|
||||
let bzero = 0.0;
|
||||
let blank = None;
|
||||
|
||||
let data = hdu.get_data_mut();
|
||||
let mut max_tex_size_x = max_tex_size;
|
||||
let mut max_tex_size_y = max_tex_size;
|
||||
|
||||
match data {
|
||||
stream::Data::U8(data) => {
|
||||
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
|
||||
let (channel, textures, cuts) =
|
||||
if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
|
||||
// small image case, can fit into a webgl texture
|
||||
max_tex_size_x = width as usize;
|
||||
max_tex_size_y = height as usize;
|
||||
// can fit in one texture
|
||||
|
||||
Self::from_reader_and_wcs::<_, R8UI>(
|
||||
let textures = vec![Texture2D::create_from_raw_bytes::<RGBA8U>(
|
||||
gl,
|
||||
reader,
|
||||
wcs,
|
||||
scale.map(|v| v as f32),
|
||||
offset.map(|v| v as f32),
|
||||
blank.map(|v| v as f32),
|
||||
coo_sys,
|
||||
)
|
||||
.await
|
||||
}
|
||||
stream::Data::I16(data) => {
|
||||
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
|
||||
width as i32,
|
||||
height as i32,
|
||||
TEX_PARAMS,
|
||||
bytes,
|
||||
)?];
|
||||
let pixel_ty = PixelType::RGBA8U;
|
||||
let cuts = 0.0..1.0;
|
||||
|
||||
Self::from_reader_and_wcs::<_, R16I>(
|
||||
gl,
|
||||
reader,
|
||||
wcs,
|
||||
scale.map(|v| v as f32),
|
||||
offset.map(|v| v as f32),
|
||||
blank.map(|v| v as f32),
|
||||
coo_sys,
|
||||
)
|
||||
.await
|
||||
}
|
||||
stream::Data::I32(data) => {
|
||||
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
|
||||
(pixel_ty, textures, cuts)
|
||||
} else {
|
||||
return Err(JsValue::from_str("too big image"));
|
||||
};
|
||||
|
||||
Self::from_reader_and_wcs::<_, R32I>(
|
||||
gl,
|
||||
reader,
|
||||
wcs,
|
||||
scale.map(|v| v as f32),
|
||||
offset.map(|v| v as f32),
|
||||
blank.map(|v| v as f32),
|
||||
coo_sys,
|
||||
)
|
||||
.await
|
||||
}
|
||||
stream::Data::I64(data) => {
|
||||
let reader = data
|
||||
.map_ok(|v| {
|
||||
let v = v[0] as i32;
|
||||
v.to_le_bytes()
|
||||
})
|
||||
.into_async_read();
|
||||
|
||||
Self::from_reader_and_wcs::<_, R32I>(
|
||||
gl,
|
||||
reader,
|
||||
wcs,
|
||||
scale.map(|v| v as f32),
|
||||
offset.map(|v| v as f32),
|
||||
blank.map(|v| v as f32),
|
||||
coo_sys,
|
||||
)
|
||||
.await
|
||||
}
|
||||
stream::Data::F32(data) => {
|
||||
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
|
||||
|
||||
Self::from_reader_and_wcs::<_, R32F>(
|
||||
gl,
|
||||
reader,
|
||||
wcs,
|
||||
scale.map(|v| v as f32),
|
||||
offset.map(|v| v as f32),
|
||||
blank.map(|v| v as f32),
|
||||
coo_sys,
|
||||
)
|
||||
.await
|
||||
}
|
||||
stream::Data::F64(data) => {
|
||||
let reader = data
|
||||
.map_ok(|v| {
|
||||
let v = v[0] as f32;
|
||||
v.to_le_bytes()
|
||||
})
|
||||
.into_async_read();
|
||||
|
||||
Self::from_reader_and_wcs::<_, R32F>(
|
||||
gl,
|
||||
reader,
|
||||
wcs,
|
||||
scale.map(|v| v as f32),
|
||||
offset.map(|v| v as f32),
|
||||
blank.map(|v| v as f32),
|
||||
coo_sys,
|
||||
)
|
||||
.await
|
||||
}
|
||||
for tex in &textures {
|
||||
tex.generate_mipmap();
|
||||
}
|
||||
|
||||
let start = cuts.start * bscale + bzero;
|
||||
let end = cuts.end * bscale + bzero;
|
||||
|
||||
let cuts = start..end;
|
||||
|
||||
let num_indices = vec![];
|
||||
let indices = vec![];
|
||||
let pos = vec![];
|
||||
let uv = vec![];
|
||||
// Define the buffers
|
||||
let vao = {
|
||||
let mut vao = VertexArrayObject::new(gl);
|
||||
|
||||
#[cfg(feature = "webgl2")]
|
||||
vao.bind_for_update()
|
||||
// layout (location = 0) in vec2 ndc_pos;
|
||||
.add_array_buffer_single(
|
||||
2,
|
||||
"ndc_pos",
|
||||
WebGl2RenderingContext::DYNAMIC_DRAW,
|
||||
VecData::<f32>(&pos),
|
||||
)
|
||||
.add_array_buffer_single(
|
||||
2,
|
||||
"uv",
|
||||
WebGl2RenderingContext::DYNAMIC_DRAW,
|
||||
VecData::<f32>(&uv),
|
||||
)
|
||||
// Set the element buffer
|
||||
.add_element_buffer(
|
||||
WebGl2RenderingContext::DYNAMIC_DRAW,
|
||||
VecData::<u16>(&indices),
|
||||
)
|
||||
.unbind();
|
||||
|
||||
vao
|
||||
};
|
||||
let gl = gl.clone();
|
||||
|
||||
// Compute the fov
|
||||
let center = wcs
|
||||
.unproj_lonlat(&ImgXY::new(width as f64 / 2.0, height as f64 / 2.0))
|
||||
.ok_or(JsValue::from_str("(w / 2, h / 2) px cannot be unprojected"))?;
|
||||
let center_xyz = center.to_xyz();
|
||||
let inside = crate::coosys::apply_coo_system(
|
||||
CooSystem::ICRS,
|
||||
coo_sys,
|
||||
&Vector3::new(center_xyz.y(), center_xyz.z(), center_xyz.x()),
|
||||
);
|
||||
|
||||
let vertices = [
|
||||
wcs.unproj_lonlat(&ImgXY::new(0.0, 0.0))
|
||||
.ok_or(JsValue::from_str("(0, 0) does not lie in the sky"))?,
|
||||
wcs.unproj_lonlat(&ImgXY::new(width as f64 - 1.0, 0.0))
|
||||
.ok_or(JsValue::from_str("(w - 1, 0) does not lie in the sky"))?,
|
||||
wcs.unproj_lonlat(&ImgXY::new(width as f64 - 1.0, height as f64 - 1.0))
|
||||
.ok_or(JsValue::from_str("(w - 1, h - 1) does not lie in the sky"))?,
|
||||
wcs.unproj_lonlat(&ImgXY::new(0.0, height as f64 - 1.0))
|
||||
.ok_or(JsValue::from_str("(0, h - 1) does not lie in the sky"))?,
|
||||
]
|
||||
.iter()
|
||||
.map(|lonlat| {
|
||||
let xyz = lonlat.to_xyz();
|
||||
|
||||
crate::coosys::apply_coo_system(
|
||||
CooSystem::ICRS,
|
||||
coo_sys,
|
||||
&Vector3::new(xyz.y(), xyz.z(), xyz.x()),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let reg = Region::from_vertices(&vertices, &inside);
|
||||
|
||||
// ra and dec must be given in ICRS coo system, which is the case because wcs returns
|
||||
// only ICRS coo
|
||||
let centered_fov = CenteredFoV {
|
||||
ra: center.lon().to_degrees(),
|
||||
dec: center.lat().to_degrees(),
|
||||
fov: wcs.field_of_view().0,
|
||||
};
|
||||
|
||||
let idx_tex = (0..textures.len()).collect();
|
||||
|
||||
Ok(Self {
|
||||
gl,
|
||||
|
||||
// The positions
|
||||
vao,
|
||||
num_indices,
|
||||
pos,
|
||||
uv,
|
||||
indices,
|
||||
|
||||
// Metadata extracted from the fits
|
||||
wcs,
|
||||
// CooSystem of the wcs, this should belong to the WCS
|
||||
bscale,
|
||||
bzero,
|
||||
blank,
|
||||
|
||||
// Centered field of view allowing to locate the fits
|
||||
centered_fov,
|
||||
|
||||
// Texture parameters
|
||||
channel,
|
||||
textures,
|
||||
cuts,
|
||||
max_tex_size_x,
|
||||
max_tex_size_y,
|
||||
// Indices of textures that must be drawn
|
||||
idx_tex,
|
||||
// The polygonal region in the sky
|
||||
reg,
|
||||
// The coo system in which the polygonal region has been defined
|
||||
coo_sys,
|
||||
})
|
||||
}
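A hedged usage sketch of the new constructor; the caller that decodes a JPEG/PNG tile into RGBA bytes is not shown in this hunk, so rgba_bytes and coo_sys are assumed to exist:

// Hypothetical call site (names are placeholders, not part of this diff)
let image = Image::from_rgba_bytes(&gl, &rgba_bytes, wcs, coo_sys)?;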
|
||||
|
||||
pub fn recompute_vertices(
|
||||
@@ -467,9 +653,8 @@ impl Image {
|
||||
camera: &CameraViewPort,
|
||||
projection: &ProjectionType,
|
||||
) -> Result<(), JsValue> {
|
||||
let (width, height) = self.wcs.img_dimensions();
|
||||
let width = width as f64;
|
||||
let height = height as f64;
|
||||
let dim = self.wcs.img_dimensions();
|
||||
let (width, height) = (dim[0] as f64, dim[1] as f64);
|
||||
|
||||
let (x_mesh_range, y_mesh_range) =
|
||||
if camera.get_field_of_view().intersects_region(&self.reg) {
|
||||
@@ -488,7 +673,7 @@ impl Image {
|
||||
let num_vertices =
|
||||
((self.centered_fov.fov / 180.0) * (MAX_NUM_TRI_PER_SIDE_IMAGE as f64)).ceil() as u64;
|
||||
|
||||
let (pos, uv, indices, num_indices) = grid::vertices(
|
||||
let (pos, uv, indices, num_indices) = grid::vertices2(
|
||||
&(x_mesh_range.start, y_mesh_range.start),
|
||||
&(x_mesh_range.end.ceil(), y_mesh_range.end.ceil()),
|
||||
self.max_tex_size_x as u64,
|
||||
@@ -539,7 +724,8 @@ impl Image {
|
||||
if self.coo_sys != camera.get_coo_system() {
|
||||
self.coo_sys = camera.get_coo_system();
|
||||
|
||||
let (width, height) = self.wcs.img_dimensions();
|
||||
let dim = self.wcs.img_dimensions();
|
||||
let (width, height) = (dim[0] as usize, dim[1] as usize);
|
||||
|
||||
// the camera coo system is not sync with the one in which the region
|
||||
// has been defined
|
||||
@@ -600,43 +786,35 @@ impl Image {
|
||||
} = cfg;
|
||||
|
||||
let shader = match self.channel {
|
||||
ChannelType::RGBA8U => crate::shader::get_shader(
|
||||
PixelType::RGBA8U => crate::shader::get_shader(
|
||||
&self.gl,
|
||||
shaders,
|
||||
"image_base.vert",
|
||||
"image_sampler.frag",
|
||||
)?,
|
||||
ChannelType::R32F => {
|
||||
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_sampler.frag")?
|
||||
PixelType::RGB8U => crate::shader::get_shader(
|
||||
&self.gl,
|
||||
shaders,
|
||||
"image_base.vert",
|
||||
"image_sampler.frag",
|
||||
)?,
|
||||
PixelType::R32F => {
|
||||
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_f32.frag")?
|
||||
}
|
||||
PixelType::R32I => {
|
||||
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_i32.frag")?
|
||||
}
|
||||
PixelType::R16I => {
|
||||
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_i16.frag")?
|
||||
}
|
||||
PixelType::R8U => {
|
||||
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_u8.frag")?
|
||||
}
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R32I => crate::shader::get_shader(
|
||||
&self.gl,
|
||||
shaders,
|
||||
"fits_base.vert",
|
||||
"fits_isampler.frag",
|
||||
)?,
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R16I => crate::shader::get_shader(
|
||||
&self.gl,
|
||||
shaders,
|
||||
"fits_base.vert",
|
||||
"fits_isampler.frag",
|
||||
)?,
|
||||
#[cfg(feature = "webgl2")]
|
||||
ChannelType::R8UI => crate::shader::get_shader(
|
||||
&self.gl,
|
||||
shaders,
|
||||
"fits_base.vert",
|
||||
"fits_usampler.frag",
|
||||
)?,
|
||||
_ => return Err(JsValue::from_str("Image format type not supported")),
|
||||
};
|
||||
|
||||
//self.gl.disable(WebGl2RenderingContext::CULL_FACE);
|
||||
|
||||
// 2. Draw it if its opacity is not null
|
||||
|
||||
blend_cfg.enable(&self.gl, || {
|
||||
let mut off_indices = 0;
|
||||
for (idx, &idx_tex) in self.idx_tex.iter().enumerate() {
|
||||
@@ -646,12 +824,11 @@ impl Image {
|
||||
let shader_bound = shader.bind(&self.gl);
|
||||
|
||||
shader_bound
|
||||
.attach_uniforms_from(colormaps)
|
||||
.attach_uniforms_with_params_from(color, colormaps)
|
||||
.attach_uniform("opacity", opacity)
|
||||
.attach_uniform("tex", texture)
|
||||
.attach_uniform("scale", &self.scale)
|
||||
.attach_uniform("offset", &self.offset);
|
||||
.attach_uniform("scale", &self.bscale)
|
||||
.attach_uniform("offset", &self.bzero);
|
||||
|
||||
if let Some(blank) = self.blank {
|
||||
shader_bound.attach_uniform("blank", &blank);
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use al_core::image::format::ChannelType;
|
||||
use al_core::texture::format::PixelType;
|
||||
use wasm_bindgen::JsValue;
|
||||
|
||||
use futures::AsyncReadExt;
|
||||
|
||||
use super::cuts;
|
||||
use al_core::image::format::ImageFormat;
|
||||
use al_core::texture::format::TextureFormat;
|
||||
use al_core::texture::pixel::Pixel;
|
||||
use al_core::webgl_ctx::WebGlRenderingCtx;
|
||||
use al_core::Texture2D;
|
||||
@@ -12,7 +12,7 @@ use al_core::WebGlContext;
|
||||
use std::ops::Range;
|
||||
|
||||
use al_core::convert::Cast;
|
||||
type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;
|
||||
type PixelItem<F> = <<F as TextureFormat>::P as Pixel>::Item;
|
||||
|
||||
pub async fn crop_image<F, R>(
|
||||
gl: &WebGlContext,
|
||||
@@ -23,7 +23,7 @@ pub async fn crop_image<F, R>(
|
||||
blank: Option<f32>,
|
||||
) -> Result<(Vec<Texture2D>, Range<f32>), JsValue>
|
||||
where
|
||||
F: ImageFormat,
|
||||
F: TextureFormat,
|
||||
R: AsyncReadExt + Unpin,
|
||||
{
|
||||
let mut tex_chunks = vec![];
|
||||
@@ -117,8 +117,8 @@ where
|
||||
// We are in a good line
|
||||
let xmin = pixels_written % width;
|
||||
|
||||
match F::CHANNEL_TYPE {
|
||||
ChannelType::R32F | ChannelType::R64F => {
|
||||
match F::PIXEL_TYPE {
|
||||
PixelType::R32F => {
|
||||
let pixels = std::slice::from_raw_parts(
|
||||
data.as_ptr() as *const f32,
|
||||
data.len() / 4,
|
||||
@@ -134,7 +134,7 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
|
||||
PixelType::R8U | PixelType::R16I | PixelType::R32I => {
|
||||
if let Some(blank) = blank {
|
||||
for i in (0..width).step_by(step_cut) {
|
||||
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
|
||||
@@ -184,7 +184,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
let cuts = if F::CHANNEL_TYPE.is_colored() {
|
||||
let cuts = if F::PIXEL_TYPE.num_channels() == 1 {
|
||||
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
|
||||
} else {
|
||||
0.0..1.0
|
||||
|
||||
@@ -12,8 +12,6 @@ pub mod utils;
|
||||
use crate::renderable::image::Image;
|
||||
use crate::tile_fetcher::TileFetcherQueue;
|
||||
|
||||
use al_core::image::format::ChannelType;
|
||||
|
||||
use al_api::color::ColorRGB;
|
||||
use al_api::hips::HiPSCfg;
|
||||
use al_api::hips::ImageMetadata;
|
||||
@@ -22,6 +20,8 @@ use al_api::image::ImageParams;
|
||||
use al_core::colormap::Colormaps;
|
||||
|
||||
use al_core::shader::Shader;
|
||||
use al_core::texture::format::PixelType;
|
||||
use al_core::texture::format::TextureFormat;
|
||||
use al_core::VertexArrayObject;
|
||||
use al_core::WebGlContext;
|
||||
|
||||
@@ -226,8 +226,8 @@ impl Layers {
|
||||
if let Some(hips) = self.hipses.get(cdid) {
|
||||
// Check if a HiPS is fully opaque so that we cannot see the background
|
||||
// In that case, no need to draw a background because a HiPS will fully cover it
|
||||
let full_covering_hips = (hips.get_config().get_format().get_channel()
|
||||
== ChannelType::RGB8U
|
||||
let full_covering_hips = (hips.get_config().get_format().get_pixel_format()
|
||||
== PixelType::RGB8U
|
||||
|| hips.is_allsky())
|
||||
&& meta.opacity == 1.0;
|
||||
if full_covering_hips {
|
||||
@@ -498,17 +498,6 @@ impl Layers {
|
||||
let fits_already_found = self.images.keys().any(|image_id| image_id == &id);
|
||||
|
||||
if !fits_already_found {
|
||||
// The fits has not been loaded yet
|
||||
/*if let Some(initial_ra) = properties.get_initial_ra() {
|
||||
if let Some(initial_dec) = properties.get_initial_dec() {
|
||||
camera.set_center::<P>(&LonLatT::new(Angle((initial_ra).to_radians()), Angle((initial_dec).to_radians())), &properties.get_frame());
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(initial_fov) = properties.get_initial_fov() {
|
||||
camera.set_aperture::<P>(Angle((initial_fov).to_radians()));
|
||||
}*/
|
||||
|
||||
self.images.insert(id.clone(), images);
|
||||
}
|
||||
|
||||
|
||||
45
src/glsl/webgl2/decode.glsl
Normal file
@@ -0,0 +1,45 @@
|
||||
// Utility methods for decoding texture bytes to f32, i32, i16, u8
|
||||
highp float decode_f32(highp vec4 rgba) {
|
||||
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
|
||||
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
|
||||
if (abs(Exponent + 127.0) < 1e-3) {
|
||||
return 0.0;
|
||||
}
|
||||
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
|
||||
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
|
||||
return Result;
|
||||
}
|
||||
|
||||
int decode_i32(vec4 rgba) {
|
||||
int r = int(rgba.r * 255.0 + 0.5);
|
||||
int g = int(rgba.g * 255.0 + 0.5);
|
||||
int b = int(rgba.b * 255.0 + 0.5);
|
||||
int a = int(rgba.a * 255.0 + 0.5);
|
||||
|
||||
// GLSL int automatically handles the top-most sign bit (two's complement behaviour)
int value = (r << 24) | (g << 16) | (b << 8) | a; // Combine into a 32-bit integer
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
int decode_i16(vec2 rg) {
|
||||
int r = int(rg.r * 255.0 + 0.5);
|
||||
int g = int(rg.g * 255.0 + 0.5);
|
||||
|
||||
int value = (r << 8) | g; // Combine into a 16-bit integer
|
||||
|
||||
// Convert from unsigned to signed 16-bit
|
||||
if (value >= 32768) {
|
||||
value -= 65536;
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
uint decode_u8(float r) {
|
||||
uint value = uint(r * 255.0 + 0.5);
|
||||
return value;
|
||||
}
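For reference, the GLSL decoders above reproduce a big-endian read on the GPU, matching the raw FITS byte order uploaded to the texture. A CPU-side Rust equivalent, useful only as a sanity check for the shader logic:

// Big-endian reference decoders matching decode_i16 / decode_i32 / decode_f32
fn decode_i16(b: [u8; 2]) -> i16 { i16::from_be_bytes(b) }
fn decode_i32(b: [u8; 4]) -> i32 { i32::from_be_bytes(b) }
fn decode_f32(b: [u8; 4]) -> f32 { f32::from_be_bytes(b) }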
|
||||
|
||||
|
||||
|
||||
57
src/glsl/webgl2/fits/color.glsl
Normal file
@@ -0,0 +1,57 @@
|
||||
uniform float scale;
|
||||
uniform float offset;
|
||||
uniform float blank;
|
||||
uniform float min_value;
|
||||
uniform float max_value;
|
||||
uniform int H;
|
||||
uniform float reversed;
|
||||
|
||||
#include ../colormaps/colormap.glsl;
|
||||
#include ../transfer_funcs.glsl;
|
||||
#include ../tonal_corrections.glsl;
|
||||
#include ../decode.glsl;
|
||||
|
||||
/////////////////////////////////////////////
|
||||
/// FITS sampler
|
||||
|
||||
vec4 val2c_f32(float x) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
vec4 val2c(float x) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank || isnan(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
vec4 uv2c_f32(vec2 uv) {
|
||||
float val = decode_f32(texture(tex, uv).rgba*255.0);
|
||||
return val2c_f32(val);
|
||||
}
|
||||
|
||||
vec4 uv2c_i32(vec2 uv) {
|
||||
float val = float(decode_i32(texture(tex, uv).rgba));
|
||||
return val2c(val);
|
||||
}
|
||||
|
||||
vec4 uv2c_i16(vec2 uv) {
|
||||
float val = float(decode_i16(texture(tex, uv).rg));
|
||||
return val2c(val);
|
||||
}
|
||||
|
||||
vec4 uv2c_u8(vec2 uv) {
|
||||
float val = float(decode_u8(texture(tex, uv).r));
|
||||
return val2c(val);
|
||||
}
|
||||
21
src/glsl/webgl2/fits/f32.frag
Normal file
@@ -0,0 +1,21 @@
|
||||
#version 300 es
|
||||
precision highp float;
|
||||
precision highp sampler2D;
|
||||
precision highp int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform sampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
#include ./color.glsl;
|
||||
|
||||
void main() {
|
||||
// FITS y axis looks down
|
||||
vec2 uv = frag_uv;
|
||||
uv.y = 1.0 - uv.y;
|
||||
|
||||
out_frag_color = uv2c_f32(uv);
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
21
src/glsl/webgl2/fits/i16.frag
Normal file
@@ -0,0 +1,21 @@
|
||||
#version 300 es
|
||||
precision lowp float;
|
||||
precision lowp sampler2D;
|
||||
precision mediump int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform sampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
#include ./color.glsl;
|
||||
|
||||
void main() {
|
||||
// FITS y axis looks down
|
||||
vec2 uv = frag_uv;
|
||||
uv.y = 1.0 - uv.y;
|
||||
|
||||
out_frag_color = uv2c_i16(uv);
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
21
src/glsl/webgl2/fits/i32.frag
Normal file
@@ -0,0 +1,21 @@
|
||||
#version 300 es
|
||||
precision lowp float;
|
||||
precision lowp sampler2D;
|
||||
precision mediump int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform sampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
#include ./color.glsl;
|
||||
|
||||
void main() {
|
||||
// FITS y axis looks down
|
||||
vec2 uv = frag_uv;
|
||||
uv.y = 1.0 - uv.y;
|
||||
|
||||
out_frag_color = uv2c_i32(uv);
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
#version 300 es
|
||||
precision lowp float;
|
||||
precision lowp sampler2D;
|
||||
precision lowp isampler2D;
|
||||
precision lowp usampler2D;
|
||||
precision mediump int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform isampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
uniform float scale;
|
||||
uniform float offset;
|
||||
uniform float blank;
|
||||
|
||||
uniform float min_value;
|
||||
uniform float max_value;
|
||||
uniform int H;
|
||||
|
||||
uniform float reversed;
|
||||
|
||||
#include ./../colormaps/colormap.glsl;
|
||||
#include ./../hips/transfer_funcs.glsl;
|
||||
#include ./../hips/tonal_corrections.glsl;
|
||||
|
||||
vec4 apply_colormap_to_grayscale(float x, float a) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
void main() {
|
||||
ivec4 color = texture(tex, frag_uv);
|
||||
out_frag_color = apply_colormap_to_grayscale(float(color.r), float(color.a));
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
#version 300 es
|
||||
precision highp float;
|
||||
precision highp sampler2D;
|
||||
precision lowp isampler2D;
|
||||
precision lowp usampler2D;
|
||||
precision highp int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform sampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
uniform float scale;
|
||||
uniform float offset;
|
||||
uniform float blank;
|
||||
|
||||
uniform float min_value;
|
||||
uniform float max_value;
|
||||
uniform int H;
|
||||
|
||||
uniform float reversed;
|
||||
|
||||
#include ./../colormaps/colormap.glsl;
|
||||
#include ./../hips/transfer_funcs.glsl;
|
||||
#include ./../hips/tonal_corrections.glsl;
|
||||
|
||||
vec4 apply_colormap_to_grayscale(float x) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
highp float decode32(highp vec4 rgba) {
|
||||
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
|
||||
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
|
||||
if (abs(Exponent + 127.0) < 1e-3) {
|
||||
return 0.0;
|
||||
}
|
||||
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
|
||||
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
|
||||
return Result;
|
||||
}
|
||||
|
||||
void main() {
|
||||
highp float value = decode32(texture(tex, frag_uv).abgr*255.0);
|
||||
// reconstruct the float value
|
||||
out_frag_color = apply_colormap_to_grayscale(value);
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
21
src/glsl/webgl2/fits/u8.frag
Normal file
@@ -0,0 +1,21 @@
|
||||
#version 300 es
|
||||
precision lowp float;
|
||||
precision lowp sampler2D;
|
||||
precision mediump int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform sampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
#include ./color.glsl;
|
||||
|
||||
void main() {
|
||||
// FITS y axis looks down
|
||||
vec2 uv = frag_uv;
|
||||
uv.y = 1.0 - uv.y;
|
||||
|
||||
out_frag_color = uv2c_u8(uv);
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
#version 300 es
|
||||
precision lowp float;
|
||||
precision lowp sampler2D;
|
||||
precision lowp isampler2D;
|
||||
precision lowp usampler2D;
|
||||
precision mediump int;
|
||||
|
||||
out vec4 out_frag_color;
|
||||
in vec2 frag_uv;
|
||||
|
||||
uniform usampler2D tex;
|
||||
uniform float opacity;
|
||||
|
||||
uniform float scale;
|
||||
uniform float offset;
|
||||
uniform float blank;
|
||||
|
||||
uniform float min_value;
|
||||
uniform float max_value;
|
||||
uniform int H;
|
||||
|
||||
uniform float reversed;
|
||||
|
||||
#include ./../colormaps/colormap.glsl;
|
||||
#include ./../hips/transfer_funcs.glsl;
|
||||
#include ./../hips/tonal_corrections.glsl;
|
||||
|
||||
vec4 apply_colormap_to_grayscale(float x, float a) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
void main() {
|
||||
uvec4 color = texture(tex, frag_uv);
|
||||
out_frag_color = apply_colormap_to_grayscale(float(color.r), float(color.a));
|
||||
out_frag_color.a = out_frag_color.a * opacity;
|
||||
}
|
||||
@@ -5,67 +5,82 @@ uniform float min_value;
|
||||
uniform float max_value;
|
||||
uniform int H;
|
||||
uniform float reversed;
|
||||
uniform int tex_storing_fits;
|
||||
|
||||
#include ../colormaps/colormap.glsl;
|
||||
#include ./transfer_funcs.glsl;
|
||||
#include ./tonal_corrections.glsl;
|
||||
#include ./hsv.glsl;
|
||||
#include ../transfer_funcs.glsl;
|
||||
#include ../tonal_corrections.glsl;
|
||||
#include ../hsv.glsl;
|
||||
#include ../decode.glsl;
|
||||
|
||||
vec4 get_pixels(vec3 uv) {
|
||||
return texture(tex, uv);
|
||||
}
|
||||
/////////////////////////////////////////////
|
||||
/// RGBA sampler
|
||||
|
||||
vec3 reverse_uv(vec3 uv) {
|
||||
uv.y = 1.0 - uv.y;
|
||||
return uv;
|
||||
}
|
||||
vec4 uvw2c_rgba(vec3 uv) {
|
||||
vec4 c = texture(tex, uv).rgba;
|
||||
|
||||
vec4 apply_color_settings(vec4 color) {
|
||||
color.r = transfer_func(H, color.r, min_value, max_value);
|
||||
color.g = transfer_func(H, color.g, min_value, max_value);
|
||||
color.b = transfer_func(H, color.b, min_value, max_value);
|
||||
c.r = transfer_func(H, c.r, min_value, max_value);
|
||||
c.g = transfer_func(H, c.g, min_value, max_value);
|
||||
c.b = transfer_func(H, c.b, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
color.rgb = mix(color.rgb, 1.0 - color.rgb, reversed);
|
||||
c.rgb = mix(c.rgb, 1.0 - c.rgb, reversed);
|
||||
|
||||
return apply_tonal(color);
|
||||
return apply_tonal(c);
|
||||
}
|
||||
|
||||
vec4 get_color_from_texture(vec3 UV) {
|
||||
vec4 color = get_pixels(UV);
|
||||
|
||||
return apply_color_settings(color);
|
||||
vec4 uvw2cmap_rgba(vec3 uv) {
|
||||
float v = texture(tex, uv).r;
|
||||
// apply the transfer function
|
||||
v = transfer_func(H, v, min_value, max_value);
|
||||
// apply cmap
|
||||
vec4 c = colormap_f(v);
|
||||
// apply reversed
|
||||
c.rgb = mix(c.rgb, 1.0 - c.rgb, reversed);
|
||||
|
||||
return apply_tonal(c);
|
||||
}
|
||||
|
||||
vec4 apply_colormap_to_grayscale(float x) {
|
||||
/////////////////////////////////////////////
|
||||
/// FITS sampler
|
||||
|
||||
vec4 val2c_f32(float x) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x) || isnan(x)));
|
||||
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
highp float decode32(highp vec4 rgba) {
|
||||
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
|
||||
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
|
||||
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
|
||||
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
|
||||
return Result;
|
||||
vec4 val2c(float x) {
|
||||
float alpha = x * scale + offset;
|
||||
alpha = transfer_func(H, alpha, min_value, max_value);
|
||||
|
||||
// apply reversed
|
||||
alpha = mix(alpha, 1.0 - alpha, reversed);
|
||||
|
||||
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank || isnan(x)));
|
||||
return apply_tonal(new_color);
|
||||
}
|
||||
|
||||
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
|
||||
// FITS data pixels are reversed along the y axis
|
||||
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
|
||||
|
||||
float value = decode32(get_pixels(uv).abgr*255.0);
|
||||
return apply_colormap_to_grayscale(value);
|
||||
vec4 uvw2c_f32(vec3 uv) {
|
||||
float val = decode_f32(texture(tex, uv).rgba*255.0);
|
||||
return val2c_f32(val);
|
||||
}
|
||||
|
||||
vec4 get_colormap_from_color_texture(vec3 uv) {
|
||||
float value = get_pixels(uv).r;
|
||||
return apply_colormap_to_grayscale(value);
|
||||
}
|
||||
vec4 uvw2c_i32(vec3 uv) {
|
||||
float val = float(decode_i32(texture(tex, uv).rgba));
|
||||
return val2c(val);
|
||||
}
|
||||
|
||||
vec4 uvw2c_i16(vec3 uv) {
|
||||
float val = float(decode_i16(texture(tex, uv).rg));
|
||||
return val2c(val);
|
||||
}
|
||||
|
||||
vec4 uvw2c_u8(vec3 uv) {
|
||||
float val = float(decode_u8(texture(tex, uv).r));
|
||||
return val2c(val);
|
||||
}
|
||||
|
||||
@@ -1,39 +0,0 @@

uniform float scale;
uniform float offset;
uniform float blank;

uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;

uniform int tex_storing_fits;

#include ../colormaps/colormap.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;

ivec4 get_pixels(vec3 uv) {
    return ivec4(texture(tex, uv));
}

vec3 reverse_uv(vec3 uv) {
    uv.y = 1.0 - uv.y;
    return uv;
}

vec4 get_colormap_from_grayscale_texture(vec3 UV) {
    // FITS data pixels are reversed along the y axis
    vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));

    float x = float(get_pixels(uv).r);
    float alpha = x * scale + offset;
    alpha = transfer_func(H, alpha, min_value, max_value);

    // apply reversed
    alpha = mix(alpha, 1.0 - alpha, reversed);

    vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank));
    return apply_tonal(new_color);
}
@@ -1,38 +0,0 @@
uniform float scale;
uniform float offset;
uniform float blank;

uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;

uniform int tex_storing_fits;

#include ../colormaps/colormap.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;

uvec4 get_pixels(vec3 uv) {
    return uvec4(texture(tex, uv));
}

vec3 reverse_uv(vec3 uv) {
    uv.y = 1.0 - uv.y;
    return uv;
}

vec4 get_colormap_from_grayscale_texture(vec3 UV) {
    // FITS data pixels are reversed along the y axis
    vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));

    float x = float(get_pixels(uv).r);
    float alpha = x * scale + offset;
    alpha = transfer_func(H, alpha, min_value, max_value);

    // apply reversed
    alpha = mix(alpha, 1.0 - alpha, reversed);

    vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank));
    return apply_tonal(new_color);
}
@@ -1,10 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;

uniform isampler2DArray tex;
uniform sampler2DArray tex;

in vec3 frag_uv_start;
in vec3 frag_uv_end;
@@ -12,13 +10,19 @@ in float frag_blending_factor;

out vec4 out_frag_color;

#include ../color_i.glsl;
#include ../color.glsl;

uniform float opacity;

void main() {
    vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
    vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
    // FITS data pixels are reversed along the y axis
    vec3 uv0 = frag_uv_start;
    vec3 uv1 = frag_uv_end;
    uv0.y = 1.0 - uv0.y;
    uv1.y = 1.0 - uv1.y;

    vec4 color_start = uvw2c_f32(uv0);
    vec4 color_end = uvw2c_f32(uv1);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = out_frag_color.a * opacity;
@@ -1,25 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;

uniform usampler2DArray tex;

in vec3 frag_uv_start;
in vec3 frag_uv_end;
in float frag_blending_factor;

out vec4 out_frag_color;

#include ../color_u.glsl;

uniform float opacity;

void main() {
    vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
    vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = out_frag_color.a * opacity;
}
29  src/glsl/webgl2/hips/rasterizer/i16.frag  Normal file
@@ -0,0 +1,29 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;

uniform sampler2DArray tex;

in vec3 frag_uv_start;
in vec3 frag_uv_end;
in float frag_blending_factor;

out vec4 out_frag_color;

#include ../color.glsl;

uniform float opacity;

void main() {
    // FITS data pixels are reversed along the y axis
    vec3 uv0 = frag_uv_start;
    vec3 uv1 = frag_uv_end;
    uv0.y = 1.0 - uv0.y;
    uv1.y = 1.0 - uv1.y;

    vec4 color_start = uvw2c_i16(uv0);
    vec4 color_end = uvw2c_i16(uv1);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = out_frag_color.a * opacity;
}
29  src/glsl/webgl2/hips/rasterizer/i32.frag  Normal file
@@ -0,0 +1,29 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;

uniform sampler2DArray tex;

in vec3 frag_uv_start;
in vec3 frag_uv_end;
in float frag_blending_factor;

out vec4 out_frag_color;

#include ../color.glsl;

uniform float opacity;

void main() {
    // FITS data pixels are reversed along the y axis
    vec3 uv0 = frag_uv_start;
    vec3 uv1 = frag_uv_end;
    uv0.y = 1.0 - uv0.y;
    uv1.y = 1.0 - uv1.y;

    vec4 color_start = uvw2c_i32(uv0);
    vec4 color_end = uvw2c_i32(uv1);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = out_frag_color.a * opacity;
}
@@ -2,7 +2,6 @@
precision highp float;

layout (location = 0) in vec3 xyz;
//layout (location = 0) in vec2 lonlat;
layout (location = 1) in vec3 uv_start;
layout (location = 2) in vec3 uv_end;
layout (location = 3) in float time_tile_received;

@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;

uniform sampler2DArray tex;

@@ -16,8 +14,8 @@ uniform float opacity;
#include ../color.glsl;

void main() {
    vec4 color_start = get_color_from_texture(frag_uv_start);
    vec4 color_end = get_color_from_texture(frag_uv_end);
    vec4 color_start = uvw2c_rgba(frag_uv_start);
    vec4 color_end = uvw2c_rgba(frag_uv_end);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = opacity * out_frag_color.a;
@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;

uniform sampler2DArray tex;

@@ -16,8 +14,8 @@ uniform float opacity;
#include ../color.glsl;

void main() {
    vec4 color_start = get_colormap_from_color_texture(frag_uv_start);
    vec4 color_end = get_colormap_from_color_texture(frag_uv_end);
    vec4 color_start = uvw2cmap_rgba(frag_uv_start);
    vec4 color_end = uvw2cmap_rgba(frag_uv_end);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = opacity * out_frag_color.a;
@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;

uniform sampler2DArray tex;

@@ -17,8 +15,14 @@ out vec4 out_frag_color;
uniform float opacity;

void main() {
    vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
    vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
    // FITS data pixels are reversed along the y axis
    vec3 uv0 = frag_uv_start;
    vec3 uv1 = frag_uv_end;
    uv0.y = 1.0 - uv0.y;
    uv1.y = 1.0 - uv1.y;

    vec4 color_start = uvw2c_u8(uv0);
    vec4 color_end = uvw2c_u8(uv1);

    out_frag_color = mix(color_start, color_end, frag_blending_factor);
    out_frag_color.a = out_frag_color.a * opacity;
@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision mediump int;

in vec3 frag_pos;
@@ -29,24 +27,16 @@ struct TileColor {

#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;

vec4 get_tile_color(vec3 pos) {
    HashDxDy result = hash_with_dxdy(0, pos.zxy);

    int idx = result.idx;
    vec2 uv = vec2(result.dy, result.dx);
    Tile tile = textures_tiles[idx];

    vec2 offset = uv;
    vec3 UV = vec3(offset, float(tile.texture_idx));

    vec4 color = get_colormap_from_grayscale_texture(UV);
    color.a *= (1.0 - tile.empty);
    return color;
}
#include ./utils.glsl;

void main() {
    vec4 c = get_tile_color(normalize(frag_pos));
    vec3 uv = xyz2uv(normalize(frag_pos));

    uv.y = 1.0 - uv.y;
    vec4 c = uvw2c_f32(uv);

    //c.a *= (1.0 - tile.empty);

    out_frag_color = c;
    out_frag_color.a = out_frag_color.a * opacity;
}
37  src/glsl/webgl2/hips/raytracer/i16.frag  Normal file
@@ -0,0 +1,37 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision mediump int;

uniform sampler2DArray tex;

in vec3 frag_pos;
in vec2 out_clip_pos;
out vec4 out_frag_color;

struct Tile {
    int uniq; // Healpix cell
    int texture_idx; // Index in the texture buffer
    float start_time; // Absolute time that the load has been done in ms
    float empty;
};

uniform Tile textures_tiles[12];

uniform float opacity;

#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;

void main() {
    vec3 uv = xyz2uv(normalize(frag_pos));

    uv.y = 1.0 - uv.y;
    vec4 c = uvw2c_i16(uv);

    //c.a *= (1.0 - tile.empty);

    out_frag_color = c;
    out_frag_color.a = out_frag_color.a * opacity;
}
@@ -5,7 +5,7 @@ precision lowp usampler2DArray;
precision lowp isampler2DArray;
precision mediump int;

uniform isampler2DArray tex;
uniform sampler2DArray tex;

in vec3 frag_pos;
in vec2 out_clip_pos;
@@ -22,26 +22,17 @@ uniform Tile textures_tiles[12];

uniform float opacity;

#include ../color_i.glsl;
#include ../../projection/hpx_proj.glsl;

vec4 get_tile_color(vec3 pos) {
    HashDxDy result = hash_with_dxdy(0, pos.zxy);

    int idx = result.idx;
    vec2 uv = vec2(result.dy, result.dx);
    Tile tile = textures_tiles[idx];

    vec2 offset = uv;
    vec3 UV = vec3(offset, float(tile.texture_idx));

    vec4 color = get_colormap_from_grayscale_texture(UV);
    color.a *= (1.0 - tile.empty);
    return color;
}
#include ./utils.glsl;
#include ./../color.glsl;

void main() {
    vec4 c = get_tile_color(normalize(frag_pos));
    vec3 uv = xyz2uv(normalize(frag_pos));

    uv.y = 1.0 - uv.y;
    vec4 c = uvw2c_i32(uv);

    //c.a *= (1.0 - tile.empty);
    out_frag_color = c;
    out_frag_color.a = out_frag_color.a * opacity;
}
@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp usampler2DArray;
precision lowp isampler2DArray;
precision mediump int;

uniform sampler2DArray tex;
@@ -22,26 +20,16 @@ uniform Tile textures_tiles[12];

#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
#include ./utils.glsl;

uniform float opacity;
uniform vec4 no_tile_color;

vec4 get_tile_color(vec3 pos) {
    HashDxDy result = hash_with_dxdy(0, pos.zxy);

    int idx = result.idx;
    vec2 uv = vec2(result.dy, result.dx);
    Tile tile = textures_tiles[idx];

    vec2 offset = uv;
    vec3 UV = vec3(offset, float(tile.texture_idx));

    vec4 color = mix(get_pixels(UV), no_tile_color, tile.empty);
    return apply_color_settings(color);
}

void main() {
    // Get the HEALPix cell idx and the uv in the texture
    vec4 c = get_tile_color(normalize(frag_pos));
    vec3 uv = xyz2uv(normalize(frag_pos));
    vec4 c = uvw2c_rgba(uv);

    //c = mix(c, no_tile_color, tile.empty);
    out_frag_color = c;
    out_frag_color = vec4(c.rgb, opacity * c.a);
}
@@ -23,23 +23,12 @@ uniform sampler2DArray tex;

#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;

vec4 get_tile_color(vec3 pos) {
    HashDxDy result = hash_with_dxdy(0, pos.zxy);

    int idx = result.idx;
    vec2 uv = vec2(result.dy, result.dx);
    Tile tile = textures_tiles[idx];

    vec2 offset = uv;
    vec3 UV = vec3(offset, float(tile.texture_idx));

    float value = mix(get_pixels(UV).r, 0.0, tile.empty);
    return apply_colormap_to_grayscale(value);
}
#include ./utils.glsl;

void main() {
    vec4 c = get_tile_color(normalize(frag_pos));
    vec3 uv = xyz2uv(normalize(frag_pos));
    vec4 c = uvw2cmap_rgba(uv);

    out_frag_color = c;
    out_frag_color.a = out_frag_color.a * opacity;
}
@@ -5,7 +5,7 @@ precision lowp usampler2DArray;
precision lowp isampler2DArray;
precision mediump int;

uniform usampler2DArray tex;
uniform sampler2DArray tex;

in vec3 frag_pos;
in vec2 out_clip_pos;
@@ -22,26 +22,18 @@ uniform Tile textures_tiles[12];

uniform float opacity;

#include ../color_u.glsl;
#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;

vec4 get_tile_color(vec3 pos) {
    HashDxDy result = hash_with_dxdy(0, pos.zxy);

    int idx = result.idx;
    vec2 uv = vec2(result.dy, result.dx);
    Tile tile = textures_tiles[idx];

    vec2 offset = uv;
    vec3 UV = vec3(offset, float(tile.texture_idx));

    vec4 color = get_colormap_from_grayscale_texture(UV);
    color.a *= (1.0 - tile.empty);
    return color;
}
#include ./utils.glsl;

void main() {
    vec4 c = get_tile_color(normalize(frag_pos));
    vec3 uv = xyz2uv(normalize(frag_pos));

    uv.y = 1.0 - uv.y;
    vec4 c = uvw2c_u8(uv);

    //c.a *= (1.0 - tile.empty);

    out_frag_color = c;
    out_frag_color.a = out_frag_color.a * opacity;
}
9  src/glsl/webgl2/hips/raytracer/utils.glsl  Normal file
@@ -0,0 +1,9 @@
vec3 xyz2uv(vec3 xyz) {
    HashDxDy result = hash_with_dxdy(0, xyz.zxy);

    int idx = result.idx;
    vec2 offset = vec2(result.dy, result.dx);
    Tile tile = textures_tiles[idx];

    return vec3(offset, float(tile.texture_idx));
}
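The new xyz2uv helper turns a unit direction vector into a 3D texture coordinate: the HEALPix hash gives the base cell index and the in-cell offsets, and the per-cell textures_tiles entry supplies the texture-array layer. A rough JavaScript sketch of the same composition is given below; hashWithDxDy and the tiles array are hypothetical stand-ins for the shader's hash_with_dxdy and textures_tiles, so this is only an illustration of the data flow.

// Hypothetical host-side analogue of xyz2uv: (x, y, z) -> [u, v, layer].
// hashWithDxDy is assumed to return the base HEALPix cell index plus the
// fractional (dx, dy) offsets inside that cell, like hash_with_dxdy in GLSL.
function xyz2uv(xyz, hashWithDxDy, tiles) {
    const { idx, dx, dy } = hashWithDxDy(0, [xyz[2], xyz[0], xyz[1]]); // pos.zxy
    const tile = tiles[idx];            // one entry per base cell (12 of them)
    return [dy, dx, tile.textureIdx];   // (u, v) = (dy, dx), w = layer index
}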
@@ -1,21 +1,20 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;

uniform isampler3D tex;
uniform sampler3D tex;

in vec3 frag_uv;

out vec4 out_frag_color;

#include ../../hips/color_i.glsl;
#include ../hips/color.glsl;

uniform float opacity;

void main() {
    vec4 color = get_colormap_from_grayscale_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
    vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
    vec4 color = uvw2c_f32(uv);

    out_frag_color = color;
    out_frag_color.a = out_frag_color.a * opacity;
@@ -10,12 +10,13 @@ in vec3 frag_uv;

out vec4 out_frag_color;

#include ../../hips/color.glsl;
#include ../hips/color.glsl;

uniform float opacity;

void main() {
    vec4 color = get_colormap_from_grayscale_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
    vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
    vec4 color = uvw2c_i16(uv);

    out_frag_color = color;
    out_frag_color.a = out_frag_color.a * opacity;
@@ -4,18 +4,19 @@ precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;

uniform usampler3D tex;
uniform sampler3D tex;

in vec3 frag_uv;

out vec4 out_frag_color;

#include ../../hips/color_u.glsl;
#include ../hips/color.glsl;

uniform float opacity;

void main() {
    vec4 color = get_colormap_from_grayscale_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
    vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
    vec4 color = uvw2c_i32(uv);

    out_frag_color = color;
    out_frag_color.a = out_frag_color.a * opacity;
@@ -11,7 +11,7 @@ uniform mat3 inv_model;
uniform vec2 ndc_to_clip;
uniform float czf;

#include ../../projection/projection.glsl;
#include ../projection/projection.glsl;

void main() {
    vec3 p_xyz = lonlat2xyz(lonlat);
@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;

uniform sampler3D tex;

@@ -11,10 +9,11 @@ in vec3 frag_uv;
out vec4 out_frag_color;
uniform float opacity;

#include ../../hips/color.glsl;
#include ../hips/color.glsl;

void main() {
    vec4 color = get_color_from_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
    vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
    vec4 color = uvw2c_rgba(uv);

    out_frag_color = color;
    out_frag_color.a = opacity * out_frag_color.a;
@@ -1,8 +1,6 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;

uniform sampler3D tex;

@@ -11,10 +9,11 @@ in vec3 frag_uv;
out vec4 out_frag_color;
uniform float opacity;

#include ../../hips/color.glsl;
#include ../hips/color.glsl;

void main() {
    vec4 color = get_colormap_from_color_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
    vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
    vec4 color = uvw2cmap_rgba(uv);

    out_frag_color = color;
    out_frag_color.a = opacity * out_frag_color.a;
21  src/glsl/webgl2/hips3d/u8.frag  Normal file
@@ -0,0 +1,21 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;

uniform sampler3D tex;

in vec3 frag_uv;

out vec4 out_frag_color;

#include ../hips/color.glsl;

uniform float opacity;

void main() {
    vec3 uv = vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0);
    vec4 color = uvw2c_u8(uv);

    out_frag_color = color;
    out_frag_color.a = out_frag_color.a * opacity;
}
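The hips3d fragment shaders above all build their lookup coordinate the same way: the x/y components address the pixel in a slice, and mod(frag_uv.z, 32.0) / 32.0 wraps the slice index into the 32 layers of the 3D texture and normalizes it to [0, 1). A small JavaScript illustration of that mapping follows; the 32-slice depth is taken from the shader code, everything else is illustrative only.

// Illustration of the hips3d w-coordinate: wrap the requested slice into the
// 32 slices resident in the 3D texture, then normalize to [0, 1).
const SLICES = 32; // matches the hard-coded 32.0 in the shaders above

function sliceToW(slice) {
    return (slice % SLICES) / SLICES;
}

console.log(sliceToW(0));   // 0
console.log(sliceToW(5));   // 0.15625
console.log(sliceToW(37));  // 0.15625 again: slice 37 reuses layer 5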
@@ -371,14 +371,14 @@ export let Image = (function () {
        if (this.imgFormat === 'fits') {
            promise = this._addFITS(layer)
                .catch(e => {
                    console.error(`Image located at ${this.url} could not be parsed as fits file. Is the imgFormat specified correct?`)
                    console.error(`Image located at ${this.url} could not be parsed as fits file. Is the imgFormat specified correct? Reason: `, e)
                    return Promise.reject(e)
                })
        } else if (this.imgFormat === 'jpeg' || this.imgFormat === 'png') {
            promise = this._addJPGOrPNG(layer)
                .catch(e => {
                    console.error(`Image located at ${this.url} could not be parsed as a ${this.imgFormat} file. Is the imgFormat specified correct?`);
                    console.error(`Image located at ${this.url} could not be parsed as a ${this.imgFormat} file. Is the imgFormat specified correct? Reason: `, e);
                    return Promise.reject(e)
                })
        } else {
@@ -386,8 +386,8 @@ export let Image = (function () {
            promise = self._addFITS(layer)
                .catch(e => {
                    return self._addJPGOrPNG(layer)
                        .catch(e => {
                            console.error(`Image located at ${self.url} could not be parsed as jpg/png/tif image file. Aborting...`)
                        .catch(e2 => {
                            console.error(`Image located at ${self.url} could not be parsed as jpg/png/tif image file. Reason: `, e2)
                            return Promise.reject(e);
                        })
                })
@@ -441,10 +441,10 @@ export let Image = (function () {

        return Utils.fetch({
            url: this.url,
            dataType: 'readableStream',
            success: (stream) => {
                return self.view.wasm.addImageFITS(
                    stream,
            dataType: 'arrayBuffer',
            success: (buf) => {
                return self.view.wasm.addFITSImage(
                    new Uint8Array(buf),
                    {
                        ...self.colorCfg.get(),
                        imgFormat: 'fits',
@@ -458,10 +458,10 @@ export let Image = (function () {

        return Utils.fetch({
            url: url,
            dataType: 'readableStream',
            success: (stream) => {
                return self.view.wasm.addImageFITS(
                    stream,
            dataType: 'arrayBuffer',
            success: (buf) => {
                return self.view.wasm.addFITSImage(
                    new Uint8Array(buf),
                    {
                        ...self.colorCfg.get(),
                        imgFormat: 'fits',
@@ -498,12 +498,8 @@ export let Image = (function () {
            var ctx = canvas.getContext("2d");
            ctx.drawImage(img, 0, 0, img.width, img.height);

            const imageData = ctx.getImageData(0, 0, img.width, img.height);

            const blob = new Blob([imageData.data]);
            const stream = blob.stream(1024);

            resolve(stream)
            const imageData = ctx.getImageData(0, 0, img.width, img.height);
            resolve(imageData.data)
        };

        if (!self.options.wcs) {
@@ -555,14 +551,14 @@ export let Image = (function () {
            img.src = Aladin.JSONP_PROXY + '?url=' + self.url;
        }
    })
    .then((readableStream) => {
    .then((bytes) => {
        let wcs = self.options && self.options.wcs;
        wcs.NAXIS1 = wcs.NAXIS1 || img.width;
        wcs.NAXIS2 = wcs.NAXIS2 || img.height;

        return self.view.wasm
            .addImageWithWCS(
                readableStream,
            .addRGBAImage(
                bytes,
                wcs,
                {
                    ...self.colorCfg.get(),
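After this change, _addFITS no longer hands a ReadableStream to the wasm side: it fetches the whole file as an ArrayBuffer and passes a Uint8Array to addFITSImage. A minimal sketch of the same flow with the plain Fetch API is given below; the remaining arguments of addFITSImage are not shown in this hunk and are omitted, so treat this as an illustration rather than the exact call.

// Sketch only: load a FITS file as raw bytes and hand it to the wasm core the
// way the updated _addFITS does. `wasm` stands for the Aladin Lite wasm instance.
async function loadFitsAsBytes(wasm, url, colorOptions) {
    const resp = await fetch(url);
    if (!resp.ok) {
        throw new Error(`HTTP ${resp.status} while fetching ${url}`);
    }
    const buf = await resp.arrayBuffer();        // whole FITS file in memory
    return wasm.addFITSImage(
        new Uint8Array(buf),                     // raw bytes, as in the diff
        { ...colorOptions, imgFormat: 'fits' }   // same option shape as the diff
        // ...other arguments elided, see the full call in Image.js
    );
}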
@@ -397,6 +397,8 @@ Utils.fetch = function(params) {
            return resp.json();
        } else if (params.dataType && params.dataType.includes('blob')) {
            return resp.blob();
        } else if (params.dataType && params.dataType.includes('arrayBuffer')) {
            return resp.arrayBuffer();
        } else if (params.dataType && params.dataType.includes('readableStream')) {
            return Promise.resolve(resp.body);
        } else {
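The new branch lets callers ask Utils.fetch for the response body as an ArrayBuffer, which is how the Image.js hunks above consume it. A minimal usage sketch, mirroring the call shape in the diff (the URL is a placeholder):

// Sketch: request raw bytes through the extended Utils.fetch.
Utils.fetch({
    url: 'https://example.org/image.fits',   // placeholder URL
    dataType: 'arrayBuffer',
    success: (buf) => {
        console.log('received', buf.byteLength, 'bytes');
    }
});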
@@ -201,6 +201,7 @@ export let AVM = (function() {
        if (unwindTag(tags['Spatial.Equinox']))
            wcs.EQUINOX = +unwindTag(tags['Spatial.Equinox']);

        wcs.NAXIS = tags['Spatial.ReferenceDimension'] && +tags['Spatial.ReferenceDimension'].length;
        wcs.NAXIS1 = tags['Spatial.ReferenceDimension'] && +tags['Spatial.ReferenceDimension'][0];
        wcs.NAXIS2 = tags['Spatial.ReferenceDimension'] && +tags['Spatial.ReferenceDimension'][1];
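The added line derives NAXIS from the length of the AVM Spatial.ReferenceDimension array, alongside the existing NAXIS1/NAXIS2 values. A small illustration of the resulting keywords for a typical two-dimensional AVM description (the tag values below are made up for the example):

// Made-up AVM tags for illustration: a 4000 x 2000 pixel image description.
const tags = { 'Spatial.ReferenceDimension': ['4000', '2000'] };

const wcs = {};
wcs.NAXIS  = tags['Spatial.ReferenceDimension'] && +tags['Spatial.ReferenceDimension'].length;
wcs.NAXIS1 = tags['Spatial.ReferenceDimension'] && +tags['Spatial.ReferenceDimension'][0];
wcs.NAXIS2 = tags['Spatial.ReferenceDimension'] && +tags['Spatial.ReferenceDimension'][1];

console.log(wcs); // { NAXIS: 2, NAXIS1: 4000, NAXIS2: 2000 }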