Fix NaN detection on the GPU for float textures on the Windows platform

Float textures coming from BITPIX<0 FITS images are now sent to the GPU as
RGBA8UI textures. The float value is decoded from the vec4 RGBA texel in the
shader, and the decoded float can then be tested against NaN/Inf in the shader.
Author: bmatthieu3
Date: 2025-03-13 18:45:15 +01:00
Committed by: Matthieu Baumann
Parent: 68d9e67774
Commit: ee2eb6e704
17 changed files with 400 additions and 219 deletions
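To illustrate the idea behind the fix, here is a minimal standalone Rust sketch (not code from this repository): reinterpreting an f32 as its four IEEE-754 bytes preserves the exact bit pattern, so NaN/Inf pixels from a BITPIX<0 FITS file survive an RGBA8UI upload and can still be detected once the value is rebuilt on the other side.

// Minimal sketch (not repository code): round-trip f32 pixels through raw
// bytes, mimicking the RGBA8UI upload + in-shader decode path.
fn main() {
    let pixels: [f32; 4] = [1.5, f32::NAN, f32::INFINITY, -0.25];

    // Pack each f32 into 4 bytes: one RGBA8 texel per pixel.
    let bytes: Vec<u8> = pixels.iter().flat_map(|p| p.to_ne_bytes()).collect();

    // "Decode" on the other side: rebuild each f32 from its 4 bytes.
    for (i, texel) in bytes.chunks_exact(4).enumerate() {
        let v = f32::from_ne_bytes([texel[0], texel[1], texel[2], texel[3]]);
        // The bit pattern is untouched, so NaN/Inf are still detectable.
        println!("pixel {i}: {v} (nan: {}, inf: {})", v.is_nan(), v.is_infinite());
    }
}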

View File

@@ -163,7 +163,7 @@ impl Image for Fits<'_> {
        );
    }
    Data::F32(data) => {
-       let view = unsafe { R32F::view(&data) };
+       let view = unsafe { R8UI::view(&std::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * 4)) };
        textures.tex_sub_image_3d_with_opt_array_buffer_view(
            offset.x,
            offset.y,

View File

@@ -152,21 +152,13 @@ impl ImageFormat for RGB32F {
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32F;
impl ImageFormat for R32F {
-   type P = [f32; 1];
-   const NUM_CHANNELS: usize = 1;
-   #[cfg(feature = "webgl2")]
-   const FORMAT: u32 = WebGlRenderingCtx::RED as u32;
-   #[cfg(feature = "webgl1")]
-   const FORMAT: u32 = WebGlRenderingCtx::LUMINANCE as u32;
-   #[cfg(feature = "webgl2")]
-   const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R32F as i32;
-   #[cfg(feature = "webgl1")]
-   const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::LUMINANCE as i32;
-   const TYPE: u32 = WebGlRenderingCtx::FLOAT;
+   type P = [u8; 4];
+   const NUM_CHANNELS: usize = 4;
+   const FORMAT: u32 = WebGlRenderingCtx::RGBA as u32;
+   const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
+   const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;

    const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
@@ -174,41 +166,31 @@ impl ImageFormat for R32F {
        Ok(Bytes::Borrowed(raw_bytes))
    }

-   type ArrayBufferView = js_sys::Float32Array;
+   type ArrayBufferView = js_sys::Uint8Array;

    unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
        Self::ArrayBufferView::view(s)
    }
}

-#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R64F;
-#[cfg(feature = "webgl2")]
impl ImageFormat for R64F {
-   type P = [f32; 1];
-   const NUM_CHANNELS: usize = 1;
-   #[cfg(feature = "webgl2")]
-   const FORMAT: u32 = WebGlRenderingCtx::RED as u32;
-   #[cfg(feature = "webgl1")]
-   const FORMAT: u32 = WebGlRenderingCtx::LUMINANCE as u32;
-   #[cfg(feature = "webgl2")]
-   const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R32F as i32;
-   #[cfg(feature = "webgl1")]
-   const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::LUMINANCE as i32;
-   const TYPE: u32 = WebGlRenderingCtx::FLOAT;
-   const CHANNEL_TYPE: ChannelType = ChannelType::R64F;
+   type P = [u8; 4];
+   const NUM_CHANNELS: usize = 4;
+   const FORMAT: u32 = WebGlRenderingCtx::RGBA as u32;
+   const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
+   const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
+   const CHANNEL_TYPE: ChannelType = ChannelType::R32F;

    fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
        Ok(Bytes::Borrowed(raw_bytes))
    }

-   type ArrayBufferView = js_sys::Float32Array;
+   type ArrayBufferView = js_sys::Uint8Array;

    unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
        Self::ArrayBufferView::view(s)
@@ -309,6 +291,18 @@ pub enum ChannelType {
    R32I,
}

+impl ChannelType {
+   pub fn is_colored(&self) -> bool {
+       match self {
+           ChannelType::RGBA32F
+           | ChannelType::RGB32F
+           | ChannelType::RGBA8U
+           | ChannelType::RGB8U => true,
+           _ => false,
+       }
+   }
+}

pub const NUM_CHANNELS: usize = 9;

#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
@@ -327,12 +321,6 @@ impl ImageFormatType {
    }

    pub fn is_colored(&self) -> bool {
-       match self.channel {
-           ChannelType::RGBA32F
-           | ChannelType::RGB32F
-           | ChannelType::RGBA8U
-           | ChannelType::RGB8U => true,
-           _ => false,
-       }
+       self.channel.is_colored()
    }
}

View File

@@ -11,6 +11,7 @@ pub trait Pixel:
    + Copy
    + std::fmt::Debug
    + cgmath::Zero
+   + cgmath::One
    + std::cmp::PartialEq
    + crate::convert::Cast<f32>;

    type Container: ArrayBuffer<Item = Self::Item>;

View File

@@ -160,47 +160,42 @@ impl From<query::Allsky> for AllskyRequest {
        let Fits { hdu } = Fits::from_reader(&mut reader)
            .map_err(|_| JsValue::from_str("Parsing fits error of allsky"))?;

-       //let width_allsky_px = 27 * std::cmp::min(tile_size, 64) as i32;
-       //let height_allsky_px = 29 * std::cmp::min(tile_size, 64) as i32;
        let data = hdu.get_data();
        match data {
            InMemData::U8(data) => {
                Ok(handle_allsky_fits(&data, tile_size, texture_size)?
-                   .into_iter()
                    .map(|image| ImageType::RawR8ui { image })
                    .collect())
            }
            InMemData::I16(data) => {
                Ok(handle_allsky_fits(&data, tile_size, texture_size)?
-                   .into_iter()
                    .map(|image| ImageType::RawR16i { image })
                    .collect())
            }
            InMemData::I32(data) => {
                Ok(handle_allsky_fits(&data, tile_size, texture_size)?
-                   .into_iter()
                    .map(|image| ImageType::RawR32i { image })
                    .collect())
            }
-           InMemData::F32(data) => {
-               Ok(handle_allsky_fits(&data, tile_size, texture_size)?
-                   .into_iter()
-                   .map(|image| ImageType::RawR32f { image })
-                   .collect())
-           }
            InMemData::I64(data) => {
                let data = data.iter().map(|v| *v as i32).collect::<Vec<_>>();
                Ok(handle_allsky_fits(&data, tile_size, texture_size)?
-                   .into_iter()
                    .map(|image| ImageType::RawR32i { image })
                    .collect())
            }
+           InMemData::F32(data) => {
+               let data = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * 4) };
+               Ok(handle_allsky_fits(&data, tile_size, texture_size)?
+                   .map(|image| ImageType::RawRgba8u { image })
+                   .collect())
+           }
            InMemData::F64(data) => {
                let data = data.iter().map(|v| *v as f32).collect::<Vec<_>>();
+               let data = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * 4) };
                Ok(handle_allsky_fits(&data, tile_size, texture_size)?
-                   .into_iter()
-                   .map(|image| ImageType::RawR32f { image })
+                   .map(|image| ImageType::RawRgba8u { image })
                    .collect())
            }
        }
@@ -226,44 +221,44 @@ fn handle_allsky_file<F: ImageFormat>(
    allsky_tile_size: i32,
    texture_size: i32,
    tile_size: i32,
-) -> Result<Vec<ImageBuffer<F>>, JsValue> {
+) -> Result<impl Iterator<Item = ImageBuffer<F>>, JsValue> {
    let num_tiles_per_texture = (texture_size / tile_size) * (texture_size / tile_size);
    let num_tiles = num_tiles_per_texture * 12;
-   let mut tiles = Vec::with_capacity(num_tiles as usize);

    let num_allsky_tiles_per_tile = (tile_size / allsky_tile_size) * (tile_size / allsky_tile_size);

    let mut src_idx = 0;
-   for _ in 0..num_tiles {
-       let mut base_tile =
+   let tiles = (0..num_tiles)
+       .map(move |_| {
+           let mut base_tile =
                ImageBuffer::<F>::allocate(&<F as ImageFormat>::P::BLACK, tile_size, tile_size);
            for idx_tile in 0..num_allsky_tiles_per_tile {
                let (x, y) = crate::utils::unmortonize(idx_tile as u64);
                let dx = x * (allsky_tile_size as u32);
                let dy = y * (allsky_tile_size as u32);
                let sx = (src_idx % 27) * allsky_tile_size;
                let sy = (src_idx / 27) * allsky_tile_size;

                let s = ImageBufferView {
                    x: sx as i32,
                    y: sy as i32,
                    w: allsky_tile_size as i32,
                    h: allsky_tile_size as i32,
                };
                let d = ImageBufferView {
                    x: dx as i32,
                    y: dy as i32,
                    w: allsky_tile_size as i32,
                    h: allsky_tile_size as i32,
                };

                base_tile.tex_sub(&allsky, &s, &d);

                src_idx += 1;
            }

-       tiles.push(base_tile);
-   }
+           base_tile
+       });

    Ok(tiles)
}
@@ -272,7 +267,7 @@ fn handle_allsky_fits<F: ImageFormat>(
    allsky_data: &[<<F as ImageFormat>::P as Pixel>::Item],
    tile_size: i32,
    texture_size: i32,
-) -> Result<Vec<ImageBuffer<F>>, JsValue> {
+) -> Result<impl Iterator<Item = ImageBuffer<F>>, JsValue> {
    let allsky_tile_size = std::cmp::min(tile_size, 64);
    let width_allsky_px = 27 * allsky_tile_size;
    let height_allsky_px = 29 * allsky_tile_size;
@@ -286,9 +281,8 @@ fn handle_allsky_fits<F: ImageFormat>(
    let allsky = ImageBuffer::<F>::new(reversed_rows_data, width_allsky_px, height_allsky_px);

-   let allsky_tiles = handle_allsky_file::<F>(allsky, allsky_tile_size, texture_size, tile_size)?
-       .into_iter()
-       .map(|image| {
+   let allsky_tiles_iter = handle_allsky_file::<F>(allsky, allsky_tile_size, texture_size, tile_size)?
+       .map(move |image| {
            // The GPU does a specific transformation on the UV
            // for FITS tiles
            // We must revert this to be compatible with this GPU transformation
@@ -298,10 +292,9 @@ fn handle_allsky_fits<F: ImageFormat>(
            }

            ImageBuffer::<F>::new(new_image_data, tile_size, tile_size)
-       })
-       .collect();
+       });

-   Ok(allsky_tiles)
+   Ok(allsky_tiles_iter)
}

use al_core::image::format::RGBA8U;
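The handle_allsky_file/handle_allsky_fits change above swaps a Vec-building loop for a lazily evaluated iterator. A hedged sketch of that pattern with a made-up Tile type (not the repository's ImageBuffer):

// Sketch of the Vec -> `impl Iterator` refactor used above (hypothetical Tile type).
struct Tile(u32);

fn make_tiles(num_tiles: u32, tile_size: u32) -> impl Iterator<Item = Tile> {
    // `move` hands ownership of the captured values to the closure so the
    // iterator can outlive this function, mirroring `.map(move |_| { ... })`.
    (0..num_tiles).map(move |idx| Tile(idx * tile_size))
}

fn main() {
    // Tiles are only materialized when the iterator is consumed.
    let sizes: Vec<u32> = make_tiles(12, 64).map(|t| t.0).collect();
    assert_eq!(sizes.len(), 12);
}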

View File

@@ -75,13 +75,22 @@ pub fn get_raster_shader<'a>(
    shaders: &'a mut ShaderManager,
    config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
-   if config.get_format().is_colored() && cmap.label() == "native" {
-       crate::shader::get_shader(
-           gl,
-           shaders,
-           "hips_rasterizer_raster.vert",
-           "hips_rasterizer_color.frag",
-       )
+   if config.get_format().is_colored() {
+       if cmap.label() == "native" {
+           crate::shader::get_shader(
+               gl,
+               shaders,
+               "hips_rasterizer_raster.vert",
+               "hips_rasterizer_color.frag",
+           )
+       } else {
+           crate::shader::get_shader(
+               gl,
+               shaders,
+               "hips_rasterizer_raster.vert",
+               "hips_rasterizer_color_to_colormap.frag",
+           )
+       }
    } else {
        if config.tex_storing_unsigned_int {
            crate::shader::get_shader(
@@ -115,13 +124,22 @@ pub fn get_raytracer_shader<'a>(
    config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
    //let colored_hips = config.is_colored();
-   if config.get_format().is_colored() && cmap.label() == "native" {
-       crate::shader::get_shader(
-           gl,
-           shaders,
-           "hips_raytracer_raytracer.vert",
-           "hips_raytracer_color.frag",
-       )
+   if config.get_format().is_colored() {
+       if cmap.label() == "native" {
+           crate::shader::get_shader(
+               gl,
+               shaders,
+               "hips_raytracer_raytracer.vert",
+               "hips_raytracer_color.frag",
+           )
+       } else {
+           crate::shader::get_shader(
+               gl,
+               shaders,
+               "hips_raytracer_raytracer.vert",
+               "hips_raytracer_color_to_colormap.frag",
+           )
+       }
    } else {
        if config.tex_storing_unsigned_int {
            crate::shader::get_shader(

View File

@@ -52,13 +52,22 @@ pub fn get_raster_shader<'a>(
    shaders: &'a mut ShaderManager,
    config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
-   if config.get_format().is_colored() && cmap.label() == "native" {
-       crate::shader::get_shader(
-           gl,
-           shaders,
-           "hips3d_rasterizer_raster.vert",
-           "hips3d_rasterizer_color.frag",
-       )
+   if config.get_format().is_colored() {
+       if cmap.label() == "native" {
+           crate::shader::get_shader(
+               gl,
+               shaders,
+               "hips3d_rasterizer_raster.vert",
+               "hips3d_rasterizer_color.frag",
+           )
+       } else {
+           crate::shader::get_shader(
+               gl,
+               shaders,
+               "hips3d_rasterizer_raster.vert",
+               "hips3d_rasterizer_color_to_colormap.frag",
+           )
+       }
    } else {
        if config.tex_storing_unsigned_int {
            crate::shader::get_shader(

View File

@@ -1,23 +1,23 @@
use std::cmp::Ordering;
use std::ops::Range;

-pub fn first_and_last_percent<T>(slice: &mut [T], first_percent: i32, last_percent: i32) -> Range<T>
+pub fn first_and_last_percent<T>(slice: &mut [T], mut first_percent: i32, mut last_percent: i32) -> Range<T>
where
    T: PartialOrd + Copy,
{
+   if first_percent > last_percent {
+       std::mem::swap(&mut first_percent, &mut last_percent);
+   }

    let n = slice.len();
-   let first_pct_idx = ((first_percent as f32) * 0.01 * (n as f32)) as usize;
-   let last_pct_idx = ((last_percent as f32) * 0.01 * (n as f32)) as usize;
+   let i1 = ((first_percent as f32) * 0.01 * (n as f32)) as usize;
+   let i2 = ((last_percent as f32) * 0.01 * (n as f32)) as usize;

    let min_val = {
-       let (_, min_val, _) = slice.select_nth_unstable_by(first_pct_idx, |a, b| {
-           a.partial_cmp(b).unwrap_or(Ordering::Greater)
-       });
+       let (_, min_val, _) = slice.select_nth_unstable_by(i1, |a, b| a.partial_cmp(b).unwrap_or(Ordering::Greater));
        *min_val
    };
    let max_val = {
-       let (_, max_val, _) = slice.select_nth_unstable_by(last_pct_idx, |a, b| {
-           a.partial_cmp(b).unwrap_or(Ordering::Greater)
-       });
+       let (_, max_val, _) = slice.select_nth_unstable_by(i2, |a, b| a.partial_cmp(b).unwrap_or(Ordering::Greater));
        *max_val
    };
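For reference, a standalone usage sketch of the percentile-cut idea implemented above: select_nth_unstable_by partially sorts the slice so the element at the requested index is the one a full sort would put there, which is enough to read off the 1% and 99% cut values (it assumes a non-empty slice, as the function above does).

use std::cmp::Ordering;

// Standalone sketch of the percentile cut: select_nth_unstable_by partially
// sorts so the element at `idx` is the one a full sort would place there.
fn percentile_cut(values: &mut [f32], percent: usize) -> f32 {
    let idx = (values.len() * percent) / 100;
    let idx = idx.min(values.len() - 1); // keep the index in bounds
    let (_, v, _) =
        values.select_nth_unstable_by(idx, |a, b| a.partial_cmp(b).unwrap_or(Ordering::Greater));
    *v
}

fn main() {
    let mut samples: Vec<f32> = (0..1000).map(|i| i as f32).collect();
    let cuts = percentile_cut(&mut samples, 1)..percentile_cut(&mut samples, 99);
    assert_eq!(cuts, 10.0..990.0);
    println!("cuts: {cuts:?}");
}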

View File

@@ -3,6 +3,7 @@ pub mod grid;
pub mod subdivide_texture;

use al_core::webgl_ctx::WebGlRenderingCtx;
+use al_core::convert::Cast;

use std::fmt::Debug;
use std::marker::Unpin;
use std::vec;

@@ -35,6 +36,7 @@ use crate::ProjectionType;
use crate::ShaderManager;
use std::ops::Range;

+type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;

pub struct Image {
    /// A reference to the GL context
@@ -49,7 +51,7 @@ pub struct Image {
    /// Parameters extracted from the fits
    wcs: WCS,
-   blank: f32,
+   blank: Option<f32>,
    scale: f32,
    offset: f32,
    cuts: Range<f32>,
@@ -75,6 +77,7 @@ use fitsrs::hdu::header::extension;
use fitsrs::hdu::AsyncHDU; use fitsrs::hdu::AsyncHDU;
use futures::io::BufReader; use futures::io::BufReader;
use futures::AsyncReadExt; use futures::AsyncReadExt;
impl Image { impl Image {
pub async fn from_reader_and_wcs<R, F>( pub async fn from_reader_and_wcs<R, F>(
gl: &WebGlContext, gl: &WebGlContext,
@@ -103,9 +106,8 @@ impl Image {
        // apply bscale to the cuts
        let offset = offset.unwrap_or(0.0);
        let scale = scale.unwrap_or(1.0);
-       let blank = blank.unwrap_or(std::f32::NAN);

-       let (textures, mut cuts) = if width <= max_tex_size as u64 && height <= max_tex_size as u64
+       let (textures, cuts) = if width <= max_tex_size as u64 && height <= max_tex_size as u64
        {
            max_tex_size_x = width as usize;
            max_tex_size_y = height as usize;
@@ -113,7 +115,7 @@ impl Image {
            let num_pixels_to_read = (width as usize) * (height as usize);
            let num_bytes_to_read =
-               num_pixels_to_read * std::mem::size_of::<<F::P as Pixel>::Item>() * F::NUM_CHANNELS;
+               num_pixels_to_read * std::mem::size_of::<F::P>();
            let mut buf = vec![0; num_bytes_to_read];

            let _ = reader
@@ -123,30 +125,11 @@ impl Image {
            // bytes aligned
            unsafe {
-               let slice = std::slice::from_raw_parts(
-                   buf[..].as_ptr() as *const <F::P as Pixel>::Item,
+               let data = std::slice::from_raw_parts_mut(
+                   buf[..].as_mut_ptr() as *mut PixelItem<F>,
                    (num_pixels_to_read as usize) * F::NUM_CHANNELS,
                );

-               let cuts = if F::NUM_CHANNELS == 1 {
-                   let mut samples = slice
-                       .iter()
-                       .filter_map(|item| {
-                           let t: f32 =
-                               <<F::P as Pixel>::Item as al_core::convert::Cast<f32>>::cast(*item);
-                           if t.is_nan() || t == blank {
-                               None
-                           } else {
-                               Some(t)
-                           }
-                       })
-                       .collect::<Vec<_>>();
-
-                   cuts::first_and_last_percent(&mut samples, 1, 99)
-               } else {
-                   0.0..1.0
-               };

                let texture = Texture2D::create_from_raw_pixels::<F>(
                    gl,
                    width as i32,
@@ -171,9 +154,52 @@ impl Image {
                        WebGlRenderingCtx::CLAMP_TO_EDGE,
                    ),
                ],
-               Some(slice),
+               Some(data),
            )?;
let cuts = match F::CHANNEL_TYPE {
ChannelType::R32F | ChannelType::R64F => {
let pixels = std::slice::from_raw_parts(data.as_ptr() as *const f32, data.len() / 4);
let mut sub_pixels = pixels.iter()
.step_by(100)
.filter(|pixel| (*pixel).is_finite())
.cloned()
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
}
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
// BLANK is only valid for those channels/BITPIX (> 0)
if let Some(blank) = blank {
let mut sub_pixels = data.iter()
.step_by(100)
.filter_map(|pixel| {
let pixel = <PixelItem::<F> as Cast<f32>>::cast(*pixel);
if pixel != blank {
Some(pixel)
} else {
None
}
})
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
} else {
// No blank value => we consider all the values
let mut sub_pixels = data.iter()
.step_by(100)
.map(|pixel| <PixelItem::<F> as Cast<f32>>::cast(*pixel))
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
}
}
// RGB(A) images
_ => 0.0..1.0
};
(vec![texture], cuts) (vec![texture], cuts)
} }
} else { } else {
@@ -195,7 +221,7 @@ impl Image {
        let start = cuts.start * scale + offset;
        let end = cuts.end * scale + offset;
-       cuts = start..end;
+       let cuts = start..end;

        let num_indices = vec![];
        let indices = vec![];
@@ -319,26 +345,22 @@ impl Image {
        gl: &WebGlContext,
        hdu: &mut AsyncHDU<'a, BufReader<R>, extension::image::Image>,
        coo_sys: CooSystem,
-       //reader: &'a mut BufReader<R>,
    ) -> Result<Self, JsValue>
    where
        R: AsyncRead + Unpin + Debug + 'a,
    {
-       // Load the fits file
+       // Load the FITS file
        let header = hdu.get_header();
        let scale = header
            .get_parsed::<f64>(b"BSCALE ")
-           .unwrap_or(Ok(1.0))
-           .unwrap() as f32;
+           .map(|v| v.unwrap());
        let offset = header
            .get_parsed::<f64>(b"BZERO ")
-           .unwrap_or(Ok(0.0))
-           .unwrap() as f32;
+           .map(|v| v.unwrap());
        let blank = header
            .get_parsed::<f64>(b"BLANK ")
-           .unwrap_or(Ok(std::f64::NAN))
-           .unwrap() as f32;
+           .map(|v| v.unwrap());

        // Create a WCS from a specific header unit
        let wcs = WCS::from_fits_header(header)
@@ -354,9 +376,9 @@ impl Image {
gl, gl,
reader, reader,
wcs, wcs,
Some(scale), scale.map(|v| v as f32),
Some(offset), offset.map(|v| v as f32),
Some(blank), blank.map(|v| v as f32),
coo_sys, coo_sys,
) )
.await .await
@@ -368,9 +390,9 @@ impl Image {
gl, gl,
reader, reader,
wcs, wcs,
Some(scale), scale.map(|v| v as f32),
Some(offset), offset.map(|v| v as f32),
Some(blank), blank.map(|v| v as f32),
coo_sys, coo_sys,
) )
.await .await
@@ -382,9 +404,9 @@ impl Image {
gl, gl,
reader, reader,
wcs, wcs,
Some(scale), scale.map(|v| v as f32),
Some(offset), offset.map(|v| v as f32),
Some(blank), blank.map(|v| v as f32),
coo_sys, coo_sys,
) )
.await .await
@@ -401,9 +423,9 @@ impl Image {
gl, gl,
reader, reader,
wcs, wcs,
Some(scale), scale.map(|v| v as f32),
Some(offset), offset.map(|v| v as f32),
Some(blank), blank.map(|v| v as f32),
coo_sys, coo_sys,
) )
.await .await
@@ -415,9 +437,9 @@ impl Image {
gl, gl,
reader, reader,
wcs, wcs,
Some(scale), scale.map(|v| v as f32),
Some(offset), offset.map(|v| v as f32),
Some(blank), blank.map(|v| v as f32),
coo_sys, coo_sys,
) )
.await .await
@@ -434,9 +456,9 @@ impl Image {
gl, gl,
reader, reader,
wcs, wcs,
Some(scale), scale.map(|v| v as f32),
Some(offset), offset.map(|v| v as f32),
Some(blank), blank.map(|v| v as f32),
coo_sys, coo_sys,
) )
.await .await
@@ -724,15 +746,22 @@ impl Image {
            let texture = &self.textures[idx_tex];
            let num_indices = self.num_indices[idx] as i32;

-           shader
-               .bind(&self.gl)
+           let shader_bound = shader
+               .bind(&self.gl);
+           shader_bound
                .attach_uniforms_from(colormaps)
                .attach_uniforms_with_params_from(color, colormaps)
                .attach_uniform("opacity", opacity)
                .attach_uniform("tex", texture)
                .attach_uniform("scale", &self.scale)
-               .attach_uniform("offset", &self.offset)
-               .attach_uniform("blank", &self.blank)
+               .attach_uniform("offset", &self.offset);
+
+           if let Some(blank) = self.blank {
+               shader_bound.attach_uniform("blank", &blank);
+           }
+
+           shader_bound
                .bind_vertex_array_object_ref(&self.vao)
                .draw_elements_with_i32(
                    WebGl2RenderingContext::TRIANGLES,
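The new cut computation earlier in this file reads the uploaded byte buffer back as f32 samples with an unsafe from_raw_parts before subsampling and filtering out non-finite values. A safe sketch of the same reinterpretation (assuming, as the unsafe version does, that the buffer holds native-endian f32s):

// Safe sketch of the reinterpretation used for the cuts: read the RGBA8-packed
// buffer back as native-endian f32 samples, subsample, and drop non-finite
// pixels before computing the percentile cuts.
fn finite_samples(bytes: &[u8], step: usize) -> Vec<f32> {
    bytes
        .chunks_exact(4) // one f32 per 4-byte texel
        .step_by(step) // e.g. every 100th pixel, as above
        .map(|b| f32::from_ne_bytes([b[0], b[1], b[2], b[3]]))
        .filter(|v| v.is_finite()) // skip NaN/Inf pixels
        .collect()
}

fn main() {
    let pixels = [0.5f32, f32::NAN, 2.0, f32::INFINITY, -1.0];
    let bytes: Vec<u8> = pixels.iter().flat_map(|p| p.to_ne_bytes()).collect();
    assert_eq!(finite_samples(&bytes, 1), vec![0.5, 2.0, -1.0]);
}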

View File

@@ -1,3 +1,4 @@
+use al_core::image::format::ChannelType;
use wasm_bindgen::JsValue;

use futures::AsyncReadExt;
@@ -10,13 +11,16 @@ use al_core::Texture2D;
use al_core::WebGlContext;
use std::ops::Range;

+use al_core::convert::Cast;
+
+type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;

pub async fn crop_image<'a, F, R>(
    gl: &WebGlContext,
    width: u64,
    height: u64,
    mut reader: R,
    max_tex_size: u64,
-   blank: f32,
+   blank: Option<f32>,
) -> Result<(Vec<Texture2D>, Range<f32>), JsValue>
where
    F: ImageFormat,
@@ -67,10 +71,12 @@ where
    let mut pixels_written = 0;
    let num_pixels = width * height;

-   let step_x_cut = (width / 50) as usize;
-   let step_y_cut = (height / 50) as usize;
-   let mut samples = vec![];
+   const PIXEL_STEP: u64 = 256;
+
+   let step_x_cut = (width / PIXEL_STEP) as usize;
+   let step_y_cut = (height / PIXEL_STEP) as usize;
+   let mut sub_pixels = vec![];

    let step_cut = step_x_cut.max(step_y_cut) + 1;
@@ -101,36 +107,60 @@ where
            let dy = (pixels_written / width) - off_y_px;

            let view = unsafe {
-               let slice = std::slice::from_raw_parts(
+               let data = std::slice::from_raw_parts(
                    buf[..num_bytes_to_read].as_ptr() as *const <F::P as Pixel>::Item,
                    (num_pixels_to_read as usize) * F::NUM_CHANNELS,
                );

                // compute the cuts if the pixel is grayscale
-               if F::NUM_CHANNELS == 1 {
-                   // fill the samples buffer
-                   if (pixels_written / width) % (step_cut as u64) == 0 {
-                       // We are in a good line
-                       let xmin = pixels_written % width;
-
-                       for i in (0..width).step_by(step_cut) {
-                           if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
-                               let j = (i - xmin) as usize;
-                               let sj: f32 = <<F::P as Pixel>::Item as al_core::convert::Cast<
-                                   f32,
-                               >>::cast(slice[j]);
-                               if !sj.is_nan() {
-                                   if blank != sj {
-                                       samples.push(sj);
-                                   }
-                               }
-                           }
-                       }
-                   }
-               }
+               if (pixels_written / width) % (step_cut as u64) == 0 {
+                   // We are in a good line
+                   let xmin = pixels_written % width;
+
+                   match F::CHANNEL_TYPE {
+                       ChannelType::R32F | ChannelType::R64F => {
+                           let pixels = std::slice::from_raw_parts(data.as_ptr() as *const f32, data.len() / 4);
+                           for i in (0..width).step_by(step_cut) {
+                               if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
+                                   let j = (i - xmin) as usize;
+
+                                   if pixels[j].is_finite() {
+                                       sub_pixels.push(pixels[j]);
+                                   }
+                               }
+                           }
+                       },
+                       ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
+                           // BLANK is only valid for those channels/BITPIX (> 0)
+                           if let Some(blank) = blank {
+                               for i in (0..width).step_by(step_cut) {
+                                   if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
+                                       let j = (i - xmin) as usize;
+                                       let pixel = <PixelItem::<F> as Cast<f32>>::cast(data[j]);
+                                       if pixel != blank {
+                                           sub_pixels.push(pixel);
+                                       }
+                                   }
+                               }
+                           } else {
+                               for i in (0..width).step_by(step_cut) {
+                                   if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
+                                       let j = (i - xmin) as usize;
+                                       let pixel = <PixelItem::<F> as Cast<f32>>::cast(data[j]);
+                                       sub_pixels.push(pixel);
+                                   }
+                               }
+                           }
+                       },
+                       // colored pixels
+                       _ => (),
+                   }
+               }

-               F::view(slice)
+               F::view(data)
            };
(&mut tex_chunks[id_t as usize]) (&mut tex_chunks[id_t as usize])
@@ -151,8 +181,8 @@ where
        }
    }

-   let cuts = if F::NUM_CHANNELS == 1 {
-       cuts::first_and_last_percent(&mut samples, 1, 99)
+   let cuts = if F::CHANNEL_TYPE.is_colored() {
+       cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
    } else {
        0.0..1.0
    };
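The streamed version above samples roughly PIXEL_STEP pixels per axis while the chunks are being uploaded. A standalone sketch of that stride-sampling idea on an in-memory grid (hypothetical helper, not the repository's code):

// Sketch of the stride sampling used for streamed cuts: keep roughly
// `target` samples per axis by stepping over the pixel grid.
fn subsample(pixels: &[f32], width: usize, height: usize, target: usize) -> Vec<f32> {
    // +1 avoids a zero step when the image is smaller than `target`.
    let step = (width / target).max(height / target) + 1;

    let mut samples = Vec::new();
    for y in (0..height).step_by(step) {
        for x in (0..width).step_by(step) {
            let v = pixels[y * width + x];
            if v.is_finite() {
                samples.push(v);
            }
        }
    }
    samples
}

fn main() {
    let (w, h) = (512, 256);
    let pixels = vec![1.0f32; w * h];
    let samples = subsample(&pixels, w, h, 256);
    assert!(!samples.is_empty() && samples.len() < w * h);
}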

View File

@@ -1,6 +1,4 @@
use crate::{healpix::coverage::HEALPixCoverage, CameraViewPort, ShaderManager};
-use web_sys::WebGl2RenderingContext;

use al_core::WebGlContext;
use wasm_bindgen::JsValue;

View File

@@ -225,8 +225,6 @@ impl Layers {
        let cdid = self.ids.get(layer).unwrap_abort();
        if let Some(hips) = self.hipses.get(cdid) {
-           let hips_cfg = hips.get_config();

            let allsky = hips.is_allsky();
            let opaque = meta.opacity == 1.0;

View File

@@ -1,6 +1,6 @@
#version 300 es
-precision lowp float;
-precision mediump int;
+precision highp float;
+precision highp int;

layout (location = 0) in vec2 ndc_pos;
layout (location = 1) in vec2 uv;

View File

@@ -1,9 +1,9 @@
#version 300 es
-precision lowp float;
-precision lowp sampler2D;
+precision highp float;
+precision highp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
-precision mediump int;
+precision highp int;

out vec4 out_frag_color;
in vec2 frag_uv;
@@ -25,19 +25,31 @@ uniform float reversed;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;

-vec4 apply_colormap_to_grayscale(float x, float a) {
+vec4 apply_colormap_to_grayscale(float x) {
    float alpha = x * scale + offset;
    alpha = transfer_func(H, alpha, min_value, max_value);

    // apply reversed
    alpha = mix(alpha, 1.0 - alpha, reversed);

-   vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
+   vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
    return apply_tonal(new_color);
}

+highp float decode32(highp vec4 rgba) {
+   highp float Sign = 1.0 - step(128.0, rgba[0]) * 2.0;
+   highp float Exponent = 2.0 * mod(rgba[0], 128.0) + step(128.0, rgba[1]) - 127.0;
+   if (abs(Exponent + 127.0) < 1e-3) {
+       return 0.0;
+   }
+   highp float Mantissa = mod(rgba[1], 128.0) * 65536.0 + rgba[2] * 256.0 + rgba[3] + float(0x800000);
+   highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0));
+   return Result;
+}

void main() {
-   vec4 color = texture(tex, frag_uv);
-   out_frag_color = apply_colormap_to_grayscale(color.r, color.a);
+   highp float value = decode32(texture(tex, frag_uv).abgr * 255.0);
+   // reconstruct the float value
+   out_frag_color = apply_colormap_to_grayscale(value);
    out_frag_color.a = out_frag_color.a * opacity;
}
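A Rust reference sketch of the decode32 reconstruction above, useful for checking the arithmetic against f32::from_bits on finite values. It assumes the four bytes arrive most-significant first (sign/exponent byte in rgba[0]), which is the layout the .abgr swizzle appears to arrange for little-endian uploads.

// Reference sketch (not shader code): rebuild an f32 from its four bytes,
// ordered most-significant first (b[0] = sign/exponent byte), the same
// layout decode32 expects after the .abgr swizzle.
fn decode32(b: [u8; 4]) -> f32 {
    let sign = if b[0] >= 128 { -1.0f32 } else { 1.0 };
    let exponent = 2 * (b[0] % 128) as i32 + (b[1] >= 128) as i32 - 127;
    if exponent == -127 {
        return 0.0; // zeros/subnormals are flushed to 0.0, as in the shader
    }
    let mantissa =
        (b[1] % 128) as f32 * 65536.0 + b[2] as f32 * 256.0 + b[3] as f32 + 0x0080_0000 as f32;
    sign * 2.0f32.powi(exponent) * (mantissa / 8_388_608.0) // mantissa * 2^-23
}

fn main() {
    for value in [1.5f32, -0.25, 3.5, 1024.125] {
        let be = value.to_be_bytes(); // big-endian: sign/exponent byte first
        assert_eq!(decode32(be), f32::from_bits(u32::from_be_bytes(be)));
        assert_eq!(decode32(be), value);
    }
}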

View File

@@ -1,15 +1,11 @@
uniform float scale;
uniform float offset;
uniform float blank;

uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;

uniform float size_tile_uv;
uniform int tex_storing_fits;

#include ../colormaps/colormap.glsl;
@@ -26,7 +22,6 @@ vec3 reverse_uv(vec3 uv) {
    return uv;
}

vec4 get_color_from_texture(vec3 UV) {
    vec4 color = get_pixels(UV);
@@ -40,21 +35,34 @@ vec4 get_color_from_texture(vec3 UV) {
    return apply_tonal(color);
}

-vec4 apply_colormap_to_grayscale(float x, float a) {
+vec4 apply_colormap_to_grayscale(float x) {
    float alpha = x * scale + offset;
    alpha = transfer_func(H, alpha, min_value, max_value);

    // apply reversed
    alpha = mix(alpha, 1.0 - alpha, reversed);

-   vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
+   vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x) || isnan(x)));
    return apply_tonal(new_color);
}

+highp float decode32(highp vec4 rgba) {
+   highp float Sign = 1.0 - step(128.0, rgba[0]) * 2.0;
+   highp float Exponent = 2.0 * mod(rgba[0], 128.0) + step(128.0, rgba[1]) - 127.0;
+   highp float Mantissa = mod(rgba[1], 128.0) * 65536.0 + rgba[2] * 256.0 + rgba[3] + float(0x800000);
+   highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0));
+   return Result;
+}

vec4 get_colormap_from_grayscale_texture(vec3 UV) {
    // FITS data pixels are reversed along the y axis
    vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
-   vec4 color = get_pixels(uv);
-   return apply_colormap_to_grayscale(color.r, color.a);
+   float value = decode32(get_pixels(uv).abgr * 255.0);
+   return apply_colormap_to_grayscale(value);
+}
+
+vec4 get_colormap_from_color_texture(vec3 uv) {
+   float value = get_pixels(uv).r;
+   return apply_colormap_to_grayscale(value);
}

View File

@@ -0,0 +1,24 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
in float frag_blending_factor;
out vec4 out_frag_color;
uniform float opacity;
#include ../color.glsl;
void main() {
vec4 color_start = get_colormap_from_color_texture(frag_uv_start);
vec4 color_end = get_colormap_from_color_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = opacity * out_frag_color.a;
}

View File

@@ -0,0 +1,52 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision mediump int;
in vec3 frag_pos;
in vec2 out_clip_pos;
out vec4 out_frag_color;
struct Tile {
int uniq; // Healpix cell
int texture_idx; // Index in the texture buffer
float start_time; // Absolute time that the load has been done in ms
float empty;
};
uniform Tile textures_tiles[12];
uniform float opacity;
uniform sampler2DArray tex;
struct TileColor {
Tile tile;
vec4 color;
bool found;
};
#include ../color.glsl;
#include ../../projection/hpx_proj.glsl;
vec4 get_tile_color(vec3 pos) {
HashDxDy result = hash_with_dxdy(0, pos.zxy);
int idx = result.idx;
vec2 uv = vec2(result.dy, result.dx);
Tile tile = textures_tiles[idx];
vec2 offset = uv;
vec3 UV = vec3(offset, float(tile.texture_idx));
vec4 color = get_colormap_from_color_texture(UV);
color.a *= (1.0 - tile.empty);
return color;
}
void main() {
vec4 c = get_tile_color(normalize(frag_pos));
out_frag_color = c;
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -0,0 +1,21 @@
#version 300 es
precision lowp float;
precision lowp sampler3D;
precision lowp isampler3D;
precision lowp usampler3D;
uniform sampler3D tex;
in vec3 frag_uv;
out vec4 out_frag_color;
uniform float opacity;
#include ../../hips/color.glsl;
void main() {
vec4 color = get_colormap_from_color_texture(vec3(frag_uv.xy, mod(frag_uv.z, 32.0) / 32.0));
out_frag_color = color;
out_frag_color.a = opacity * out_frag_color.a;
}