commit
ea25c19b2e
13 changed files with 3219 additions and 0 deletions
@ -0,0 +1,31 @@ |
|||||||
|
[package] |
||||||
|
name = "bzvx" |
||||||
|
version = "0.1.0" |
||||||
|
authors = ["Sam Blazes <blazes.sam@gmail.com>"] |
||||||
|
edition = "2018" |
||||||
|
|
||||||
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html |
||||||
|
|
||||||
|
[dependencies] |
||||||
|
|
||||||
|
log = "0.4" |
||||||
|
env_logger = "0.8.2" |
||||||
|
rand = "0.7.3" |
||||||
|
opensimplex = "0.2.0" |
||||||
|
cgmath = "0.18.0" |
||||||
|
png = "0.16.3" |
||||||
|
serde = {version = "1.0.110", features = ["derive"]} |
||||||
|
serde_json = "1.0.53" |
||||||
|
bincode = "1.2.1" |
||||||
|
nalgebra = "0.21.0" |
||||||
|
obj = "0.10.0" |
||||||
|
image = "0.23.8" |
||||||
|
pbr = "1.0" |
||||||
|
noise = "0.6.0" |
||||||
|
fxhash = "0.2.1" |
||||||
|
fnv = "1.0.7" |
||||||
|
|
||||||
|
[features] |
||||||
|
|
||||||
|
benchtest = [] |
||||||
|
default = [] |
||||||
@ -0,0 +1,20 @@ |
|||||||
|
BZVX |
||||||
|
---- |
||||||
|
|
||||||
|
BZVX is a voxel geometry library that uses Sparse Voxel Directed Acyclic Graphs (SVDAGs) for a variety of operations. |
||||||
|
|
||||||
|
## Sparse Voxel Directed Acyclic Graphs (SVDAGs) |
||||||
|
|
||||||
|
An SVDAG is similar to a sparse voxel octree (SVO): a cubic volume is recursively subdivided into 8 parts where there are boundaries between materials, while uniform volumes are not subdivided.
||||||
|
However, in an SVDAG, multiple nodes can point to the same child in cases where there would have been duplicated subtrees in an SVO.
||||||
|
This has the potential to vastly reduce the memory footprint of the voxel data. |
||||||
|
|
||||||
|
Reading and storing an SVDAG is very similar to reading and storing an SVO.
||||||
|
Absolute position data can't be stored along with the nodes, since one node may represent multiple different volumes. |
||||||
|
Therefore, absolute position information must be calculated implicitly as you traverse the datastructure. |
||||||
|
Other than that, traversing SVDAGs is the same as traversing SVOs, and the compression is "free" from a reading/traversing perspective. |
||||||
|
|
||||||
|
Modifying a SVDAG is more complicated, however. |
||||||
|
Nodes can be referenced from multiple parents, so each time voxel data is changed, parent nodes potentially up to the root node may need modifications to avoid invalid changes to other data. |
||||||
|
The simplest method is to simply duplicate every node while traversing from the root and modify the new copy (and the original child of the root). |
||||||
|
Inductively, if a node can be modified without causing collateral changes, then a direct child node can be duplicated, the current node can be modified to point to the duplicate, and the child can then also be modified without causing |
||||||
@ -0,0 +1,119 @@ |
|||||||
|
use crate::VoxelChunk; |
||||||
|
|
||||||
|
impl VoxelChunk { |
||||||
|
pub fn get_voxel_at_depth_index(&self, depth : usize, x : usize, y : usize, z : usize) -> u32 { |
||||||
|
let mut i = 0; |
||||||
|
|
||||||
|
for b in (0..depth).rev() { |
||||||
|
|
||||||
|
let ci =
|
||||||
|
(((x >> b) & 1) << 0) | |
||||||
|
(((y >> b) & 1) << 1) | |
||||||
|
(((z >> b) & 1) << 2); |
||||||
|
|
||||||
|
let j = self.voxels[i].sub_voxels[ci]; |
||||||
|
|
||||||
|
if j <= 0 { |
||||||
|
return (-j) as u32; |
||||||
|
} |
||||||
|
|
||||||
|
i = j as usize - 1; |
||||||
|
} |
||||||
|
|
||||||
|
self.lod_materials[i] |
||||||
|
} |
||||||
|
|
||||||
|
pub fn set_voxel_at_depth_index(&mut self, depth : usize, x : usize, y : usize, z : usize, material : u32) { |
||||||
|
|
||||||
|
r_set_helper(self, 0, depth, x, y, z, material); |
||||||
|
fn r_set_helper(v : &mut VoxelChunk, i : usize, depth : usize, x : usize, y : usize, z : usize, material : u32) -> i32 { |
||||||
|
|
||||||
|
let b = depth - 1; |
||||||
|
|
||||||
|
let ci =
|
||||||
|
(((x >> b) & 1) << 0) | |
||||||
|
(((y >> b) & 1) << 1) | |
||||||
|
(((z >> b) & 1) << 2); |
||||||
|
|
||||||
|
let mut j = v.voxels[i].sub_voxels[ci]; |
||||||
|
|
||||||
|
if b == 0 { |
||||||
|
v.voxels[i].sub_voxels[ci] = -(material as i32); |
||||||
|
} else { |
||||||
|
if j <= 0 { |
||||||
|
|
||||||
|
j = v.subdivide_subvoxel(i, ci) as i32 - 1; |
||||||
|
} else { |
||||||
|
|
||||||
|
j = v.duplicate_subvoxel(i, ci).unwrap() as i32 - 1; |
||||||
|
} |
||||||
|
|
||||||
|
v.voxels[i].sub_voxels[ci] = r_set_helper(v, j as usize, b, x, y, z, material); |
||||||
|
} |
||||||
|
|
||||||
|
v.deduplicate_voxel(i) |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
pub fn fill_voxel_region_at_depth(&mut self, depth : usize, min : [usize; 3], max : [usize; 3], material : u32) { |
||||||
|
|
||||||
|
r_fill_region_helper(self, 0, [0; 3], 1 << depth, min, max, material); |
||||||
|
fn r_fill_region_helper(v : &mut VoxelChunk, i : usize, pos : [usize; 3], size : usize, min : [usize; 3], max : [usize; 3], material : u32) -> i32 { |
||||||
|
const BOX_OFFSETS : [[usize; 3]; 8] = [ |
||||||
|
[0, 0, 0], |
||||||
|
[1, 0, 0], |
||||||
|
[0, 1, 0], |
||||||
|
[1, 1, 0], |
||||||
|
[0, 0, 1], |
||||||
|
[1, 0, 1], |
||||||
|
[0, 1, 1], |
||||||
|
[1, 1, 1] |
||||||
|
]; |
||||||
|
|
||||||
|
let half_size = size >> 1; |
||||||
|
|
||||||
|
assert!(half_size > 0); |
||||||
|
|
||||||
|
for j in 0..8 { |
||||||
|
let bmin = [ |
||||||
|
pos[0] + BOX_OFFSETS[j][0] * half_size, |
||||||
|
pos[1] + BOX_OFFSETS[j][1] * half_size, |
||||||
|
pos[2] + BOX_OFFSETS[j][2] * half_size |
||||||
|
]; |
||||||
|
|
||||||
|
let bmax = [ |
||||||
|
bmin[0] + half_size, |
||||||
|
bmin[1] + half_size, |
||||||
|
bmin[2] + half_size, |
||||||
|
]; |
||||||
|
|
||||||
|
let x_contained = min[0] <= bmin[0] && bmax[0] < max[0]; |
||||||
|
let y_contained = min[1] <= bmin[1] && bmax[1] < max[1]; |
||||||
|
let z_contained = min[2] <= bmin[2] && bmax[2] < max[2]; |
||||||
|
|
||||||
|
let x_overlap = (min[0] <= bmin[0] && bmin[0] < max[0]) || (min[0] <= bmax[0]-1 && bmax[0]-1 < max[0]); |
||||||
|
let y_overlap = (min[1] <= bmin[1] && bmin[1] < max[1]) || (min[1] <= bmax[1]-1 && bmax[1]-1 < max[1]); |
||||||
|
let z_overlap = (min[2] <= bmin[2] && bmin[2] < max[2]) || (min[2] <= bmax[2]-1 && bmax[2]-1 < max[2]); |
||||||
|
|
||||||
|
if x_contained && y_contained && z_contained { |
||||||
|
// if this is the case, we can overwrite the entire subvoxel
|
||||||
|
v.voxels[i].sub_voxels[j] = -(material as i32); |
||||||
|
} else if x_overlap && y_overlap && z_overlap { |
||||||
|
// if this is the case, we recurse, subdividing if necessary
|
||||||
|
let k = if let Some(k) = v.duplicate_subvoxel(i, j) { |
||||||
|
k |
||||||
|
} else { |
||||||
|
v.subdivide_subvoxel(i, j) |
||||||
|
}; |
||||||
|
|
||||||
|
v.voxels[i].sub_voxels[j] = r_fill_region_helper(v, k, bmin, half_size, min, max, material); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
v.deduplicate_voxel(i) |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
} |
||||||
@ -0,0 +1,81 @@ |
|||||||
|
use test::{Bencher, black_box}; |
||||||
|
|
||||||
|
use crate::VoxelChunk; |
||||||
|
use cgmath::Vector3; |
||||||
|
|
||||||
|
macro_rules! bench_sdf_depth { |
||||||
|
($name:ident, $n:expr) => { |
||||||
|
#[bench] |
||||||
|
fn $name(b : &mut Bencher) { |
||||||
|
|
||||||
|
// test voxelizing a toroid section
|
||||||
|
|
||||||
|
let scx = 1.0; |
||||||
|
let scy = 1.0; |
||||||
|
let ra = 0.75; |
||||||
|
let rb = 0.125; |
||||||
|
|
||||||
|
b.iter(|| { |
||||||
|
black_box( |
||||||
|
VoxelChunk::from_distance_equation($n, |x, y, z| { |
||||||
|
|
||||||
|
let x = x - 0.5; |
||||||
|
let y = y - 0.5; |
||||||
|
let z = z - 0.5; |
||||||
|
|
||||||
|
let x = x.abs(); |
||||||
|
|
||||||
|
let k = if scy * x > scx * y { |
||||||
|
x * scx + y * scy |
||||||
|
} else { |
||||||
|
(x*x + y*y).sqrt() |
||||||
|
}; |
||||||
|
|
||||||
|
((x*x + y*y + z*z) + ra*ra - 2.0 * ra * k).sqrt() - rb |
||||||
|
|
||||||
|
}) |
||||||
|
); |
||||||
|
}); |
||||||
|
} |
||||||
|
}; |
||||||
|
} |
||||||
|
|
||||||
|
bench_sdf_depth!(bench_distance_function_depth_4, 4); |
||||||
|
bench_sdf_depth!(bench_distance_function_depth_5, 5); |
||||||
|
bench_sdf_depth!(bench_distance_function_depth_6, 6); |
||||||
|
bench_sdf_depth!(bench_distance_function_depth_7, 7); |
||||||
|
|
||||||
|
|
||||||
|
macro_rules! bench_raycast { |
||||||
|
($name:ident, $n:expr) => { |
||||||
|
#[bench] |
||||||
|
fn $name(b : &mut Bencher) { |
||||||
|
|
||||||
|
// test raycasts against a sphere
|
||||||
|
let chunk = VoxelChunk::from_distance_equation($n, |x, y, z| { |
||||||
|
|
||||||
|
let x = x - 0.5; |
||||||
|
let y = y - 0.5; |
||||||
|
let z = z - 0.5; |
||||||
|
|
||||||
|
(x*x + y*y + z*z).sqrt() - 0.5 |
||||||
|
|
||||||
|
}); |
||||||
|
|
||||||
|
b.iter(|| { |
||||||
|
black_box( |
||||||
|
chunk.raycast(Vector3::new(0.01, 0.01, 0.01), Vector3::new(1.0, 1.0, 1.0), 16, 1000.0) |
||||||
|
); |
||||||
|
}); |
||||||
|
|
||||||
|
println!("{:?}", chunk.raycast(Vector3::new(0.01, 0.01, 0.01), Vector3::new(1.0, 1.0, 1.0), 16, 1000.0)) |
||||||
|
} |
||||||
|
}; |
||||||
|
} |
||||||
|
|
||||||
|
bench_raycast!(bench_raycast_depth_6, 6); |
||||||
|
bench_raycast!(bench_raycast_depth_7, 7); |
||||||
|
bench_raycast!(bench_raycast_depth_8, 8); |
||||||
|
bench_raycast!(bench_raycast_depth_9, 9); |
||||||
|
bench_raycast!(bench_raycast_depth_10, 10); |
||||||
|
bench_raycast!(bench_raycast_depth_11, 11); |
||||||
@ -0,0 +1,194 @@ |
|||||||
|
use fnv::FnvHashMap; |
||||||
|
|
||||||
|
use crate::VChildDescriptor; |
||||||
|
use crate::VoxelChunk; |
||||||
|
|
||||||
|
impl VoxelChunk { |
||||||
|
|
||||||
|
pub fn deduplicate_voxel(&mut self, i : usize) -> i32 { |
||||||
|
|
||||||
|
*self.compression_dict.entry(self.voxels[i]) |
||||||
|
.or_insert((i + 1) as i32) |
||||||
|
|
||||||
|
// if let Some(&v) = self.compression_dict.get(&self.voxels[i]) {
|
||||||
|
// v
|
||||||
|
// } else {
|
||||||
|
// (i + 1) as i32
|
||||||
|
// }
|
||||||
|
} |
||||||
|
|
||||||
|
/// Reduce the size of a voxel chunk by deduplicating voxels
|
||||||
|
/// This function should run in linear time with the number DAG nodes
|
||||||
|
pub fn compress(&mut self) { |
||||||
|
let n = self.len(); |
||||||
|
|
||||||
|
let mut marked = (0..n).map(|_| false).collect::<Vec<_>>(); |
||||||
|
|
||||||
|
self.compression_dict.clear(); |
||||||
|
|
||||||
|
// helper function to traverse the hierarchy in depth-first, post-traversal order
|
||||||
|
// returns the index of the deduplicated voxel [1 indexed]
|
||||||
|
fn recurse_dedup(s : &mut VoxelChunk, idx : i32, marks : &mut Vec<bool>) -> i32 { |
||||||
|
|
||||||
|
for j in 0..8 { |
||||||
|
let sv = s.voxels[idx as usize].sub_voxels[j]; |
||||||
|
|
||||||
|
// check if there is a subvoxel for this index
|
||||||
|
if sv > 0 { |
||||||
|
let svi = sv - 1; |
||||||
|
// check if the subvoxel is a duplicate
|
||||||
|
if let Some(&nsvi) = s.compression_dict.get(&s.voxels[svi as usize]) { |
||||||
|
// if it is, modify this voxel to point to the canonical version
|
||||||
|
s.voxels[idx as usize].sub_voxels[j] = nsvi; |
||||||
|
} else { |
||||||
|
// if the subvoxels is not (yet) a duplicate, try deduplicating it
|
||||||
|
let nsvi = recurse_dedup(s, svi, marks); |
||||||
|
s.voxels[idx as usize].sub_voxels[j] = nsvi; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
let mut ret = idx + 1; |
||||||
|
s.compression_dict.entry(s.voxels[idx as usize]) |
||||||
|
.and_modify(|nidx| { |
||||||
|
// if this voxel is a now duplicate after deduplicating children,
|
||||||
|
// return the deduplicated index
|
||||||
|
ret = *nidx; |
||||||
|
}) |
||||||
|
.or_insert_with(|| { |
||||||
|
// otherwise, this is a now a unique voxel
|
||||||
|
marks[idx as usize] = true; |
||||||
|
ret |
||||||
|
}); |
||||||
|
|
||||||
|
ret |
||||||
|
} |
||||||
|
|
||||||
|
recurse_dedup(self, 0, &mut marked); |
||||||
|
|
||||||
|
// compress the marked nodes to be contiguous, while recording their new index
|
||||||
|
let mut new_idx = marked.iter().map(|_| -1).collect::<Vec<i32>>(); |
||||||
|
let mut nlen = 0i32; |
||||||
|
for i in 0..n { |
||||||
|
if marked[i] { |
||||||
|
self.voxels[nlen as usize] = self.voxels[i]; |
||||||
|
new_idx[i] = nlen; |
||||||
|
nlen += 1; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// truncate the list of voxel nodes to delete the old ones
|
||||||
|
self.voxels.truncate(nlen as usize); |
||||||
|
|
||||||
|
// convert all of the subvoxel indexes to the new addresses
|
||||||
|
for i in 0..(self.voxels.len()) { |
||||||
|
for j in 0..8 { |
||||||
|
let sv = self.voxels[i].sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
self.voxels[i].sub_voxels[j] = new_idx[(sv - 1) as usize] + 1; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
// fix the indexes in the compression dictionary
|
||||||
|
let mut new_comp_dict = FnvHashMap::default(); |
||||||
|
for (mut k,v) in self.compression_dict.drain() { |
||||||
|
|
||||||
|
for j in 0..8 { |
||||||
|
let sv = k.sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
k.sub_voxels[j] = new_idx[(sv - 1) as usize] + 1; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
new_comp_dict.insert(k, if v > 0 {new_idx[(v - 1) as usize] + 1} else {v}); |
||||||
|
} |
||||||
|
self.compression_dict = new_comp_dict; |
||||||
|
|
||||||
|
|
||||||
|
//invalidate lod materials:
|
||||||
|
self.lod_materials = vec![]; |
||||||
|
} |
||||||
|
|
||||||
|
/// Permute the DAG nodes inplace. Note: the permutation array is overwritten.
|
||||||
|
fn permute_node_indexes(&mut self, permutation : &mut [i32]) { |
||||||
|
let n = permutation.len(); |
||||||
|
|
||||||
|
// first: update the subvoxel inputs
|
||||||
|
for i in 0..n { |
||||||
|
for j in 0..8 { |
||||||
|
let sv = self.voxels[i].sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
self.voxels[i].sub_voxels[j] = permutation[sv as usize - 1] as i32 + 1; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// fix the indexes in the compression dictionary
|
||||||
|
let mut new_comp_dict = FnvHashMap::default(); |
||||||
|
for (mut k,v) in self.compression_dict.drain() { |
||||||
|
|
||||||
|
for j in 0..8 { |
||||||
|
let sv = k.sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
k.sub_voxels[j] = permutation[(sv - 1) as usize] + 1; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
new_comp_dict.insert(k, if v > 0 {permutation[(v - 1) as usize] + 1} else {v}); |
||||||
|
} |
||||||
|
self.compression_dict = new_comp_dict; |
||||||
|
|
||||||
|
// second, permute the voxel nodes in place
|
||||||
|
// I did some quick testing, and this seems to be faster than allocating a new array
|
||||||
|
// and deallocating the old one
|
||||||
|
for i in 0..n { |
||||||
|
let mut j = i; |
||||||
|
let mut temp = self.voxels[j]; |
||||||
|
while permutation[j] < n as i32 { |
||||||
|
let k = permutation[j] as usize; |
||||||
|
let temp2 = self.voxels[k]; |
||||||
|
self.voxels[k] = temp; |
||||||
|
temp = temp2; |
||||||
|
permutation[j] = n as i32; |
||||||
|
j = k; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/// Sort the DAG nodes in DFS order (hopefully makes the DAG more cache friendly)
|
||||||
|
pub fn topological_sort(&mut self) { |
||||||
|
|
||||||
|
let mut new_indexes = self.voxels.iter() |
||||||
|
.map(|_| -1) |
||||||
|
.collect::<Vec<i32>>(); |
||||||
|
|
||||||
|
fn recurse_traverse(voxels : &Vec<VChildDescriptor>, new_indexes : &mut Vec<i32>, i : usize, j : &mut i32) { |
||||||
|
for k in 0..8 { |
||||||
|
let sv = voxels[i].sub_voxels[k]; |
||||||
|
|
||||||
|
if sv > 0 { |
||||||
|
let svi = sv - 1; |
||||||
|
if new_indexes[svi as usize] == -1 { |
||||||
|
recurse_traverse(voxels, new_indexes, svi as usize, j); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
*j -= 1; |
||||||
|
new_indexes[i] = *j; |
||||||
|
} |
||||||
|
|
||||||
|
let mut j = self.len() as i32; |
||||||
|
|
||||||
|
recurse_traverse(&self.voxels, &mut new_indexes, 0, &mut j); |
||||||
|
|
||||||
|
// ensure every voxel is considered
|
||||||
|
assert_eq!(0, j); |
||||||
|
// ensure the root stays the same
|
||||||
|
assert_eq!(new_indexes[0], 0); |
||||||
|
|
||||||
|
self.permute_node_indexes(&mut new_indexes[..]); |
||||||
|
} |
||||||
|
} |
||||||
@ -0,0 +1,198 @@ |
|||||||
|
use super::*; |
||||||
|
|
||||||
|
impl VoxelChunk { |
||||||
|
|
||||||
|
/// Make an `x` by `y` by `z` grid of the current voxel chunk
|
||||||
|
/// Makes a small number of duplicated voxels in the
|
||||||
|
pub fn grid(&mut self, x : usize, y : usize, z : usize) { |
||||||
|
let max_dim = { |
||||||
|
use std::cmp::max; |
||||||
|
max(x, max(y,z)) |
||||||
|
}; |
||||||
|
|
||||||
|
let s = log_2(max_dim - 1) + 1; |
||||||
|
|
||||||
|
self.shift_indexes(s as usize); |
||||||
|
|
||||||
|
self.voxels.splice( |
||||||
|
0..0, |
||||||
|
(0..s).map(|i| VChildDescriptor{sub_voxels: [i as i32 + 2; 8]}) |
||||||
|
); |
||||||
|
|
||||||
|
fn recursive_restrict(s : &mut VoxelChunk, i : usize, x : usize, y : usize, z : usize, scale : usize) { |
||||||
|
|
||||||
|
// base case, the current voxel is entirely contained in the grid
|
||||||
|
if x >= scale && y >= scale && z >= scale { |
||||||
|
return; |
||||||
|
} |
||||||
|
|
||||||
|
let half_scale = scale >> 1; |
||||||
|
|
||||||
|
for j in 0..8 { |
||||||
|
let xn = if j & 0b001 == 0 { 0 } else {half_scale}; |
||||||
|
let yn = if j & 0b010 == 0 { 0 } else {half_scale}; |
||||||
|
let zn = if j & 0b100 == 0 { 0 } else {half_scale}; |
||||||
|
|
||||||
|
if xn >= x || yn >= y || zn >= z { |
||||||
|
// clear the subvoxel if the subvoxel is outside the grid
|
||||||
|
s.voxels[i].sub_voxels[j] = 0; |
||||||
|
} else { |
||||||
|
// further process the subvoxel
|
||||||
|
s.duplicate_subvoxel(i, j); |
||||||
|
recursive_restrict(s, s.voxels[i].sub_voxels[j] as usize - 1, x - xn, y - yn, z - zn, half_scale); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
recursive_restrict(self, 0, x, y, z, 1 << s); |
||||||
|
} |
||||||
|
|
||||||
|
/// Translates the voxel chunk in multiples of its size in a new larger space
|
||||||
|
pub fn translate_integral(&mut self, x : usize, y : usize, z : usize, size : usize) { |
||||||
|
let max_coord = { |
||||||
|
use std::cmp::max; |
||||||
|
max(x, max(y,z)) |
||||||
|
}; |
||||||
|
|
||||||
|
assert!(max_coord < size); |
||||||
|
|
||||||
|
let s = log_2(size - 1) + 1; |
||||||
|
|
||||||
|
self.shift_indexes(s as usize); |
||||||
|
|
||||||
|
self.voxels.splice( |
||||||
|
0..0, |
||||||
|
(0..s).map(|i| { |
||||||
|
let mut sub_voxels = [0i32; 8]; |
||||||
|
let j = s - i - 1; |
||||||
|
|
||||||
|
// calculate the index of the next child at depth i
|
||||||
|
let xo = if x & (1 << j) == 0 {0} else {1}; |
||||||
|
let yo = if y & (1 << j) == 0 {0} else {2}; |
||||||
|
let zo = if z & (1 << j) == 0 {0} else {4}; |
||||||
|
|
||||||
|
sub_voxels[xo + yo + zo] = i as i32 + 2; |
||||||
|
|
||||||
|
VChildDescriptor{sub_voxels} |
||||||
|
}) |
||||||
|
); |
||||||
|
} |
||||||
|
|
||||||
|
/// Translates the voxel chunk by fractions of its size
|
||||||
|
pub fn translate_fractional(&mut self, _x : usize, _y : usize, _z : usize) { |
||||||
|
unimplemented!() |
||||||
|
} |
||||||
|
|
||||||
|
/// Writes the other voxel chunk into this one. Whether the other voxels overwrite or not is
|
||||||
|
/// controlled by the `overwrite` parameter
|
||||||
|
pub fn combine(&mut self, other : &VoxelChunk, overwrite : bool, recompress : bool) { |
||||||
|
let n = self.len(); |
||||||
|
|
||||||
|
{ |
||||||
|
let mut other_clone : VoxelChunk = other.clone(); |
||||||
|
other_clone.shift_indexes(n); |
||||||
|
self.voxels.extend(other_clone.voxels); |
||||||
|
} |
||||||
|
|
||||||
|
fn recursive_combine(s : &mut VoxelChunk, overwrite : bool, i : usize, j : usize) { |
||||||
|
for k in 0..8 { |
||||||
|
let sv0 = s.voxels[i].sub_voxels[k]; |
||||||
|
let sv1 = s.voxels[j].sub_voxels[k]; |
||||||
|
|
||||||
|
if sv1 == 0 { |
||||||
|
continue; |
||||||
|
} |
||||||
|
if sv0 == 0 { |
||||||
|
s.voxels[i].sub_voxels[k] = sv1; |
||||||
|
continue; |
||||||
|
} |
||||||
|
if overwrite { |
||||||
|
if sv1 < 0 { |
||||||
|
s.voxels[i].sub_voxels[k] = sv1; |
||||||
|
continue; |
||||||
|
} |
||||||
|
if sv0 < 0 { |
||||||
|
if sv1 > 0 { |
||||||
|
let sv0 = s.subdivide_subvoxel(i, k); |
||||||
|
recursive_combine(s, overwrite, sv0 - 1, sv1 as usize - 1); |
||||||
|
continue; |
||||||
|
} else { |
||||||
|
s.voxels[i].sub_voxels[k] = sv1; |
||||||
|
} |
||||||
|
} |
||||||
|
} else { |
||||||
|
if sv0 < 0 { |
||||||
|
continue; |
||||||
|
} |
||||||
|
if sv1 < 0 { |
||||||
|
let sv1 = s.subdivide_subvoxel(j, k); |
||||||
|
let sv0 = s.duplicate_subvoxel(i, k).unwrap(); |
||||||
|
recursive_combine(s, overwrite, sv0 as usize - 1, sv1 as usize - 1); |
||||||
|
continue; |
||||||
|
} |
||||||
|
} |
||||||
|
if sv0 > 0 && sv1 > 0 { |
||||||
|
let sv0 = s.duplicate_subvoxel(i, k).unwrap(); |
||||||
|
recursive_combine(s, overwrite, sv0 - 1, sv1 as usize - 1); |
||||||
|
continue; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
recursive_combine(self, overwrite, 0, n); |
||||||
|
|
||||||
|
if recompress { |
||||||
|
self.compress(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/// Writes the other voxel chunk into this one. Whether the other voxels overwrite or not is
|
||||||
|
/// controlled by the `overwrite` parameter
|
||||||
|
pub fn subtract(&mut self, other : &VoxelChunk, recompress : bool) { |
||||||
|
let n = self.len(); |
||||||
|
|
||||||
|
{ |
||||||
|
let mut other_clone : VoxelChunk = other.clone(); |
||||||
|
other_clone.shift_indexes(n); |
||||||
|
self.voxels.extend(other_clone.voxels); |
||||||
|
} |
||||||
|
|
||||||
|
fn recursive_subtract(s : &mut VoxelChunk, i : usize, j : usize) { |
||||||
|
for k in 0..8 { |
||||||
|
let sv0 = s.voxels[i].sub_voxels[k]; |
||||||
|
let sv1 = s.voxels[j].sub_voxels[k]; |
||||||
|
|
||||||
|
if sv1 == 0 { |
||||||
|
continue; |
||||||
|
} |
||||||
|
if sv0 == 0 { |
||||||
|
continue; |
||||||
|
} |
||||||
|
if sv1 < 0 { |
||||||
|
s.voxels[i].sub_voxels[k] = 0; |
||||||
|
continue; |
||||||
|
} |
||||||
|
if sv0 < 0 { |
||||||
|
if sv1 > 0 { |
||||||
|
let sv0 = s.subdivide_subvoxel(i, k); |
||||||
|
recursive_subtract(s, sv0 - 1, sv1 as usize - 1); |
||||||
|
continue; |
||||||
|
} else { |
||||||
|
s.voxels[i].sub_voxels[k] = 0; |
||||||
|
} |
||||||
|
} |
||||||
|
if sv0 > 0 && sv1 > 0 { |
||||||
|
let sv0 = s.duplicate_subvoxel(i, k).unwrap(); |
||||||
|
recursive_subtract(s, sv0 - 1, sv1 as usize - 1); |
||||||
|
continue; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
recursive_subtract(self, 0, n); |
||||||
|
|
||||||
|
if recompress { |
||||||
|
self.compress(); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
@ -0,0 +1,433 @@ |
|||||||
|
use fnv::FnvHashMap; |
||||||
|
|
||||||
|
use crate::MAX_DAG_DEPTH; |
||||||
|
use crate::VChildDescriptor; |
||||||
|
use crate::Vec3; |
||||||
|
use crate::VoxelChunk; |
||||||
|
use crate::log_2; |
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
impl VoxelChunk { |
||||||
|
|
||||||
|
/// process a 3D array (`data` with dimensions `dim`) into a SVDAG, mapping values to materials using `f`
|
||||||
|
pub fn from_dense_voxels(data : &[i32], dim : [usize; 3]) -> Self { |
||||||
|
|
||||||
|
assert!(dim[0] > 0 && dim[1] > 0 && dim[2] > 0); |
||||||
|
|
||||||
|
let depth = log_2(dim.iter().cloned().max().unwrap_or(0) - 1) as usize + 1; |
||||||
|
|
||||||
|
assert!(depth < MAX_DAG_DEPTH, "Depth is too large: {} >= {}", depth, MAX_DAG_DEPTH); |
||||||
|
|
||||||
|
let size = 1 << depth; |
||||||
|
|
||||||
|
fn recursive_create_dense( |
||||||
|
s : &mut VoxelChunk, d : usize, min : [usize; 3], size : usize,
|
||||||
|
data : &[i32], dim : [usize; 3], |
||||||
|
dedup : &mut FnvHashMap<VChildDescriptor, i32> |
||||||
|
) -> i32 { |
||||||
|
if min[0] >= dim[0] || min[1] >= dim[1] || min[2] >= dim[2] { |
||||||
|
// air if the voxel does not intersect the voxel data
|
||||||
|
return 0; |
||||||
|
} |
||||||
|
|
||||||
|
if size <= 1 { |
||||||
|
// once we reach size 1, take the material from the data
|
||||||
|
let v = data[min[0] + dim[0] * (min[1] + dim[1] * min[2])]; |
||||||
|
// prevent awful fractal voxel graphs
|
||||||
|
return -v.abs(); |
||||||
|
} |
||||||
|
|
||||||
|
const BOX_OFFSETS : [[usize; 3]; 8] = [ |
||||||
|
[0, 0, 0], |
||||||
|
[1, 0, 0], |
||||||
|
[0, 1, 0], |
||||||
|
[1, 1, 0], |
||||||
|
[0, 0, 1], |
||||||
|
[1, 0, 1], |
||||||
|
[0, 1, 1], |
||||||
|
[1, 1, 1] |
||||||
|
]; |
||||||
|
|
||||||
|
let mut voxel = VChildDescriptor{ |
||||||
|
sub_voxels : [0; 8], |
||||||
|
}; |
||||||
|
|
||||||
|
let half_size = size >> 1; |
||||||
|
|
||||||
|
let mut is_uniform = true; |
||||||
|
|
||||||
|
for i in 0..8 { |
||||||
|
let bmin = [ |
||||||
|
min[0] + BOX_OFFSETS[i][0] * half_size, |
||||||
|
min[1] + BOX_OFFSETS[i][1] * half_size, |
||||||
|
min[2] + BOX_OFFSETS[i][2] * half_size |
||||||
|
]; |
||||||
|
|
||||||
|
voxel.sub_voxels[i] = recursive_create_dense(s, d - 1, bmin, half_size, data, dim, dedup); |
||||||
|
|
||||||
|
if voxel.sub_voxels[i] != voxel.sub_voxels[0] || voxel.sub_voxels[i] > 0 { |
||||||
|
// the subvoxels are not all the same leaf node, so this voxel is not uniform
|
||||||
|
is_uniform = false; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if is_uniform { |
||||||
|
return voxel.sub_voxels[0]; |
||||||
|
} |
||||||
|
|
||||||
|
if let Some(&id) = dedup.get(&voxel) { |
||||||
|
// this node is a duplicate
|
||||||
|
id |
||||||
|
} else { |
||||||
|
// this node is new, so add it
|
||||||
|
s.voxels.push(voxel); |
||||||
|
let id = s.voxels.len() as i32; |
||||||
|
dedup.insert(voxel, id); |
||||||
|
id |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
let mut chunk = VoxelChunk::empty(); |
||||||
|
chunk.voxels.clear(); |
||||||
|
// we build a list of unique voxels and store them in here
|
||||||
|
let mut dedup = FnvHashMap::default(); |
||||||
|
|
||||||
|
recursive_create_dense(&mut chunk, depth, [0,0,0], size, data, dim, &mut dedup); |
||||||
|
|
||||||
|
chunk.voxels.reverse(); |
||||||
|
|
||||||
|
//fixup the subvoxel pointers (we reversed the order)
|
||||||
|
let n = chunk.voxels.len() as i32; |
||||||
|
for i in 0..(chunk.voxels.len()) { |
||||||
|
for j in 0..8 { |
||||||
|
let sv = chunk.voxels[i].sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
let svi = n - sv + 1; |
||||||
|
chunk.voxels[i].sub_voxels[j] = svi; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
chunk |
||||||
|
} |
||||||
|
|
||||||
|
/// process an implicit 3D array into a DAG.
|
||||||
|
pub fn from_implicit_array<F : FnMut(usize, usize, usize) -> i32>(depth : usize, mut implicit : F) -> Self { |
||||||
|
|
||||||
|
assert!(depth < MAX_DAG_DEPTH, "Depth is too large: {} >= {}", depth, MAX_DAG_DEPTH); |
||||||
|
|
||||||
|
let size = 1 << depth; |
||||||
|
|
||||||
|
fn recursive_create_dense_implicit<F : FnMut(usize, usize, usize) -> i32>( |
||||||
|
s : &mut VoxelChunk, min : [usize; 3], size : usize, implicit : &mut F, |
||||||
|
dedup : &mut FnvHashMap<VChildDescriptor, i32> |
||||||
|
) -> i32 { |
||||||
|
|
||||||
|
if size <= 1 { |
||||||
|
// once we reach size 1, evaluate the material at the implicit surface
|
||||||
|
let v = implicit(min[0], min[1], min[2]); |
||||||
|
// prevent awful fractal voxel graphs
|
||||||
|
return -v.abs(); |
||||||
|
} |
||||||
|
|
||||||
|
const BOX_OFFSETS : [[usize; 3]; 8] = [ |
||||||
|
[0, 0, 0], |
||||||
|
[1, 0, 0], |
||||||
|
[0, 1, 0], |
||||||
|
[1, 1, 0], |
||||||
|
[0, 0, 1], |
||||||
|
[1, 0, 1], |
||||||
|
[0, 1, 1], |
||||||
|
[1, 1, 1] |
||||||
|
]; |
||||||
|
|
||||||
|
let mut voxel = VChildDescriptor{ |
||||||
|
sub_voxels : [0; 8], |
||||||
|
}; |
||||||
|
|
||||||
|
let half_size = size >> 1; |
||||||
|
|
||||||
|
let mut is_uniform = true; |
||||||
|
|
||||||
|
for i in 0..8 { |
||||||
|
let bmin = [ |
||||||
|
min[0] + BOX_OFFSETS[i][0] * half_size, |
||||||
|
min[1] + BOX_OFFSETS[i][1] * half_size, |
||||||
|
min[2] + BOX_OFFSETS[i][2] * half_size |
||||||
|
]; |
||||||
|
|
||||||
|
voxel.sub_voxels[i] = recursive_create_dense_implicit(s, bmin, half_size, implicit, dedup); |
||||||
|
|
||||||
|
if voxel.sub_voxels[i] != voxel.sub_voxels[0] || voxel.sub_voxels[i] > 0 { |
||||||
|
// the subvoxels are not all the same leaf node, so this voxel is not uniform
|
||||||
|
is_uniform = false; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
if is_uniform { |
||||||
|
return voxel.sub_voxels[0]; |
||||||
|
} |
||||||
|
|
||||||
|
if let Some(&id) = dedup.get(&voxel) { |
||||||
|
// this node is a duplicate
|
||||||
|
id |
||||||
|
} else { |
||||||
|
// this node is new, so add it
|
||||||
|
s.voxels.push(voxel); |
||||||
|
let id = s.voxels.len() as i32; |
||||||
|
dedup.insert(voxel, id); |
||||||
|
id |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
let mut chunk = VoxelChunk::empty(); |
||||||
|
chunk.voxels.clear(); |
||||||
|
// we build a list of unique voxels and store them in here
|
||||||
|
let mut dedup = FnvHashMap::default(); |
||||||
|
|
||||||
|
recursive_create_dense_implicit(&mut chunk, [0,0,0], size, &mut implicit, &mut dedup); |
||||||
|
|
||||||
|
chunk.voxels.reverse(); |
||||||
|
|
||||||
|
//fixup the subvoxel pointers (we reversed the order)
|
||||||
|
let n = chunk.voxels.len() as i32; |
||||||
|
for i in 0..(chunk.voxels.len()) { |
||||||
|
for j in 0..8 { |
||||||
|
let sv = chunk.voxels[i].sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
let svi = n - sv + 1; |
||||||
|
chunk.voxels[i].sub_voxels[j] = svi; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
chunk |
||||||
|
} |
||||||
|
|
||||||
|
/// process a distance equation array into a DAG.
|
||||||
|
///
|
||||||
|
pub fn from_distance_equation<F : FnMut(f32, f32, f32) -> f32>(depth : usize, mut implicit : F) -> Self { |
||||||
|
|
||||||
|
assert!(depth < MAX_DAG_DEPTH, "Depth is too large: {} >= {}", depth, MAX_DAG_DEPTH); |
||||||
|
|
||||||
|
let size = 1 << depth; |
||||||
|
|
||||||
|
fn recurse_distance_equation<F : FnMut(f32, f32, f32) -> f32>( |
||||||
|
s : &mut VoxelChunk, min : [usize; 3], size : usize, implicit : &mut F, rscale : f32, |
||||||
|
dedup : &mut FnvHashMap<VChildDescriptor, i32> |
||||||
|
) -> i32 { |
||||||
|
|
||||||
|
const SQRT_THREE : f32 = 1.732050807568877293527446341505872366942805253810380628055; |
||||||
|
|
||||||
|
let v = implicit( |
||||||
|
rscale * (min[0] as f32 + 0.5 * size as f32), |
||||||
|
rscale * (min[1] as f32 + 0.5 * size as f32), |
||||||
|
rscale * (min[2] as f32 + 0.5 * size as f32) |
||||||
|
); |
||||||
|
|
||||||
|
let bounding_radius = rscale * size as f32 * SQRT_THREE; |
||||||
|
|
||||||
|
if size <= 1 { |
||||||
|
// once we reach size 1, check if the object intersects the implicit region
|
||||||
|
if min[0] == 0 && min[1] == 0 && min[2] == 0 { |
||||||
|
// println!("maybe intersection {} < {}", v, bounding_radius);
|
||||||
|
} |
||||||
|
return if v < bounding_radius { -1 } else { 0 }; |
||||||
|
} |
||||||
|
|
||||||
|
if v > bounding_radius { |
||||||
|
// the voxel does not intersect the cube at all based on the distance equation
|
||||||
|
// println!("no intersection {} {}", v, bounding_radius);
|
||||||
|
|
||||||
|
return 0; |
||||||
|
} |
||||||
|
|
||||||
|
const BOX_OFFSETS : [[usize; 3]; 8] = [ |
||||||
|
[0, 0, 0], |
||||||
|
[1, 0, 0], |
||||||
|
[0, 1, 0], |
||||||
|
[1, 1, 0], |
||||||
|
[0, 0, 1], |
||||||
|
[1, 0, 1], |
||||||
|
[0, 1, 1], |
||||||
|
[1, 1, 1] |
||||||
|
]; |
||||||
|
|
||||||
|
let mut voxel = VChildDescriptor{ |
||||||
|
sub_voxels : [0; 8], |
||||||
|
}; |
||||||
|
|
||||||
|
let half_size = size >> 1; |
||||||
|
|
||||||
|
let mut is_uniform = true; |
||||||
|
|
||||||
|
for i in 0..8 { |
||||||
|
let bmin = [ |
||||||
|
min[0] + BOX_OFFSETS[i][0] * half_size, |
||||||
|
min[1] + BOX_OFFSETS[i][1] * half_size, |
||||||
|
min[2] + BOX_OFFSETS[i][2] * half_size |
||||||
|
]; |
||||||
|
|
||||||
|
voxel.sub_voxels[i] = recurse_distance_equation(s, bmin, half_size, implicit, rscale, dedup); |
||||||
|
|
||||||
|
if voxel.sub_voxels[i] != voxel.sub_voxels[0] || voxel.sub_voxels[i] > 0 { |
||||||
|
// the subvoxels are not all the same leaf node, so this voxel is not uniform
|
||||||
|
is_uniform = false; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
if is_uniform { |
||||||
|
return voxel.sub_voxels[0]; |
||||||
|
} |
||||||
|
|
||||||
|
if let Some(&id) = dedup.get(&voxel) { |
||||||
|
// this node is a duplicate
|
||||||
|
id |
||||||
|
} else { |
||||||
|
// this node is new, so add it
|
||||||
|
s.voxels.push(voxel); |
||||||
|
let id = s.voxels.len() as i32; |
||||||
|
dedup.insert(voxel, id); |
||||||
|
id |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
let mut chunk = VoxelChunk::empty(); |
||||||
|
chunk.voxels.clear(); |
||||||
|
// we build a list of unique voxels and store them in here
|
||||||
|
let mut dedup : FnvHashMap<VChildDescriptor, i32> = FnvHashMap::default(); |
||||||
|
|
||||||
|
recurse_distance_equation(&mut chunk, [0,0,0], size, &mut implicit, 1.0 / (size as f32), &mut dedup); |
||||||
|
|
||||||
|
chunk.voxels.reverse(); |
||||||
|
|
||||||
|
//fixup the subvoxel pointers (we reversed the order)
|
||||||
|
let n = chunk.voxels.len() as i32; |
||||||
|
for i in 0..(chunk.voxels.len()) { |
||||||
|
for j in 0..8 { |
||||||
|
let sv = chunk.voxels[i].sub_voxels[j]; |
||||||
|
if sv > 0 { |
||||||
|
let svi = n - sv + 1; |
||||||
|
chunk.voxels[i].sub_voxels[j] = svi; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
chunk |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
    /// Build a voxel DAG by recursively querying an intersection test.
    ///
    /// `intersect_test(center, half_extent)` must return `true` when the modeled
    /// object may intersect the axis-aligned cube centered at `center` (coordinates
    /// normalized to [0, 1]) with the given half edge length. The volume is a cube
    /// of `2^depth` voxels per side; nodes are deduplicated into a DAG as they are built.
    pub fn from_intersection_test<F : FnMut(Vec3, f32) -> bool>(depth : usize, mut intersect_test : F) -> Self {

        assert!(depth < MAX_DAG_DEPTH, "Depth is too large: {} >= {}", depth, MAX_DAG_DEPTH);

        // edge length of the volume in voxels
        let size = 1 << depth;

        // Recursive helper: builds the subtree for the cube at integer corner `min`
        // with edge `size`, returning the child-descriptor entry for it
        // (0 = empty, -1 = solid leaf, >0 = 1-based node id in `s.voxels`).
        fn recurse_intersection_test<F : FnMut(Vec3, f32) -> bool>(
            s : &mut VoxelChunk, min : [usize; 3], size : usize, intersect_test : &mut F, rscale : f32,
            dedup : &mut FnvHashMap<VChildDescriptor, i32>
        ) -> i32 {

            // query the test at this cube's center, with its half extent
            // (`rscale` converts integer voxel coordinates to normalized [0,1] space)
            let intersects = intersect_test(
                Vec3::new(
                    rscale * (min[0] as f32 + 0.5 * size as f32),
                    rscale * (min[1] as f32 + 0.5 * size as f32),
                    rscale * (min[2] as f32 + 0.5 * size as f32)
                ),
                0.5 * rscale * size as f32
            );

            if size <= 1 {
                // once we reach size 1, check if the object intersects the implicit region
                if min[0] == 0 && min[1] == 0 && min[2] == 0 {
                    // debug hook for the origin voxel (intentionally empty)
                    // println!("maybe intersection {} < {}", v, bounding_radius);
                }
                return if intersects { -1 } else { 0 };
            }

            if !intersects {
                // the object cannot intersect this cube: emit an empty leaf
                // println!("no intersection {} {}", v, bounding_radius);

                return 0;
            }

            // integer offsets of the 8 octants, matching VChildDescriptor layout
            const BOX_OFFSETS : [[usize; 3]; 8] = [
                [0, 0, 0],
                [1, 0, 0],
                [0, 1, 0],
                [1, 1, 0],
                [0, 0, 1],
                [1, 0, 1],
                [0, 1, 1],
                [1, 1, 1]
            ];

            let mut voxel = VChildDescriptor{
                sub_voxels : [0; 8],
            };

            let half_size = size >> 1;

            let mut is_uniform = true;

            for i in 0..8 {
                let bmin = [
                    min[0] + BOX_OFFSETS[i][0] * half_size,
                    min[1] + BOX_OFFSETS[i][1] * half_size,
                    min[2] + BOX_OFFSETS[i][2] * half_size
                ];

                voxel.sub_voxels[i] = recurse_intersection_test(s, bmin, half_size, intersect_test, rscale, dedup);

                // only identical *leaf* children (<= 0) allow collapsing this node;
                // interior children (> 0) occupy distinct positions and cannot merge
                if voxel.sub_voxels[i] != voxel.sub_voxels[0] || voxel.sub_voxels[i] > 0 {
                    // the subvoxels are not all the same leaf node, so this voxel is not uniform
                    is_uniform = false;
                }
            }

            if is_uniform {
                // collapse: the parent becomes the same leaf as all its children
                return voxel.sub_voxels[0];
            }

            if let Some(&id) = dedup.get(&voxel) {
                // this node is a duplicate
                id
            } else {
                // this node is new, so add it
                s.voxels.push(voxel);
                let id = s.voxels.len() as i32;
                dedup.insert(voxel, id);
                id
            }
        }

        let mut chunk = VoxelChunk::empty();
        // drop the placeholder root; the recursion appends nodes leaves-first
        chunk.voxels.clear();
        // we build a list of unique voxel subtrees and store them in here
        let mut dedup : FnvHashMap<VChildDescriptor, i32> = FnvHashMap::default();

        recurse_intersection_test(&mut chunk, [0,0,0], size, &mut intersect_test, 1.0 / (size as f32), &mut dedup);

        // put the root first (nodes were appended leaves-first)
        chunk.voxels.reverse();

        //fixup the subvoxel pointers (we reversed the order)
        // a 1-based id `sv` at old position `sv - 1` moved to position `n - sv`,
        // which is 1-based id `n - sv + 1`
        let n = chunk.voxels.len() as i32;
        for i in 0..(chunk.voxels.len()) {
            for j in 0..8 {
                let sv = chunk.voxels[i].sub_voxels[j];
                if sv > 0 {
                    let svi = n - sv + 1;
                    chunk.voxels[i].sub_voxels[j] = svi;
                }
            }
        }

        chunk
    }
||||||
|
} |
||||||
@ -0,0 +1,595 @@ |
|||||||
|
#![cfg_attr(feature = "benchtest", feature(test))] |
||||||
|
|
||||||
|
#[cfg(feature="benchtest")] |
||||||
|
extern crate test; |
||||||
|
|
||||||
|
mod access; |
||||||
|
mod raycast; |
||||||
|
mod csg; |
||||||
|
mod voxelize; |
||||||
|
mod encode; |
||||||
|
mod compress; |
||||||
|
mod generate; |
||||||
|
|
||||||
|
|
||||||
|
mod tests; |
||||||
|
#[cfg(all(test, feature = "benchtest"))] |
||||||
|
mod bench; |
||||||
|
|
||||||
|
use cgmath::Vector2; |
||||||
|
use cgmath::Vector3; |
||||||
|
use cgmath::InnerSpace; |
||||||
|
use cgmath::prelude::*; |
||||||
|
|
||||||
|
use std::collections::HashMap; |
||||||
|
use std::collections::HashSet; |
||||||
|
|
||||||
|
use fnv::FnvHashMap; |
||||||
|
|
||||||
|
use serde::{Serialize, Deserialize}; |
||||||
|
|
||||||
|
pub const MAX_DAG_DEPTH : usize = 16; |
||||||
|
|
||||||
|
type Vec3 = Vector3<f32>; |
||||||
|
type Vec2 = Vector2<f32>; |
||||||
|
|
||||||
|
/// Surface parameters for one voxel material.
/// NOTE(review): field order is part of the serde serialization format — do not reorder.
#[derive(Copy, Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct Material {
    // base color — presumably linear RGB; confirm against the shader
    pub albedo : [f32; 3],
    pub metalness : f32,
    // emitted light per channel
    pub emission : [f32; 3],
    pub roughness : f32,
}
||||||
|
|
||||||
|
/// One input mesh triangle used by the voxelizer.
#[derive(Debug, Clone)]
pub struct Triangle {
    // vertex positions
    pub points : [Vec3; 3],
    // per-vertex texture coordinates
    pub uv : [Vec2; 3],
    // face normal (assumed precomputed by the loader — TODO confirm it is unit length)
    pub normal : Vec3,
    // material id assigned to voxels this triangle touches
    pub mat : u16,
}
||||||
|
|
||||||
|
impl Default for Triangle { |
||||||
|
fn default() -> Self { |
||||||
|
Triangle { |
||||||
|
points : [Vec3::zero(); 3], |
||||||
|
uv : [Vec2::zero(); 3], |
||||||
|
normal : Vec3::zero(), |
||||||
|
mat : 0, |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
impl Triangle { |
||||||
|
fn area(&self) -> f32 { |
||||||
|
// calculate the area of the triangle using heron's formula
|
||||||
|
let a = self.points[0].distance(self.points[1]); |
||||||
|
let b = self.points[1].distance(self.points[2]); |
||||||
|
let c = self.points[2].distance(self.points[0]); |
||||||
|
|
||||||
|
let s = 0.5 * (a + b + c); |
||||||
|
|
||||||
|
(s * (s - a) * (s - b) * (s - c)).sqrt() |
||||||
|
} |
||||||
|
|
||||||
|
fn pos_center(&self) -> Vec3 { |
||||||
|
(self.points[0] + self.points[1] + self.points[2]) / 3.0 |
||||||
|
} |
||||||
|
|
||||||
|
fn uv_center(&self) -> Vec2 { |
||||||
|
(self.uv[0] + self.uv[1] + self.uv[2]) / 3.0 |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// this is a redefinition of the type in the voxel.glsl shader.
// these redefinition shenanigans are necessary because serde can't quite derive
// serialize/deserialize for types in another module that are used in Vec fields
#[derive(Copy, Clone, Serialize, Deserialize, Debug, Hash, PartialEq, Eq)]
#[repr(C)]
pub struct VChildDescriptor {
    /// One DAG node: the 8 octant entries of a cubic region.
    ///
    /// layout : [
    ///     -X -Y -Z
    ///     +X -Y -Z
    ///     -X +Y -Z
    ///     +X +Y -Z
    ///     -X -Y +Z
    ///     +X -Y +Z
    ///     -X +Y +Z
    ///     +X +Y +Z
    /// ]
    /// storage format:
    ///     if sv <= 0 : solid material (0 is empty)
    ///     if sv > 0 : subdivided voxel (1 indexed)
    pub sub_voxels : [i32; 8],
}
||||||
|
|
||||||
|
/// A sparse voxel DAG: a flat pool of nodes whose root is `voxels[0]`.
/// Child entries index into `voxels` with 1-based ids (see `VChildDescriptor`).
#[derive(Serialize, Deserialize, Clone)]
pub struct VoxelChunk {
    pub voxels : Vec<VChildDescriptor>,
    // per-node display material for LOD rendering; rebuilt by
    // `calculate_lod_materials`, not serialized
    #[serde(skip_serializing, skip_deserializing)]
    pub lod_materials : Vec<u32>,
    // node -> 1-based id cache used for deduplication; transient
    #[serde(skip_serializing, skip_deserializing)]
    pub compression_dict : FnvHashMap<VChildDescriptor, i32>,
}
||||||
|
|
||||||
|
// Width in bits of the type `T`.
const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }

// Integer base-2 logarithm: the index of the highest set bit.
// Panics when `x == 0`, since log2(0) is undefined.
fn log_2(x: usize) -> u32 {
    assert!(x > 0);
    let width = num_bits::<usize>() as u32;
    width - x.leading_zeros() - 1
}
||||||
|
|
||||||
|
// Checked lookup into a flat slice viewed as a 3D grid of dimensions `d`
// (x-fastest layout). Returns `None` when any coordinate is out of range.
fn array3d_get<S : Copy>(a : &[S], d : [usize;3], i : [usize;3]) -> Option<S> {
    let in_bounds = i.iter().zip(d.iter()).all(|(&idx, &dim)| idx < dim);
    if !in_bounds {
        return None;
    }
    let flat = i[0] + d[0] * (i[1] + d[1] * i[2]);
    Some(a[flat])
}
||||||
|
|
||||||
|
impl VoxelChunk { |
||||||
|
/// Construct an empty
|
||||||
|
pub fn empty() -> Self { |
||||||
|
// we must populate an empty root voxel;
|
||||||
|
Self { |
||||||
|
voxels : vec![VChildDescriptor{ |
||||||
|
sub_voxels : [0;8], |
||||||
|
}], |
||||||
|
lod_materials : vec![], |
||||||
|
compression_dict : Default::default(), |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
    /// Return the number of unique DAG nodes in this chunk
    pub fn len(&self) -> usize {
        self.voxels.len()
    }

    /// Size metric reported for compression statistics.
    /// NOTE(review): currently identical to `len()` (node count, not bytes) —
    /// confirm whether a byte size was intended.
    pub fn compressed_size(&self) -> usize {
        self.voxels.len()
    }
||||||
|
|
||||||
|
|
||||||
|
    /// Convert a triangle mesh into the voxel chunk format.
    /// Only voxels that intersect triangles will be made solid (a shell, not a solid fill).
    ///
    /// `depth` sets the resolution (`2^depth` voxels per side); `corner`/`size` define
    /// the cubic region of the mesh to voxelize; `progress_callback` receives coarse
    /// progress ticks while recursing.
    pub fn from_mesh<F : FnMut(u64)>(depth : usize, triangles: &[Triangle], corner : Vec3, size : f32, progress_callback : &mut F) -> VoxelChunk {
        assert!(depth < MAX_DAG_DEPTH, "Depth is too large: {} >= {}", depth, MAX_DAG_DEPTH);

        // Recursive helper: `indexes[start..]` holds the indices of triangles that
        // intersect the current cube; children append their own candidate lists to
        // `indexes` and truncate afterwards, using it as a simple stack allocator.
        // NOTE(review): `counts` is threaded through but never read here — `mode`
        // allocates its own map; consider removing the parameter.
        fn recursive_create_shell<F : FnMut(u64)>(
            s : &mut VoxelChunk, d : usize, md : usize, min : Vec3, size : f32,
            tris : &[Triangle], indexes : &mut Vec<usize>, start : usize,
            dedup : &mut FnvHashMap<VChildDescriptor, i32>,
            counts : &mut FnvHashMap<u16, i32>,
            progress_callback : &mut F
        ) -> i32 {

            if d == 0 {
                // if we reach the max resolution, check if there are intersecting triangles
                if start < indexes.len() {
                    // solid material: the most common material among intersecting triangles

                    // let m = tris[indexes[start]].mat;

                    let m = mode(indexes[start..].iter().map(|&i| tris[i].mat));

                    // materials are encoded as negative 1-based values
                    return - (m as i32 + 1);
                } else {
                    // air
                    return 0;
                }
            }

            // there are no intersecting triangles, so the voxel is empty
            if start == indexes.len() {
                // air
                return 0;
            }

            // octant offsets in units of the child cube edge, matching VChildDescriptor layout
            const BOX_OFFSETS : [Vec3; 8] = [
                Vec3::new(0.0, 0.0, 0.0),
                Vec3::new(1.0, 0.0, 0.0),
                Vec3::new(0.0, 1.0, 0.0),
                Vec3::new(1.0, 1.0, 0.0),
                Vec3::new(0.0, 0.0, 1.0),
                Vec3::new(1.0, 0.0, 1.0),
                Vec3::new(0.0, 1.0, 1.0),
                Vec3::new(1.0, 1.0, 1.0)
            ];

            // end of this node's candidate list; children push past this point
            let end = indexes.len();

            let mut voxel = VChildDescriptor{
                sub_voxels : [0; 8],
            };

            for i in 0..8 {
                let bmin = min + BOX_OFFSETS[i] * (size * 0.5);
                let bmax = bmin + Vec3::new(size * 0.5, size * 0.5, size * 0.5);

                // narrow the candidate triangles to those touching this octant
                for j in start..end {
                    if aabb_triangle_test(bmin, bmax, &tris[indexes[j]]) {
                        indexes.push(indexes[j]);
                    }
                }

                voxel.sub_voxels[i] = recursive_create_shell(s, d - 1, md, bmin, size * 0.5, tris, indexes, end, dedup, counts, progress_callback);

                // pop this child's candidate list off the shared stack
                indexes.truncate(end);
            }

            // report progress once per node four levels below the root (8^4 leaves' worth)
            if md - d == 4 {
                progress_callback(8*8*8*8);
            }

            if let Some(&id) = dedup.get(&voxel) {
                // this node is a duplicate
                id
            } else {
                // this node is new, so add it
                s.voxels.push(voxel);
                let id = s.voxels.len() as i32;
                dedup.insert(voxel, id);
                id
            }
        }

        let mut chunk = VoxelChunk::empty();
        // drop the placeholder root; nodes are appended leaves-first below
        chunk.voxels.clear();
        // we build a list of unique voxels and store them in here
        let mut dedup : FnvHashMap<VChildDescriptor, i32> = FnvHashMap::default();
        // a reused hashmap to count most common material
        let mut counts : FnvHashMap<u16, i32> = FnvHashMap::default();
        // indexes acts as a simple growing allocator for the recursion
        let mut indexes = (0..(triangles.len())).collect::<Vec<_>>();

        recursive_create_shell(&mut chunk, depth, depth, corner, size, &triangles, &mut indexes, 0, &mut dedup, &mut counts, progress_callback);

        // put the root first
        chunk.voxels.reverse();

        //fixup the subvoxel pointers (we reversed the order)
        // 1-based id `sv` moves to 1-based id `n - sv + 1` after the reversal
        let n = chunk.voxels.len() as i32;
        for i in 0..(chunk.voxels.len()) {
            for j in 0..8 {
                let sv = chunk.voxels[i].sub_voxels[j];
                if sv > 0 {
                    let svi = n - sv + 1;
                    chunk.voxels[i].sub_voxels[j] = svi;
                }
            }
        }

        chunk
    }
||||||
|
|
||||||
|
pub fn shift_indexes(&mut self, s : usize) { |
||||||
|
for i in 0..(self.voxels.len()) { |
||||||
|
for j in 0..8 { |
||||||
|
// check that it is a subvoxel
|
||||||
|
if self.voxels[i].sub_voxels[j] > 0 { |
||||||
|
// permute the subvoxel index
|
||||||
|
self.voxels[i].sub_voxels[j] += s as i32; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/// Permute the order of each node's subvoxels.
|
||||||
|
/// This operation can be used to apply flips, rotates, and other operations
|
||||||
|
pub fn permute(&mut self, permutation : [u8; 8]) { |
||||||
|
let perm = [ |
||||||
|
permutation[0] as usize,
|
||||||
|
permutation[1] as usize,
|
||||||
|
permutation[2] as usize,
|
||||||
|
permutation[3] as usize,
|
||||||
|
permutation[4] as usize,
|
||||||
|
permutation[5] as usize,
|
||||||
|
permutation[6] as usize,
|
||||||
|
permutation[7] as usize |
||||||
|
]; |
||||||
|
|
||||||
|
for i in 0..(self.len()) { |
||||||
|
let mut j = 0; |
||||||
|
let t = self.voxels[i].sub_voxels[j]; |
||||||
|
for _ in 0..8 { |
||||||
|
let k = perm[j]; |
||||||
|
self.voxels[i].sub_voxels[j] = self.voxels[i].sub_voxels[k]; |
||||||
|
j = k; |
||||||
|
} |
||||||
|
self.voxels[i].sub_voxels[j] = t; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/// Combine 8 voxel chunks into a single chunk.
|
||||||
|
/// This operation simply concatenates the data and does not compress it
|
||||||
|
pub fn combine_8_voxel_chunks<'a>(subvoxels : [&'a VoxelChunk; 8]) -> VoxelChunk { |
||||||
|
let mut new_chunk = VoxelChunk::empty(); |
||||||
|
|
||||||
|
let mut index = 1; |
||||||
|
for i in 0..8 { |
||||||
|
let mut vc : VoxelChunk = (*subvoxels[i]).clone(); |
||||||
|
vc.shift_indexes(index); |
||||||
|
new_chunk.voxels.append(&mut vc.voxels); |
||||||
|
new_chunk.voxels[0].sub_voxels[i] = (index + 1) as i32; |
||||||
|
index += subvoxels[i].len(); |
||||||
|
} |
||||||
|
|
||||||
|
new_chunk |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
pub fn duplicate_subvoxel(&mut self, i : usize, j : usize) -> Option<usize> { |
||||||
|
let subvoxel = self.voxels[i].sub_voxels[j]; |
||||||
|
// check if it is a subvoxel and not a leaf
|
||||||
|
if subvoxel > 0 { |
||||||
|
// append a new voxel that is a duplicate of the specified subvoxel and point the voxel to it
|
||||||
|
self.voxels.push(self.voxels[subvoxel as usize - 1]); |
||||||
|
self.voxels[i].sub_voxels[j] = self.voxels.len() as i32; |
||||||
|
Some(self.voxels.len())
|
||||||
|
} else { |
||||||
|
None |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
pub fn subdivide_subvoxel(&mut self, i : usize, j : usize) -> usize { |
||||||
|
let subvoxel = self.voxels[i].sub_voxels[j]; |
||||||
|
|
||||||
|
assert!(subvoxel <= 0); |
||||||
|
|
||||||
|
self.voxels.push(VChildDescriptor{sub_voxels : [subvoxel; 8]}); |
||||||
|
self.voxels[i].sub_voxels[j] = self.voxels.len() as i32; |
||||||
|
self.voxels.len() |
||||||
|
} |
||||||
|
|
||||||
|
/// traverse the voxel data and determine the proper material to display for an LOD
|
||||||
|
pub fn calculate_lod_materials(&mut self) { |
||||||
|
|
||||||
|
// Recursive helper function to calculate the most common material from each voxel's subvoxels
|
||||||
|
fn recurse_calculate_lod_materials(s : &mut VoxelChunk, i : usize) -> u32 { |
||||||
|
let v = s.voxels[i]; |
||||||
|
|
||||||
|
let mut mats : FnvHashMap<u32, usize> = FnvHashMap::default(); |
||||||
|
|
||||||
|
// count the subvoxel materials
|
||||||
|
for j in 0..8 { |
||||||
|
let sv = v.sub_voxels[j]; |
||||||
|
let m = if sv > 0 { |
||||||
|
let id = sv as usize -1; |
||||||
|
if s.lod_materials[id] == std::u32::MAX { |
||||||
|
recurse_calculate_lod_materials(s, id) |
||||||
|
} else { |
||||||
|
s.lod_materials[id] |
||||||
|
} |
||||||
|
} else if sv == 0 { |
||||||
|
0 |
||||||
|
} else { |
||||||
|
(-sv) as u32 |
||||||
|
}; |
||||||
|
|
||||||
|
if m == 0 { |
||||||
|
continue; |
||||||
|
} |
||||||
|
|
||||||
|
mats.entry(m) |
||||||
|
.and_modify(|x| *x += 1) |
||||||
|
.or_insert(0usize); |
||||||
|
} |
||||||
|
|
||||||
|
let mut max_c = 0; |
||||||
|
let mut max_m = 0; |
||||||
|
for (&m, &c) in mats.iter() { |
||||||
|
if c > max_c { |
||||||
|
max_c = c; |
||||||
|
max_m = m; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
s.lod_materials[i] = max_m; |
||||||
|
max_m |
||||||
|
} |
||||||
|
|
||||||
|
self.lod_materials = self.voxels.iter().map(|_| std::u32::MAX).collect::<Vec<u32>>(); |
||||||
|
recurse_calculate_lod_materials(self, 0); |
||||||
|
} |
||||||
|
|
||||||
|
    /// Check whether the DAG reachable from the root contains a reference cycle
    /// (which would make traversal non-terminating). Returns `true` on a cycle.
    pub fn detect_cycles(&self) -> bool {

        // Recursive DFS: `visited` marks nodes on the current path (re-entering one
        // means a cycle); `safe` marks nodes whose whole subtree was already cleared.
        // NOTE(review): `cycle` is never pushed to, so the debug print below always
        // reports an empty path — populate it or drop the parameter.
        fn recurse_detect_cycles(s : &VoxelChunk, i : usize, visited : &mut [bool], safe : &mut [bool], cycle : &mut Vec<usize>) -> bool {
            let v = s.voxels[i];

            if visited[i] {
                return true;
            }

            visited[i] = true;

            // recurse into interior children (positive entries)
            for j in 0..8 {
                let sv = v.sub_voxels[j];
                if sv > 0 {
                    let id = sv as usize - 1;

                    // already proven acyclic
                    if safe[id] {
                        continue;
                    }

                    let b = recurse_detect_cycles(s, id, visited, safe, cycle);
                    if b {
                        return true;
                    }
                }
            }

            // unwind: off the current path, and this subtree is cycle-free
            visited[i] = false;
            safe[i] = true;
            false
        }

        let mut visited = self.voxels.iter().map(|_| false).collect::<Vec<bool>>();
        let mut safe = self.voxels.iter().map(|_| false).collect::<Vec<bool>>();
        let mut cycle = vec![];
        let has_cycle = recurse_detect_cycles(self, 0, &mut visited, &mut safe, &mut cycle);

        if has_cycle {
            println!("{} {:?}", cycle.len(), cycle);
        }

        has_cycle
    }
||||||
|
|
||||||
|
    /// Recursively calculate the DAG depth.
    /// Returns the longest root-to-leaf chain length in nodes.
    pub fn depth(&self) -> u8 {
        // DFS helper. `visited[i]` marks nodes on the current path; `safe[i]` holds
        // 255 while node `i` is being expanded (returned on re-entry as a cycle
        // guard) and the computed depth afterwards.
        // NOTE(review): `visited[i]` is reset to false on unwind, so finished nodes
        // are re-expanded on every additional reference — correct results, but
        // potentially exponential on heavily shared DAGs; checking `safe[i]` for a
        // finished value first would memoize. Confirm before changing.
        fn recurse_depth(s : &VoxelChunk, i : usize, visited : &mut [bool], safe : &mut [u8]) -> u8 {
            let v = s.voxels[i];

            if visited[i] {
                return safe[i];
            }

            visited[i] = true;
            safe[i] = 255;

            let mut max_child_depth = 0u8;

            // take the maximum depth over interior children (positive entries)
            for j in 0..8 {
                let sv = v.sub_voxels[j];
                if sv > 0 {
                    let id = sv as usize - 1;

                    let d = recurse_depth(s, id, visited, safe);
                    max_child_depth = u8::max(max_child_depth, d);
                }
            }

            visited[i] = false;
            safe[i] = max_child_depth + 1;
            safe[i]
        }

        let mut visited = self.voxels.iter().map(|_| false).collect::<Vec<bool>>();
        let mut safe = self.voxels.iter().map(|_| 0).collect::<Vec<u8>>();
        recurse_depth(self, 0, &mut visited, &mut safe)
    }
||||||
|
} |
||||||
|
|
||||||
|
/// Marker trait for unsigned integer types that widen losslessly to `u32`.
pub trait Integer : Into<u32> {}

impl Integer for u8 {}
impl Integer for u16 {}
impl Integer for u32 {}
||||||
|
|
||||||
|
// ###############################################################################################################################################
|
||||||
|
// ###############################################################################################################################################
|
||||||
|
// ###############################################################################################################################################
|
||||||
|
// Helper Functions
|
||||||
|
// ###############################################################################################################################################
|
||||||
|
// ###############################################################################################################################################
|
||||||
|
// ###############################################################################################################################################
|
||||||
|
use std::hash::Hash; |
||||||
|
|
||||||
|
fn mode<T : Hash + Copy + Clone + Eq, I : IntoIterator<Item = T>>(numbers: I) -> T { |
||||||
|
let mut occurrences = FnvHashMap::default(); |
||||||
|
|
||||||
|
for value in numbers.into_iter() { |
||||||
|
*occurrences.entry(value).or_insert(0) += 1; |
||||||
|
} |
||||||
|
|
||||||
|
occurrences |
||||||
|
.into_iter() |
||||||
|
.max_by_key(|&(_, count)| count) |
||||||
|
.map(|(val, _)| val) |
||||||
|
.expect("Cannot compute the mode of zero numbers") |
||||||
|
} |
||||||
|
|
||||||
|
fn texture_lookup(img : &image::RgbImage, u : f32, v : f32) -> [u8; 3] { |
||||||
|
let (w, h) = img.dimensions(); |
||||||
|
let image::Rgb(p) = img[((w as f32 * u) as u32 % w, (h as f32 * v) as u32 % h)]; |
||||||
|
p |
||||||
|
} |
||||||
|
|
||||||
|
/// Separating-axis (SAT) intersection test between an axis-aligned box and a
/// triangle. Returns `true` when no separating axis exists, i.e. they overlap.
///
/// Candidate axes, in order: the 3 box face normals, the triangle normal, and
/// the 9 cross products of triangle edges with box axes.
fn aabb_triangle_test(aabb_min : Vec3, aabb_max : Vec3, triangle : &Triangle) -> bool {

    let box_normals = [
        Vec3::new(1.0,0.0,0.0),
        Vec3::new(0.0,1.0,0.0),
        Vec3::new(0.0,0.0,1.0)
    ];

    let tri = &triangle.points;

    // project `points` onto `axis` and return the (min, max) interval
    fn project(points : &[Vec3], axis : Vec3) -> (f32, f32) {
        let mut min = f32::MAX;
        let mut max = f32::MIN;

        for &v in points {
            let d = axis.dot(v);

            if d < min {min = d;}
            if d > max {max = d;}
        }

        (min, max)
    }

    // axis candidates 1-3: box face normals (projection of the box is just its extents)
    for i in 0..3 {
        let (min, max) = project(tri, box_normals[i]);

        if max < aabb_min[i] || min > aabb_max[i] {
            return false; // No intersection possible.
        }
    }

    let box_vertices = [
        aabb_min,
        Vec3::new(aabb_max.x, aabb_min.y, aabb_min.z),
        Vec3::new(aabb_min.x, aabb_max.y, aabb_min.z),
        Vec3::new(aabb_max.x, aabb_max.y, aabb_min.z),
        Vec3::new(aabb_min.x, aabb_min.y, aabb_max.z),
        Vec3::new(aabb_max.x, aabb_min.y, aabb_max.z),
        Vec3::new(aabb_min.x, aabb_max.y, aabb_max.z),
        aabb_max
    ];

    // Test the triangle normal (axis candidate 4): the triangle projects to a
    // single value on its own normal.
    let tri_edges = [
        tri[0] - tri[1],
        tri[1] - tri[2],
        tri[2] - tri[0]
    ];

    let tri_norm = triangle.normal;

    let tri_offset = tri_norm.dot(tri[0]);
    let (min, max) = project(&box_vertices, tri_norm);

    if max < tri_offset || min > tri_offset {
        return false; // No intersection possible.
    }

    // Test the nine edge cross-products (axis candidates 5-13). A degenerate
    // (zero) cross product projects everything to 0, which never rejects — safe.

    for i in 0..3 {
        for j in 0..3 {
            // The box normals are the same as its edge tangents
            let axis = tri_edges[i].cross(box_normals[j]);
            let (bmin, bmax) = project(&box_vertices, axis);
            let (tmin, tmax) = project(tri, axis);
            if bmax < tmin || bmin > tmax {
                return false; // No intersection possible
            }
        }
    }

    // No separating axis found.
    return true;
}
||||||
|
|
||||||
|
|
||||||
|
/// Scale `p` along the ray from the origin so it lands on the plane of `tri`.
///
/// NOTE(review): despite the name, this returns the projected point in cartesian
/// coordinates, not barycentric coordinates — confirm intent at the call sites.
/// The division is by `normal.dot(p)`, which is zero (yielding non-finite
/// components) when `p` is parallel to the triangle plane.
fn cartesian_to_barycentric(tri : &Triangle, mut p : Vec3) -> Vec3 {
    // project point onto triangle:
    p *= tri.normal.dot(tri.points[0]) / tri.normal.dot(p);

    p
}
||||||
@ -0,0 +1,388 @@ |
|||||||
|
// this file is a roughly 1-to-1 translation of the GPU shader for use on the CPU
|
||||||
|
|
||||||
|
use super::*; |
||||||
|
|
||||||
|
use cgmath::prelude::*; |
||||||
|
use cgmath::Vector2; |
||||||
|
use cgmath::Vector3; |
||||||
|
|
||||||
|
type Vec2 = Vector2<f32>; |
||||||
|
type Vec3 = Vector3<f32>; |
||||||
|
|
||||||
|
type UVec3 = Vector3<u32>; |
||||||
|
|
||||||
|
impl VoxelChunk {
    /// Cast a ray from origin `o` along direction `d` through this chunk,
    /// delegating to the module-level `voxel_march` with the chunk's node
    /// pool and LOD-material table.
    pub fn raycast(&self, o : Vec3, d : Vec3, max_depth : u32, max_dist : f32) -> raycast::Raycast {
        raycast::voxel_march(&self.voxels, &self.lod_materials, o, d, max_depth, max_dist)
    }
}
||||||
|
|
||||||
|
|
||||||
|
// Bit masks used to pack a per-axis choice into a 3-bit child-slot index.
const AXIS_X_MASK : u32 = 1;
const AXIS_Y_MASK : u32 = 2;
const AXIS_Z_MASK : u32 = 4;

// The three masks as a vector, so `idot` can combine per-axis flags in one step.
const AXIS_MASK_VEC : UVec3 = UVec3::new(AXIS_X_MASK, AXIS_Y_MASK, AXIS_Z_MASK);

// Which cube face the ray exits through (see `project_cube`).
const INCIDENCE_X : u32 = 0;
const INCIDENCE_Y : u32 = 1;
const INCIDENCE_Z : u32 = 2;

// Face normals indexed by incidence value (currently unused — kept for reference).
const _INCIDENCE_NORMALS : [Vec3; 3] = [
    Vec3::new(1.0, 0.0, 0.0),
    Vec3::new(0.0, 1.0, 0.0),
    Vec3::new(0.0, 0.0, 1.0)
];

// LOD threshold — presumably a projected-size cutoff used by the marcher; confirm in voxel_march.
const LOD_CUTOFF_CONSTANT : f32 = 0.002;
||||||
|
|
||||||
|
// Component-wise multiply-add: `a * b + c`.
// (Named after the GLSL fma(); this is two rounded ops, not a hardware fused FMA.)
fn fma(a : Vec3, b : Vec3, c : Vec3) -> Vec3 {
    a.mul_element_wise(b) + c
}

// Integer dot product of two u32 vectors.
fn idot(a : UVec3, b : UVec3) -> u32 {
    return a.x * b.x + a.y * b.y + a.z * b.z;
}
||||||
|
|
||||||
|
// Intersect the ray (parameterized by inverse direction `id` and offset `od`,
// i.e. t = id * x + od per axis) with the axis-aligned cube [mn, mx].
// Returns (incidence, (t_enter, t_exit)): which axis plane the ray exits
// through, and the entry/exit parameter interval (empty when t_enter >= t_exit).
// Assumes the direction was pre-flipped to be positive on all axes, so `mn`
// gives entry times and `mx` exit times.
fn project_cube(id : Vec3, od : Vec3, mn : Vec3, mx : Vec3) -> (u32, Vec2) {

    let tmn = fma(id, mn, od);
    let tmx = fma(id, mx, od);

    // latest entry and earliest exit over the three slabs
    let ts = f32::max(tmn.x, f32::max(tmn.y, tmn.z));

    let te = f32::min(tmx.x, f32::min(tmx.y, tmx.z));

    let mut incidence = 0;

    // exit face = the axis achieving the minimum exit time; on exact float ties
    // the later checks win (Z over Y over X), matching the GPU shader's behavior
    if te == tmx.x {incidence = INCIDENCE_X;}
    if te == tmx.y {incidence = INCIDENCE_Y;}
    if te == tmx.z {incidence = INCIDENCE_Z;}

    (incidence, Vec2::new(ts, te))
}
||||||
|
|
||||||
|
// Child-descriptor entry predicates. Encoding (see VChildDescriptor):
//   sv == 0 : empty space, sv < 0 : solid leaf material, sv > 0 : 1-based child node id.
fn subvoxel_valid(sv : i32) -> bool {sv != 0}
fn subvoxel_leaf(sv : i32) -> bool {sv < 0}
// BUG FIX: this previously returned `sv != 0`, the exact opposite of its name
// (and a duplicate of subvoxel_valid). An entry is empty when it equals 0.
fn _subvoxel_empty(sv : i32) -> bool {sv == 0}
// Convert a positive entry into a 0-based index into the node pool.
fn subvoxel_child(sv : i32) -> u32 {sv as u32 - 1}
// Convert a negative entry into its material id.
fn subvoxel_material(sv : i32) -> u32 {(-sv) as u32}
||||||
|
|
||||||
|
// Fetch child entry `idx` (0..8) of node `parent` (0-based index into the pool).
fn voxel_get_subvoxel(voxels : &[VChildDescriptor], parent : u32, idx : u32) -> i32 {
    return voxels[parent as usize].sub_voxels[idx as usize];
}
||||||
|
|
||||||
|
// A parameter interval (t_start, t_end) is non-empty when start precedes end.
fn interval_nonempty(t : Vec2) -> bool {
    return t.x < t.y;
}

// Intersection of two parameter intervals; may produce an empty (inverted)
// interval, which callers filter with `interval_nonempty`.
fn interval_intersect(a : Vec2, b : Vec2) -> Vec2 {
    return Vec2::new(f32::max(a.x,b.x), f32::min(a.y, b.y));
    // return ((b.x > a.y || a.x > b.y) ? vec2(1,0) : vec2(max(a.x,b.x), min(a.y, b.y)));
}
||||||
|
|
||||||
|
// Pick the child-slot mask for the point the ray reaches at parameter `t`,
// relative to the center of the node at `pos` with child edge `scale`.
// NOTE(review): this unused variant sets an axis bit when the point is on the
// *negative* side (`p < 0`), the opposite convention of `select_child_bit`
// below — confirm which convention matches the shader before reviving it.
fn _select_child(pos : Vec3, scale : f32, o : Vec3, d : Vec3, t : f32) -> u32 {
    let p = fma(d, Vec3::from_value(t), o) - pos - Vec3::from_value(scale);
    // vec3 p = o + d * t - pos - scale;

    let less = UVec3::new(
        if p.x < 0.0 { 1 } else { 0 },
        if p.y < 0.0 { 1 } else { 0 },
        if p.z < 0.0 { 1 } else { 0 }
    );

    idot(less, AXIS_MASK_VEC)
}

// Active variant: sets an axis bit when the hit point lies in the node's
// *positive* half on that axis.
fn select_child_bit(pos : Vec3, scale : f32, o : Vec3, d : Vec3, t : f32) -> u32 {
    let p = fma(d, Vec3::new(t,t,t), o) - pos - Vec3::from_value(scale);

    let s = UVec3::new(
        (p.x > 0.0) as u32,
        (p.y > 0.0) as u32,
        (p.z > 0.0) as u32
    );

    idot(s, AXIS_MASK_VEC)
}
||||||
|
|
||||||
|
fn child_cube(pos : UVec3, scale : u32, idx : u32) -> UVec3 { |
||||||
|
|
||||||
|
|
||||||
|
const CUBE_OFFSETS : [UVec3; 8] = [ |
||||||
|
UVec3::new(0,0,0), |
||||||
|
UVec3::new(1,0,0), |
||||||
|
UVec3::new(0,1,0), |
||||||
|
UVec3::new(1,1,0), |
||||||
|
UVec3::new(0,0,1), |
||||||
|
UVec3::new(1,0,1), |
||||||
|
UVec3::new(0,1,1), |
||||||
|
UVec3::new(1,1,1) |
||||||
|
]; |
||||||
|
|
||||||
|
return pos + (scale * CUBE_OFFSETS[idx as usize]); |
||||||
|
} |
||||||
|
|
||||||
|
fn highest_differing_bit(a : UVec3, b : UVec3) -> u32 { |
||||||
|
let t : u32 = (a.x ^ b.x) | (a.y ^ b.y) | (a.z ^ b.z); |
||||||
|
|
||||||
|
31 - t.leading_zeros() |
||||||
|
} |
||||||
|
|
||||||
|
fn _extract_child_slot(pos : UVec3, scale : u32) -> u32 { |
||||||
|
|
||||||
|
let d = UVec3::new( |
||||||
|
(pos.x & scale == 0) as u32, |
||||||
|
(pos.y & scale == 0) as u32, |
||||||
|
(pos.z & scale == 0) as u32 |
||||||
|
); |
||||||
|
|
||||||
|
idot(d, AXIS_MASK_VEC) |
||||||
|
} |
||||||
|
|
||||||
|
fn extract_child_slot_bfe(pos : UVec3, depth : u32) -> u32 { |
||||||
|
|
||||||
|
let d = UVec3::new( |
||||||
|
(pos.x >> depth) & 1, |
||||||
|
(pos.y >> depth) & 1, |
||||||
|
(pos.z >> depth) & 1 |
||||||
|
); |
||||||
|
|
||||||
|
// return AXIS_X_MASK * d.x + AXIS_Y_MASK * d.y + AXIS_Z_MASK * d.z;
|
||||||
|
return idot(d, AXIS_MASK_VEC); |
||||||
|
} |
||||||
|
|
||||||
|
#[inline] |
||||||
|
fn denormalize_directions(position : UVec3, dmask : u32, size : u32) -> UVec3 { |
||||||
|
UVec3::new( |
||||||
|
if dmask & 0b001 == 0 {position.x} else {size - 1 - position.x}, |
||||||
|
if dmask & 0b010 == 0 {position.y} else {size - 1 - position.y}, |
||||||
|
if dmask & 0b100 == 0 {position.z} else {size - 1 - position.z} |
||||||
|
) |
||||||
|
} |
||||||
|
|
||||||
|
/// Why a `voxel_march` traversal stopped (see `voxel_march` for the producer).
#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub enum RaycastTermination {
    // left the volume without hitting anything
    Miss,
    // hit a solid voxel
    Hit,
    // stopped descending at the caller's max_depth
    MaxDepth,
    // stopped by the level-of-detail cutoff
    LoD,
    // exceeded the caller's max_dist
    MaxDist,
    // internal inconsistency during traversal
    Error,
    // iteration budget exhausted
    LoopEnd,
}
||||||
|
|
||||||
|
impl Default for RaycastTermination { |
||||||
|
fn default() -> Self { |
||||||
|
RaycastTermination::Miss |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
/// Result of a single `voxel_march` traversal.
#[derive(Copy, Clone, Default, Debug)]
pub struct Raycast {
    /// True when the march found a voxel (leaf hit or LoD/depth early-out).
    pub hit : bool,
    /// Ray parameter `t` at the hit point (same parameterization as the input ray).
    pub dist : f32,
    /// Which axis-aligned face the ray entered through, as produced by
    /// `project_cube` (compared against INCIDENCE_X/Y/Z by callers).
    pub incidence : u32,
    /// Material id: the leaf's material on a hit, or the parent's entry in
    /// `lod_materials` on an LoD/depth early-out.
    pub material : u32,
    /// Identifier of the hit voxel: `(parent_node_index << 3) | child_slot`.
    pub voxel_id : u32,
    /// Why the traversal stopped.
    pub termination : RaycastTermination,
    /// Number of loop iterations spent in the traversal (diagnostics).
    pub iterations : u32,
    /// Integer voxel coordinates of the hit cube, mapped back to the
    /// un-mirrored grid.
    pub position : [u32; 3],
}
||||||
|
|
||||||
|
/// March a ray through an SVDAG and report the first hit (or why none was found).
///
/// * `voxels` — node pool; children are looked up via `voxel_get_subvoxel`.
/// * `lod_materials` — per-node material used when traversal stops early
///   (LoD cutoff or `max_depth`).
/// * `o`, `d` — ray origin and direction; the origin appears to be expected in
///   normalized [0,1] chunk space (it is remapped by `MAX_SCALE` below) —
///   TODO confirm against callers.
/// * `max_depth` — deepest tree level to descend to.
/// * `max_dist` — maximum ray parameter `t` to march to.
///
/// The traversal follows the familiar mirrored-octree scheme: the ray is
/// reflected so its direction is positive on all three axes, `dmask` records
/// the reflected axes, and every child-slot lookup XORs `dmask` back in.
/// `pstack`/`tstack` hold the parent node and exit-`t` per level so ascents can
/// restore state without re-descending from the root.
pub fn voxel_march(voxels : &[VChildDescriptor], lod_materials : &[u32], mut o : Vec3, mut d : Vec3, max_depth : u32, max_dist : f32) -> Raycast {

    let mut res = Raycast::default();

    // Fixed tree height; the root cube spans [0, MAX_SCALE) on every axis.
    const MAX_DAG_DEPTH : u32 = 16;
    const MAX_SCALE : u32 = 1 << MAX_DAG_DEPTH;

    // Per-level saved parent index and exit t-value for ascents.
    let mut pstack = [0u32; MAX_DAG_DEPTH as usize];
    let mut tstack = [0.0f32; MAX_DAG_DEPTH as usize];
    let mut dmask = 0;

    // Avoid division by zero in `id = 1.0 / d` by nudging exact zeros.
    d.x = if d.x == 0.0 { 1e-6 } else { d.x };
    d.y = if d.y == 0.0 { 1e-6 } else { d.y };
    d.z = if d.z == 0.0 { 1e-6 } else { d.z };

    let ds = Vec3::new(d.x.signum(), d.y.signum(), d.z.signum());

    // Mirror the ray into the (+,+,+) direction octant; the origin is
    // reflected about the cube center on each flipped axis.
    d.mul_assign_element_wise(ds);
    // o = o * ds + (1 - ds) * 0.5;
    o = fma(o, ds, (Vec3::from_value(1.0) - ds) * 0.5);

    // Scale from normalized space into integer voxel space.
    o *= MAX_SCALE as f32;
    d *= MAX_SCALE as f32;

    // Record which axes were mirrored; XORed into child-slot lookups below.
    dmask |= if ds.x < 0.0 { AXIS_X_MASK } else { 0 };
    dmask |= if ds.y < 0.0 { AXIS_Y_MASK } else { 0 };
    dmask |= if ds.z < 0.0 { AXIS_Z_MASK } else { 0 };

    // Precomputed inverse direction and offset so plane intersections reduce
    // to fused multiply-adds inside project_cube.
    let id = 1.0 / d;
    let od = - o.mul_element_wise(id);

    // Active ray interval [t.x, t.y].
    let mut t = Vec2::new(0.0, max_dist);

    // `h` is the exit-t of the last cube whose parent was pushed; a child is
    // only re-pushed when its exit differs (classic octree-traversal trick).
    let mut h = t.y;

    // fix initial position
    let mut pos = UVec3::from_value(0);

    let mut parent = 0;
    let mut idx;

    let mut scale = 1 << MAX_DAG_DEPTH;
    let mut depth = 1;

    let (mut incidence, tp) = project_cube(id, od, pos.cast().unwrap(), pos.add_element_wise(scale).cast().unwrap());

    // Clip the ray to the root cube.
    t = interval_intersect(t, tp);

    res.iterations = 0;

    if !interval_nonempty(t) {
        // we didn't hit the bounding cube
        res.termination = RaycastTermination::Miss;
        res.hit = false;
        return res;
    }

    // Descend to the first child the ray enters.
    scale = scale >> 1;
    // idx = select_child(pos, scale, o, d, t.x);
    idx = select_child_bit(pos.cast().unwrap(), scale as f32, o, d, t.x);
    pos = child_cube(pos, scale, idx);

    pstack[0] = parent;
    tstack[0] = t.y;
    let mut tv;

    // very hot loop
    while res.iterations < 2048 {
        res.iterations += 1;

        // Entry face and t-interval of the current cube.
        let (new_incidence, tc) = project_cube(id, od, pos.cast().unwrap(), pos.add_element_wise(scale).cast().unwrap());

        // dmask ^ idx converts the mirrored slot back to storage order.
        let subvoxel = voxel_get_subvoxel(voxels, parent, dmask ^ idx);

        if subvoxel_valid(subvoxel) && interval_nonempty(t) {

            if scale as f32 <= tc.x * LOD_CUTOFF_CONSTANT as f32 || depth >= max_depth {

                // voxel is too small
                res.dist = t.x;
                res.termination = if depth >= max_depth { RaycastTermination::MaxDepth } else { RaycastTermination::LoD };
                res.material = lod_materials[parent as usize];
                res.voxel_id = (parent << 3) | (dmask ^ idx);
                res.incidence = incidence;
                res.hit = true;
                res.position = denormalize_directions(pos, dmask, MAX_SCALE).into();

                return res;
            }

            if tc.x > max_dist {
                // voxel is beyond the render distance
                res.termination = RaycastTermination::MaxDist;
                res.hit = false;
                return res;
            }

            tv = interval_intersect(tc, t);

            if interval_nonempty(tv) {
                if subvoxel_leaf(subvoxel) {

                    // Solid leaf: report the hit.
                    res.dist = tv.x;
                    res.voxel_id = (parent << 3) | (dmask ^ idx);
                    res.termination = RaycastTermination::Hit;
                    res.material = subvoxel_material(subvoxel);
                    res.incidence = incidence;
                    res.hit = true;
                    res.position = denormalize_directions(child_cube(pos, scale >> 1, idx), dmask, MAX_SCALE).into();

                    return res;
                }
                // descend:
                if tc.y < h {
                    pstack[depth as usize] = parent;
                    tstack[depth as usize] = t.y;
                }
                depth += 1;

                h = tc.y;
                scale = scale >> 1;
                parent = subvoxel_child(subvoxel);
                // idx = select_child(pos, scale, o, d, tv.x);
                idx = select_child_bit(pos.cast().unwrap(), scale as f32, o, d, tv.x);
                t = tv;
                pos = child_cube(pos, scale, idx);

                continue;
            }
        }

        incidence = new_incidence;

        // advance
        t.x = tc.y;

        // 1 on the axis whose face the ray exits through.
        let incidence_mask = UVec3::new(
            (incidence == INCIDENCE_X) as u32,
            (incidence == INCIDENCE_Y) as u32,
            (incidence == INCIDENCE_Z) as u32
        );

        // bit_diff is computed against pos + scale on *all* axes before the
        // step; used below to find the level shared by old and new position.
        let p = pos + UVec3::from_value(scale);
        let bit_diff = highest_differing_bit(p, pos);
        pos += scale * incidence_mask;

        // bit_diff = p.x | p.y | p.z;

        let mask = 1 << incidence;
        idx ^= mask;

        // idx bits should only ever flip 0->1 because we force the ray direction to always be in the (1,1,1) quadrant
        if (idx & mask) == 0 {
            // ascend

            // highest differing bit
            // depth = ilog2(bit_diff);
            let idepth = bit_diff;

            // check if we exited voxel tree
            if idepth >= MAX_DAG_DEPTH {
                res.termination = RaycastTermination::Miss;
                res.hit = false;
                return res;
            }

            depth = MAX_DAG_DEPTH - idepth;

            scale = MAX_SCALE >> depth;
            // scale = 1 << (MAX_DAG_DEPTH - 1 - depth);

            // Restore the parent and exit-t saved when this level was entered.
            parent = pstack[depth as usize];
            t.y = tstack[depth as usize];

            // round position to correct voxel (mask out low bits)
            pos.x &= 0xFFFFFFFF ^ (scale - 1);
            pos.y &= 0xFFFFFFFF ^ (scale - 1);
            pos.z &= 0xFFFFFFFF ^ (scale - 1);
            // pos = bitfieldInsert(pos, uvec3(0), 0, int(idepth));

            // get the idx of the child at the new depth
            // idx = extract_child_slot(pos, scale);
            idx = extract_child_slot_bfe(pos, idepth);

            // Force a stack push on the next descent.
            h = 0.0;
        }
    }

    // Iteration cap reached without any other outcome.
    res.termination = RaycastTermination::LoopEnd;
    res.hit = false;
    return res;
}
||||||
@ -0,0 +1,635 @@ |
|||||||
|
use super::*; |
||||||
|
|
||||||
|
use super::voxelize::*; |
||||||
|
|
||||||
|
use pbr::ProgressBar; |
||||||
|
|
||||||
|
use noise; |
||||||
|
use noise::NoiseFn; |
||||||
|
|
||||||
|
use std::path::PathBuf; |
||||||
|
|
||||||
|
/// Converts three dog-themed OBJ meshes to SVDAG files at depth 12.
///
/// BUG FIX: the progress messages previously printed "BuffDoge.OBJ" for all
/// three conversions; they now name the file actually being converted.
#[test]
fn test_voxel_dag_obj_shell_buff_doge() {
    println!("Converting {:?}", "./data/obj/BuffDoge.OBJ");

    convert_obj_file(
        PathBuf::from("./data/obj/BuffDoge.OBJ"),
        PathBuf::from("./data/dag/BuffDoge.svdag"),
        12
    );

    println!("");
    println!("Converting {:?}", "./data/obj/MegaBuffDoge.OBJ");

    convert_obj_file(
        PathBuf::from("./data/obj/MegaBuffDoge.OBJ"),
        PathBuf::from("./data/dag/MegaBuffDoge.svdag"),
        12
    );

    println!("");
    println!("Converting {:?}", "./data/obj/Cheem.OBJ");

    convert_obj_file(
        PathBuf::from("./data/obj/Cheem.OBJ"),
        PathBuf::from("./data/dag/Cheem.svdag"),
        12
    );
}
||||||
|
|
||||||
|
/// Voxelizes the Utah teapot mesh into an SVDAG at depth 12.
#[test]
fn test_voxel_dag_obj_shell_teapot() {
    let input = PathBuf::from("./data/obj/teapot.obj");
    let output = PathBuf::from("./data/dag/teapot.svdag");

    convert_obj_file(input, output, 12);
}
||||||
|
|
||||||
|
/// Voxelizes the Sponza scene with per-group materials at depth 12.
#[test]
fn test_voxel_dag_obj_shell_sponza() {
    let input = PathBuf::from("./data/obj/Sponza/sponza.obj");
    let dag_output = PathBuf::from("./data/dag/sponza_mats.svdag");
    let mat_output = PathBuf::from("./data/dag/sponza_mats.mats");

    convert_obj_file_with_materials(input, dag_output, mat_output, 12);
}
||||||
|
|
||||||
|
/// Voxelizes the modified Sponza scene with texture-derived materials at depth 10.
#[test]
fn test_voxel_dag_obj_shell_sponza_textured() {
    let input = PathBuf::from("./data/obj/sponza-modified/sponza.obj");
    let dag_output = PathBuf::from("./data/dag/sponza_tex_1k.svdag");
    let mat_output = PathBuf::from("./data/dag/sponza_tex_1k.mats");

    convert_obj_file_textured(input, dag_output, mat_output, 10);
}
||||||
|
|
||||||
|
/// Voxelizes the Sibenik cathedral scene with per-group materials at depth 10.
#[test]
fn test_voxel_dag_obj_shell_sibenik() {
    let input = PathBuf::from("./data/obj/sibenik/sibenik.obj");
    let dag_output = PathBuf::from("./data/dag/sibenik_mats.svdag");
    let mat_output = PathBuf::from("./data/dag/sibenik_mats.mats");

    convert_obj_file_with_materials(input, dag_output, mat_output, 10);
}
||||||
|
|
||||||
|
|
||||||
|
/// Voxelizes the hairball stress-test mesh at depth 9.
#[test]
fn test_voxel_dag_obj_shell_hairball() {
    let input = PathBuf::from("./data/obj/hairball.obj");
    let output = PathBuf::from("./data/dag/hairball.svdag");

    convert_obj_file(input, output, 9);
}
||||||
|
|
||||||
|
|
||||||
|
/// Voxelizes a single unit triangle at depth 8, times the conversion, and
/// serializes the result to ./data/dag/tri.svdag.
///
/// Cleanup: removed the unused function-local `use std::path::Path;`.
#[test]
fn test_voxel_dag_tri_shell() {
    use std::fs;
    use std::time::*;

    // Triangle spanning the three unit-axis points.
    let v0 = Vec3::new(1.0, 0.0, 0.0);
    let v1 = Vec3::new(0.0, 1.0, 0.0);
    let v2 = Vec3::new(0.0, 0.0, 1.0);

    let triangles = vec![
        Triangle{
            points : [v0, v1, v2],
            // Unnormalized face normal from two edge vectors.
            normal : (v0 - v1).cross(v1 - v2),
            mat : 1,
            ..Default::default()
        }
    ];

    // Voxelization volume: the unit cube at the origin.
    let min = Vec3::new(0.0, 0.0, 0.0);
    let size = 1.0;

    println!("Triangles: {}", triangles.len());

    let mut pb = ProgressBar::new(8*8*8*8);

    let start = Instant::now();
    // The callback reports total work and advances the progress bar.
    let vchunk = VoxelChunk::from_mesh(8, &triangles, min, size, &mut |t| { pb.total = t; pb.inc(); });
    let elapsed = start.elapsed();

    pb.finish();

    println!("DAG nodes: {}", vchunk.len());
    println!("Time to assemble: {:?}", elapsed);

    let serialized = bincode::serialize(&vchunk).unwrap();

    fs::write("./data/dag/tri.svdag", serialized).unwrap();
}
||||||
|
|
||||||
|
/// Construct an SVDAG of a ct-scan of the stanford bunny;
/// Reads 361 raw big-endian u16 slices of 512x512 from ./data/dense/bunny/,
/// masks out the scanner cylinder, thresholds to a binary volume, compresses
/// it into an SVDAG, and writes ./data/dag/bunny.svdag.
#[test]
fn test_voxel_dag_bunny() {
    let mut data : Vec<u16> = Vec::with_capacity(512 * 512 * 361);

    use std::path::Path;
    use std::fs;
    use std::u16;
    use std::time::*;

    let dir = Path::new("./data/dense/bunny/");
    if dir.is_dir() {
        // NOTE(review): slices are appended in read_dir order, which is not
        // guaranteed to be sorted — presumably the filenames happen to come
        // back in slice order; verify if determinism matters.
        for entry in fs::read_dir(dir).unwrap() {
            let entry = entry.unwrap();
            let path = entry.path();

            if !path.is_dir() {
                println!("Loading {:?}", path);
                let slice = fs::read(path).unwrap();

                // Each slice file is exactly 512x512 big-endian u16 samples.
                assert_eq!(slice.len(), 512*512*2);

                data.append(&mut slice.chunks_exact(2).map(|v| u16::from_be_bytes([v[0],v[1]])).collect::<Vec<_>>());
            }
        }
    }

    assert_eq!(data.len(), 512 * 512 * 361);

    println!("Converting...");

    // scan data has a solid cylinder around the bunny, so this code removes that.
    for z in 0..361 {
        for y in 0..512 {
            for x in 0..512 {
                let dx = x as i32 - 256;
                let dy = y as i32 - 256;
                // Row-major linear index: x fastest, then y, then z.
                let i = x + 512 * (y + 512 * z);

                // Zero out everything outside a radius-255 cylinder about the
                // slice center.
                if dx * dx + dy * dy > 255 * 255 {
                    data[i] = 0;
                }
            }
        }
    }

    // threshold for the ct scan data
    let data = data.iter().map(|&v| if v > 0x06ff {1i32} else {0i32}).collect::<Vec<i32>>();

    let start = Instant::now();

    let mut chunk = VoxelChunk::from_dense_voxels(&data, [512, 512, 361]);

    let runtime = start.elapsed();

    println!("Compression took {:?}", runtime);
    // Reorder nodes so children follow parents before serialization.
    chunk.topological_sort();

    let out_path = Path::new("./data/dag/bunny.svdag");

    println!("Writing File... ({:?})", out_path);

    use bincode;

    let serialized = bincode::serialize(&chunk).unwrap();

    fs::write(out_path, serialized).unwrap();

    println!("Num Voxels: {} (from {})", chunk.voxels.len(), 512*512*361);
}
||||||
|
|
||||||
|
/// Builds an SVDAG from an implicit gyroid surface sampled on a dense grid and
/// writes it to ./data/dag/gyroid.svdag.
///
/// BUG FIX: the final report previously claimed an uncompressed size of
/// 512*512*512, but depth 8 samples a (1 << 8)^3 = 256^3 grid; the count is
/// now derived from the depth.
#[test]
fn test_voxel_dag_implicit() {
    use std::time::*;
    use std::fs;
    use bincode;
    use std::path::*;

    // SVDAG depth; the dense grid it represents is DIM^3.
    const DEPTH : usize = 8;
    const DIM : usize = 1 << DEPTH;

    println!("Compressing implicit gyroid...");

    let start = Instant::now();

    let chunk = VoxelChunk::from_implicit_array(DEPTH, |x, y, z| {
        let scale = 1.0/16.0;
        // Voxels within this band of the gyroid surface become solid.
        let threshold = 0.05;

        let x = x as f32 * scale;
        let y = y as f32 * scale;
        let z = z as f32 * scale;

        // Classic gyroid implicit surface.
        let sdf = x.sin() * y.cos() + y.sin() * z.cos() + z.sin() * x.cos();

        if sdf.abs() < threshold {
            1
        } else {
            0
        }
    });

    let runtime = start.elapsed();

    println!("Compression took {:?}", runtime);

    let out_path = Path::new("./data/dag/gyroid.svdag");

    println!("Writing File... ({:?})", out_path);

    let serialized = bincode::serialize(&chunk).unwrap();

    fs::write(out_path, serialized).unwrap();

    println!("Num Voxels: {} (from {})", chunk.voxels.len(), DIM*DIM*DIM);
}
||||||
|
|
||||||
|
|
||||||
|
/// Builds an SVDAG from a gyroid distance estimator at depth 10, checks the
/// DAG is acyclic, and writes ./data/dag/gyroid_de.svdag.
///
/// BUG FIX: the report previously claimed an uncompressed size of
/// 512*512*512, but depth 10 corresponds to a (1 << 10)^3 = 1024^3 grid; the
/// count is now derived from the depth.
#[test]
fn test_voxel_dag_de_gyroid() {
    use std::time::*;
    use std::fs;
    use bincode;
    use std::path::*;

    // SVDAG depth; the dense grid it represents is DIM^3.
    const DEPTH : usize = 10;
    const DIM : usize = 1 << DEPTH;

    println!("Compressing DE gyroid...");

    let start = Instant::now();

    let chunk = VoxelChunk::from_distance_equation(DEPTH, |x, y, z| {
        // Map [0,1] coordinates onto two full periods of the gyroid.
        let scale = std::f32::consts::PI * 4.0;

        let x = x as f32 * scale;
        let y = y as f32 * scale;
        let z = z as f32 * scale;

        let sdf = x.sin() * y.cos() + y.sin() * z.cos() + z.sin() * x.cos();

        // Distance estimate, mapped back into normalized units.
        sdf.abs() / scale
    });

    let runtime = start.elapsed();

    println!("Compression took {:?}", runtime);

    println!("Num Voxels: {} (uncompress: {})", chunk.voxels.len(), DIM*DIM*DIM);

    assert!(!chunk.detect_cycles(), "Cycle Detected!");

    let out_path = Path::new("./data/dag/gyroid_de.svdag");

    println!("Writing File... ({:?})", out_path);

    let serialized = bincode::serialize(&chunk).unwrap();

    fs::write(out_path, serialized).unwrap();
}
||||||
|
|
||||||
|
/// Builds an SVDAG at depth 10 from a cube-vs-sphere intersection test,
/// verifies it is acyclic, and writes ./data/dag/sphere.svdag.
#[test]
fn test_voxel_dag_intersection_sphere() {
    use std::time::*;
    use std::fs;
    use bincode;
    use std::path::*;

    println!("Creating SVDAG from sphere intersection test...");

    let start = Instant::now();

    // Sphere center and radius in normalized [0,1] chunk coordinates.
    let sc = Vec3::new(0.5, 0.5, 0.5);
    let sr = 0.01;

    let depth = 10;
    let uncompressed_size = (1 << depth) * (1 << depth) * (1 << depth);

    // A cube overlaps the sphere iff the squared distance from the sphere
    // center to the axis-aligned cube [v - s, v + s] is under sr^2.
    let chunk = VoxelChunk::from_intersection_test(depth, |v, s| {
        // Squared distance contribution of one axis: zero when the center's
        // coordinate lies inside the cube's extent on that axis.
        let axis_sq = |center : f32, lo : f32, hi : f32| {
            if center < lo {
                (lo - center).powi(2)
            } else if center > hi {
                (hi - center).powi(2)
            } else {
                0.0
            }
        };

        let sq_dist = axis_sq(sc.x, v.x - s, v.x + s)
                    + axis_sq(sc.y, v.y - s, v.y + s)
                    + axis_sq(sc.z, v.z - s, v.z + s);

        sq_dist < sr * sr
    });

    let runtime = start.elapsed();

    println!("Compression took {:?}", runtime);

    println!("Num Voxels: {} (uncompressed: {} - ) [{:3.3}%]", chunk.voxels.len(), uncompressed_size, 100.0 * chunk.voxels.len() as f32 / uncompressed_size as f32);

    assert!(!chunk.detect_cycles(), "Cycle Detected!");

    let out_path = Path::new("./data/dag/sphere.svdag");

    println!("Writing File... ({:?})", out_path);

    let serialized = bincode::serialize(&chunk).unwrap();

    fs::write(out_path, serialized).unwrap();
}
||||||
|
|
||||||
|
/// Builds an SVDAG at depth 11 from a Mandelbulb distance estimator, verifies
/// the DAG is acyclic, and writes ./data/dag/mandelbulb.svdag.
///
/// BUG FIX: the compression-ratio print appended a '%' sign but did not
/// multiply the ratio by 100 (the sibling sphere test does); it now reports an
/// actual percentage.
#[test]
fn test_voxel_dag_de_mandelbulb() {
    use std::time::*;
    use std::fs;
    use bincode;
    use std::path::*;

    println!("Compressing DE mandelbulb...");

    let start = Instant::now();
    const DEPTH : usize = 11;
    const DIM : usize = 1 << DEPTH;

    let chunk = VoxelChunk::from_distance_equation(DEPTH, |x, y, z| {
        // World-space extent of the sampled volume, centered on the origin.
        const SCALE : f32 = 4.0;

        let pos = Vec3::new(x - 0.5, y - 0.5, z - 0.5) * SCALE;
        let mut z = pos;
        // Running derivative magnitude for the distance estimate.
        let mut dr = 1.0;
        let mut r = 0.0;

        const ITERS : usize = 8;
        const POWER : f32 = 8.0;
        const BAILOUT : f32 = 2.0;

        // Standard Mandelbulb iteration z -> z^POWER + pos in spherical form.
        for _ in 0..ITERS {
            r = z.magnitude(); //length(z);
            if r>BAILOUT {
                break
            };

            // convert to polar coordinates
            let mut theta = (z.z/r).acos();
            let mut phi = (z.y).atan2(z.x);
            dr = r.powf( POWER -1.0) * POWER * dr + 1.0;

            // scale and rotate the point
            let zr = r.powf(POWER);
            theta = theta*POWER;
            phi = phi*POWER;

            // convert back to cartesian coordinates
            z = zr*Vec3::new(theta.sin()*phi.cos(), phi.sin()*theta.sin(), theta.cos());
            z += pos;
        }
        // Distance estimate, mapped back into normalized [0,1] units.
        return 0.5*r.ln()*r/dr / SCALE;
    });

    let runtime = start.elapsed();

    println!("Compression took {:?}", runtime);

    println!("Num Voxels: {} (uncompressed: {} ({:2.1}%))", chunk.voxels.len(), DIM*DIM*DIM, 100.0 * chunk.voxels.len() as f32 / (DIM*DIM*DIM) as f32);

    assert!(!chunk.detect_cycles(), "Cycle Detected!");

    let out_path = Path::new("./data/dag/mandelbulb.svdag");

    println!("Writing File... ({:?})", out_path);

    let serialized = bincode::serialize(&chunk).unwrap();

    fs::write(out_path, serialized).unwrap();
}
||||||
|
|
||||||
|
|
||||||
|
/// This test constructs a simple sphere as a test
/// The 8x8x8 volume below is a hand-drawn discrete ball; it is compressed to
/// an SVDAG, serialized to ./data/dag/sphere.svdag, and pretty-printed.
#[test]
fn test_voxel_dag_sphere() {

    use std::path::Path;
    use std::fs;

    // Eight 8x8 z-slices, listed z = 0..7; 1 = solid, 0 = empty.
    let data : [i32; 8*8*8]= [
        // z = 0: empty
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 1: small cap
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 2: wider disc
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 3: equator
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 4: equator
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 5: wider disc (mirror of z = 2)
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 1, 1, 1, 1, 1, 1, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 6: small cap (mirror of z = 1)
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 1, 1, 1, 1, 0, 0,
        0, 0, 0, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,

        // z = 7: empty
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    ];

    let chunk = VoxelChunk::from_dense_voxels(&data, [8, 8, 8]);

    let out_path = Path::new("./data/dag/sphere.svdag");

    {
        use bincode;
        use serde_json::to_string_pretty;

        let serialized = bincode::serialize(&chunk).unwrap();

        fs::write(out_path, serialized).unwrap();

        println!("{}", to_string_pretty(&chunk).unwrap());
    }

    println!("Num Voxels: {} (from {})", chunk.voxels.len(), 8*8*8);
}
||||||
|
|
||||||
|
/// Builds 3D checkerboard volumes (worst case for an SVDAG: no uniform
/// regions) at side lengths 2, 4, 8, 16, 32, serializes each, and prints its
/// node count.
#[test]
fn test_voxel_dag_checkers() {
    use std::path::Path;
    use std::fs;
    use bincode;
    use serde_json::to_string_pretty;

    for i in 1..6 {
        let d = 1 << i;

        // Parity pattern; x varies fastest, matching the dense layout.
        let mut data : Vec<i32> = Vec::with_capacity((d * d * d) as usize);
        for z in 0..d {
            for y in 0..d {
                for x in 0..d {
                    data.push((x + y + z) % 2);
                }
            }
        }

        let chunk = VoxelChunk::from_dense_voxels(&data, [d as usize, d as usize, d as usize]);

        let path = format!("./data/dag/checker{:0>2}.svdag", d);
        let out_path = Path::new(&path);

        let serialized = bincode::serialize(&chunk).unwrap();
        fs::write(out_path, serialized).unwrap();

        println!("Checker (d={})", d);
        println!("{}", to_string_pretty(&chunk).unwrap());
        println!("Num Voxels: {} (from {})", chunk.voxels.len(), d*d*d);
    }
}
||||||
|
|
||||||
|
/// Builds a small solid blob centered in the volume at side lengths 2..32,
/// serializes each, and prints its node count.
///
/// BUG FIX: the per-size header previously printed "Checker (d=...)", copied
/// from test_voxel_dag_checkers; it now says "Octohedron".
/// NOTE(review): the generated shape is actually a 2x2x2 cube at the volume
/// center, not an octahedron — confirm the intended shape.
#[test]
fn test_voxel_dag_octohedron() {
    use std::path::Path;
    use std::fs;
    for i in 1..6 {
        let d = 1 << i;
        // Center coordinates: the solid cells are at dd-1 and dd on each axis.
        let dd = d >> 1;
        let data : Vec<i32> = (0..d).map(|z| {
            (0..d).map(move |y| {
                (0..d).map(move |x| if (x == dd || x == dd - 1) && (y == dd || y == dd - 1) && (z == dd || z == dd - 1) {1} else {0})
            }).flatten()
        }).flatten().collect();

        let chunk = VoxelChunk::from_dense_voxels(&data, [d as usize, d as usize, d as usize]);

        let path = format!("./data/dag/octohedron{:0>2}.svdag", d);

        let out_path = Path::new(&path);

        {
            use bincode;
            use serde_json::to_string_pretty;

            let serialized = bincode::serialize(&chunk).unwrap();

            fs::write(out_path, serialized).unwrap();

            println!("Octohedron (d={})", d);
            println!("{}", to_string_pretty(&chunk).unwrap());
            println!("Num Voxels: {} (from {})", chunk.voxels.len(), d*d*d);
        }
    }
}
||||||
|
|
||||||
|
|
||||||
|
/// Measures how far child references point from their parent node (in node
/// indices) after a topological sort of the hairball DAG, and how many would
/// fit in a 16-bit relative offset — presumably to evaluate a compressed
/// child-pointer encoding (TODO confirm intent).
#[test]
fn test_voxel_index_distance() {
    let data = std::fs::read("./data/dag/hairball.svdag").expect("could not read file");

    let mut chunk : VoxelChunk = bincode::deserialize(&data).expect("error deserializing voxels");

    // Count, running sums for mean/variance, and extrema of the distances.
    let mut n = 0usize;
    let mut avg = 0.0;
    let mut avg_sq = 0.0;
    let mut max = 0.0;
    let mut min = chunk.voxels.len() as f64;

    // How many references fit within a signed/unsigned 16-bit delta.
    let mut n_less_16bit = 0;

    chunk.topological_sort();

    for i in 0..(chunk.voxels.len()) {
        for j in 0..8 {
            let v = chunk.voxels[i].sub_voxels[j];

            // v > 0 marks an internal child reference; v - 1 is the child's
            // node index.
            if v > 0 {
                let d = (v as isize - 1 - i as isize).abs() ;

                if d < (1<<16) {
                    n_less_16bit += 1;
                }

                let d = d as f64;

                max = if d > max {d} else {max};
                min = if d < min {d} else {min};

                avg += d;
                avg_sq += d*d;

                n += 1;
            }
        }
    }

    avg /= n as f64;
    avg_sq /= n as f64;

    println!("AVG: {}", avg);
    // Standard deviation via E[d^2] - E[d]^2.
    println!("STD: {}", (avg_sq - avg*avg).sqrt());
    println!("MAX: {}", max);
    println!("MIN: {}", min);
    println!("16B: {}", n_less_16bit);
    println!("LEN: {}", n);
}
||||||
@ -0,0 +1,523 @@ |
|||||||
|
use super::*; |
||||||
|
|
||||||
|
use pbr::ProgressBar; |
||||||
|
|
||||||
|
use std::path::{Path, PathBuf}; |
||||||
|
|
||||||
|
fn recursively_subdivide(triangle : Triangle, area_cutoff : f32, buf : &mut Vec<Triangle>) { |
||||||
|
if triangle.area() < area_cutoff { |
||||||
|
buf.push(triangle); |
||||||
|
} else { |
||||||
|
|
||||||
|
//
|
||||||
|
// a
|
||||||
|
// / \
|
||||||
|
// d---f
|
||||||
|
// / \ / \
|
||||||
|
// b---e---c
|
||||||
|
//
|
||||||
|
|
||||||
|
let a = triangle.points[0]; |
||||||
|
let b = triangle.points[1]; |
||||||
|
let c = triangle.points[2]; |
||||||
|
|
||||||
|
let d = 0.5 * (a + b); |
||||||
|
let e = 0.5 * (b + c); |
||||||
|
let f = 0.5 * (c + a); |
||||||
|
|
||||||
|
let ta = triangle.uv[0]; |
||||||
|
let tb = triangle.uv[1]; |
||||||
|
let tc = triangle.uv[2]; |
||||||
|
|
||||||
|
let td = 0.5 * (ta + tb); |
||||||
|
let te = 0.5 * (tb + tc); |
||||||
|
let tf = 0.5 * (tc + ta); |
||||||
|
|
||||||
|
let t_adf = Triangle { points : [a, d, f], uv : [ta, td, tf], ..triangle }; |
||||||
|
let t_bed = Triangle { points : [b, e, d], uv : [tb, te, td], ..triangle }; |
||||||
|
let t_def = Triangle { points : [d, e, f], uv : [td, te, tf], ..triangle }; |
||||||
|
let t_cfe = Triangle { points : [c, f, e], uv : [tc, tf, te], ..triangle }; |
||||||
|
|
||||||
|
recursively_subdivide(t_adf, area_cutoff, buf); |
||||||
|
recursively_subdivide(t_bed, area_cutoff, buf); |
||||||
|
recursively_subdivide(t_def, area_cutoff, buf); |
||||||
|
recursively_subdivide(t_cfe, area_cutoff, buf); |
||||||
|
} |
||||||
|
} |
||||||
|
use obj::Obj; |
||||||
|
|
||||||
|
pub fn convert_obj_file_textured(obj_file : PathBuf, svdag_file : PathBuf, mat_file : PathBuf, depth : usize){ |
||||||
|
use std::path::Path; |
||||||
|
use std::fs; |
||||||
|
|
||||||
|
let mut obj_data : Obj = Obj::load(&obj_file).expect("Failed to load obj file"); |
||||||
|
|
||||||
|
|
||||||
|
let mut triangles = vec![]; |
||||||
|
|
||||||
|
let mut materials = HashMap::new(); |
||||||
|
|
||||||
|
use std::path::PathBuf; |
||||||
|
let mut obj_root = obj_file.clone(); |
||||||
|
obj_root.set_file_name(""); |
||||||
|
let mut material_mat_list = vec![]; |
||||||
|
let mut material_idx_list = vec![]; |
||||||
|
let mut material_tex_list = vec![]; |
||||||
|
let mut material_col_list = vec![]; |
||||||
|
|
||||||
|
let mut next_material = 0; |
||||||
|
|
||||||
|
for mtl in obj_data.data.material_libs.iter_mut() { |
||||||
|
use std::io::Read; |
||||||
|
use std::fs::File; |
||||||
|
|
||||||
|
println!("Reloading: {:?}", mtl.filename); |
||||||
|
|
||||||
|
mtl.reload(File::open(&obj_root.join(&mtl.filename)).unwrap()).unwrap(); |
||||||
|
|
||||||
|
for mat in &mtl.materials { |
||||||
|
|
||||||
|
let nid = materials.len(); |
||||||
|
materials.entry(mat.name.clone()).or_insert(nid); |
||||||
|
material_idx_list.push(next_material); |
||||||
|
|
||||||
|
let mat_offset = next_material; |
||||||
|
|
||||||
|
let mut unique_colors = HashSet::new(); |
||||||
|
|
||||||
|
if let Some(kd_tex_file) = &mat.map_kd { |
||||||
|
|
||||||
|
println!("Loading texture: {:?}", kd_tex_file); |
||||||
|
|
||||||
|
let img = read_image_maybe_tga(obj_root.join(kd_tex_file)); |
||||||
|
|
||||||
|
let img = img.into_rgb(); |
||||||
|
|
||||||
|
println!(" Finding Unique Colors..."); |
||||||
|
|
||||||
|
for (_,_,&image::Rgb(p)) in img.enumerate_pixels() { |
||||||
|
unique_colors.insert(p); |
||||||
|
} |
||||||
|
|
||||||
|
println!(" Unique Colors: {}", unique_colors.len()); |
||||||
|
|
||||||
|
next_material += unique_colors.len(); |
||||||
|
|
||||||
|
material_tex_list.push(Some(img)); |
||||||
|
material_col_list.push(Some(unique_colors.iter().cloned().collect::<Vec<_>>())); |
||||||
|
} else { |
||||||
|
next_material += 1; |
||||||
|
|
||||||
|
material_tex_list.push(None); |
||||||
|
material_col_list.push(None); |
||||||
|
} |
||||||
|
|
||||||
|
println!(" Material Offset: {}", mat_offset); |
||||||
|
|
||||||
|
let kdd = mat.kd.unwrap_or([0.0; 3]); |
||||||
|
|
||||||
|
material_mat_list.push(Material { |
||||||
|
albedo : kdd, |
||||||
|
metalness : mat.km.unwrap_or(0.0), |
||||||
|
emission : mat.ke.unwrap_or([0.0; 3]), |
||||||
|
roughness : 0.3, |
||||||
|
}); |
||||||
|
|
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
println!("Material Count: {}", materials.len()); |
||||||
|
|
||||||
|
println!("Processing Triangles..."); |
||||||
|
|
||||||
|
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); |
||||||
|
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); |
||||||
|
|
||||||
|
for &[x,y,z] in &obj_data.data.position { |
||||||
|
if x < min.x {min.x = x;} |
||||||
|
if y < min.y {min.y = y;} |
||||||
|
if z < min.z {min.z = z;} |
||||||
|
if x > max.x {max.x = x;} |
||||||
|
if y > max.y {max.y = y;} |
||||||
|
if z > max.z {max.z = z;} |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
let size = max - min; |
||||||
|
let mut max_size = size.x; |
||||||
|
if size.y > max_size {max_size = size.y;} |
||||||
|
if size.z > max_size {max_size = size.z;} |
||||||
|
|
||||||
|
println!("Max Size: {}", max_size); |
||||||
|
|
||||||
|
let area_cutoff = 4.0 * (max_size * max_size) / (4.0f32.powf(depth as f32)); |
||||||
|
|
||||||
|
println!("Area Cutoff: {}", area_cutoff); |
||||||
|
|
||||||
|
for o in 0..(obj_data.data.objects.len()) { |
||||||
|
let object = &obj_data.data.objects[o]; |
||||||
|
for g in 0..(object.groups.len()) { |
||||||
|
let group = &object.groups[g]; |
||||||
|
|
||||||
|
let next = materials.len(); |
||||||
|
|
||||||
|
let id = if let Some(obj::ObjMaterial::Ref(s)) = &group.material { |
||||||
|
*materials.entry(s.clone()).or_insert(next) |
||||||
|
} else { |
||||||
|
0 |
||||||
|
}; |
||||||
|
|
||||||
|
for p in 0..(group.polys.len()) { |
||||||
|
let poly = &group.polys[p]; |
||||||
|
for v in 2..(poly.0.len()) { |
||||||
|
let v0 = obj_data.data.position[poly.0[0].0]; |
||||||
|
let v1 = obj_data.data.position[poly.0[v-1].0]; |
||||||
|
let v2 = obj_data.data.position[poly.0[v].0]; |
||||||
|
|
||||||
|
let t0 = poly.0[0].1 .map(|i| obj_data.data.texture[i]).unwrap_or([0.0,0.0]); |
||||||
|
let t1 = poly.0[v-1].1.map(|i| obj_data.data.texture[i]).unwrap_or([0.0,0.0]); |
||||||
|
let t2 = poly.0[v].1 .map(|i| obj_data.data.texture[i]).unwrap_or([0.0,0.0]); |
||||||
|
|
||||||
|
let v0 = Vec3::new(v0[0], v0[1], v0[2]); |
||||||
|
let v1 = Vec3::new(v1[0], v1[1], v1[2]); |
||||||
|
let v2 = Vec3::new(v2[0], v2[1], v2[2]); |
||||||
|
|
||||||
|
let t0 = Vec2::new(t0[0], t0[1]); |
||||||
|
let t1 = Vec2::new(t1[0], t1[1]); |
||||||
|
let t2 = Vec2::new(t2[0], t2[1]); |
||||||
|
|
||||||
|
let t = Triangle{ |
||||||
|
points : [v0, v1, v2], |
||||||
|
normal : (v0 - v1).cross(v1 - v2), |
||||||
|
uv : [t0, t1, t2], |
||||||
|
mat : material_idx_list[id] as u16, |
||||||
|
..Default::default() |
||||||
|
}; |
||||||
|
|
||||||
|
let t_start = triangles.len(); |
||||||
|
|
||||||
|
recursively_subdivide(t, area_cutoff, &mut triangles); |
||||||
|
|
||||||
|
let ref tex = material_tex_list[id]; |
||||||
|
let ref col = material_col_list[id]; |
||||||
|
|
||||||
|
if let (Some(t), Some(col)) = (tex, col) { |
||||||
|
for tri in triangles[t_start..].iter_mut() { |
||||||
|
let cuv = tri.uv_center(); |
||||||
|
let c = texture_lookup(&t, cuv.x, cuv.y); |
||||||
|
let i = col.iter().enumerate().find_map(|(i, p)| if *p == c {Some(i)} else {None}).unwrap(); |
||||||
|
tri.mat += i as u16; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
println!("Triangles: {}", triangles.len()); |
||||||
|
|
||||||
|
let material_list = (0..(material_mat_list.len())) |
||||||
|
.flat_map(|i| { |
||||||
|
let m = &material_mat_list[i]; |
||||||
|
if let Some(ref c) = material_col_list[i] { |
||||||
|
c.iter() |
||||||
|
.map(|c| Material { |
||||||
|
albedo : [ |
||||||
|
c[0] as f32 / 255.0, |
||||||
|
c[1] as f32 / 255.0, |
||||||
|
c[2] as f32 / 255.0 |
||||||
|
], |
||||||
|
..*m |
||||||
|
}) |
||||||
|
.collect::<Vec<_>>() |
||||||
|
} else { |
||||||
|
vec![*m] |
||||||
|
} |
||||||
|
}) |
||||||
|
.collect::<Vec<_>>(); |
||||||
|
|
||||||
|
println!("Material Count: {}", material_list.len()); |
||||||
|
|
||||||
|
println!("Constructing SVDAG..."); |
||||||
|
use std::time::*; |
||||||
|
|
||||||
|
let mut pb = ProgressBar::new(8*8*8*8); |
||||||
|
|
||||||
|
let start = Instant::now(); |
||||||
|
let vchunk = VoxelChunk::from_mesh(depth, &triangles, min, max_size, &mut |t| { pb.total = t; pb.inc(); }); |
||||||
|
let elapsed = start.elapsed(); |
||||||
|
|
||||||
|
pb.finish(); |
||||||
|
|
||||||
|
println!("Time to voxelize: {:?}", elapsed); |
||||||
|
println!("DAG nodes: {}", vchunk.len()); |
||||||
|
|
||||||
|
let serialized = bincode::serialize(&vchunk).unwrap(); |
||||||
|
let serialized_mats = bincode::serialize(&material_list).unwrap(); |
||||||
|
|
||||||
|
fs::write(svdag_file, serialized).unwrap(); |
||||||
|
fs::write(mat_file, serialized_mats).unwrap(); |
||||||
|
} |
||||||
|
|
||||||
|
pub fn convert_obj_file_with_materials(obj_file : PathBuf, svdag_file : PathBuf, mat_file : PathBuf, depth : usize){ |
||||||
|
use obj::Obj; |
||||||
|
use std::path::Path; |
||||||
|
use std::fs; |
||||||
|
|
||||||
|
let mut obj_data = Obj::load(&obj_file).expect("Failed to load obj file"); |
||||||
|
|
||||||
|
|
||||||
|
let mut triangles = vec![]; |
||||||
|
|
||||||
|
let mut materials = HashMap::new(); |
||||||
|
|
||||||
|
use std::path::PathBuf; |
||||||
|
let mut obj_root = obj_file.clone(); |
||||||
|
obj_root.set_file_name(""); |
||||||
|
let mut material_list = vec![Material::default(); materials.len()]; |
||||||
|
|
||||||
|
for mtl in obj_data.data.material_libs.iter_mut() { |
||||||
|
use std::io::Read; |
||||||
|
use std::fs::File; |
||||||
|
|
||||||
|
println!("Reloading: {:?}", mtl.filename); |
||||||
|
|
||||||
|
mtl.reload(File::open(&obj_root.join(&mtl.filename)).unwrap()).unwrap(); |
||||||
|
|
||||||
|
for mat in &mtl.materials { |
||||||
|
let kd = if let Some(kd_tex_file) = &mat.map_kd { |
||||||
|
|
||||||
|
println!("Loading texture: {:?}", kd_tex_file); |
||||||
|
|
||||||
|
let img = read_image_maybe_tga(obj_root.join(kd_tex_file)); |
||||||
|
|
||||||
|
println!(" img: {:?}", img.color()); |
||||||
|
|
||||||
|
let img = img.into_rgb(); |
||||||
|
|
||||||
|
let (w, h) = img.dimensions(); |
||||||
|
|
||||||
|
println!(" Averaging..."); |
||||||
|
|
||||||
|
let mut color = [0.0; 3]; |
||||||
|
|
||||||
|
for (_,_,&image::Rgb(p)) in img.enumerate_pixels() { |
||||||
|
color[0] += p[0] as f32; |
||||||
|
color[1] += p[1] as f32; |
||||||
|
color[2] += p[2] as f32; |
||||||
|
} |
||||||
|
|
||||||
|
color[0] /= (w * h * 255) as f32; |
||||||
|
color[1] /= (w * h * 255) as f32; |
||||||
|
color[2] /= (w * h * 255) as f32; |
||||||
|
|
||||||
|
println!(" Color: {:?}", color); |
||||||
|
|
||||||
|
color |
||||||
|
} else { |
||||||
|
[1.0; 3] |
||||||
|
}; |
||||||
|
|
||||||
|
let next = materials.len(); |
||||||
|
let _id = *materials.entry(mat.name.clone()).or_insert(next); |
||||||
|
|
||||||
|
let mut kdd = mat.kd.unwrap_or([0.0; 3]); |
||||||
|
|
||||||
|
kdd[0] *= kd[0]; |
||||||
|
kdd[1] *= kd[1]; |
||||||
|
kdd[2] *= kd[2]; |
||||||
|
|
||||||
|
material_list.push(Material { |
||||||
|
albedo : kdd, |
||||||
|
metalness : mat.km.unwrap_or(0.0), |
||||||
|
emission : mat.ke.unwrap_or([0.0; 3]), |
||||||
|
roughness : 0.3, |
||||||
|
}); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
println!("Material Count: {}", materials.len()); |
||||||
|
|
||||||
|
println!("Processing Triangles..."); |
||||||
|
|
||||||
|
for o in 0..(obj_data.data.objects.len()) { |
||||||
|
let object = &obj_data.data.objects[o]; |
||||||
|
for g in 0..(object.groups.len()) { |
||||||
|
let group = &object.groups[g]; |
||||||
|
|
||||||
|
let next = materials.len(); |
||||||
|
|
||||||
|
let id = if let Some(obj::ObjMaterial::Ref(s)) = &group.material { |
||||||
|
*materials.entry(s.clone()).or_insert(next) as u16 |
||||||
|
} else { |
||||||
|
0 |
||||||
|
}; |
||||||
|
|
||||||
|
for p in 0..(group.polys.len()) { |
||||||
|
let poly = &group.polys[p]; |
||||||
|
for v in 2..(poly.0.len()) { |
||||||
|
let v0 = obj_data.data.position[poly.0[0].0]; |
||||||
|
let v1 = obj_data.data.position[poly.0[v-1].0]; |
||||||
|
let v2 = obj_data.data.position[poly.0[v].0]; |
||||||
|
|
||||||
|
|
||||||
|
let v0 = Vec3::new(v0[0], v0[1], v0[2]); |
||||||
|
let v1 = Vec3::new(v1[0], v1[1], v1[2]); |
||||||
|
let v2 = Vec3::new(v2[0], v2[1], v2[2]); |
||||||
|
|
||||||
|
triangles.push(Triangle{ |
||||||
|
points : [v0, v1, v2], |
||||||
|
normal : (v0 - v1).cross(v1 - v2), |
||||||
|
// mat : 0,
|
||||||
|
mat : id, |
||||||
|
..Default::default() |
||||||
|
}); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
println!("Triangles: {}", triangles.len()); |
||||||
|
|
||||||
|
|
||||||
|
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); |
||||||
|
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); |
||||||
|
|
||||||
|
for [x,y,z] in obj_data.data.position { |
||||||
|
if x < min.x {min.x = x;} |
||||||
|
if y < min.y {min.y = y;} |
||||||
|
if z < min.z {min.z = z;} |
||||||
|
if x > max.x {max.x = x;} |
||||||
|
if y > max.y {max.y = y;} |
||||||
|
if z > max.z {max.z = z;} |
||||||
|
} |
||||||
|
|
||||||
|
let size = max - min; |
||||||
|
let mut max_size = size.x; |
||||||
|
if size.y > max_size {max_size = size.y;} |
||||||
|
if size.z > max_size {max_size = size.z;} |
||||||
|
|
||||||
|
|
||||||
|
println!("Constructing SVDAG..."); |
||||||
|
use std::time::*; |
||||||
|
let mut pb = ProgressBar::new(8*8*8*8); |
||||||
|
|
||||||
|
let start = Instant::now(); |
||||||
|
let vchunk = VoxelChunk::from_mesh(depth, &triangles, min, max_size, &mut |t| { pb.total = t; pb.inc(); }); |
||||||
|
let elapsed = start.elapsed(); |
||||||
|
|
||||||
|
pb.finish(); |
||||||
|
|
||||||
|
println!("Time to voxelize: {:?}", elapsed); |
||||||
|
println!("DAG nodes: {}", vchunk.len()); |
||||||
|
|
||||||
|
let serialized = bincode::serialize(&vchunk).unwrap(); |
||||||
|
let serialized_mats = bincode::serialize(&material_list).unwrap(); |
||||||
|
|
||||||
|
fs::write(svdag_file, serialized).unwrap(); |
||||||
|
fs::write(mat_file, serialized_mats).unwrap(); |
||||||
|
} |
||||||
|
|
||||||
|
pub fn convert_obj_file(obj_file : PathBuf, svdag_file : PathBuf, depth : usize){ |
||||||
|
use obj::Obj; |
||||||
|
use std::path::Path; |
||||||
|
use std::fs; |
||||||
|
|
||||||
|
let obj_data = Obj::load(&obj_file).expect("Failed to load obj file"); |
||||||
|
|
||||||
|
let mut triangles = vec![]; |
||||||
|
|
||||||
|
use std::path::PathBuf; |
||||||
|
|
||||||
|
println!("Processing Triangles..."); |
||||||
|
|
||||||
|
for o in 0..(obj_data.data.objects.len()) { |
||||||
|
let object = &obj_data.data.objects[o]; |
||||||
|
for g in 0..(object.groups.len()) { |
||||||
|
let group = &object.groups[g]; |
||||||
|
|
||||||
|
for p in 0..(group.polys.len()) { |
||||||
|
let poly = &group.polys[p]; |
||||||
|
for v in 2..(poly.0.len()) { |
||||||
|
let v0 = obj_data.data.position[poly.0[0].0]; |
||||||
|
let v1 = obj_data.data.position[poly.0[v-1].0]; |
||||||
|
let v2 = obj_data.data.position[poly.0[v].0]; |
||||||
|
|
||||||
|
|
||||||
|
let v0 = Vec3::new(v0[0], v0[1], v0[2]); |
||||||
|
let v1 = Vec3::new(v1[0], v1[1], v1[2]); |
||||||
|
let v2 = Vec3::new(v2[0], v2[1], v2[2]); |
||||||
|
|
||||||
|
triangles.push(Triangle{ |
||||||
|
points : [v0, v1, v2], |
||||||
|
normal : (v0 - v1).cross(v1 - v2), |
||||||
|
mat : 0, |
||||||
|
..Default::default() |
||||||
|
}); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
println!("Triangles: {}", triangles.len()); |
||||||
|
|
||||||
|
|
||||||
|
let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); |
||||||
|
let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); |
||||||
|
|
||||||
|
for [x,y,z] in obj_data.data.position { |
||||||
|
if x < min.x {min.x = x;} |
||||||
|
if y < min.y {min.y = y;} |
||||||
|
if z < min.z {min.z = z;} |
||||||
|
if x > max.x {max.x = x;} |
||||||
|
if y > max.y {max.y = y;} |
||||||
|
if z > max.z {max.z = z;} |
||||||
|
} |
||||||
|
|
||||||
|
let size = max - min; |
||||||
|
let mut max_size = size.x; |
||||||
|
if size.y > max_size {max_size = size.y;} |
||||||
|
if size.z > max_size {max_size = size.z;} |
||||||
|
|
||||||
|
|
||||||
|
println!("Constructing SVDAG..."); |
||||||
|
use std::time::*; |
||||||
|
|
||||||
|
let mut pb = ProgressBar::new(8*8*8*8); |
||||||
|
|
||||||
|
let start = Instant::now(); |
||||||
|
let vchunk = VoxelChunk::from_mesh(depth, &triangles, min, max_size, &mut |t| { pb.total = t; pb.inc(); }); |
||||||
|
let elapsed = start.elapsed(); |
||||||
|
|
||||||
|
pb.finish(); |
||||||
|
|
||||||
|
println!("Time to voxelize: {:?}", elapsed); |
||||||
|
println!("DAG nodes: {}", vchunk.len()); |
||||||
|
|
||||||
|
let serialized = bincode::serialize(&vchunk).unwrap(); |
||||||
|
|
||||||
|
fs::write(svdag_file, serialized).unwrap(); |
||||||
|
} |
||||||
|
|
||||||
|
use image; |
||||||
|
fn read_image_maybe_tga<P : AsRef<Path>>(path : P) -> image::DynamicImage { |
||||||
|
let path : &Path = path.as_ref(); |
||||||
|
let bytes = std::fs::read(path).unwrap(); |
||||||
|
|
||||||
|
let byte_stream = std::io::Cursor::new(&bytes); |
||||||
|
|
||||||
|
let mut reader = image::io::Reader::new(byte_stream); |
||||||
|
|
||||||
|
// somewhat sketchy logic to deal with some tga files I had
|
||||||
|
if path.extension().map(|ext| ext.to_string_lossy().to_string()) == Some("tga".to_string()) { |
||||||
|
reader.set_format(image::ImageFormat::Tga); |
||||||
|
} else { |
||||||
|
reader = reader.with_guessed_format().unwrap(); |
||||||
|
} |
||||||
|
|
||||||
|
let image = reader.decode().unwrap(); |
||||||
|
|
||||||
|
image
|
||||||
|
} |
||||||
|
|
||||||
Loading…
Reference in new issue