Merge branch 'master' into infinite_fall_memory

This commit is contained in:
Robert Hrusecky
2020-10-06 14:22:26 -05:00
39 changed files with 1781 additions and 707 deletions

View File

@@ -1,255 +0,0 @@
use crate::geometry::ColliderHandle;
use ncollide::bounding_volume::AABB;
#[cfg(feature = "simd-is-enabled")]
use {
crate::geometry::WAABB,
crate::math::{Point, SIMD_WIDTH},
crate::utils::WVec,
simba::simd::SimdBool as _,
};
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct ColliderPair {
pub collider1: ColliderHandle,
pub collider2: ColliderHandle,
}
impl ColliderPair {
pub fn new(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
ColliderPair {
collider1,
collider2,
}
}
pub fn new_sorted(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
if collider1.into_raw_parts().0 <= collider2.into_raw_parts().0 {
Self::new(collider1, collider2)
} else {
Self::new(collider2, collider1)
}
}
pub fn swap(self) -> Self {
Self::new(self.collider2, self.collider1)
}
pub fn zero() -> Self {
Self {
collider1: ColliderHandle::from_raw_parts(0, 0),
collider2: ColliderHandle::from_raw_parts(0, 0),
}
}
}
pub struct WAABBHierarchyIntersections {
curr_level_interferences: Vec<usize>,
next_level_interferences: Vec<usize>,
}
impl WAABBHierarchyIntersections {
pub fn new() -> Self {
Self {
curr_level_interferences: Vec::new(),
next_level_interferences: Vec::new(),
}
}
pub fn computed_interferences(&self) -> &[usize] {
&self.curr_level_interferences[..]
}
pub(crate) fn computed_interferences_mut(&mut self) -> &mut Vec<usize> {
&mut self.curr_level_interferences
}
}
#[cfg(feature = "simd-is-enabled")]
#[derive(Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct WAABBHierarchy {
levels: Vec<Vec<WAABB>>,
}
#[cfg(feature = "simd-is-enabled")]
impl WAABBHierarchy {
pub fn new(aabbs: &[AABB<f32>]) -> Self {
let mut waabbs: Vec<_> = aabbs
.chunks_exact(SIMD_WIDTH)
.map(|aabbs| WAABB::from(array![|ii| aabbs[ii]; SIMD_WIDTH]))
.collect();
if aabbs.len() % SIMD_WIDTH != 0 {
let first_i = (aabbs.len() / SIMD_WIDTH) * SIMD_WIDTH;
let last_i = aabbs.len() - 1;
let last_waabb =
WAABB::from(array![|ii| aabbs[(first_i + ii).min(last_i)]; SIMD_WIDTH]);
waabbs.push(last_waabb);
}
let mut levels = vec![waabbs];
loop {
let last_level = levels.last().unwrap();
let mut next_level = Vec::new();
for chunk in last_level.chunks_exact(SIMD_WIDTH) {
let mins = Point::from(array![|ii| chunk[ii].mins.horizontal_inf(); SIMD_WIDTH]);
let maxs = Point::from(array![|ii| chunk[ii].maxs.horizontal_sup(); SIMD_WIDTH]);
next_level.push(WAABB::new(mins, maxs));
}
// Deal with the last non-exact chunk.
if last_level.len() % SIMD_WIDTH != 0 {
let first_id = (last_level.len() / SIMD_WIDTH) * SIMD_WIDTH;
let last_id = last_level.len() - 1;
let mins = array![|ii| last_level[(first_id + ii).min(last_id)]
.mins
.horizontal_inf(); SIMD_WIDTH];
let maxs = array![|ii| last_level[(first_id + ii).min(last_id)]
.maxs
.horizontal_sup(); SIMD_WIDTH];
let mins = Point::from(mins);
let maxs = Point::from(maxs);
next_level.push(WAABB::new(mins, maxs));
}
if next_level.len() == 1 {
levels.push(next_level);
break;
}
levels.push(next_level);
}
Self { levels }
}
pub fn compute_interferences_with(
&self,
aabb: AABB<f32>,
workspace: &mut WAABBHierarchyIntersections,
) {
let waabb1 = WAABB::splat(aabb);
workspace.next_level_interferences.clear();
workspace.curr_level_interferences.clear();
workspace.curr_level_interferences.push(0);
for level in self.levels.iter().rev() {
for i in &workspace.curr_level_interferences {
// This `if let` handle the case when `*i` is out of bounds because
// the initial number of aabbs was not a power of SIMD_WIDTH.
if let Some(waabb2) = level.get(*i) {
// NOTE: using `intersect.bitmask()` and performing bit comparisons
// is much more efficient than testing if each intersect.extract(i) is true.
let intersect = waabb1.intersects_lanewise(waabb2);
let bitmask = intersect.bitmask();
for j in 0..SIMD_WIDTH {
if (bitmask & (1 << j)) != 0 {
workspace.next_level_interferences.push(i * SIMD_WIDTH + j)
}
}
}
}
std::mem::swap(
&mut workspace.curr_level_interferences,
&mut workspace.next_level_interferences,
);
workspace.next_level_interferences.clear();
}
}
}
#[cfg(not(feature = "simd-is-enabled"))]
#[derive(Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct WAABBHierarchy {
levels: Vec<Vec<AABB<f32>>>,
}
#[cfg(not(feature = "simd-is-enabled"))]
impl WAABBHierarchy {
const GROUP_SIZE: usize = 4;
pub fn new(aabbs: &[AABB<f32>]) -> Self {
use ncollide::bounding_volume::BoundingVolume;
let mut levels = vec![aabbs.to_vec()];
loop {
let last_level = levels.last().unwrap();
let mut next_level = Vec::new();
for chunk in last_level.chunks(Self::GROUP_SIZE) {
let mut merged = chunk[0];
for aabb in &chunk[1..] {
merged.merge(aabb)
}
next_level.push(merged);
}
if next_level.len() == 1 {
levels.push(next_level);
break;
}
levels.push(next_level);
}
Self { levels }
}
pub fn compute_interferences_with(
&self,
aabb1: AABB<f32>,
workspace: &mut WAABBHierarchyIntersections,
) {
use ncollide::bounding_volume::BoundingVolume;
workspace.next_level_interferences.clear();
workspace.curr_level_interferences.clear();
workspace.curr_level_interferences.push(0);
for level in self.levels[1..].iter().rev() {
for i in &workspace.curr_level_interferences {
for j in 0..Self::GROUP_SIZE {
if let Some(aabb2) = level.get(*i + j) {
if aabb1.intersects(aabb2) {
workspace
.next_level_interferences
.push((i + j) * Self::GROUP_SIZE)
}
}
}
}
std::mem::swap(
&mut workspace.curr_level_interferences,
&mut workspace.next_level_interferences,
);
workspace.next_level_interferences.clear();
}
// Last level.
for i in &workspace.curr_level_interferences {
for j in 0..Self::GROUP_SIZE {
if let Some(aabb2) = self.levels[0].get(*i + j) {
if aabb1.intersects(aabb2) {
workspace.next_level_interferences.push(i + j)
}
}
}
}
std::mem::swap(
&mut workspace.curr_level_interferences,
&mut workspace.next_level_interferences,
);
workspace.next_level_interferences.clear();
}
}

View File

@@ -1,5 +1,6 @@
use crate::data::pubsub::Subscription;
use crate::dynamics::RigidBodySet;
use crate::geometry::{ColliderHandle, ColliderPair, ColliderSet};
use crate::geometry::{ColliderHandle, ColliderSet, RemovedCollider};
use crate::math::{Point, Vector, DIM};
#[cfg(feature = "enhanced-determinism")]
use crate::utils::FxHashMap32 as HashMap;
@@ -15,6 +16,41 @@ const NEXT_FREE_SENTINEL: u32 = u32::MAX;
const SENTINEL_VALUE: f32 = f32::MAX;
const CELL_WIDTH: f32 = 20.0;
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct ColliderPair {
pub collider1: ColliderHandle,
pub collider2: ColliderHandle,
}
impl ColliderPair {
pub fn new(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
ColliderPair {
collider1,
collider2,
}
}
pub fn new_sorted(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
if collider1.into_raw_parts().0 <= collider2.into_raw_parts().0 {
Self::new(collider1, collider2)
} else {
Self::new(collider2, collider1)
}
}
pub fn swap(self) -> Self {
Self::new(self.collider2, self.collider1)
}
pub fn zero() -> Self {
Self {
collider1: ColliderHandle::from_raw_parts(0, 0),
collider2: ColliderHandle::from_raw_parts(0, 0),
}
}
}
pub enum BroadPhasePairEvent {
AddPair(ColliderPair),
DeletePair(ColliderPair),
@@ -392,6 +428,7 @@ impl SAPRegion {
pub struct BroadPhase {
proxies: Proxies,
regions: HashMap<Point<i32>, SAPRegion>,
removed_colliders: Option<Subscription<RemovedCollider>>,
deleted_any: bool,
// We could think serializing this workspace is useless.
// It turns out is is important to serialize at least its capacity
@@ -480,6 +517,7 @@ impl BroadPhase {
/// Create a new empty broad-phase.
pub fn new() -> Self {
BroadPhase {
removed_colliders: None,
proxies: Proxies::new(),
regions: HashMap::default(),
reporting: HashMap::default(),
@@ -487,46 +525,60 @@ impl BroadPhase {
}
}
pub(crate) fn remove_colliders(&mut self, handles: &[ColliderHandle], colliders: &ColliderSet) {
for collider in handles.iter().filter_map(|h| colliders.get(*h)) {
if collider.proxy_index == crate::INVALID_USIZE {
// This collider has not been added to the broad-phase yet.
continue;
/// Maintain the broad-phase internal state by taking collider removal into account.
pub fn maintain(&mut self, colliders: &mut ColliderSet) {
// Ensure we already subscribed.
if self.removed_colliders.is_none() {
self.removed_colliders = Some(colliders.removed_colliders.subscribe());
}
let mut cursor = self.removed_colliders.take().unwrap();
for collider in colliders.removed_colliders.read(&cursor) {
self.remove_collider(collider.proxy_index);
}
colliders.removed_colliders.ack(&mut cursor);
self.removed_colliders = Some(cursor);
}
fn remove_collider<'a>(&mut self, proxy_index: usize) {
if proxy_index == crate::INVALID_USIZE {
// This collider has not been added to the broad-phase yet.
return;
}
let proxy = &mut self.proxies[proxy_index];
// Push the proxy to infinity, but not beyond the sentinels.
proxy.aabb.mins.coords.fill(SENTINEL_VALUE / 2.0);
proxy.aabb.maxs.coords.fill(SENTINEL_VALUE / 2.0);
// Discretize the AABB to find the regions that need to be invalidated.
let start = point_key(proxy.aabb.mins);
let end = point_key(proxy.aabb.maxs);
#[cfg(feature = "dim2")]
for i in start.x..=end.x {
for j in start.y..=end.y {
if let Some(region) = self.regions.get_mut(&Point::new(i, j)) {
region.predelete_proxy(proxy_index);
self.deleted_any = true;
}
}
}
let proxy = &mut self.proxies[collider.proxy_index];
// Push the proxy to infinity, but not beyond the sentinels.
proxy.aabb.mins.coords.fill(SENTINEL_VALUE / 2.0);
proxy.aabb.maxs.coords.fill(SENTINEL_VALUE / 2.0);
// Discretize the AABB to find the regions that need to be invalidated.
let start = point_key(proxy.aabb.mins);
let end = point_key(proxy.aabb.maxs);
#[cfg(feature = "dim2")]
for i in start.x..=end.x {
for j in start.y..=end.y {
if let Some(region) = self.regions.get_mut(&Point::new(i, j)) {
region.predelete_proxy(collider.proxy_index);
#[cfg(feature = "dim3")]
for i in start.x..=end.x {
for j in start.y..=end.y {
for k in start.z..=end.z {
if let Some(region) = self.regions.get_mut(&Point::new(i, j, k)) {
region.predelete_proxy(proxy_index);
self.deleted_any = true;
}
}
}
#[cfg(feature = "dim3")]
for i in start.x..=end.x {
for j in start.y..=end.y {
for k in start.z..=end.z {
if let Some(region) = self.regions.get_mut(&Point::new(i, j, k)) {
region.predelete_proxy(collider.proxy_index);
self.deleted_any = true;
}
}
}
}
self.proxies.remove(collider.proxy_index);
}
self.proxies.remove(proxy_index);
}
pub(crate) fn update_aabbs(
@@ -664,16 +716,13 @@ impl BroadPhase {
mod test {
use crate::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
use crate::geometry::{BroadPhase, ColliderBuilder, ColliderSet, NarrowPhase};
use crate::pipeline::PhysicsPipeline;
#[test]
fn test_add_update_remove() {
let mut broad_phase = BroadPhase::new();
let mut narrow_phase = NarrowPhase::new();
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let mut joints = JointSet::new();
let mut pipeline = PhysicsPipeline::new();
let rb = RigidBodyBuilder::new_dynamic().build();
let co = ColliderBuilder::ball(0.5).build();
@@ -682,15 +731,8 @@ mod test {
broad_phase.update_aabbs(0.0, &bodies, &mut colliders);
pipeline.remove_rigid_body(
hrb,
&mut broad_phase,
&mut narrow_phase,
&mut bodies,
&mut colliders,
&mut joints,
);
bodies.remove(hrb, &mut colliders, &mut joints);
broad_phase.maintain(&mut colliders);
broad_phase.update_aabbs(0.0, &bodies, &mut colliders);
// Create another body.

View File

@@ -1,11 +1,12 @@
use crate::dynamics::{MassProperties, RigidBodyHandle, RigidBodySet};
use crate::geometry::{
Ball, Capsule, ColliderGraphIndex, Contact, Cuboid, HeightField, InteractionGraph, Polygon,
Proximity, Triangle, Trimesh,
Proximity, Ray, RayIntersection, Triangle, Trimesh,
};
use crate::math::{AngVector, Isometry, Point, Rotation, Vector};
use na::Point3;
use ncollide::bounding_volume::{HasBoundingVolume, AABB};
use ncollide::query::RayCast;
use num::Zero;
#[derive(Clone)]
@@ -97,6 +98,49 @@ impl Shape {
Shape::HeightField(heightfield) => heightfield.bounding_volume(position),
}
}
/// Computes the first intersection point between a ray in this collider.
///
/// Some shapes are not supported yet and will always return `None`.
///
/// # Parameters
/// - `position`: the position of this shape.
/// - `ray`: the ray to cast.
/// - `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
/// limits the length of the ray to `ray.dir.norm() * max_toi`. Use `f32::MAX` for an unbounded ray.
pub fn cast_ray(
&self,
position: &Isometry<f32>,
ray: &Ray,
max_toi: f32,
) -> Option<RayIntersection> {
match self {
Shape::Ball(ball) => ball.toi_and_normal_with_ray(position, ray, max_toi, true),
Shape::Polygon(_poly) => None,
Shape::Capsule(caps) => {
let pos = position * caps.transform_wrt_y();
let caps = ncollide::shape::Capsule::new(caps.half_height(), caps.radius);
caps.toi_and_normal_with_ray(&pos, ray, max_toi, true)
}
Shape::Cuboid(cuboid) => cuboid.toi_and_normal_with_ray(position, ray, max_toi, true),
#[cfg(feature = "dim2")]
Shape::Triangle(_) | Shape::Trimesh(_) => {
// This is not implemented yet in 2D.
None
}
#[cfg(feature = "dim3")]
Shape::Triangle(triangle) => {
triangle.toi_and_normal_with_ray(position, ray, max_toi, true)
}
#[cfg(feature = "dim3")]
Shape::Trimesh(trimesh) => {
trimesh.toi_and_normal_with_ray(position, ray, max_toi, true)
}
Shape::HeightField(heightfield) => {
heightfield.toi_and_normal_with_ray(position, ray, max_toi, true)
}
}
}
}
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
@@ -353,6 +397,12 @@ impl ColliderBuilder {
self
}
/// Sets the restitution coefficient of the collider this builder will build.
pub fn restitution(mut self, restitution: f32) -> Self {
self.restitution = restitution;
self
}
/// Sets the density of the collider this builder will build.
pub fn density(mut self, density: f32) -> Self {
self.density = Some(density);

View File

@@ -1,14 +1,25 @@
use crate::data::arena::Arena;
use crate::data::pubsub::PubSub;
use crate::dynamics::{RigidBodyHandle, RigidBodySet};
use crate::geometry::Collider;
use crate::geometry::{Collider, ColliderGraphIndex};
use std::ops::{Index, IndexMut};
/// The unique identifier of a collider added to a collider set.
pub type ColliderHandle = crate::data::arena::Index;
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub(crate) struct RemovedCollider {
pub handle: ColliderHandle,
pub(crate) contact_graph_index: ColliderGraphIndex,
pub(crate) proximity_graph_index: ColliderGraphIndex,
pub(crate) proxy_index: usize,
}
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// A set of colliders that can be handled by a physics `World`.
pub struct ColliderSet {
pub(crate) removed_colliders: PubSub<RemovedCollider>,
pub(crate) colliders: Arena<Collider>,
}
@@ -16,6 +27,7 @@ impl ColliderSet {
/// Create a new empty set of colliders.
pub fn new() -> Self {
ColliderSet {
removed_colliders: PubSub::new(),
colliders: Arena::new(),
}
}
@@ -26,7 +38,7 @@ impl ColliderSet {
}
/// Iterate through all the colliders on this set.
pub fn iter(&self) -> impl Iterator<Item = (ColliderHandle, &Collider)> {
pub fn iter(&self) -> impl ExactSizeIterator<Item = (ColliderHandle, &Collider)> {
self.colliders.iter()
}
@@ -60,8 +72,35 @@ impl ColliderSet {
handle
}
pub(crate) fn remove_internal(&mut self, handle: ColliderHandle) -> Option<Collider> {
self.colliders.remove(handle)
/// Remove a collider from this set and update its parent accordingly.
pub fn remove(
&mut self,
handle: ColliderHandle,
bodies: &mut RigidBodySet,
) -> Option<Collider> {
let collider = self.colliders.remove(handle)?;
/*
* Delete the collider from its parent body.
*/
if let Some(parent) = bodies.get_mut_internal(collider.parent) {
parent.remove_collider_internal(handle, &collider);
bodies.wake_up(collider.parent, true);
}
/*
* Publish removal.
*/
let message = RemovedCollider {
handle,
contact_graph_index: collider.contact_graph_index,
proximity_graph_index: collider.proximity_graph_index,
proxy_index: collider.proxy_index,
};
self.removed_colliders.publish(message);
Some(collider)
}
/// Gets the collider with the given handle without a known generation.

View File

@@ -71,10 +71,10 @@ fn do_generate_contacts(
} else {
manifold.subshape_index_pair.1
};
println!(
"Restoring for {} [chosen with {:?}]",
subshape_id, manifold.subshape_index_pair
);
// println!(
// "Restoring for {} [chosen with {:?}]",
// subshape_id, manifold.subshape_index_pair
// );
// Use dummy shapes for the dispatch.
#[cfg(feature = "dim2")]

View File

@@ -1,11 +1,11 @@
use crate::geometry::contact_generator::{
ContactGenerationContext, PrimitiveContactGenerationContext,
};
use crate::geometry::{Collider, ContactManifold, Shape, Trimesh, WAABBHierarchyIntersections};
use crate::geometry::{Collider, ContactManifold, Shape, Trimesh};
use crate::ncollide::bounding_volume::{BoundingVolume, AABB};
pub struct TrimeshShapeContactGeneratorWorkspace {
interferences: WAABBHierarchyIntersections,
interferences: Vec<usize>,
local_aabb2: AABB<f32>,
old_interferences: Vec<usize>,
old_manifolds: Vec<ContactManifold>,
@@ -14,7 +14,7 @@ pub struct TrimeshShapeContactGeneratorWorkspace {
impl TrimeshShapeContactGeneratorWorkspace {
pub fn new() -> Self {
Self {
interferences: WAABBHierarchyIntersections::new(),
interferences: Vec::new(),
local_aabb2: AABB::new_invalid(),
old_interferences: Vec::new(),
old_manifolds: Vec::new(),
@@ -74,7 +74,7 @@ fn do_generate_contacts(
let local_aabb2 = new_local_aabb2; // .loosened(ctxt.prediction_distance * 2.0); // FIXME: what would be the best value?
std::mem::swap(
&mut workspace.old_interferences,
workspace.interferences.computed_interferences_mut(),
&mut workspace.interferences,
);
std::mem::swap(&mut workspace.old_manifolds, &mut ctxt.pair.manifolds);
ctxt.pair.manifolds.clear();
@@ -108,16 +108,17 @@ fn do_generate_contacts(
// workspace.old_manifolds.len()
// );
workspace.interferences.clear();
trimesh1
.waabbs()
.compute_interferences_with(local_aabb2, &mut workspace.interferences);
.intersect_aabb(&local_aabb2, &mut workspace.interferences);
workspace.local_aabb2 = local_aabb2;
}
/*
* Dispatch to the specific solver by keeping the previous manifold if we already had one.
*/
let new_interferences = workspace.interferences.computed_interferences();
let new_interferences = &workspace.interferences;
let mut old_inter_it = workspace.old_interferences.drain(..).peekable();
let mut old_manifolds_it = workspace.old_manifolds.drain(..);

View File

@@ -36,11 +36,15 @@ pub type AABB = ncollide::bounding_volume::AABB<f32>;
pub type ContactEvent = ncollide::pipeline::ContactEvent<ColliderHandle>;
/// Event triggered when a sensor collider starts or stop being in proximity with another collider (sensor or not).
pub type ProximityEvent = ncollide::pipeline::ProximityEvent<ColliderHandle>;
/// A ray that can be cast against colliders.
pub type Ray = ncollide::query::Ray<f32>;
/// The intersection between a ray and a collider.
pub type RayIntersection = ncollide::query::RayIntersection<f32>;
#[cfg(feature = "simd-is-enabled")]
pub(crate) use self::ball::WBall;
pub(crate) use self::broad_phase::{ColliderPair, WAABBHierarchy, WAABBHierarchyIntersections};
pub(crate) use self::broad_phase_multi_sap::BroadPhasePairEvent;
pub(crate) use self::broad_phase_multi_sap::{BroadPhasePairEvent, ColliderPair};
pub(crate) use self::collider_set::RemovedCollider;
#[cfg(feature = "simd-is-enabled")]
pub(crate) use self::contact::WContact;
#[cfg(feature = "dim2")]
@@ -48,12 +52,11 @@ pub(crate) use self::contact_generator::{clip_segments, clip_segments_with_norma
pub(crate) use self::narrow_phase::ContactManifoldIndex;
#[cfg(feature = "dim3")]
pub(crate) use self::polyhedron_feature3d::PolyhedronFace;
#[cfg(feature = "simd-is-enabled")]
pub(crate) use self::waabb::WAABB;
pub(crate) use self::waabb::{WRay, WAABB};
pub(crate) use self::wquadtree::WQuadtree;
//pub(crate) use self::z_order::z_cmp_floats;
mod ball;
mod broad_phase;
mod broad_phase_multi_sap;
mod capsule;
mod collider;
@@ -75,6 +78,6 @@ mod proximity_detector;
pub(crate) mod sat;
pub(crate) mod triangle;
mod trimesh;
#[cfg(feature = "simd-is-enabled")]
mod waabb;
mod wquadtree;
//mod z_order;

View File

@@ -14,13 +14,16 @@ use crate::geometry::proximity_detector::{
// proximity_detector::ProximityDetectionContextSimd, WBall,
//};
use crate::geometry::{
BroadPhasePairEvent, ColliderHandle, ContactEvent, ProximityEvent, ProximityPair,
BroadPhasePairEvent, ColliderGraphIndex, ColliderHandle, ContactEvent, ProximityEvent,
ProximityPair, RemovedCollider,
};
use crate::geometry::{ColliderSet, ContactManifold, ContactPair, InteractionGraph};
//#[cfg(feature = "simd-is-enabled")]
//use crate::math::{SimdFloat, SIMD_WIDTH};
use crate::data::pubsub::Subscription;
use crate::ncollide::query::Proximity;
use crate::pipeline::EventHandler;
use std::collections::HashMap;
//use simba::simd::SimdValue;
/// The narrow-phase responsible for computing precise contact information between colliders.
@@ -28,6 +31,7 @@ use crate::pipeline::EventHandler;
pub struct NarrowPhase {
contact_graph: InteractionGraph<ContactPair>,
proximity_graph: InteractionGraph<ProximityPair>,
removed_colliders: Option<Subscription<RemovedCollider>>,
// ball_ball: Vec<usize>, // Workspace: Vec<*mut ContactPair>,
// shape_shape: Vec<usize>, // Workspace: Vec<*mut ContactPair>,
// ball_ball_prox: Vec<usize>, // Workspace: Vec<*mut ProximityPair>,
@@ -42,6 +46,7 @@ impl NarrowPhase {
Self {
contact_graph: InteractionGraph::new(),
proximity_graph: InteractionGraph::new(),
removed_colliders: None,
// ball_ball: Vec::new(),
// shape_shape: Vec::new(),
// ball_ball_prox: Vec::new(),
@@ -73,45 +78,84 @@ impl NarrowPhase {
// &mut self.contact_graph.interactions
// }
pub(crate) fn remove_colliders(
/// Maintain the narrow-phase internal state by taking collider removal into account.
pub fn maintain(&mut self, colliders: &mut ColliderSet, bodies: &mut RigidBodySet) {
// Ensure we already subscribed.
if self.removed_colliders.is_none() {
self.removed_colliders = Some(colliders.removed_colliders.subscribe());
}
let mut cursor = self.removed_colliders.take().unwrap();
// TODO: avoid these hash-maps.
// They are necessary to handle the swap-remove done internally
// by the contact/proximity graphs when a node is removed.
let mut prox_id_remap = HashMap::new();
let mut contact_id_remap = HashMap::new();
for i in 0.. {
if let Some(collider) = colliders.removed_colliders.read_ith(&cursor, i) {
let proximity_graph_id = prox_id_remap
.get(&collider.handle)
.copied()
.unwrap_or(collider.proximity_graph_index);
let contact_graph_id = contact_id_remap
.get(&collider.handle)
.copied()
.unwrap_or(collider.contact_graph_index);
self.remove_collider(
proximity_graph_id,
contact_graph_id,
colliders,
bodies,
&mut prox_id_remap,
&mut contact_id_remap,
);
} else {
break;
}
}
colliders.removed_colliders.ack(&mut cursor);
self.removed_colliders = Some(cursor);
}
pub(crate) fn remove_collider<'a>(
&mut self,
handles: &[ColliderHandle],
proximity_graph_id: ColliderGraphIndex,
contact_graph_id: ColliderGraphIndex,
colliders: &mut ColliderSet,
bodies: &mut RigidBodySet,
prox_id_remap: &mut HashMap<ColliderHandle, ColliderGraphIndex>,
contact_id_remap: &mut HashMap<ColliderHandle, ColliderGraphIndex>,
) {
for handle in handles {
if let Some(collider) = colliders.get(*handle) {
let proximity_graph_id = collider.proximity_graph_index;
let contact_graph_id = collider.contact_graph_index;
// Wake up every body in contact with the deleted collider.
for (a, b, _) in self.contact_graph.interactions_with(contact_graph_id) {
if let Some(parent) = colliders.get(a).map(|c| c.parent) {
bodies.wake_up(parent, true)
}
// Wake up every body in contact with the deleted collider.
for (a, b, _) in self.contact_graph.interactions_with(contact_graph_id) {
if let Some(parent) = colliders.get(a).map(|c| c.parent) {
bodies.wake_up(parent)
}
if let Some(parent) = colliders.get(b).map(|c| c.parent) {
bodies.wake_up(parent, true)
}
}
if let Some(parent) = colliders.get(b).map(|c| c.parent) {
bodies.wake_up(parent)
}
}
// We have to manage the fact that one other collider will
// have its graph index changed because of the node's swap-remove.
if let Some(replacement) = self.proximity_graph.remove_node(proximity_graph_id) {
if let Some(replacement) = colliders.get_mut(replacement) {
replacement.proximity_graph_index = proximity_graph_id;
} else {
prox_id_remap.insert(replacement, proximity_graph_id);
}
}
// We have to manage the fact that one other collider will
// have its graph index changed because of the node's swap-remove.
if let Some(replacement) = self
.proximity_graph
.remove_node(proximity_graph_id)
.and_then(|h| colliders.get_mut(h))
{
replacement.proximity_graph_index = proximity_graph_id;
}
if let Some(replacement) = self
.contact_graph
.remove_node(contact_graph_id)
.and_then(|h| colliders.get_mut(h))
{
replacement.contact_graph_index = contact_graph_id;
}
if let Some(replacement) = self.contact_graph.remove_node(contact_graph_id) {
if let Some(replacement) = colliders.get_mut(replacement) {
replacement.contact_graph_index = contact_graph_id;
} else {
contact_id_remap.insert(replacement, contact_graph_id);
}
}
}
@@ -119,6 +163,7 @@ impl NarrowPhase {
pub(crate) fn register_pairs(
&mut self,
colliders: &mut ColliderSet,
bodies: &mut RigidBodySet,
broad_phase_events: &[BroadPhasePairEvent],
events: &dyn EventHandler,
) {
@@ -218,9 +263,13 @@ impl NarrowPhase {
.contact_graph
.remove_edge(co1.contact_graph_index, co2.contact_graph_index);
// Emit a contact stopped event if we had a proximity before removing the edge.
// Emit a contact stopped event if we had a contact before removing the edge.
// Also wake up the dynamic bodies that were in contact.
if let Some(ctct) = contact_pair {
if ctct.has_any_active_contact() {
bodies.wake_up(co1.parent, true);
bodies.wake_up(co2.parent, true);
events.handle_contact_event(ContactEvent::Stopped(
pair.collider1,
pair.collider2,
@@ -250,8 +299,7 @@ impl NarrowPhase {
let rb1 = &bodies[co1.parent];
let rb2 = &bodies[co2.parent];
if (rb1.is_sleeping() || !rb1.is_dynamic()) && (rb2.is_sleeping() || !rb2.is_dynamic())
{
if (rb1.is_sleeping() || rb1.is_static()) && (rb2.is_sleeping() || rb2.is_static()) {
// No need to update this contact because nothing moved.
return;
}
@@ -359,7 +407,8 @@ impl NarrowPhase {
let rb1 = &bodies[co1.parent];
let rb2 = &bodies[co2.parent];
if (rb1.is_sleeping() || !rb1.is_dynamic()) && (rb2.is_sleeping() || !rb2.is_dynamic())
if ((rb1.is_sleeping() || rb1.is_static()) && (rb2.is_sleeping() || rb2.is_static()))
|| (!rb1.is_dynamic() && !rb2.is_dynamic())
{
// No need to update this contact because nothing moved.
return;

View File

@@ -1,11 +1,11 @@
use crate::geometry::proximity_detector::{
PrimitiveProximityDetectionContext, ProximityDetectionContext,
};
use crate::geometry::{Collider, Proximity, Shape, Trimesh, WAABBHierarchyIntersections};
use crate::geometry::{Collider, Proximity, Shape, Trimesh};
use crate::ncollide::bounding_volume::{BoundingVolume, AABB};
pub struct TrimeshShapeProximityDetectorWorkspace {
interferences: WAABBHierarchyIntersections,
interferences: Vec<usize>,
local_aabb2: AABB<f32>,
old_interferences: Vec<usize>,
}
@@ -13,7 +13,7 @@ pub struct TrimeshShapeProximityDetectorWorkspace {
impl TrimeshShapeProximityDetectorWorkspace {
pub fn new() -> Self {
Self {
interferences: WAABBHierarchyIntersections::new(),
interferences: Vec::new(),
local_aabb2: AABB::new_invalid(),
old_interferences: Vec::new(),
}
@@ -67,19 +67,20 @@ fn do_detect_proximity(
let local_aabb2 = new_local_aabb2; // .loosened(ctxt.prediction_distance * 2.0); // FIXME: what would be the best value?
std::mem::swap(
&mut workspace.old_interferences,
&mut workspace.interferences.computed_interferences_mut(),
&mut workspace.interferences,
);
workspace.interferences.clear();
trimesh1
.waabbs()
.compute_interferences_with(local_aabb2, &mut workspace.interferences);
.intersect_aabb(&local_aabb2, &mut workspace.interferences);
workspace.local_aabb2 = local_aabb2;
}
/*
* Dispatch to the specific solver by keeping the previous manifold if we already had one.
*/
let new_interferences = workspace.interferences.computed_interferences();
let new_interferences = &workspace.interferences;
let mut old_inter_it = workspace.old_interferences.drain(..).peekable();
let mut best_proximity = Proximity::Disjoint;

View File

@@ -1,13 +1,19 @@
use crate::geometry::{Triangle, WAABBHierarchy};
use crate::geometry::{Triangle, WQuadtree};
use crate::math::{Isometry, Point};
use na::Point3;
use ncollide::bounding_volume::{HasBoundingVolume, AABB};
#[cfg(feature = "dim3")]
use {
crate::geometry::{Ray, RayIntersection},
ncollide::query::RayCast,
};
#[derive(Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// A triangle mesh.
pub struct Trimesh {
waabb_tree: WAABBHierarchy,
wquadtree: WQuadtree<usize>,
aabb: AABB<f32>,
vertices: Vec<Point<f32>>,
indices: Vec<Point3<u32>>,
@@ -25,41 +31,24 @@ impl Trimesh {
"A triangle mesh must contain at least one triangle."
);
// z-sort the indices.
// indices.sort_unstable_by(|idx, jdx| {
// let ti = Triangle::new(
// vertices[idx[0] as usize],
// vertices[idx[1] as usize],
// vertices[idx[2] as usize],
// );
// let tj = Triangle::new(
// vertices[jdx[0] as usize],
// vertices[jdx[1] as usize],
// vertices[jdx[2] as usize],
// );
// let center_i = (ti.a.coords + ti.b.coords + ti.c.coords) / 3.0;
// let center_j = (tj.a.coords + tj.b.coords + tj.c.coords) / 3.0;
// crate::geometry::z_cmp_floats(center_i.as_slice(), center_j.as_slice())
// .unwrap_or(std::cmp::Ordering::Equal)
// });
let aabb = AABB::from_points(&vertices);
let data = indices.iter().enumerate().map(|(i, idx)| {
let aabb = Triangle::new(
vertices[idx[0] as usize],
vertices[idx[1] as usize],
vertices[idx[2] as usize],
)
.local_bounding_volume();
(i, aabb)
});
let aabbs: Vec<_> = indices
.iter()
.map(|idx| {
Triangle::new(
vertices[idx[0] as usize],
vertices[idx[1] as usize],
vertices[idx[2] as usize],
)
.local_bounding_volume()
})
.collect();
let waabb_tree = WAABBHierarchy::new(&aabbs);
let mut wquadtree = WQuadtree::new();
// NOTE: we apply no dilation factor because we won't
// update this tree dynamically.
wquadtree.clear_and_rebuild(data, 0.0);
Self {
waabb_tree,
wquadtree,
aabb,
vertices,
indices,
@@ -71,8 +60,8 @@ impl Trimesh {
self.aabb.transform_by(pos)
}
pub(crate) fn waabbs(&self) -> &WAABBHierarchy {
&self.waabb_tree
pub(crate) fn waabbs(&self) -> &WQuadtree<usize> {
&self.wquadtree
}
/// The number of triangles forming this mesh.
@@ -120,3 +109,53 @@ impl Trimesh {
}
}
}
#[cfg(feature = "dim3")]
impl RayCast<f32> for Trimesh {
    /// Computes the closest hit between the ray and the triangles of this mesh.
    ///
    /// Candidate triangles are gathered with the internal quadtree using the ray
    /// expressed in the mesh's local frame; each candidate is then tested in world
    /// space and the hit with the smallest time-of-impact is returned.
    fn toi_and_normal_with_ray(
        &self,
        m: &Isometry<f32>,
        ray: &Ray,
        max_toi: f32,
        solid: bool,
    ) -> Option<RayIntersection> {
        // FIXME: do a best-first search.
        let ls_ray = ray.inverse_transform_by(m);
        let mut candidates = Vec::new();
        self.wquadtree
            .cast_ray(&ls_ray, max_toi, &mut candidates);
        candidates
            .into_iter()
            .filter_map(|i| {
                self.triangle(i)
                    .toi_and_normal_with_ray(m, ray, max_toi, solid)
            })
            .fold(None, |best, hit| match best {
                // Keep the current best unless the new hit is strictly closer.
                Some(curr) if !(curr.toi > hit.toi) => Some(curr),
                _ => Some(hit),
            })
    }

    /// Tests whether the ray touches any triangle of this mesh.
    fn intersects_ray(&self, m: &Isometry<f32>, ray: &Ray, max_toi: f32) -> bool {
        // FIXME: do a best-first search.
        let ls_ray = ray.inverse_transform_by(m);
        let mut candidates = Vec::new();
        self.wquadtree
            .cast_ray(&ls_ray, max_toi, &mut candidates);
        // `any` short-circuits on the first intersecting triangle,
        // matching the early-return of the original loop.
        candidates
            .into_iter()
            .any(|i| self.triangle(i).intersects_ray(m, ray, max_toi))
    }
}

View File

@@ -1,8 +1,27 @@
#[cfg(feature = "serde-serialize")]
use crate::math::DIM;
use crate::math::{Point, SimdBool, SimdFloat, SIMD_WIDTH};
use crate::geometry::Ray;
use crate::math::{Point, Vector, DIM, SIMD_WIDTH};
use crate::utils;
use ncollide::bounding_volume::AABB;
use simba::simd::{SimdPartialOrd, SimdValue};
use num::{One, Zero};
use {
crate::math::{SimdBool, SimdFloat},
simba::simd::{SimdPartialOrd, SimdValue},
};
/// `SIMD_WIDTH` rays laid out lane-wise: one ray per SIMD lane.
#[derive(Debug, Copy, Clone)]
pub(crate) struct WRay {
    pub origin: Point<SimdFloat>,
    pub dir: Vector<SimdFloat>,
}

impl WRay {
    /// Broadcasts a single scalar ray into every SIMD lane.
    pub fn splat(ray: Ray) -> Self {
        let origin = Point::splat(ray.origin);
        let dir = Vector::splat(ray.dir);
        Self { origin, dir }
    }
}
#[derive(Debug, Copy, Clone)]
pub(crate) struct WAABB {
@@ -28,6 +47,7 @@ impl serde::Serialize for WAABB {
.coords
.map(|e| array![|ii| e.extract(ii); SIMD_WIDTH]),
);
let mut waabb = serializer.serialize_struct("WAABB", 2)?;
waabb.serialize_field("mins", &mins)?;
waabb.serialize_field("maxs", &maxs)?;
@@ -73,8 +93,8 @@ impl<'de> serde::Deserialize<'de> for WAABB {
}
impl WAABB {
pub fn new(mins: Point<SimdFloat>, maxs: Point<SimdFloat>) -> Self {
Self { mins, maxs }
pub fn new_invalid() -> Self {
Self::splat(AABB::new_invalid())
}
pub fn splat(aabb: AABB<f32>) -> Self {
@@ -84,8 +104,73 @@ impl WAABB {
}
}
/// Enlarges each lane's AABB by `factor` times its extents, on every side.
pub fn dilate_by_factor(&mut self, factor: SimdFloat) {
    // The margin is proportional to the size of each AABB, lane-wise.
    let margin = (self.maxs - self.mins) * factor;
    self.maxs += margin;
    self.mins -= margin;
}
/// Overwrites the AABB stored in the `i`-th SIMD lane with `aabb`.
pub fn replace(&mut self, i: usize, aabb: AABB<f32>) {
self.mins.replace(i, aabb.mins);
self.maxs.replace(i, aabb.maxs);
}
/// Lane-wise slab test: for each SIMD lane, returns whether that lane's ray
/// hits that lane's AABB for some time-of-impact in `[0, max_toi]`.
pub fn intersects_ray(&self, ray: &WRay, max_toi: SimdFloat) -> SimdBool {
let _0 = SimdFloat::zero();
let _1 = SimdFloat::one();
let _infinity = SimdFloat::splat(f32::MAX);
let mut hit = SimdBool::splat(true);
let mut tmin = SimdFloat::zero();
let mut tmax = max_toi;
// TODO: could this be optimized more considering we really just need a boolean answer?
for i in 0usize..DIM {
// Lanes where the ray direction is zero along axis `i` need a separate
// test: they hit iff the origin already lies between the two slab planes.
let is_not_zero = ray.dir[i].simd_ne(_0);
let is_zero_test =
ray.origin[i].simd_ge(self.mins[i]) & ray.origin[i].simd_le(self.maxs[i]);
let is_not_zero_test = {
let denom = _1 / ray.dir[i];
// For zero-direction lanes, select +/- infinity so the slab interval
// is unbounded and does not constrain tmin/tmax.
let mut inter_with_near_plane =
((self.mins[i] - ray.origin[i]) * denom).select(is_not_zero, -_infinity);
let mut inter_with_far_plane =
((self.maxs[i] - ray.origin[i]) * denom).select(is_not_zero, _infinity);
// Order the two plane hits so that near <= far, lane-wise.
let gt = inter_with_near_plane.simd_gt(inter_with_far_plane);
utils::simd_swap(gt, &mut inter_with_near_plane, &mut inter_with_far_plane);
tmin = tmin.simd_max(inter_with_near_plane);
tmax = tmax.simd_min(inter_with_far_plane);
// The slab intervals accumulated so far must still overlap.
tmin.simd_le(tmax)
};
hit = hit & is_not_zero_test.select(is_not_zero, is_zero_test);
}
hit
}
#[cfg(feature = "dim2")]
pub fn intersects_lanewise(&self, other: &WAABB) -> SimdBool {
pub fn contains(&self, other: &WAABB) -> SimdBool {
self.mins.x.simd_le(other.mins.x)
& self.mins.y.simd_le(other.mins.y)
& self.maxs.x.simd_ge(other.maxs.x)
& self.maxs.y.simd_ge(other.maxs.y)
}
#[cfg(feature = "dim3")]
pub fn contains(&self, other: &WAABB) -> SimdBool {
self.mins.x.simd_le(other.mins.x)
& self.mins.y.simd_le(other.mins.y)
& self.mins.z.simd_le(other.mins.z)
& self.maxs.x.simd_ge(other.maxs.x)
& self.maxs.y.simd_ge(other.maxs.y)
& self.maxs.z.simd_ge(other.maxs.z)
}
#[cfg(feature = "dim2")]
pub fn intersects(&self, other: &WAABB) -> SimdBool {
self.mins.x.simd_le(other.maxs.x)
& other.mins.x.simd_le(self.maxs.x)
& self.mins.y.simd_le(other.maxs.y)
@@ -93,7 +178,7 @@ impl WAABB {
}
#[cfg(feature = "dim3")]
pub fn intersects_lanewise(&self, other: &WAABB) -> SimdBool {
pub fn intersects(&self, other: &WAABB) -> SimdBool {
self.mins.x.simd_le(other.maxs.x)
& other.mins.x.simd_le(self.maxs.x)
& self.mins.y.simd_le(other.maxs.y)
@@ -101,6 +186,13 @@ impl WAABB {
& self.mins.z.simd_le(other.maxs.z)
& other.mins.z.simd_le(self.maxs.z)
}
/// Merges the AABBs of all SIMD lanes into a single scalar AABB enclosing them.
pub fn to_merged_aabb(&self) -> AABB<f32> {
AABB::new(
// Component-wise horizontal reduction across all lanes.
self.mins.coords.map(|e| e.simd_horizontal_min()).into(),
self.maxs.coords.map(|e| e.simd_horizontal_max()).into(),
)
}
}
impl From<[AABB<f32>; SIMD_WIDTH]> for WAABB {

560
src/geometry/wquadtree.rs Normal file
View File

@@ -0,0 +1,560 @@
use crate::geometry::{ColliderHandle, ColliderSet, Ray, AABB};
use crate::geometry::{WRay, WAABB};
use crate::math::Point;
#[cfg(feature = "dim3")]
use crate::math::Vector;
use crate::simd::{SimdFloat, SIMD_WIDTH};
use ncollide::bounding_volume::BoundingVolume;
use simba::simd::{SimdBool, SimdValue};
use std::collections::VecDeque;
use std::ops::Range;
/// Data that can be stored in a quadtree leaf: cheap to copy and identified by an index.
pub trait IndexedData: Copy {
    /// A sentinel value denoting an empty/invalid slot.
    fn default() -> Self;
    /// The index identifying this piece of data.
    fn index(&self) -> usize;
}

impl IndexedData for usize {
    fn index(&self) -> usize {
        *self
    }

    fn default() -> Self {
        // Sentinel matching the `u32::MAX` invalid-child marker used by the tree.
        u32::MAX as usize
    }
}
impl IndexedData for ColliderHandle {
/// The invalid handle marks an empty leaf slot.
fn default() -> Self {
ColliderSet::invalid_handle()
}
/// Uses the arena index part of the handle (the generation is ignored).
fn index(&self) -> usize {
self.into_raw_parts().0
}
}
/// Address of a single AABB inside the tree: a node plus one of its SIMD lanes.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
struct NodeIndex {
    index: u32, // Index of the addressed node in the `nodes` array.
    lane: u8,   // SIMD lane of the addressed node.
}

impl NodeIndex {
    fn new(index: u32, lane: u8) -> Self {
        NodeIndex { index, lane }
    }

    /// A sentinel address referring to no node at all.
    fn invalid() -> Self {
        Self::new(u32::MAX, 0)
    }
}
/// A SIMD node of the quadtree: four AABBs stored and tested at once.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
struct WQuadtreeNode {
waabb: WAABB,
// Indices of the 4 children of this node.
// If this is a leaf, it contains the proxy ids instead.
children: [u32; 4],
parent: NodeIndex,
leaf: bool, // TODO: pack this with the NodeIndex.lane?
dirty: bool, // TODO: move this to a separate bitvec?
}
/// Per-element record mapping a piece of user data to its slot in the tree.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
struct WQuadtreeProxy<T> {
    node: NodeIndex,
    data: T, // The collider data. TODO: only set the collider generation here?
}

impl<T: IndexedData> WQuadtreeProxy<T> {
    /// A proxy referring to no node and holding the sentinel data value.
    fn invalid() -> Self {
        WQuadtreeProxy {
            data: T::default(),
            node: NodeIndex::invalid(),
        }
    }
}
/// A SIMD quadtree: a 4-ary bounding-volume tree whose four child AABBs
/// per node are tested simultaneously with SIMD operations.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct WQuadtree<T> {
nodes: Vec<WQuadtreeNode>,
// Queue of node indices whose AABBs must be recomputed by `update`.
dirty_nodes: VecDeque<u32>,
proxies: Vec<WQuadtreeProxy<T>>,
}
// FIXME: this should be generic too.
impl WQuadtree<ColliderHandle> {
/// Marks the leaf containing `data` as dirty so its AABB is refit by the next `update`.
pub fn pre_update(&mut self, data: ColliderHandle) {
let id = data.into_raw_parts().0;
let node_id = self.proxies[id].node.index;
let node = &mut self.nodes[node_id as usize];
// Only enqueue each node once per update cycle.
if !node.dirty {
node.dirty = true;
self.dirty_nodes.push_back(node_id);
}
}
/// Refits the AABBs of all dirty nodes, propagating changes toward the root
/// whenever a node's dilated AABB no longer contains the recomputed one.
pub fn update(&mut self, colliders: &ColliderSet, dilation_factor: f32) {
// Loop on the dirty leaves.
let dilation_factor = SimdFloat::splat(dilation_factor);
while let Some(id) = self.dirty_nodes.pop_front() {
// NOTE: `get` returns `None` once we walk past the root of the tree
// (the root's parent index is the invalid sentinel).
if let Some(node) = self.nodes.get(id as usize) {
// Compute the new WAABB.
let mut new_aabbs = [AABB::new_invalid(); SIMD_WIDTH];
for (child_id, new_aabb) in node.children.iter().zip(new_aabbs.iter_mut()) {
if node.leaf {
// We are in a leaf: compute the colliders' AABBs.
if let Some(proxy) = self.proxies.get(*child_id as usize) {
let collider = &colliders[proxy.data];
*new_aabb = collider.compute_aabb();
}
} else {
// We are in an internal node: compute the children's AABBs.
if let Some(node) = self.nodes.get(*child_id as usize) {
*new_aabb = node.waabb.to_merged_aabb();
}
}
}
let node = &mut self.nodes[id as usize];
let new_waabb = WAABB::from(new_aabbs);
// If the stored (dilated) AABBs still contain the recomputed ones,
// ancestors are unaffected and propagation can stop here.
if !node.waabb.contains(&new_waabb).all() {
node.waabb = new_waabb;
node.waabb.dilate_by_factor(dilation_factor);
self.dirty_nodes.push_back(node.parent.index);
}
node.dirty = false;
}
}
}
}
impl<T: IndexedData> WQuadtree<T> {
/// Creates an empty quadtree.
pub fn new() -> Self {
WQuadtree {
nodes: Vec::new(),
dirty_nodes: VecDeque::new(),
proxies: Vec::new(),
}
}
/// Clears the tree and rebuilds it from scratch from the given `(data, aabb)` pairs.
///
/// Each AABB is dilated by `dilation_factor` so subsequent small movements
/// do not immediately require a refit.
pub fn clear_and_rebuild(
&mut self,
data: impl ExactSizeIterator<Item = (T, AABB)>,
dilation_factor: f32,
) {
self.nodes.clear();
self.proxies.clear();
// Create proxies.
let mut indices = Vec::with_capacity(data.len());
let mut aabbs = vec![AABB::new_invalid(); data.len()];
self.proxies = vec![WQuadtreeProxy::invalid(); data.len()];
for (data, aabb) in data {
// Proxies are addressed by the data's own index, so grow the
// storage if an index exceeds the iterator's length hint.
let index = data.index();
if index >= self.proxies.len() {
self.proxies.resize(index + 1, WQuadtreeProxy::invalid());
aabbs.resize(index + 1, AABB::new_invalid());
}
self.proxies[index].data = data;
aabbs[index] = aabb;
indices.push(index);
}
// Build the tree recursively.
// The root reserves lane 0 for the whole tree; its single real child is
// node 1 (the first node pushed by `do_recurse_build`).
let root_node = WQuadtreeNode {
waabb: WAABB::new_invalid(),
children: [1, u32::MAX, u32::MAX, u32::MAX],
parent: NodeIndex::invalid(),
leaf: false,
dirty: false,
};
self.nodes.push(root_node);
let root_id = NodeIndex::new(0, 0);
let (_, aabb) = self.do_recurse_build(&mut indices, &aabbs, root_id, dilation_factor);
self.nodes[0].waabb = WAABB::from([
aabb,
AABB::new_invalid(),
AABB::new_invalid(),
AABB::new_invalid(),
]);
}
/// Recursively builds the subtree containing the proxies listed in `indices`.
///
/// Returns the index of the subtree's root node and the (un-dilated) AABB
/// enclosing all of its content.
fn do_recurse_build(
&mut self,
indices: &mut [usize],
aabbs: &[AABB],
parent: NodeIndex,
dilation_factor: f32,
) -> (u32, AABB) {
if indices.len() <= 4 {
// Leaf case.
let my_id = self.nodes.len();
let mut my_aabb = AABB::new_invalid();
let mut leaf_aabbs = [AABB::new_invalid(); 4];
let mut proxy_ids = [u32::MAX; 4];
for (k, id) in indices.iter().enumerate() {
my_aabb.merge(&aabbs[*id]);
leaf_aabbs[k] = aabbs[*id];
proxy_ids[k] = *id as u32;
// Record where each proxy ended up so it can be dirtied later.
self.proxies[*id].node = NodeIndex::new(my_id as u32, k as u8);
}
let mut node = WQuadtreeNode {
waabb: WAABB::from(leaf_aabbs),
children: proxy_ids,
parent,
leaf: true,
dirty: false,
};
node.waabb
.dilate_by_factor(SimdFloat::splat(dilation_factor));
self.nodes.push(node);
return (my_id as u32, my_aabb);
}
// Compute the center and variance along each dimension.
// In 3D we compute the variance to not-subdivide the dimension with lowest variance.
// Therefore variance computation is not needed in 2D because we only have 2 dimension
// to split in the first place.
let mut center = Point::origin();
#[cfg(feature = "dim3")]
let mut variance = Vector::zeros();
let denom = 1.0 / (indices.len() as f32);
for i in &*indices {
let coords = aabbs[*i].center().coords;
center += coords * denom;
#[cfg(feature = "dim3")]
{
variance += coords.component_mul(&coords) * denom;
}
}
#[cfg(feature = "dim3")]
{
// E[X^2] - E[X]^2, computed component-wise.
variance = variance - center.coords.component_mul(&center.coords);
}
// Find the axis with minimum variance. This is the axis along
// which we are **not** subdividing our set.
#[allow(unused_mut)] // Does not need to be mutable in 2D.
let mut subdiv_dims = [0, 1];
#[cfg(feature = "dim3")]
{
let min = variance.imin();
subdiv_dims[0] = (min + 1) % 3;
subdiv_dims[1] = (min + 2) % 3;
}
// Split the set along the two subdiv_dims dimensions.
// TODO: should we split wrt. the median instead of the average?
// TODO: we should ensure each subslice contains at least 4 elements each (or less if
// indices has less than 16 elements in the first place.
let (left, right) = split_indices_wrt_dim(indices, &aabbs, &center, subdiv_dims[0]);
let (left_bottom, left_top) = split_indices_wrt_dim(left, &aabbs, &center, subdiv_dims[1]);
let (right_bottom, right_top) =
split_indices_wrt_dim(right, &aabbs, &center, subdiv_dims[1]);
// println!(
// "Recursing on children: {}, {}, {}, {}",
// left_bottom.len(),
// left_top.len(),
// right_bottom.len(),
// right_top.len()
// );
let node = WQuadtreeNode {
waabb: WAABB::new_invalid(),
children: [0; 4], // Will be set after the recursive call
parent,
leaf: false,
dirty: false,
};
let id = self.nodes.len() as u32;
self.nodes.push(node);
// Recurse!
let a = self.do_recurse_build(left_bottom, aabbs, NodeIndex::new(id, 0), dilation_factor);
let b = self.do_recurse_build(left_top, aabbs, NodeIndex::new(id, 1), dilation_factor);
let c = self.do_recurse_build(right_bottom, aabbs, NodeIndex::new(id, 2), dilation_factor);
let d = self.do_recurse_build(right_top, aabbs, NodeIndex::new(id, 3), dilation_factor);
// Now we know the indices of the grand-nodes.
self.nodes[id as usize].children = [a.0, b.0, c.0, d.0];
self.nodes[id as usize].waabb = WAABB::from([a.1, b.1, c.1, d.1]);
self.nodes[id as usize]
.waabb
.dilate_by_factor(SimdFloat::splat(dilation_factor));
// TODO: will this chain of .merged be properly optimized?
let my_aabb = a.1.merged(&b.1).merged(&c.1).merged(&d.1);
(id, my_aabb)
}
// FIXME: implement a visitor pattern to merge intersect_aabb
// and intersect_ray into a single method.
/// Collects into `out` the data of every leaf element whose AABB intersects `aabb`.
pub fn intersect_aabb(&self, aabb: &AABB, out: &mut Vec<T>) {
    if self.nodes.is_empty() {
        return;
    }
    // Special case for the root.
    let mut stack = vec![0u32];
    let waabb = WAABB::splat(*aabb);
    while let Some(inode) = stack.pop() {
        let node = self.nodes[inode as usize];
        let intersections = node.waabb.intersects(&waabb);
        let bitmask = intersections.bitmask();
        for ii in 0..SIMD_WIDTH {
            if (bitmask & (1 << ii)) != 0 {
                if node.leaf {
                    // We found a leaf!
                    // Unfortunately, invalid AABBs return an intersection as well,
                    // so filter out the `u32::MAX` invalid-child markers with `get`.
                    if let Some(proxy) = self.proxies.get(node.children[ii] as usize) {
                        out.push(proxy.data);
                    }
                } else {
                    // Internal node, visit the child.
                    // Unfortunately, we have this check because invalid AABBs
                    // return an intersection as well. NOTE: the bound must be
                    // strict (`<`): `self.nodes.len()` itself is out of range
                    // and would panic when popped from the stack.
                    if (node.children[ii] as usize) < self.nodes.len() {
                        stack.push(node.children[ii]);
                    }
                }
            }
        }
    }
}
/// Collects into `out` the data of every leaf element whose AABB is hit by
/// `ray` for a time-of-impact in `[0, max_toi]`.
pub fn cast_ray(&self, ray: &Ray, max_toi: f32, out: &mut Vec<T>) {
    if self.nodes.is_empty() {
        return;
    }
    // Special case for the root.
    let mut stack = vec![0u32];
    let wray = WRay::splat(*ray);
    let wmax_toi = SimdFloat::splat(max_toi);
    while let Some(inode) = stack.pop() {
        let node = self.nodes[inode as usize];
        let hits = node.waabb.intersects_ray(&wray, wmax_toi);
        let bitmask = hits.bitmask();
        for ii in 0..SIMD_WIDTH {
            if (bitmask & (1 << ii)) != 0 {
                if node.leaf {
                    // We found a leaf!
                    // Unfortunately, invalid AABBs return a hit as well, so
                    // filter out the `u32::MAX` invalid-child markers with `get`.
                    if let Some(proxy) = self.proxies.get(node.children[ii] as usize) {
                        out.push(proxy.data);
                    }
                } else {
                    // Internal node, visit the child.
                    // Unfortunately, we have this check because invalid AABBs
                    // return a hit as well. NOTE: the bound must be strict
                    // (`<`): `self.nodes.len()` itself is out of range and
                    // would panic when popped from the stack.
                    if (node.children[ii] as usize) < self.nodes.len() {
                        stack.push(node.children[ii]);
                    }
                }
            }
        }
    }
}
}
/// One pending insertion step: the sub-range of `indices` that must become a
/// subtree, and the parent node/lane that subtree will be attached to.
#[allow(dead_code)]
struct WQuadtreeIncrementalBuilderStep {
range: Range<usize>,
parent: NodeIndex,
}
/// Incremental (one node per call) variant of the quadtree construction.
#[allow(dead_code)]
struct WQuadtreeIncrementalBuilder<T> {
quadtree: WQuadtree<T>,
to_insert: Vec<WQuadtreeIncrementalBuilderStep>,
aabbs: Vec<AABB>,
indices: Vec<usize>,
}
#[allow(dead_code)]
impl<T: IndexedData> WQuadtreeIncrementalBuilder<T> {
/// Creates an empty incremental builder.
pub fn new() -> Self {
Self {
quadtree: WQuadtree::new(),
to_insert: Vec::new(),
aabbs: Vec::new(),
indices: Vec::new(),
}
}
/// Pops one pending range and inserts it into the tree: either as a leaf
/// (4 elements or fewer) or as an internal node whose four sub-ranges are
/// queued for later calls.
pub fn update_single_depth(&mut self) {
    if let Some(to_insert) = self.to_insert.pop() {
        // Remember the absolute start of this range: the sub-ranges queued
        // below are derived from sub-slice lengths (relative coordinates)
        // and must be shifted back into `self.indices` coordinates.
        let offset = to_insert.range.start;
        let indices = &mut self.indices[to_insert.range.clone()];
        // Leaf case.
        if indices.len() <= 4 {
            let id = self.quadtree.nodes.len();
            let mut aabb = AABB::new_invalid();
            let mut leaf_aabbs = [AABB::new_invalid(); 4];
            let mut proxy_ids = [u32::MAX; 4];
            for (k, id) in indices.iter().enumerate() {
                aabb.merge(&self.aabbs[*id]);
                leaf_aabbs[k] = self.aabbs[*id];
                proxy_ids[k] = *id as u32;
            }
            let node = WQuadtreeNode {
                waabb: WAABB::from(leaf_aabbs),
                children: proxy_ids,
                parent: to_insert.parent,
                leaf: true,
                dirty: false,
            };
            // Hook the new leaf into its parent's lane.
            self.quadtree.nodes[to_insert.parent.index as usize].children
                [to_insert.parent.lane as usize] = id as u32;
            self.quadtree.nodes[to_insert.parent.index as usize]
                .waabb
                .replace(to_insert.parent.lane as usize, aabb);
            self.quadtree.nodes.push(node);
            return;
        }
        // Compute the center and variance along each dimension.
        // In 3D we compute the variance to not-subdivide the dimension with lowest variance.
        // Therefore variance computation is not needed in 2D because we only have 2 dimension
        // to split in the first place.
        let mut center = Point::origin();
        #[cfg(feature = "dim3")]
        let mut variance = Vector::zeros();
        let denom = 1.0 / (indices.len() as f32);
        let mut aabb = AABB::new_invalid();
        for i in &*indices {
            let coords = self.aabbs[*i].center().coords;
            aabb.merge(&self.aabbs[*i]);
            center += coords * denom;
            #[cfg(feature = "dim3")]
            {
                variance += coords.component_mul(&coords) * denom;
            }
        }
        #[cfg(feature = "dim3")]
        {
            // E[X^2] - E[X]^2, computed component-wise.
            variance = variance - center.coords.component_mul(&center.coords);
        }
        // Find the axis with minimum variance. This is the axis along
        // which we are **not** subdividing our set.
        #[allow(unused_mut)] // Does not need to be mutable in 2D.
        let mut subdiv_dims = [0, 1];
        #[cfg(feature = "dim3")]
        {
            let min = variance.imin();
            subdiv_dims[0] = (min + 1) % 3;
            subdiv_dims[1] = (min + 2) % 3;
        }
        // Split the set along the two subdiv_dims dimensions.
        // TODO: should we split wrt. the median instead of the average?
        // TODO: we should ensure each subslice contains at least 4 elements each (or less if
        // indices has less than 16 elements in the first place.
        let (left, right) =
            split_indices_wrt_dim(indices, &self.aabbs, &center, subdiv_dims[0]);
        let (left_bottom, left_top) =
            split_indices_wrt_dim(left, &self.aabbs, &center, subdiv_dims[1]);
        let (right_bottom, right_top) =
            split_indices_wrt_dim(right, &self.aabbs, &center, subdiv_dims[1]);
        let node = WQuadtreeNode {
            waabb: WAABB::new_invalid(),
            children: [0; 4], // Will be set by the deferred child insertions.
            parent: to_insert.parent,
            leaf: false,
            dirty: false,
        };
        let id = self.quadtree.nodes.len() as u32;
        self.quadtree.nodes.push(node);
        // Queue the four sub-ranges. NOTE: the boundaries below are computed
        // from sub-slice lengths, i.e. relative to `indices`; they must be
        // shifted by `offset` so they index `self.indices` correctly when
        // processed later (the previous version queued them unshifted, which
        // addressed the wrong elements for every non-root split).
        let a = left_bottom.len();
        let b = a + left_top.len();
        let c = b + right_bottom.len();
        let d = c + right_top.len();
        self.to_insert.push(WQuadtreeIncrementalBuilderStep {
            range: offset..offset + a,
            parent: NodeIndex::new(id, 0),
        });
        self.to_insert.push(WQuadtreeIncrementalBuilderStep {
            range: offset + a..offset + b,
            parent: NodeIndex::new(id, 1),
        });
        self.to_insert.push(WQuadtreeIncrementalBuilderStep {
            range: offset + b..offset + c,
            parent: NodeIndex::new(id, 2),
        });
        self.to_insert.push(WQuadtreeIncrementalBuilderStep {
            range: offset + c..offset + d,
            parent: NodeIndex::new(id, 3),
        });
        self.quadtree.nodes[to_insert.parent.index as usize].children
            [to_insert.parent.lane as usize] = id as u32;
        self.quadtree.nodes[to_insert.parent.index as usize]
            .waabb
            .replace(to_insert.parent.lane as usize, aabb);
    }
}
}
/// Partitions `indices` in place around `split_point` along dimension `dim`.
///
/// Returns `(left, right)` where `left` holds the indices of AABBs whose
/// center is `<= split_point[dim]` and `right` those strictly greater.
/// The relative order of elements within each half is unspecified.
fn split_indices_wrt_dim<'a>(
    indices: &'a mut [usize],
    aabbs: &[AABB],
    split_point: &Point<f32>,
    dim: usize,
) -> (&'a mut [usize], &'a mut [usize]) {
    // Two-pointer partition. Using `ilast = len` (one past the end) instead of
    // `len - 1` avoids the integer underflow of the previous version, which
    // panicked in debug builds on an empty slice or when every element fell
    // on the right side of the split.
    let mut icurr = 0;
    let mut ilast = indices.len();
    while icurr < ilast {
        let center = aabbs[indices[icurr]].center();
        if center[dim] > split_point[dim] {
            // Move this element to the right partition and shrink it;
            // the swapped-in element is examined on the next iteration.
            ilast -= 1;
            indices.swap(icurr, ilast);
        } else {
            icurr += 1;
        }
    }
    indices.split_at_mut(icurr)
}