Merge pull request #157 from dimforge/ccd

Implement Continuous Collision Detection
This commit is contained in:
Sébastien Crozet
2021-04-01 11:00:27 +02:00
committed by GitHub
69 changed files with 4644 additions and 1437 deletions

View File

@@ -19,10 +19,10 @@ members = [ "build/rapier2d", "build/rapier2d-f64", "build/rapier_testbed2d", "e
#kiss3d = { git = "https://github.com/sebcrozet/kiss3d" }
#nalgebra = { git = "https://github.com/dimforge/nalgebra", branch = "dev" }
#parry2d = { git = "https://github.com/dimforge/parry" }
#parry3d = { git = "https://github.com/dimforge/parry" }
#parry2d-f64 = { git = "https://github.com/dimforge/parry" }
#parry3d-f64 = { git = "https://github.com/dimforge/parry" }
parry2d = { git = "https://github.com/dimforge/parry", branch = "special_cases" }
parry3d = { git = "https://github.com/dimforge/parry", branch = "special_cases" }
parry2d-f64 = { git = "https://github.com/dimforge/parry", branch = "special_cases" }
parry3d-f64 = { git = "https://github.com/dimforge/parry", branch = "special_cases" }
#ncollide2d = { git = "https://github.com/dimforge/ncollide" }
#ncollide3d = { git = "https://github.com/dimforge/ncollide" }
#nphysics2d = { git = "https://github.com/dimforge/nphysics" }

View File

@@ -13,6 +13,7 @@ use std::cmp::Ordering;
mod balls3;
mod boxes3;
mod capsules3;
mod ccd3;
mod compound3;
mod convex_polyhedron3;
mod heightfield3;
@@ -52,6 +53,7 @@ pub fn main() {
("Balls", balls3::init_world),
("Boxes", boxes3::init_world),
("Capsules", capsules3::init_world),
("CCD", ccd3::init_world),
("Compound", compound3::init_world),
("Convex polyhedron", convex_polyhedron3::init_world),
("Heightfield", heightfield3::init_world),

85
benchmarks3d/ccd3.rs Normal file
View File

@@ -0,0 +1,85 @@
use na::Point3;
use rapier3d::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
use rapier3d::geometry::{ColliderBuilder, ColliderSet};
use rapier_testbed3d::Testbed;
/// Benchmark scene: layers of very fast falling bodies (cuboids, balls and
/// capsules) with CCD enabled, dropped onto a large flat static ground.
pub fn init_world(testbed: &mut Testbed) {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let joints = JointSet::new();

    /*
     * Ground
     */
    let ground_size = 100.1;
    let ground_height = 0.1;
    let ground_body = RigidBodyBuilder::new_static()
        .translation(0.0, -ground_height, 0.0)
        .build();
    let ground_handle = bodies.insert(ground_body);
    let ground_shape = ColliderBuilder::cuboid(ground_size, ground_height, ground_size).build();
    colliders.insert(ground_shape, ground_handle, &mut bodies);

    /*
     * Create the falling bodies.
     */
    let num = 4usize;
    let num_layers = 20usize;
    let rad = 1.0;

    // Identical spacing along every axis.
    let spacing = rad * 2.0 + rad;
    let centerx = spacing * (num / 2) as f32;
    let centery = spacing / 2.0;
    let centerz = spacing * (num / 2) as f32;

    let mut offset = -(num as f32) * (rad * 2.0 + rad) * 0.5;

    for layer in 0..num_layers {
        for i in 0..num {
            for k in 0..num {
                let x = i as f32 * spacing - centerx + offset;
                let y = layer as f32 * spacing + centery + 3.0;
                let z = k as f32 * spacing - centerz + offset;

                // Every body is shot straight down very fast with CCD on, so
                // it would tunnel through the ground without motion clamping.
                let body = RigidBodyBuilder::new_dynamic()
                    .translation(x, y, z)
                    .linvel(0.0, -1000.0, 0.0)
                    .ccd_enabled(true)
                    .build();
                let handle = bodies.insert(body);

                // Alternate the collider shape from one layer to the next.
                // Rounded cylinders are much more efficient than cylinders,
                // even if the rounding margin is small:
                // 2 => ColliderBuilder::round_cylinder(rad, rad, rad / 10.0),
                // 3 => ColliderBuilder::cone(rad, rad),
                let shape = match layer % 5 {
                    0 => ColliderBuilder::cuboid(rad, rad, rad),
                    1 => ColliderBuilder::ball(rad),
                    _ => ColliderBuilder::capsule_y(rad, rad),
                };
                colliders.insert(shape.build(), handle, &mut bodies);
            }
        }

        // Slight horizontal drift per layer so bodies don't stack perfectly.
        offset -= 0.05 * rad * (num as f32 - 1.0);
    }

    /*
     * Set up the testbed.
     */
    testbed.set_world(bodies, colliders, joints);
    testbed.look_at(Point3::new(100.0, 100.0, 100.0), Point3::origin());
}
/// Standalone entry point running only this benchmark.
fn main() {
    // The label previously said "Boxes" (copy-paste leftover from the boxes
    // benchmark); this scene is the CCD benchmark.
    let testbed = Testbed::from_builders(0, vec![("CCD", init_world)]);
    testbed.run()
}

View File

@@ -29,6 +29,10 @@ wasm-bindgen = [ "instant/wasm-bindgen" ]
serde-serialize = [ "nalgebra/serde-serialize", "parry2d-f64/serde-serialize", "serde", "bit-vec/serde", "arrayvec/serde" ]
enhanced-determinism = [ "simba/libm_force", "parry2d-f64/enhanced-determinism", "indexmap" ]
# Feature used for development and debugging only.
# Do not enable this unless you are working on the engine internals.
dev-remove-slow-accessors = []
[lib]
name = "rapier2d_f64"
path = "../../src/lib.rs"

View File

@@ -29,6 +29,10 @@ wasm-bindgen = [ "instant/wasm-bindgen" ]
serde-serialize = [ "nalgebra/serde-serialize", "parry2d/serde-serialize", "serde", "bit-vec/serde", "arrayvec/serde" ]
enhanced-determinism = [ "simba/libm_force", "parry2d/enhanced-determinism", "indexmap" ]
# Feature used for development and debugging only.
# Do not enable this unless you are working on the engine internals.
dev-remove-slow-accessors = []
[lib]
name = "rapier2d"
path = "../../src/lib.rs"

View File

@@ -29,6 +29,10 @@ wasm-bindgen = [ "instant/wasm-bindgen" ]
serde-serialize = [ "nalgebra/serde-serialize", "parry3d-f64/serde-serialize", "serde", "bit-vec/serde" ]
enhanced-determinism = [ "simba/libm_force", "parry3d-f64/enhanced-determinism" ]
# Feature used for development and debugging only.
# Do not enable this unless you are working on the engine internals.
dev-remove-slow-accessors = []
[lib]
name = "rapier3d_f64"
path = "../../src/lib.rs"

View File

@@ -29,6 +29,10 @@ wasm-bindgen = [ "instant/wasm-bindgen" ]
serde-serialize = [ "nalgebra/serde-serialize", "parry3d/serde-serialize", "serde", "bit-vec/serde" ]
enhanced-determinism = [ "simba/libm_force", "parry3d/enhanced-determinism" ]
# Feature used for development and debugging only.
# Do not enable this unless you are working on the engine internals.
dev-remove-slow-accessors = []
[lib]
name = "rapier3d"
path = "../../src/lib.rs"

View File

@@ -3,6 +3,7 @@ name = "rapier-examples-2d"
version = "0.1.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
default-run = "all_examples2"
[features]
parallel = [ "rapier2d/parallel", "rapier_testbed2d/parallel" ]
@@ -26,4 +27,4 @@ path = "../build/rapier2d"
[[bin]]
name = "all_examples2"
path = "./all_examples2.rs"
path = "./all_examples2.rs"

View File

@@ -11,6 +11,7 @@ use rapier_testbed2d::Testbed;
use std::cmp::Ordering;
mod add_remove2;
mod ccd2;
mod collision_groups2;
mod convex_polygons2;
mod damping2;
@@ -60,6 +61,7 @@ pub fn main() {
let mut builders: Vec<(_, fn(&mut Testbed))> = vec![
("Add remove", add_remove2::init_world),
("CCD", ccd2::init_world),
("Collision groups", collision_groups2::init_world),
("Convex polygons", convex_polygons2::init_world),
("Damping", damping2::init_world),

124
examples2d/ccd2.rs Normal file
View File

@@ -0,0 +1,124 @@
use na::{Isometry2, Point2, Point3};
use rapier2d::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
use rapier2d::geometry::{ColliderBuilder, ColliderSet, SharedShape};
use rapier_testbed2d::Testbed;
/// 2D CCD demo: compound bodies (three thin cuboids forming a bracket) are
/// launched at very high horizontal speed between walls, with a sensor wall
/// that recolors bodies through intersection events.
pub fn init_world(testbed: &mut Testbed) {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let joints = JointSet::new();
/*
* Ground
*/
let ground_size = 25.0;
let ground_thickness = 0.1;
// The ground body is static but still has CCD enabled so that fast bodies
// cannot tunnel through its thin colliders.
let rigid_body = RigidBodyBuilder::new_static().ccd_enabled(true).build();
let ground_handle = bodies.insert(rigid_body);
// Horizontal floor.
let collider = ColliderBuilder::cuboid(ground_size, ground_thickness).build();
colliders.insert(collider, ground_handle, &mut bodies);
// Two solid vertical walls, at x = -3 and x = 6.
let collider = ColliderBuilder::cuboid(ground_thickness, ground_size)
.translation(-3.0, 0.0)
.build();
colliders.insert(collider, ground_handle, &mut bodies);
let collider = ColliderBuilder::cuboid(ground_thickness, ground_size)
.translation(6.0, 0.0)
.build();
colliders.insert(collider, ground_handle, &mut bodies);
// A vertical sensor wall at x = 2.5: it generates intersection events
// instead of contact forces.
let collider = ColliderBuilder::cuboid(ground_thickness, ground_size)
.translation(2.5, 0.0)
.sensor(true)
.build();
let sensor_handle = colliders.insert(collider, ground_handle, &mut bodies);
/*
* Create the shapes
*/
let radx = 0.4;
let rady = 0.05;
// Poses of the three thin cuboids inside the compound shape: one horizontal
// part on top and two vertical parts on the left/right sides.
let delta1 = Isometry2::translation(0.0, radx - rady);
let delta2 = Isometry2::translation(-radx + rady, 0.0);
let delta3 = Isometry2::translation(radx - rady, 0.0);
let mut compound_parts = Vec::new();
let vertical = SharedShape::cuboid(rady, radx);
let horizontal = SharedShape::cuboid(radx, rady);
compound_parts.push((delta1, horizontal));
compound_parts.push((delta2, vertical.clone()));
compound_parts.push((delta3, vertical));
let compound_shape = SharedShape::compound(compound_parts);
let num = 6;
let shift = (radx + 0.01) * 2.0;
let centerx = shift * num as f32 / 2.0 - 0.5;
let centery = shift / 2.0 + 4.0;
// num × num grid of fast-moving, CCD-enabled compound bodies.
for i in 0usize..num {
for j in 0..num {
let x = i as f32 * shift - centerx;
let y = j as f32 * shift + centery;
// Build the rigid body.
let rigid_body = RigidBodyBuilder::new_dynamic()
.translation(x, y)
.linvel(100.0, -10.0)
.ccd_enabled(true)
.build();
let handle = bodies.insert(rigid_body);
// Alternative kept for debugging: insert each compound part as its own collider.
// for part in &compound_parts {
// let collider = ColliderBuilder::new(part.1.clone())
// .position_wrt_parent(part.0)
// .build();
// colliders.insert(collider, handle, &mut bodies);
// }
let collider = ColliderBuilder::new(compound_shape.clone()).build();
// let collider = ColliderBuilder::cuboid(radx, rady).build();
colliders.insert(collider, handle, &mut bodies);
}
}
// Callback that will be executed on the main loop to handle proximities.
testbed.add_callback(move |_, mut graphics, physics, events, _| {
while let Ok(prox) = events.intersection_events.try_recv() {
// Yellow while a body intersects the sensor, blue-ish once it leaves.
let color = if prox.intersecting {
Point3::new(1.0, 1.0, 0.0)
} else {
Point3::new(0.5, 0.5, 1.0)
};
let parent_handle1 = physics.colliders.get(prox.collider1).unwrap().parent();
let parent_handle2 = physics.colliders.get(prox.collider2).unwrap().parent();
if let Some(graphics) = &mut graphics {
// Never recolor the ground body or the sensor wall itself.
if parent_handle1 != ground_handle && prox.collider1 != sensor_handle {
graphics.set_body_color(parent_handle1, color);
}
if parent_handle2 != ground_handle && prox.collider2 != sensor_handle {
graphics.set_body_color(parent_handle2, color);
}
}
}
});
/*
* Set up the testbed.
*/
testbed.set_world(bodies, colliders, joints);
testbed.look_at(Point2::new(0.0, 2.5), 20.0);
}
/// Standalone entry point running only this demo.
fn main() {
    // The label previously said "Balls" (copy-paste leftover from the balls
    // demo); this file is the 2D CCD demo.
    let testbed = Testbed::from_builders(0, vec![("CCD", init_world)]);
    testbed.run()
}

View File

@@ -3,6 +3,7 @@ name = "rapier-examples-3d"
version = "0.1.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
default-run = "all_examples3"
[features]
parallel = [ "rapier3d/parallel", "rapier_testbed3d/parallel" ]

View File

@@ -10,12 +10,14 @@ use inflector::Inflector;
use rapier_testbed3d::Testbed;
use std::cmp::Ordering;
mod ccd3;
mod collision_groups3;
mod compound3;
mod convex_decomposition3;
mod convex_polyhedron3;
mod damping3;
mod debug_add_remove_collider3;
mod debug_big_colliders3;
mod debug_boxes3;
mod debug_cylinder3;
mod debug_dynamic_collider_add3;
@@ -76,6 +78,7 @@ pub fn main() {
let mut builders: Vec<(_, fn(&mut Testbed))> = vec![
("Fountain", fountain3::init_world),
("Primitives", primitives3::init_world),
("CCD", ccd3::init_world),
("Collision groups", collision_groups3::init_world),
("Compound", compound3::init_world),
("Convex decomposition", convex_decomposition3::init_world),
@@ -95,6 +98,7 @@ pub fn main() {
"(Debug) add/rm collider",
debug_add_remove_collider3::init_world,
),
("(Debug) big colliders", debug_big_colliders3::init_world),
("(Debug) boxes", debug_boxes3::init_world),
(
"(Debug) dyn. coll. add",

145
examples3d/ccd3.rs Normal file
View File

@@ -0,0 +1,145 @@
use na::{Point3, Vector3};
use rapier3d::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
use rapier3d::geometry::{ColliderBuilder, ColliderSet};
use rapier_testbed3d::Testbed;
/// Stacks a triangular wall (pyramid cross-section) of dynamic cuboids.
///
/// `offset` places the wall in the world, `stack_height` is the number of
/// rows, and `half_extents` gives each brick's half-sizes. Every brick is
/// tinted light purple so recoloring by callbacks is easy to spot.
fn create_wall(
    testbed: &mut Testbed,
    bodies: &mut RigidBodySet,
    colliders: &mut ColliderSet,
    offset: Vector3<f32>,
    stack_height: usize,
    half_extents: Vector3<f32>,
) {
    let shift = half_extents * 2.0;
    for i in 0..stack_height {
        for j in i..stack_height {
            let (fi, fj) = (i as f32, j as f32);
            let x = offset.x;
            let y = fi * shift.y + offset.y;
            // Each row is shifted by half a brick so the wall tapers; the
            // whole wall is recentered along z.
            let z = fi * shift.z / 2.0 + (fj - fi) * shift.z + offset.z
                - stack_height as f32 * half_extents.z;

            // One dynamic cuboid per cell of the wall.
            let body = RigidBodyBuilder::new_dynamic().translation(x, y, z).build();
            let handle = bodies.insert(body);
            let brick =
                ColliderBuilder::cuboid(half_extents.x, half_extents.y, half_extents.z).build();
            colliders.insert(brick, handle, bodies);
            testbed.set_body_color(handle, Point3::new(218. / 255., 201. / 255., 1.0));
        }
    }
}
/// 3D CCD demo: two very fast balls (one sensor, one solid) are shot through
/// rows of cuboid walls; intersection events recolor the bodies hit by the
/// sensor ball.
pub fn init_world(testbed: &mut Testbed) {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let joints = JointSet::new();
/*
* Ground
*/
let ground_size = 50.0;
let ground_height = 0.1;
let rigid_body = RigidBodyBuilder::new_static()
.translation(0.0, -ground_height, 0.0)
.build();
let ground_handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size, ground_height, ground_size).build();
colliders.insert(collider, ground_handle, &mut bodies);
/*
* Create the pyramids.
*/
let num_z = 8;
let num_x = 5;
let shift_y = ground_height + 0.5;
let shift_z = (num_z as f32 + 2.0) * 2.0;
// Two rows of walls, one for each projectile, spaced 6.0 apart along x.
for i in 0..num_x {
let x = i as f32 * 6.0;
create_wall(
testbed,
&mut bodies,
&mut colliders,
Vector3::new(x, shift_y, 0.0),
num_z,
Vector3::new(0.5, 0.5, 1.0),
);
create_wall(
testbed,
&mut bodies,
&mut colliders,
Vector3::new(x, shift_y, shift_z),
num_z,
Vector3::new(0.5, 0.5, 1.0),
);
}
/*
* Create two very fast rigid-bodies.
* The first one has CCD enabled and a sensor collider attached to it.
* The second one has CCD enabled and a collider attached to it.
*/
let collider = ColliderBuilder::ball(1.0)
.density(10.0)
.sensor(true)
.build();
let rigid_body = RigidBodyBuilder::new_dynamic()
.linvel(1000.0, 0.0, 0.0)
.translation(-20.0, shift_y + 2.0, 0.0)
.ccd_enabled(true)
.build();
let sensor_handle = bodies.insert(rigid_body);
colliders.insert(collider, sensor_handle, &mut bodies);
// Second rigid-body with CCD enabled.
let collider = ColliderBuilder::ball(1.0).density(10.0).build();
let rigid_body = RigidBodyBuilder::new_dynamic()
.linvel(1000.0, 0.0, 0.0)
.translation(-20.0, shift_y + 2.0, shift_z)
.ccd_enabled(true)
.build();
let handle = bodies.insert(rigid_body);
colliders.insert(collider.clone(), handle, &mut bodies);
// Callback that will be executed on the main loop to handle proximities.
testbed.add_callback(move |_, mut graphics, physics, events, _| {
while let Ok(prox) = events.intersection_events.try_recv() {
// Yellow while intersecting the sensor ball, blue-ish afterwards.
let color = if prox.intersecting {
Point3::new(1.0, 1.0, 0.0)
} else {
Point3::new(0.5, 0.5, 1.0)
};
let parent_handle1 = physics.colliders.get(prox.collider1).unwrap().parent();
let parent_handle2 = physics.colliders.get(prox.collider2).unwrap().parent();
if let Some(graphics) = &mut graphics {
// Never recolor the ground or the sensor-carrying body itself.
if parent_handle1 != ground_handle && parent_handle1 != sensor_handle {
graphics.set_body_color(parent_handle1, color);
}
if parent_handle2 != ground_handle && parent_handle2 != sensor_handle {
graphics.set_body_color(parent_handle2, color);
}
}
}
});
/*
* Set up the testbed.
*/
testbed.set_world(bodies, colliders, joints);
testbed.look_at(Point3::new(100.0, 100.0, 100.0), Point3::origin());
}
/// Standalone entry point running only this demo.
fn main() {
    // The label previously said "Boxes" (copy-paste leftover from the boxes
    // demo); this file is the 3D CCD demo.
    let testbed = Testbed::from_builders(0, vec![("CCD", init_world)]);
    testbed.run()
}

View File

@@ -0,0 +1,50 @@
use na::{Point3, Vector3};
use rapier3d::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
use rapier3d::geometry::{ColliderBuilder, ColliderSet, HalfSpace, SharedShape};
use rapier_testbed3d::Testbed;
/// Debug scene: a stack of progressively narrower platforms resting above a
/// half-space ground, stressing collision detection across a huge range of
/// collider sizes (widths from 10 000 down to ~5e-6).
pub fn init_world(testbed: &mut Testbed) {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let joints = JointSet::new();

    /*
     * Ground: an infinite half-space whose outward normal is +Y.
     */
    let ground = bodies.insert(RigidBodyBuilder::new_static().build());
    let halfspace = SharedShape::new(HalfSpace::new(Vector3::y_axis()));
    colliders.insert(ColliderBuilder::new(halfspace).build(), ground, &mut bodies);

    // Each platform is 5x narrower than the previous one; its half-height is
    // capped at 0.1 until the width itself drops below that.
    let mut curr_y = 0.0;
    let mut curr_width = 10_000.0;
    for _ in 0..12 {
        let curr_height = 0.1f32.min(curr_width);
        curr_y += curr_height * 4.0;

        let platform = RigidBodyBuilder::new_dynamic()
            .translation(0.0, curr_y, 0.0)
            .build();
        let handle = bodies.insert(platform);
        let cuboid = ColliderBuilder::cuboid(curr_width, curr_height, curr_width).build();
        colliders.insert(cuboid, handle, &mut bodies);

        curr_width /= 5.0;
    }

    /*
     * Set up the testbed.
     */
    testbed.set_world(bodies, colliders, joints);
    testbed.look_at(Point3::new(10.0, 10.0, 10.0), Point3::origin());
}
/// Standalone entry point running only this demo.
fn main() {
    Testbed::from_builders(0, vec![("Boxes", init_world)]).run()
}

View File

@@ -1,3 +1,4 @@
use na::Point3;
use rapier3d::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
use rapier3d::geometry::{ColliderBuilder, ColliderSet};
use rapier_testbed3d::Testbed;
@@ -10,10 +11,31 @@ pub fn init_world(testbed: &mut Testbed) {
let mut colliders = ColliderSet::new();
let joints = JointSet::new();
/*
* Ground
*/
let ground_size = 100.1;
let ground_height = 2.1;
let rigid_body = RigidBodyBuilder::new_static()
.translation(0.0, 4.0, 0.0)
.build();
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size, ground_height, ground_size).build();
colliders.insert(collider, handle, &mut bodies);
let rad = 1.0;
// Build the dynamic box rigid body.
let rigid_body = RigidBodyBuilder::new_dynamic()
.translation(0.0, 3.0 * rad, 0.0)
.translation(0.0, 7.0 * rad, 0.0)
.can_sleep(false)
.build();
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::ball(rad).build();
colliders.insert(collider, handle, &mut bodies);
let rigid_body = RigidBodyBuilder::new_dynamic()
.translation(0.0, 2.0 * rad, 0.0)
.can_sleep(false)
.build();
let handle = bodies.insert(rigid_body);
@@ -23,6 +45,7 @@ pub fn init_world(testbed: &mut Testbed) {
/*
* Set up the testbed.
*/
testbed.look_at(Point3::new(100.0, -10.0, 100.0), Point3::origin());
testbed.set_world(bodies, colliders, joints);
}

View File

@@ -15,7 +15,7 @@ pub fn init_world(testbed: &mut Testbed) {
* Ground
*/
let ground_size = 100.1;
let ground_height = 2.1;
let ground_height = 2.1; // 16.0;
let rigid_body = RigidBodyBuilder::new_static()
.translation(0.0, -ground_height, 0.0)
@@ -64,10 +64,6 @@ pub fn init_world(testbed: &mut Testbed) {
physics
.bodies
.remove(*handle, &mut physics.colliders, &mut physics.joints);
physics.broad_phase.maintain(&mut physics.colliders);
physics
.narrow_phase
.maintain(&mut physics.colliders, &mut physics.bodies);
if let (Some(graphics), Some(window)) = (&mut graphics, &mut window) {
graphics.remove_body_nodes(window, *handle);
@@ -80,6 +76,10 @@ pub fn init_world(testbed: &mut Testbed) {
* Set up the testbed.
*/
testbed.set_world(bodies, colliders, joints);
// testbed
// .physics_state_mut()
// .integration_parameters
// .velocity_based_erp = 0.2;
testbed.look_at(Point3::new(-30.0, 4.0, -30.0), Point3::new(0.0, 1.0, 0.0));
}

View File

@@ -21,6 +21,13 @@ impl CollisionDetectionCounters {
narrow_phase_time: Timer::new(),
}
}
/// Resets all the counters and timers.
pub fn reset(&mut self) {
// Zero the pair count and clear both phase timers for the next step.
self.ncontact_pairs = 0;
self.broad_phase_time.reset();
self.narrow_phase_time.reset();
}
}
impl Display for CollisionDetectionCounters {

View File

@@ -114,6 +114,18 @@ impl Counters {
pub fn set_ncontact_pairs(&mut self, n: usize) {
// Forward the contact-pair count to the collision-detection sub-counters.
self.cd.ncontact_pairs = n;
}
/// Resets all the counters and timers.
pub fn reset(&mut self) {
// When profiling is disabled the counters are never updated, so there is
// nothing to reset either.
if self.enabled {
self.step_time.reset();
self.custom.reset();
self.stages.reset();
self.cd.reset();
self.solver.reset();
self.ccd.reset();
}
}
}
macro_rules! measure_method {

View File

@@ -27,6 +27,15 @@ impl StagesCounters {
ccd_time: Timer::new(),
}
}
/// Resets all the counters and timers.
pub fn reset(&mut self) {
// Clear the per-stage timings accumulated during the previous step.
self.update_time.reset();
self.collision_detection_time.reset();
self.island_construction_time.reset();
self.solver_time.reset();
self.ccd_time.reset();
}
}
impl Display for StagesCounters {

View File

@@ -29,6 +29,20 @@ impl<T> Coarena<T> {
.and_then(|(gg, t)| if g == *gg { Some(t) } else { None })
}
/// Inserts an element into this coarena.
pub fn insert(&mut self, a: Index, value: T)
where
T: Clone + Default,
{
let (i1, g1) = a.into_raw_parts();
if self.data.len() <= i1 {
// Grow the dense storage up to the target index. New slots are given the
// generation `u32::MAX as u64` so they should never match a live index's
// generation — NOTE(review): assumes real generations stay below
// u32::MAX; confirm against the arena's generation type.
self.data.resize(i1 + 1, (u32::MAX as u64, T::default()));
}
// Overwrite the slot with the index's generation and the new value.
self.data[i1] = (g1, value);
}
/// Ensure that elements at the two given indices exist in this coarena, and return their reference.
///
/// Missing elements are created automatically and initialized with the `default` value.

View File

@@ -0,0 +1,430 @@
use super::TOIEntry;
use crate::dynamics::{RigidBodyHandle, RigidBodySet};
use crate::geometry::{ColliderSet, IntersectionEvent, NarrowPhase};
use crate::math::Real;
use crate::parry::utils::SortedPair;
use crate::pipeline::{EventHandler, QueryPipeline, QueryPipelineMode};
use parry::query::{DefaultQueryDispatcher, QueryDispatcher};
use parry::utils::hashmap::HashMap;
use std::collections::BinaryHeap;
/// The outcome of a CCD impact-prediction pass.
pub enum PredictedImpacts {
/// A time-of-impact within the timestep was found for each of these bodies.
Impacts(HashMap<RigidBodyHandle, Real>),
/// The earliest impact found happens after the timestep ends, at this time.
ImpactsAfterEndTime(Real),
/// No impact was found at all.
NoImpacts,
}
/// Solver responsible for performing motion-clamping on fast-moving bodies.
pub struct CCDSolver {
// Acceleration structure used to find candidate collider pairs to sweep-test.
query_pipeline: QueryPipeline,
}
impl CCDSolver {
/// Initializes a new CCD solver
pub fn new() -> Self {
Self::with_query_dispatcher(DefaultQueryDispatcher)
}
/// Initializes a CCD solver with a custom `QueryDispatcher` used for computing time-of-impacts.
///
/// Use this constructor in order to use a custom `QueryDispatcher` that is aware of your own
/// user-defined shapes.
pub fn with_query_dispatcher<D>(d: D) -> Self
where
D: 'static + QueryDispatcher,
{
CCDSolver {
query_pipeline: QueryPipeline::with_query_dispatcher(d),
}
}
/// Apply motion-clamping to the bodies affected by the given `impacts`.
///
/// The `impacts` should be the result of a previous call to `self.predict_impacts_at_next_positions`.
pub fn clamp_motions(&self, dt: Real, bodies: &mut RigidBodySet, impacts: &PredictedImpacts) {
match impacts {
PredictedImpacts::Impacts(tois) => {
// println!("Num to clamp: {}", tois.len());
for (handle, toi) in tois {
if let Some(body) = bodies.get_mut_internal(*handle) {
// Lower bound on the clamped motion time: 15% of the time the
// fastest point of the body needs to cross the body's CCD
// thickness, capped by `dt`. Presumably this avoids clamping a
// body to a zero-length motion when its TOI is 0 — TODO confirm.
let min_toi = (body.ccd_thickness
* 0.15
* crate::utils::inv(body.max_point_velocity()))
.min(dt);
// println!("Min toi: {}, Toi: {}", min_toi, toi);
body.integrate_next_position(toi.max(min_toi));
}
}
}
// `ImpactsAfterEndTime` and `NoImpacts` require no clamping.
_ => {}
}
}
/// Updates the set of bodies that needs CCD to be resolved.
///
/// Returns `true` if any rigid-body must have CCD resolved.
pub fn update_ccd_active_flags(
&self,
bodies: &mut RigidBodySet,
dt: Real,
include_forces: bool,
) -> bool {
let mut ccd_active = false;
// println!("Checking CCD activation");
bodies.foreach_active_dynamic_body_mut_internal(|_, body| {
body.update_ccd_active_flag(dt, include_forces);
// println!("CCD is active: {}, for {:?}", ccd_active, handle);
ccd_active = ccd_active || body.is_ccd_active();
});
ccd_active
}
/// Find the first time a CCD-enabled body has a non-sensor collider hitting another non-sensor collider.
pub fn find_first_impact(
&mut self,
dt: Real,
bodies: &RigidBodySet,
colliders: &ColliderSet,
narrow_phase: &NarrowPhase,
) -> Option<Real> {
// Update the query pipeline.
self.query_pipeline.update_with_mode(
bodies,
colliders,
QueryPipelineMode::SweepTestWithPredictedPosition { dt },
);
let mut pairs_seen = HashMap::default();
// `min_toi` shrinks as earlier impacts are found; it also bounds the TOI
// search of subsequent pairs so later computations can bail out early.
let mut min_toi = dt;
for (_, rb1) in bodies.iter_active_dynamic() {
if rb1.is_ccd_active() {
let predicted_body_pos1 = rb1.predict_position_using_velocity_and_forces(dt);
for ch1 in &rb1.colliders {
let co1 = &colliders[*ch1];
if co1.is_sensor() {
continue; // Ignore sensors.
}
// AABB covering the collider's sweep up to its predicted pose.
let aabb1 =
co1.compute_swept_aabb(&(predicted_body_pos1 * co1.position_wrt_parent()));
self.query_pipeline
.colliders_with_aabb_intersecting_aabb(&aabb1, |ch2| {
if *ch1 == *ch2 {
// Ignore self-intersection.
return true;
}
// Only process each unordered collider pair once.
if pairs_seen
.insert(
SortedPair::new(ch1.into_raw_parts().0, ch2.into_raw_parts().0),
(),
)
.is_none()
{
let c1 = colliders.get(*ch1).unwrap();
let c2 = colliders.get(*ch2).unwrap();
let bh1 = c1.parent();
let bh2 = c2.parent();
if bh1 == bh2 || (c1.is_sensor() || c2.is_sensor()) {
// Ignore self-intersection and sensors.
return true;
}
// Deepest existing contact distance for this pair (0.0 if
// none); fed to the TOI computation as extra slack.
let smallest_dist = narrow_phase
.contact_pair(*ch1, *ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
let b1 = bodies.get(bh1).unwrap();
let b2 = bodies.get(bh2).unwrap();
if let Some(toi) = TOIEntry::try_from_colliders(
self.query_pipeline.query_dispatcher(),
*ch1,
*ch2,
c1,
c2,
b1,
b2,
None,
None,
0.0,
min_toi,
smallest_dist,
) {
min_toi = min_toi.min(toi.toi);
}
}
true
});
}
}
}
// Report a hit only if some TOI lies strictly inside the timestep.
if min_toi < dt {
Some(min_toi)
} else {
None
}
}
/// Outputs the set of bodies as well as their first time-of-impact event.
pub fn predict_impacts_at_next_positions(
&mut self,
dt: Real,
bodies: &RigidBodySet,
colliders: &ColliderSet,
narrow_phase: &NarrowPhase,
events: &dyn EventHandler,
) -> PredictedImpacts {
// Bodies whose motion has been clamped ("frozen") at a given TOI.
let mut frozen = HashMap::<_, Real>::default();
// Queue of candidate TOI events. NOTE(review): processing order relies on
// `TOIEntry`'s `Ord` impl, which is not visible here — presumably a
// min-heap over `toi`; confirm.
let mut all_toi = BinaryHeap::new();
let mut pairs_seen = HashMap::default();
let mut min_overstep = dt;
// Update the query pipeline.
self.query_pipeline.update_with_mode(
bodies,
colliders,
QueryPipelineMode::SweepTestWithNextPosition,
);
/*
*
* First, collect all TOIs.
*
*/
// TODO: don't iterate through all the colliders.
for (ch1, co1) in colliders.iter() {
let rb1 = &bodies[co1.parent()];
if rb1.is_ccd_active() {
let aabb = co1.compute_swept_aabb(&(rb1.next_position * co1.position_wrt_parent()));
self.query_pipeline
.colliders_with_aabb_intersecting_aabb(&aabb, |ch2| {
if ch1 == *ch2 {
// Ignore self-intersection.
return true;
}
// Only process each unordered collider pair once.
if pairs_seen
.insert(
SortedPair::new(ch1.into_raw_parts().0, ch2.into_raw_parts().0),
(),
)
.is_none()
{
let c1 = colliders.get(ch1).unwrap();
let c2 = colliders.get(*ch2).unwrap();
let bh1 = c1.parent();
let bh2 = c2.parent();
if bh1 == bh2 {
// Ignore self-intersection.
return true;
}
let b1 = bodies.get(bh1).unwrap();
let b2 = bodies.get(bh2).unwrap();
let smallest_dist = narrow_phase
.contact_pair(ch1, *ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
if let Some(toi) = TOIEntry::try_from_colliders(
self.query_pipeline.query_dispatcher(),
ch1,
*ch2,
c1,
c2,
b1,
b2,
None,
None,
0.0,
// NOTE: we use dt here only once we know that
// there is at least one TOI before dt.
min_overstep,
smallest_dist,
) {
if toi.toi > dt {
// Impact happens after the timestep: track the earliest
// such time instead of queuing it.
min_overstep = min_overstep.min(toi.toi);
} else {
min_overstep = dt;
all_toi.push(toi);
}
}
}
true
});
}
}
/*
*
* If the smallest TOI is outside of the time interval, return.
*
*/
if min_overstep == dt && all_toi.is_empty() {
return PredictedImpacts::NoImpacts;
} else if min_overstep > dt {
return PredictedImpacts::ImpactsAfterEndTime(min_overstep);
}
// NOTE: all static bodies (and kinematic bodies?) should be considered as "frozen", this
// may avoid some resweeps.
let mut intersections_to_check = vec![];
while let Some(toi) = all_toi.pop() {
assert!(toi.toi <= dt);
let body1 = bodies.get(toi.b1).unwrap();
let body2 = bodies.get(toi.b2).unwrap();
let mut colliders_to_check = Vec::new();
let should_freeze1 = body1.is_ccd_active() && !frozen.contains_key(&toi.b1);
let should_freeze2 = body2.is_ccd_active() && !frozen.contains_key(&toi.b2);
if !should_freeze1 && !should_freeze2 {
continue;
}
if toi.is_intersection_test {
// NOTE: this test is redundant with the previous `if !should_freeze1 && ...`
// but let's keep it to avoid tricky regressions if we end up swapping both
// `if` for some reasons in the future.
if should_freeze1 || should_freeze2 {
// This is only an intersection so we don't have to freeze and there is no
// need to resweep. However we will need to see if we have to generate
// intersection events, so push the TOI for further testing.
intersections_to_check.push(toi);
}
continue;
}
// Freeze the involved CCD-active bodies at this TOI, then resweep all
// their colliders against the scene starting from that time.
if should_freeze1 {
let _ = frozen.insert(toi.b1, toi.toi);
colliders_to_check.extend_from_slice(&body1.colliders);
}
if should_freeze2 {
let _ = frozen.insert(toi.b2, toi.toi);
colliders_to_check.extend_from_slice(&body2.colliders);
}
let start_time = toi.toi;
for ch1 in &colliders_to_check {
let co1 = &colliders[*ch1];
let rb1 = &bodies[co1.parent];
let aabb = co1.compute_swept_aabb(&(rb1.next_position * co1.position_wrt_parent()));
self.query_pipeline
.colliders_with_aabb_intersecting_aabb(&aabb, |ch2| {
let c1 = colliders.get(*ch1).unwrap();
let c2 = colliders.get(*ch2).unwrap();
let bh1 = c1.parent();
let bh2 = c2.parent();
if bh1 == bh2 {
// Ignore self-intersection.
return true;
}
let frozen1 = frozen.get(&bh1);
let frozen2 = frozen.get(&bh2);
let b1 = bodies.get(bh1).unwrap();
let b2 = bodies.get(bh2).unwrap();
if (frozen1.is_some() || !b1.is_ccd_active())
&& (frozen2.is_some() || !b2.is_ccd_active())
{
// We already did a resweep.
return true;
}
let smallest_dist = narrow_phase
.contact_pair(*ch1, *ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
if let Some(toi) = TOIEntry::try_from_colliders(
self.query_pipeline.query_dispatcher(),
*ch1,
*ch2,
c1,
c2,
b1,
b2,
frozen1.copied(),
frozen2.copied(),
start_time,
dt,
smallest_dist,
) {
all_toi.push(toi);
}
true
});
}
}
for toi in intersections_to_check {
// See if the intersection is still active once the bodies
// reach their final positions.
// - If the intersection is still active, don't report it yet. It will be
// reported by the narrow-phase at the next timestep/substep.
// - If the intersection isn't active anymore, and it wasn't intersecting
// before, then we need to generate one interaction-start and one interaction-stop
// events because it will never be detected by the narrow-phase because of tunneling.
let body1 = &bodies[toi.b1];
let body2 = &bodies[toi.b2];
let co1 = &colliders[toi.c1];
let co2 = &colliders[toi.c2];
let frozen1 = frozen.get(&toi.b1);
let frozen2 = frozen.get(&toi.b2);
// Final pose of each body: the pose at its frozen time if it was
// clamped, otherwise its full next position.
let pos1 = frozen1
.map(|t| body1.integrate_velocity(*t))
.unwrap_or(body1.next_position);
let pos2 = frozen2
.map(|t| body2.integrate_velocity(*t))
.unwrap_or(body2.next_position);
// Relative collider poses before and after the motion.
let prev_coll_pos12 = co1.position.inv_mul(&co2.position);
let next_coll_pos12 =
(pos1 * co1.position_wrt_parent()).inverse() * (pos2 * co2.position_wrt_parent());
let query_dispatcher = self.query_pipeline.query_dispatcher();
let intersect_before = query_dispatcher
.intersection_test(&prev_coll_pos12, co1.shape(), co2.shape())
.unwrap_or(false);
let intersect_after = query_dispatcher
.intersection_test(&next_coll_pos12, co1.shape(), co2.shape())
.unwrap_or(false);
if !intersect_before && !intersect_after {
// Emit one intersection-started and one intersection-stopped event.
events.handle_intersection_event(IntersectionEvent::new(toi.c1, toi.c2, true));
events.handle_intersection_event(IntersectionEvent::new(toi.c1, toi.c2, false));
}
}
PredictedImpacts::Impacts(frozen)
}
}

5
src/dynamics/ccd/mod.rs Normal file
View File

@@ -0,0 +1,5 @@
pub use self::ccd_solver::{CCDSolver, PredictedImpacts};
pub use self::toi_entry::TOIEntry;
mod ccd_solver;
mod toi_entry;

View File

@@ -0,0 +1,163 @@
use crate::dynamics::{RigidBody, RigidBodyHandle};
use crate::geometry::{Collider, ColliderHandle};
use crate::math::Real;
use parry::query::{NonlinearRigidMotion, QueryDispatcher};
/// A candidate time-of-impact event between two colliders, queued by the CCD
/// solver in a `BinaryHeap`.
#[derive(Copy, Clone, Debug)]
pub struct TOIEntry {
/// The time of impact.
pub toi: Real,
/// Handle of the first collider involved in the impact.
pub c1: ColliderHandle,
/// Handle of the body the first collider is attached to.
pub b1: RigidBodyHandle,
/// Handle of the second collider involved in the impact.
pub c2: ColliderHandle,
/// Handle of the body the second collider is attached to.
pub b2: RigidBodyHandle,
/// `true` when at least one of the colliders is a sensor, making this an
/// intersection test rather than a contact impact.
pub is_intersection_test: bool,
// NOTE(review): always 0 at the visible construction site
// (`try_from_colliders`); purpose not determinable from this file.
pub timestamp: usize,
}
impl TOIEntry {
// Plain field-by-field constructor for a TOI entry.
fn new(
toi: Real,
c1: ColliderHandle,
b1: RigidBodyHandle,
c2: ColliderHandle,
b2: RigidBodyHandle,
is_intersection_test: bool,
timestamp: usize,
) -> Self {
Self {
toi,
c1,
b1,
c2,
b2,
is_intersection_test,
timestamp,
}
}
/// Tries to build a TOI entry for the collider pair `(ch1, ch2)`.
///
/// Returns `None` if the pair cannot tunnel during `[start_time, end_time]`
/// (relative motion smaller than the combined shape thickness), or if the
/// nonlinear time-of-impact query finds no impact. `frozen1`/`frozen2`, when
/// set, give the time at which a body's motion was already clamped by the
/// CCD solver; a frozen body is treated as motionless.
pub fn try_from_colliders<QD: ?Sized + QueryDispatcher>(
query_dispatcher: &QD,
ch1: ColliderHandle,
ch2: ColliderHandle,
c1: &Collider,
c2: &Collider,
b1: &RigidBody,
b2: &RigidBody,
frozen1: Option<Real>,
frozen2: Option<Real>,
start_time: Real,
end_time: Real,
smallest_contact_dist: Real,
) -> Option<Self> {
assert!(start_time <= end_time);
// Branchless zeroing of the velocities of frozen bodies:
// `is_none()` is 1 when the body still moves, 0 when it is frozen.
let linvel1 = frozen1.is_none() as u32 as Real * b1.linvel();
let linvel2 = frozen2.is_none() as u32 as Real * b2.linvel();
let angvel1 = frozen1.is_none() as u32 as Real * b1.angvel();
let angvel2 = frozen2.is_none() as u32 as Real * b2.angvel();
// Bound on the relative velocity of the pair: linear part plus the angular
// contribution scaled by each body's `ccd_max_dist` (presumably a bound on
// the lever arm — TODO confirm).
#[cfg(feature = "dim2")]
let vel12 = (linvel2 - linvel1).norm()
+ angvel1.abs() * b1.ccd_max_dist
+ angvel2.abs() * b2.ccd_max_dist;
#[cfg(feature = "dim3")]
let vel12 = (linvel2 - linvel1).norm()
+ angvel1.norm() * b1.ccd_max_dist
+ angvel2.norm() * b2.ccd_max_dist;
// We may be slightly over-conservative by taking the `max(0.0)` here.
// But removing the `max` doesn't really affect performances so let's
// keep it since more conservatism is good at this stage.
let thickness = (c1.shape().ccd_thickness() + c2.shape().ccd_thickness())
+ smallest_contact_dist.max(0.0);
let is_intersection_test = c1.is_sensor() || c2.is_sensor();
// If the pair cannot travel further than the combined thickness during the
// time interval, tunneling cannot happen: skip the expensive TOI query.
if (end_time - start_time) * vel12 < thickness {
return None;
}
// Compute the TOI.
let mut motion1 = Self::body_motion(b1);
let mut motion2 = Self::body_motion(b2);
if let Some(t) = frozen1 {
motion1.freeze(t);
}
if let Some(t) = frozen2 {
motion2.freeze(t);
}
// Convert body motions into collider motions by prepending each
// collider's pose relative to its parent body.
let motion_c1 = motion1.prepend(*c1.position_wrt_parent());
let motion_c2 = motion2.prepend(*c2.position_wrt_parent());
// println!("start_time: {}", start_time);
// If this is just an intersection test (i.e. with sensors)
// then we can stop the TOI search immediately if it starts with
// a penetration because we don't care about whether the velocity
// at the impact is a separating velocity or not.
// If the TOI search involves two non-sensor colliders then
// we don't want to stop the TOI search at the first penetration
// because the colliders may be in a separating trajectory.
let stop_at_penetration = is_intersection_test;
let res_toi = query_dispatcher
.nonlinear_time_of_impact(
&motion_c1,
c1.shape(),
&motion_c2,
c2.shape(),
start_time,
end_time,
stop_at_penetration,
)
.ok();
// Propagate both the query failure (`Err` mapped to `None`) and the
// "no impact" result.
let toi = res_toi??;
Some(Self::new(
toi.toi,
ch1,
c1.parent(),
ch2,
c2.parent(),
is_intersection_test,
0,
))
}
fn body_motion(body: &RigidBody) -> NonlinearRigidMotion {
if body.is_ccd_active() {
NonlinearRigidMotion::new(
0.0,
body.position,
body.mass_properties.local_com,
body.linvel,
body.angvel,
)
} else {
NonlinearRigidMotion::constant_position(body.next_position)
}
}
}
impl PartialOrd for TOIEntry {
    /// Reversed comparison on `toi`, so that a max-heap (e.g. `BinaryHeap`)
    /// pops the entry with the *smallest* time of impact first.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        other.toi.partial_cmp(&self.toi)
    }
}
impl Ord for TOIEntry {
    // NOTE: panics if `partial_cmp` returns `None` — assumes `toi` is never
    // NaN in practice (float comparisons only fail on NaN) — confirm.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.partial_cmp(other).unwrap()
    }
}
impl PartialEq for TOIEntry {
    // Equality only considers `toi`, consistently with the ordering above
    // which also compares only `toi`.
    fn eq(&self, other: &Self) -> bool {
        self.toi == other.toi
    }
}

// NOTE(review): `Eq` on a float-keyed type relies on `toi` never being NaN —
// confirm this invariant holds wherever entries are created.
impl Eq for TOIEntry {}

View File

@@ -21,6 +21,16 @@ pub enum CoefficientCombineRule {
}
impl CoefficientCombineRule {
/// Converts a raw `u8` back into its `CoefficientCombineRule` variant
/// (0 = Average, 1 = Min, 2 = Multiply, 3 = Max).
///
/// # Panics
/// Panics if `val` is not in `0..=3`.
pub(crate) fn from_value(val: u8) -> Self {
    match val {
        0 => CoefficientCombineRule::Average,
        1 => CoefficientCombineRule::Min,
        2 => CoefficientCombineRule::Multiply,
        3 => CoefficientCombineRule::Max,
        _ => panic!("Invalid coefficient combine rule."),
    }
}
pub(crate) fn combine(coeff1: Real, coeff2: Real, rule_value1: u8, rule_value2: u8) -> Real {
let effective_rule = rule_value1.max(rule_value2);

View File

@@ -6,6 +6,17 @@ use crate::math::Real;
pub struct IntegrationParameters {
/// The timestep length (default: `1.0 / 60.0`)
pub dt: Real,
/// Minimum timestep size when using CCD with multiple substeps (default `1.0 / 60.0 / 100.0`)
///
/// When CCD with multiple substeps is enabled, the timestep is subdivided
/// into smaller pieces. This timestep subdivision won't generate timestep
/// lengths smaller than `min_dt`.
///
/// Setting this to a large value will reduce the opportunity to perform
/// CCD substepping, resulting in potentially more time dropped by the
/// motion-clamping mechanism. Setting this to a very small value may lead
/// to numerical instabilities.
pub min_ccd_dt: Real,
// /// If `true` and if rapier is compiled with the `parallel` feature, this will enable rayon-based multithreading (default: `true`).
// ///
@@ -15,9 +26,9 @@ pub struct IntegrationParameters {
// /// Note that using only one thread with `multithreading_enabled` set to `true` will result on a slower
// /// simulation than setting `multithreading_enabled` to `false`.
// pub multithreading_enabled: bool,
/// If `true`, the world's `step` method will stop right after resolving exactly one CCD event (default: `false`).
/// This allows the user to take action during a timestep, in-between two CCD events.
pub return_after_ccd_substep: bool,
// /// If `true`, the world's `step` method will stop right after resolving exactly one CCD event (default: `false`).
// /// This allows the user to take action during a timestep, in-between two CCD events.
// pub return_after_ccd_substep: bool,
/// The Error Reduction Parameter in `[0, 1]` is the proportion of
/// the positional error to be corrected at each time step (default: `0.2`).
pub erp: Real,
@@ -27,6 +38,8 @@ pub struct IntegrationParameters {
/// Each cached impulse are multiplied by this coefficient in `[0, 1]`
/// when they are re-used to initialize the solver (default `1.0`).
pub warmstart_coeff: Real,
/// Correction factor to avoid large warmstart impulse after a strong impact (default `10.0`).
pub warmstart_correction_slope: Real,
/// 0-1: how much of the velocity to dampen out in the constraint solver?
/// (default `1.0`).
@@ -51,49 +64,14 @@ pub struct IntegrationParameters {
pub max_linear_correction: Real,
/// Maximum angular correction during one step of the non-linear position solver (default: `0.2`).
pub max_angular_correction: Real,
/// Maximum nonlinear SOR-prox scaling parameter when the constraint
/// correction direction is close to the kernel of the involved multibody's
/// jacobian (default: `0.2`).
pub max_stabilization_multiplier: Real,
/// Maximum number of iterations performed by the velocity constraints solver (default: `4`).
pub max_velocity_iterations: usize,
/// Maximum number of iterations performed by the position-based constraints solver (default: `1`).
pub max_position_iterations: usize,
/// Minimum number of dynamic bodies in each active island (default: `128`).
pub min_island_size: usize,
/// Maximum number of iterations performed by the position-based constraints solver for CCD steps (default: `10`).
///
/// This should be sufficiently high so all penetration get resolved. For example, if CCD cause your
/// objects to stutter, that may be because the number of CCD position iterations is too low, causing
/// them to remain stuck in a penetration configuration for a few frames.
///
/// The higher this number, the higher its computational cost.
pub max_ccd_position_iterations: usize,
/// Maximum number of substeps performed by the solver (default: `1`).
pub max_ccd_substeps: usize,
/// Controls the number of Proximity::Intersecting events generated by a trigger during CCD resolution (default: `false`).
///
/// If false, triggers will only generate one Proximity::Intersecting event during a step, even
/// if another colliders repeatedly enters and leaves the triggers during multiple CCD substeps.
///
/// If true, triggers will generate as many Proximity::Intersecting and Proximity::Disjoint/Proximity::WithinMargin
/// events as the number of times a collider repeatedly enters and leaves the triggers during multiple CCD substeps.
/// This is more computationally intensive.
pub multiple_ccd_substep_sensor_events_enabled: bool,
/// Whether penetration are taken into account in CCD resolution (default: `false`).
///
/// If this is set to `false` two penetrating colliders will not be considered to have any time of impact
/// while they are penetrating. This may end up allowing some tunelling, but will avoid stuttering effect
/// when the constraints solver fails to completely separate two colliders after a CCD contact.
///
/// If this is set to `true`, two penetrating colliders will be considered to have a time of impact
/// equal to 0 until the constraints solver manages to separate them. This will prevent tunnelling
/// almost completely, but may introduce stuttering effects when the constraints solver fails to completely
/// separate two colliders after a CCD contact.
// FIXME: this is a very binary way of handling penetration.
// We should provide a more flexible solution by letting the user choose some
// minimal amount of movement applied to an object that get stuck.
pub ccd_on_penetration_enabled: bool,
}
impl IntegrationParameters {
@@ -101,28 +79,20 @@ impl IntegrationParameters {
#[deprecated = "Use `IntegrationParameters { dt: 60.0, ..Default::default() }` instead"]
pub fn new(
dt: Real,
// multithreading_enabled: bool,
erp: Real,
joint_erp: Real,
warmstart_coeff: Real,
_restitution_velocity_threshold: Real,
allowed_linear_error: Real,
allowed_angular_error: Real,
max_linear_correction: Real,
max_angular_correction: Real,
prediction_distance: Real,
max_stabilization_multiplier: Real,
max_velocity_iterations: usize,
max_position_iterations: usize,
max_ccd_position_iterations: usize,
max_ccd_substeps: usize,
return_after_ccd_substep: bool,
multiple_ccd_substep_sensor_events_enabled: bool,
ccd_on_penetration_enabled: bool,
) -> Self {
IntegrationParameters {
dt,
// multithreading_enabled,
erp,
joint_erp,
warmstart_coeff,
@@ -131,14 +101,9 @@ impl IntegrationParameters {
max_linear_correction,
max_angular_correction,
prediction_distance,
max_stabilization_multiplier,
max_velocity_iterations,
max_position_iterations,
max_ccd_position_iterations,
max_ccd_substeps,
return_after_ccd_substep,
multiple_ccd_substep_sensor_events_enabled,
ccd_on_penetration_enabled,
..Default::default()
}
}
@@ -193,19 +158,19 @@ impl Default for IntegrationParameters {
fn default() -> Self {
Self {
dt: 1.0 / 60.0,
min_ccd_dt: 1.0 / 60.0 / 100.0,
// multithreading_enabled: true,
return_after_ccd_substep: false,
erp: 0.2,
joint_erp: 0.2,
velocity_solve_fraction: 1.0,
velocity_based_erp: 0.0,
warmstart_coeff: 1.0,
warmstart_correction_slope: 10.0,
allowed_linear_error: 0.005,
prediction_distance: 0.002,
allowed_angular_error: 0.001,
max_linear_correction: 0.2,
max_angular_correction: 0.2,
max_stabilization_multiplier: 0.2,
max_velocity_iterations: 4,
max_position_iterations: 1,
// FIXME: what is the optimal value for min_island_size?
@@ -214,10 +179,7 @@ impl Default for IntegrationParameters {
// However we don't want it to be too small and end up with
// tons of islands, reducing SIMD parallelism opportunities.
min_island_size: 128,
max_ccd_position_iterations: 10,
max_ccd_substeps: 1,
multiple_ccd_substep_sensor_events_enabled: false,
ccd_on_penetration_enabled: false,
}
}
}

View File

@@ -18,6 +18,7 @@ pub use self::rigid_body::{ActivationStatus, BodyStatus, RigidBody, RigidBodyBui
pub use self::rigid_body_set::{BodyPair, RigidBodyHandle, RigidBodySet};
pub use parry::mass_properties::MassProperties;
// #[cfg(not(feature = "parallel"))]
pub use self::ccd::CCDSolver;
pub use self::coefficient_combine_rule::CoefficientCombineRule;
pub(crate) use self::joint::JointGraphEdge;
pub(crate) use self::rigid_body::RigidBodyChanges;
@@ -26,6 +27,7 @@ pub(crate) use self::solver::IslandSolver;
#[cfg(feature = "parallel")]
pub(crate) use self::solver::ParallelIslandSolver;
mod ccd;
mod coefficient_combine_rule;
mod integration_parameters;
mod joint;

View File

@@ -5,7 +5,7 @@ use crate::geometry::{
use crate::math::{
AngVector, AngularInertia, Isometry, Point, Real, Rotation, Translation, Vector,
};
use crate::utils::{self, WCross, WDot};
use crate::utils::{self, WAngularInertia, WCross, WDot};
use na::ComplexField;
use num::Zero;
@@ -24,7 +24,7 @@ pub enum BodyStatus {
/// cannot be pushed by anything. In other words, the trajectory of a kinematic body can only be
/// modified by the user and is independent from any contact or joint it is involved in.
Kinematic,
// Semikinematic, // A kinematic that performs automatic CCD with the static environment toi avoid traversing it?
// Semikinematic, // A kinematic that performs automatic CCD with the static environment to avoid traversing it?
// Disabled,
}
@@ -36,17 +36,20 @@ bitflags::bitflags! {
const ROTATION_LOCKED_X = 1 << 1;
const ROTATION_LOCKED_Y = 1 << 2;
const ROTATION_LOCKED_Z = 1 << 3;
const CCD_ENABLED = 1 << 4;
const CCD_ACTIVE = 1 << 5;
}
}
bitflags::bitflags! {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// Flags affecting the behavior of the constraints solver for a given contact manifold.
/// Flags describing how the rigid-body has been modified by the user.
pub(crate) struct RigidBodyChanges: u32 {
const MODIFIED = 1 << 0;
const POSITION = 1 << 1;
const SLEEP = 1 << 2;
const COLLIDERS = 1 << 3;
const MODIFIED = 1 << 0;
const POSITION = 1 << 1;
const SLEEP = 1 << 2;
const COLLIDERS = 1 << 3;
const BODY_STATUS = 1 << 4;
}
}
@@ -58,7 +61,16 @@ bitflags::bitflags! {
pub struct RigidBody {
/// The world-space position of the rigid-body.
pub(crate) position: Isometry<Real>,
pub(crate) predicted_position: Isometry<Real>,
/// The next position of the rigid-body.
///
/// At the beginning of the timestep, and when the
/// timestep is complete we must have position == next_position
/// except for kinematic bodies.
///
/// The next_position is updated after the velocity and position
/// resolution. Then it is either validated (i.e. we set position := next_position)
/// or clamped by CCD.
pub(crate) next_position: Isometry<Real>,
/// The local mass properties of the rigid-body.
pub(crate) mass_properties: MassProperties,
/// The world-space center of mass of the rigid-body.
@@ -92,18 +104,20 @@ pub struct RigidBody {
flags: RigidBodyFlags,
pub(crate) changes: RigidBodyChanges,
/// The status of the body, governing how it is affected by external forces.
pub body_status: BodyStatus,
body_status: BodyStatus,
/// The dominance group this rigid-body is part of.
dominance_group: i8,
/// User-defined data associated to this rigid-body.
pub user_data: u128,
pub(crate) ccd_thickness: Real,
pub(crate) ccd_max_dist: Real,
}
impl RigidBody {
fn new() -> Self {
Self {
position: Isometry::identity(),
predicted_position: Isometry::identity(),
next_position: Isometry::identity(),
mass_properties: MassProperties::zero(),
world_com: Point::origin(),
effective_inv_mass: 0.0,
@@ -127,6 +141,8 @@ impl RigidBody {
body_status: BodyStatus::Dynamic,
dominance_group: 0,
user_data: 0,
ccd_thickness: Real::MAX,
ccd_max_dist: 0.0,
}
}
@@ -153,8 +169,19 @@ impl RigidBody {
self.linvel += linear_acc * dt;
self.angvel += angular_acc * dt;
self.force = na::zero();
self.torque = na::zero();
}
/// The status of this rigid-body.
///
/// Use `set_body_status` to change it so modification tracking stays correct.
pub fn body_status(&self) -> BodyStatus {
    self.body_status
}
/// Sets the status of this rigid-body.
pub fn set_body_status(&mut self, status: BodyStatus) {
    // Only record a change when the status actually differs, so the
    // modification-tracking machinery is not triggered needlessly.
    if self.body_status == status {
        return;
    }
    self.changes.insert(RigidBodyChanges::BODY_STATUS);
    self.body_status = status;
}
/// The mass properties of this rigid-body.
@@ -176,7 +203,72 @@ impl RigidBody {
}
}
/// Sets the rigid-body's mass properties.
/// Enables or disables CCD (continuous collision-detection) for this rigid-body.
pub fn enable_ccd(&mut self, enabled: bool) {
    if enabled {
        self.flags.insert(RigidBodyFlags::CCD_ENABLED);
    } else {
        self.flags.remove(RigidBodyFlags::CCD_ENABLED);
    }
}
/// Is CCD (continuous collision-detection) enabled for this rigid-body?
pub fn is_ccd_enabled(&self) -> bool {
    self.flags.contains(RigidBodyFlags::CCD_ENABLED)
}
// This is different from `is_ccd_enabled`. This checks that CCD
// is active for this rigid-body, i.e., if it was seen to move fast
// enough to justify a CCD run.
/// Is CCD active for this rigid-body?
///
/// The CCD is considered active if the rigid-body is moving at
/// a velocity greater than an automatically-computed threshold.
///
/// This is not the same as `self.is_ccd_enabled` which only
/// checks if CCD is allowed to run for this rigid-body or if
/// it is completely disabled (independently from its velocity).
pub fn is_ccd_active(&self) -> bool {
    self.flags.contains(RigidBodyFlags::CCD_ACTIVE)
}
/// Recomputes whether CCD should actually run for this body over the next
/// `dt` seconds and stores the result in the `CCD_ACTIVE` flag.
pub(crate) fn update_ccd_active_flag(&mut self, dt: Real, include_forces: bool) {
    // CCD runs only if the user enabled it AND the body moves fast enough.
    let should_run_ccd = if self.is_ccd_enabled() {
        self.is_moving_fast(dt, include_forces)
    } else {
        false
    };
    self.flags.set(RigidBodyFlags::CCD_ACTIVE, should_run_ccd);
}
/// Does this body move fast enough, relative to its CCD thickness, that it
/// could tunnel through another collider during a timestep of length `dt`?
///
/// Always returns `false` for non-dynamic bodies. When `include_forces` is
/// `true`, the velocity estimate also accounts for the velocity change the
/// currently applied force/torque would produce over `dt`.
pub(crate) fn is_moving_fast(&self, dt: Real, include_forces: bool) -> bool {
    if self.is_dynamic() {
        // NOTE: for the threshold we don't use the exact CCD thickness. Theoretically, we
        // should use `self.ccd_thickness - smallest_contact_dist` where `smallest_contact_dist`
        // is the deepest contact (the contact with the largest penetration depth, i.e., the
        // negative `dist` with the largest absolute value.
        // However, getting this penetration depth assumes querying the contact graph from
        // the narrow-phase, which can be pretty expensive. So we use the CCD thickness
        // divided by 10 right now. We will see in practice if this value is OK or if we
        // should use a smaller (to be less conservative) or larger divisor (to be more conservative).
        let threshold = self.ccd_thickness / 10.0;

        if include_forces {
            // Linear speed after one step of force integration.
            let linear_part = (self.linvel + self.force * dt).norm();
            // Angular contribution scaled by the farthest collider point.
            #[cfg(feature = "dim2")]
            let angular_part = (self.angvel + self.torque * dt).abs() * self.ccd_max_dist;
            #[cfg(feature = "dim3")]
            let angular_part = (self.angvel + self.torque * dt).norm() * self.ccd_max_dist;
            let vel_with_forces = linear_part + angular_part;
            // NOTE(review): unlike the branch below, this compares a raw
            // velocity (not velocity * dt) against the distance threshold —
            // confirm whether the missing `* dt` here is intentional.
            vel_with_forces > threshold
        } else {
            self.max_point_velocity() * dt > threshold
        }
    } else {
        false
    }
}
/// An upper bound of the velocity of any point of any collider attached to
/// this body: linear speed plus angular speed scaled by the distance to the
/// farthest collider point (`ccd_max_dist`).
pub(crate) fn max_point_velocity(&self) -> Real {
    #[cfg(feature = "dim2")]
    return self.linvel.norm() + self.angvel.abs() * self.ccd_max_dist;
    #[cfg(feature = "dim3")]
    return self.linvel.norm() + self.angvel.norm() * self.ccd_max_dist;
}
/// Sets the rigid-body's initial mass properties.
///
/// If `wake_up` is `true` then the rigid-body will be woken up if it was
/// put to sleep because it did not move for a while.
@@ -228,8 +320,8 @@ impl RigidBody {
/// If this rigid-body is kinematic this value is set by the `set_next_kinematic_position`
/// method and is used for estimating the kinematic body velocity at the next timestep.
/// For non-kinematic bodies, this value is currently unspecified.
pub fn predicted_position(&self) -> &Isometry<Real> {
&self.predicted_position
pub fn next_position(&self) -> &Isometry<Real> {
&self.next_position
}
/// The scale factor applied to the gravity affecting this rigid-body.
@@ -254,6 +346,15 @@ impl RigidBody {
true,
);
self.ccd_thickness = self.ccd_thickness.min(coll.shape().ccd_thickness());
let shape_bsphere = coll
.shape()
.compute_bounding_sphere(coll.position_wrt_parent());
self.ccd_max_dist = self
.ccd_max_dist
.max(shape_bsphere.center.coords.norm() + shape_bsphere.radius);
let mass_properties = coll
.mass_properties()
.transform_by(coll.position_wrt_parent());
@@ -264,9 +365,13 @@ impl RigidBody {
pub(crate) fn update_colliders_positions(&mut self, colliders: &mut ColliderSet) {
for handle in &self.colliders {
let collider = &mut colliders[*handle];
collider.position = self.position * collider.delta;
collider.predicted_position = self.predicted_position * collider.delta;
// NOTE: we use `get_mut_internal_with_modification_tracking` here because we want to
// benefit from the modification tracking to know the colliders
// we need to update the broad-phase and narrow-phase for.
let collider = colliders
.get_mut_internal_with_modification_tracking(*handle)
.unwrap();
collider.set_position(self.position * collider.delta);
}
}
@@ -331,18 +436,35 @@ impl RigidBody {
!self.linvel.is_zero() || !self.angvel.is_zero()
}
fn integrate_velocity(&self, dt: Real) -> Isometry<Real> {
/// Computes the predicted position of this rigid-body after `dt` seconds, taking
/// into account its velocities and the external forces applied to it.
pub fn predict_position_using_velocity_and_forces(&self, dt: Real) -> Isometry<Real> {
    // Velocity changes induced by the external force/torque over `dt`.
    let dlinvel = self.force * (self.effective_inv_mass * dt);
    let dangvel = self
        .effective_world_inv_inertia_sqrt
        .transform_vector(self.torque * dt);
    let linvel = self.linvel + dlinvel;
    let angvel = self.angvel + dangvel;
    // Integrate about the world-space center of mass so the rotation does
    // not induce a spurious translation.
    let com = self.position * self.mass_properties.local_com;
    let shift = Translation::from(com.coords);
    shift * Isometry::new(linvel * dt, angvel * dt) * shift.inverse() * self.position
}
/// Computes the rigid-body *displacement* (not its final position) obtained
/// by integrating its current velocities over `dt` seconds, rotating about
/// its world-space center of mass.
pub(crate) fn integrate_velocity(&self, dt: Real) -> Isometry<Real> {
    let com = self.position * self.mass_properties.local_com;
    let shift = Translation::from(com.coords);
    shift * Isometry::new(self.linvel * dt, self.angvel * dt) * shift.inverse()
}
pub(crate) fn integrate(&mut self, dt: Real) {
// TODO: do we want to apply damping before or after the velocity integration?
/// Applies linear and angular damping to the current velocities, scaling
/// each by `1 / (1 + dt * damping)`.
pub(crate) fn apply_damping(&mut self, dt: Real) {
    self.linvel *= 1.0 / (1.0 + dt * self.linear_damping);
    self.angvel *= 1.0 / (1.0 + dt * self.angular_damping);
}
self.position = self.integrate_velocity(dt) * self.position;
/// Integrates the current velocities over `dt` and stores the result into
/// `next_position`, leaving `position` untouched.
pub(crate) fn integrate_next_position(&mut self, dt: Real) {
    let displacement = self.integrate_velocity(dt);
    self.next_position = displacement * self.position;
    // Renormalize to fight rotation drift accumulated by the integration.
    let _ = self.next_position.rotation.renormalize_fast();
}
/// The linear velocity of this rigid-body.
@@ -416,7 +538,8 @@ impl RigidBody {
/// put to sleep because it did not move for a while.
pub fn set_position(&mut self, pos: Isometry<Real>, wake_up: bool) {
self.changes.insert(RigidBodyChanges::POSITION);
self.set_position_internal(pos);
self.position = pos;
self.next_position = pos;
// TODO: Do we really need to check that the body isn't dynamic?
if wake_up && self.is_dynamic() {
@@ -424,24 +547,19 @@ impl RigidBody {
}
}
pub(crate) fn set_position_internal(&mut self, pos: Isometry<Real>) {
self.position = pos;
// TODO: update the predicted position for dynamic bodies too?
if self.is_static() || self.is_kinematic() {
self.predicted_position = pos;
}
pub(crate) fn set_next_position(&mut self, pos: Isometry<Real>) {
self.next_position = pos;
}
/// If this rigid body is kinematic, sets its future position after the next timestep integration.
pub fn set_next_kinematic_position(&mut self, pos: Isometry<Real>) {
if self.is_kinematic() {
self.predicted_position = pos;
self.next_position = pos;
}
}
pub(crate) fn compute_velocity_from_predicted_position(&mut self, inv_dt: Real) {
let dpos = self.predicted_position * self.position.inverse();
pub(crate) fn compute_velocity_from_next_position(&mut self, inv_dt: Real) {
let dpos = self.next_position * self.position.inverse();
#[cfg(feature = "dim2")]
{
self.angvel = dpos.rotation.angle() * inv_dt;
@@ -453,10 +571,6 @@ impl RigidBody {
self.linvel = dpos.translation.vector * inv_dt;
}
pub(crate) fn update_predicted_position(&mut self, dt: Real) {
self.predicted_position = self.integrate_velocity(dt) * self.position;
}
pub(crate) fn update_world_mass_properties(&mut self) {
self.world_com = self.mass_properties.world_com(&self.position);
self.effective_inv_mass = self.mass_properties.inv_mass;
@@ -666,6 +780,7 @@ pub struct RigidBodyBuilder {
mass_properties: MassProperties,
can_sleep: bool,
sleeping: bool,
ccd_enabled: bool,
dominance_group: i8,
user_data: u128,
}
@@ -685,6 +800,7 @@ impl RigidBodyBuilder {
mass_properties: MassProperties::zero(),
can_sleep: true,
sleeping: false,
ccd_enabled: false,
dominance_group: 0,
user_data: 0,
}
@@ -752,9 +868,9 @@ impl RigidBodyBuilder {
self
}
/// Sets the mass properties of the rigid-body being built.
/// Sets the additional mass properties of the rigid-body being built.
///
/// Note that the final mass properties of the rigid-bodies depends
/// Note that "additional" means that the final mass properties of the rigid-bodies depends
/// on the initial mass-properties of the rigid-body (set by this method)
/// to which is added the contributions of all the colliders with non-zero density
/// attached to this rigid-body.
@@ -762,7 +878,7 @@ impl RigidBodyBuilder {
/// Therefore, if you want your provided mass properties to be the final
/// mass properties of your rigid-body, don't attach colliders to it, or
/// only attach colliders with densities equal to zero.
pub fn mass_properties(mut self, props: MassProperties) -> Self {
pub fn additional_mass_properties(mut self, props: MassProperties) -> Self {
self.mass_properties = props;
self
}
@@ -798,50 +914,76 @@ impl RigidBodyBuilder {
self
}
/// Sets the mass of the rigid-body being built.
pub fn mass(mut self, mass: Real) -> Self {
self.mass_properties.inv_mass = utils::inv(mass);
/// Sets the additional mass of the rigid-body being built.
///
/// This is only the "additional" mass because the total mass of the rigid-body is
/// equal to the sum of this additional mass and the mass computed from the colliders
/// (with non-zero densities) attached to this rigid-body.
pub fn additional_mass(mut self, mass: Real) -> Self {
self.mass_properties.set_mass(mass, false);
self
}
/// Sets the angular inertia of this rigid-body.
/// Sets the additional mass of the rigid-body being built.
///
/// This is only the "additional" mass because the total mass of the rigid-body is
/// equal to the sum of this additional mass and the mass computed from the colliders
/// (with non-zero densities) attached to this rigid-body.
#[deprecated(note = "renamed to `additional_mass`.")]
pub fn mass(self, mass: Real) -> Self {
self.additional_mass(mass)
}
/// Sets the additional angular inertia of this rigid-body.
///
/// This is only the "additional" angular inertia because the total angular inertia of
/// the rigid-body is equal to the sum of this additional value and the angular inertia
/// computed from the colliders (with non-zero densities) attached to this rigid-body.
#[cfg(feature = "dim2")]
pub fn principal_angular_inertia(mut self, inertia: Real) -> Self {
pub fn additional_principal_angular_inertia(mut self, inertia: Real) -> Self {
self.mass_properties.inv_principal_inertia_sqrt =
utils::inv(ComplexField::sqrt(inertia.max(0.0)));
self
}
/// Use `self.principal_angular_inertia` instead.
/// Sets the angular inertia of this rigid-body.
#[cfg(feature = "dim2")]
#[deprecated(note = "renamed to `principal_angular_inertia`.")]
pub fn principal_inertia(self, inertia: Real) -> Self {
self.principal_angular_inertia(inertia)
#[deprecated(note = "renamed to `additional_principal_angular_inertia`.")]
pub fn principal_angular_inertia(self, inertia: Real) -> Self {
self.additional_principal_angular_inertia(inertia)
}
/// Sets the principal angular inertia of this rigid-body.
/// Use `self.principal_angular_inertia` instead.
#[cfg(feature = "dim2")]
#[deprecated(note = "renamed to `additional_principal_angular_inertia`.")]
pub fn principal_inertia(self, inertia: Real) -> Self {
self.additional_principal_angular_inertia(inertia)
}
/// Sets the additional principal angular inertia of this rigid-body.
///
/// In order to lock the rotations of this rigid-body (by
/// making them kinematic), call `.principal_inertia(Vector3::zeros(), Vector3::repeat(false))`.
///
/// If `colliders_contribution_enabled[i]` is `false`, then the principal inertia specified here
/// along the `i`-th local axis of the rigid-body, will be the final principal inertia along
/// the `i`-th local axis of the rigid-body created by this builder.
/// If `colliders_contribution_enabled[i]` is `true`, then the final principal of the rigid-body
/// along its `i`-th local axis will depend on the initial principal inertia set by this method
/// to which is added the contributions of all the colliders with non-zero density
/// attached to this rigid-body.
/// This is only the "additional" angular inertia because the total angular inertia of
/// the rigid-body is equal to the sum of this additional value and the angular inertia
/// computed from the colliders (with non-zero densities) attached to this rigid-body.
#[cfg(feature = "dim3")]
pub fn principal_angular_inertia(mut self, inertia: AngVector<Real>) -> Self {
pub fn additional_principal_angular_inertia(mut self, inertia: AngVector<Real>) -> Self {
self.mass_properties.inv_principal_inertia_sqrt =
inertia.map(|e| utils::inv(ComplexField::sqrt(e.max(0.0))));
self
}
/// Sets the principal angular inertia of this rigid-body.
#[cfg(feature = "dim3")]
#[deprecated(note = "renamed to `additional_principal_angular_inertia`.")]
pub fn principal_angular_inertia(self, inertia: AngVector<Real>) -> Self {
self.additional_principal_angular_inertia(inertia)
}
/// Use `self.principal_angular_inertia` instead.
#[cfg(feature = "dim3")]
#[deprecated(note = "renamed to `principal_angular_inertia`.")]
#[deprecated(note = "renamed to `additional_principal_angular_inertia`.")]
pub fn principal_inertia(self, inertia: AngVector<Real>) -> Self {
self.principal_angular_inertia(inertia)
self.additional_principal_angular_inertia(inertia)
}
/// Sets the damping factor for the linear part of the rigid-body motion.
@@ -888,6 +1030,12 @@ impl RigidBodyBuilder {
self
}
/// Enables continuous collision-detection for the rigid-body being built.
pub fn ccd_enabled(mut self, enabled: bool) -> Self {
    self.ccd_enabled = enabled;
    self
}
/// Sets whether or not the rigid-body is to be created asleep.
pub fn sleeping(mut self, sleeping: bool) -> Self {
self.sleeping = sleeping;
@@ -897,8 +1045,8 @@ impl RigidBodyBuilder {
/// Build a new rigid-body with the parameters configured with this builder.
pub fn build(&self) -> RigidBody {
let mut rb = RigidBody::new();
rb.predicted_position = self.position; // FIXME: compute the correct value?
rb.set_position_internal(self.position);
rb.next_position = self.position; // FIXME: compute the correct value?
rb.position = self.position;
rb.linvel = self.linvel;
rb.angvel = self.angvel;
rb.body_status = self.body_status;
@@ -909,6 +1057,7 @@ impl RigidBodyBuilder {
rb.gravity_scale = self.gravity_scale;
rb.flags = self.flags;
rb.dominance_group = self.dominance_group;
rb.enable_ccd(self.ccd_enabled);
if self.can_sleep && self.sleeping {
rb.sleep();

View File

@@ -2,7 +2,7 @@
use rayon::prelude::*;
use crate::data::arena::Arena;
use crate::dynamics::{Joint, JointSet, RigidBody, RigidBodyChanges};
use crate::dynamics::{BodyStatus, Joint, JointSet, RigidBody, RigidBodyChanges};
use crate::geometry::{ColliderSet, InteractionGraph, NarrowPhase};
use parry::partitioning::IndexedData;
use std::ops::{Index, IndexMut};
@@ -220,13 +220,17 @@ impl RigidBodySet {
///
/// Using this is discouraged in favor of `self.get_mut(handle)` which does not
/// suffer form the ABA problem.
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn get_unknown_gen_mut(&mut self, i: usize) -> Option<(&mut RigidBody, RigidBodyHandle)> {
let result = self.bodies.get_unknown_gen_mut(i)?;
if !self.modified_all_bodies && !result.0.changes.contains(RigidBodyChanges::MODIFIED) {
result.0.changes = RigidBodyChanges::MODIFIED;
self.modified_bodies.push(RigidBodyHandle(result.1));
}
Some((result.0, RigidBodyHandle(result.1)))
let (rb, handle) = self.bodies.get_unknown_gen_mut(i)?;
let handle = RigidBodyHandle(handle);
Self::mark_as_modified(
handle,
rb,
&mut self.modified_bodies,
self.modified_all_bodies,
);
Some((rb, handle))
}
/// Gets the rigid-body with the given handle.
@@ -247,6 +251,7 @@ impl RigidBodySet {
}
/// Gets a mutable reference to the rigid-body with the given handle.
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn get_mut(&mut self, handle: RigidBodyHandle) -> Option<&mut RigidBody> {
let result = self.bodies.get_mut(handle.0)?;
Self::mark_as_modified(
@@ -262,6 +267,22 @@ impl RigidBodySet {
self.bodies.get_mut(handle.0)
}
// Just a very long name instead of `.get_mut` to make sure
// this is really the method we wanted to use instead of `get_mut_internal`.
pub(crate) fn get_mut_internal_with_modification_tracking(
    &mut self,
    handle: RigidBodyHandle,
) -> Option<&mut RigidBody> {
    // Early-out on a dangling handle.
    let rb = self.bodies.get_mut(handle.0)?;
    // Record the body as modified so the broad/narrow phases get updated.
    Self::mark_as_modified(handle, rb, &mut self.modified_bodies, self.modified_all_bodies);
    Some(rb)
}
pub(crate) fn get2_mut_internal(
&mut self,
h1: RigidBodyHandle,
@@ -276,6 +297,7 @@ impl RigidBodySet {
}
/// Iterates mutably through all the rigid-bodies on this set.
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn iter_mut(&mut self) -> impl Iterator<Item = (RigidBodyHandle, &mut RigidBody)> {
self.modified_bodies.clear();
self.modified_all_bodies = true;
@@ -317,6 +339,7 @@ impl RigidBodySet {
/// Applies the given function on all the active dynamic rigid-bodies
/// contained by this set.
#[inline(always)]
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn foreach_active_dynamic_body_mut(
&mut self,
mut f: impl FnMut(RigidBodyHandle, &mut RigidBody),
@@ -430,71 +453,133 @@ impl RigidBodySet {
// Utility function to avoid some borrowing issue in the `maintain` method.
fn maintain_one(
bodies: &mut Arena<RigidBody>,
colliders: &mut ColliderSet,
handle: RigidBodyHandle,
rb: &mut RigidBody,
modified_inactive_set: &mut Vec<RigidBodyHandle>,
active_kinematic_set: &mut Vec<RigidBodyHandle>,
active_dynamic_set: &mut Vec<RigidBodyHandle>,
) {
// Update the positions of the colliders.
if rb.changes.contains(RigidBodyChanges::POSITION)
|| rb.changes.contains(RigidBodyChanges::COLLIDERS)
{
rb.update_colliders_positions(colliders);
if rb.is_static() {
modified_inactive_set.push(handle);
}
if rb.is_kinematic() && active_kinematic_set.get(rb.active_set_id) != Some(&handle) {
rb.active_set_id = active_kinematic_set.len();
active_kinematic_set.push(handle);
}
enum FinalAction {
UpdateActiveKinematicSetId,
UpdateActiveDynamicSetId,
}
// Push the body to the active set if it is not
// sleeping and if it is not already inside of the active set.
if rb.changes.contains(RigidBodyChanges::SLEEP)
&& !rb.is_sleeping() // May happen if the body was put to sleep manually.
&& rb.is_dynamic() // Only dynamic bodies are in the active dynamic set.
&& active_dynamic_set.get(rb.active_set_id) != Some(&handle)
{
rb.active_set_id = active_dynamic_set.len(); // This will handle the case where the activation_channel contains duplicates.
active_dynamic_set.push(handle);
}
if let Some(rb) = bodies.get_mut(handle.0) {
let mut final_action = None;
rb.changes = RigidBodyChanges::empty();
}
// The body's status changed. We need to make sure
// it is on the correct active set.
if rb.changes.contains(RigidBodyChanges::BODY_STATUS) {
match rb.body_status() {
BodyStatus::Dynamic => {
// Remove from the active kinematic set if it was there.
if active_kinematic_set.get(rb.active_set_id) == Some(&handle) {
active_kinematic_set.swap_remove(rb.active_set_id);
final_action =
Some((FinalAction::UpdateActiveKinematicSetId, rb.active_set_id));
}
pub(crate) fn maintain(&mut self, colliders: &mut ColliderSet) {
if self.modified_all_bodies {
for (handle, rb) in self.bodies.iter_mut() {
Self::maintain_one(
colliders,
RigidBodyHandle(handle),
rb,
&mut self.modified_inactive_set,
&mut self.active_kinematic_set,
&mut self.active_dynamic_set,
)
}
// Add to the active dynamic set.
rb.wake_up(true);
// Make sure the sleep change flag is set (even if for some
// reason the rigid-body was already awake) to make
// sure the code handling sleeping change adds the body to
// the active_dynamic_set.
rb.changes.set(RigidBodyChanges::SLEEP, true);
}
BodyStatus::Kinematic => {
// Remove from the active dynamic set if it was there.
if active_dynamic_set.get(rb.active_set_id) == Some(&handle) {
active_dynamic_set.swap_remove(rb.active_set_id);
final_action =
Some((FinalAction::UpdateActiveDynamicSetId, rb.active_set_id));
}
self.modified_bodies.clear();
self.modified_all_bodies = false;
} else {
for handle in self.modified_bodies.drain(..) {
if let Some(rb) = self.bodies.get_mut(handle.0) {
Self::maintain_one(
colliders,
handle,
rb,
&mut self.modified_inactive_set,
&mut self.active_kinematic_set,
&mut self.active_dynamic_set,
)
// Add to the active kinematic set.
if active_kinematic_set.get(rb.active_set_id) != Some(&handle) {
rb.active_set_id = active_kinematic_set.len();
active_kinematic_set.push(handle);
}
}
BodyStatus::Static => {}
}
}
// Update the positions of the colliders.
if rb.changes.contains(RigidBodyChanges::POSITION)
|| rb.changes.contains(RigidBodyChanges::COLLIDERS)
{
rb.update_colliders_positions(colliders);
if rb.is_static() {
modified_inactive_set.push(handle);
}
if rb.is_kinematic() && active_kinematic_set.get(rb.active_set_id) != Some(&handle)
{
rb.active_set_id = active_kinematic_set.len();
active_kinematic_set.push(handle);
}
}
// Push the body to the active set if it is not
// sleeping and if it is not already inside of the active set.
if rb.changes.contains(RigidBodyChanges::SLEEP)
&& !rb.is_sleeping() // May happen if the body was put to sleep manually.
&& rb.is_dynamic() // Only dynamic bodies are in the active dynamic set.
&& active_dynamic_set.get(rb.active_set_id) != Some(&handle)
{
rb.active_set_id = active_dynamic_set.len(); // This will handle the case where the activation_channel contains duplicates.
active_dynamic_set.push(handle);
}
rb.changes = RigidBodyChanges::empty();
// Adjust some ids, if needed.
if let Some((action, id)) = final_action {
let active_set = match action {
FinalAction::UpdateActiveKinematicSetId => active_kinematic_set,
FinalAction::UpdateActiveDynamicSetId => active_dynamic_set,
};
if id < active_set.len() {
if let Some(rb2) = bodies.get_mut(active_set[id].0) {
rb2.active_set_id = id;
}
}
}
}
}
pub(crate) fn handle_user_changes(&mut self, colliders: &mut ColliderSet) {
if self.modified_all_bodies {
// Unfortunately, we have to push all the bodies to `modified_bodies`
// instead of just calling `maintain_one` on each element in
// `self.bodies.iter_mut()` because otherwise it would be difficult to
// handle the final change of active_set_id in Self::maintain_one
// (because it has to modify another rigid-body because of the swap-remove.
// So this causes borrowing problems if we do this while iterating
// through self.bodies.iter_mut()).
for (handle, _) in self.bodies.iter_mut() {
self.modified_bodies.push(RigidBodyHandle(handle));
}
}
for handle in self.modified_bodies.drain(..) {
Self::maintain_one(
&mut self.bodies,
colliders,
handle,
&mut self.modified_inactive_set,
&mut self.active_kinematic_set,
&mut self.active_dynamic_set,
)
}
if self.modified_all_bodies {
self.modified_bodies.shrink_to_fit(); // save some memory.
self.modified_all_bodies = false;
}
}
@@ -651,8 +736,16 @@ impl Index<RigidBodyHandle> for RigidBodySet {
}
}
#[cfg(not(feature = "dev-remove-slow-accessors"))]
impl IndexMut<RigidBodyHandle> for RigidBodySet {
fn index_mut(&mut self, index: RigidBodyHandle) -> &mut RigidBody {
&mut self.bodies[index.0]
fn index_mut(&mut self, handle: RigidBodyHandle) -> &mut RigidBody {
let rb = &mut self.bodies[handle.0];
Self::mark_as_modified(
handle,
rb,
&mut self.modified_bodies,
self.modified_all_bodies,
);
rb
}
}

View File

@@ -157,6 +157,17 @@ impl InteractionGroups {
}
}
#[cfg(not(feature = "parallel"))]
pub fn clear(&mut self) {
#[cfg(feature = "simd-is-enabled")]
{
self.buckets.clear();
self.body_masks.clear();
self.grouped_interactions.clear();
}
self.nongrouped_interactions.clear();
}
// FIXME: there is a lot of duplicated code with group_manifolds here.
// But we don't refactor just now because we may end up with distinct
// grouping strategies in the future.

View File

@@ -24,7 +24,25 @@ impl IslandSolver {
}
}
pub fn solve_island(
pub fn solve_position_constraints(
&mut self,
island_id: usize,
counters: &mut Counters,
params: &IntegrationParameters,
bodies: &mut RigidBodySet,
) {
counters.solver.position_resolution_time.resume();
self.position_solver.solve(
island_id,
params,
bodies,
&self.contact_constraints.position_constraints,
&self.joint_constraints.position_constraints,
);
counters.solver.position_resolution_time.pause();
}
pub fn init_constraints_and_solve_velocity_constraints(
&mut self,
island_id: usize,
counters: &mut Counters,
@@ -59,25 +77,19 @@ impl IslandSolver {
counters.solver.velocity_update_time.resume();
bodies.foreach_active_island_body_mut_internal(island_id, |_, rb| {
rb.integrate(params.dt)
rb.apply_damping(params.dt);
rb.integrate_next_position(params.dt);
});
counters.solver.velocity_update_time.pause();
counters.solver.position_resolution_time.resume();
self.position_solver.solve(
island_id,
params,
bodies,
&self.contact_constraints.position_constraints,
&self.joint_constraints.position_constraints,
);
counters.solver.position_resolution_time.pause();
} else {
self.contact_constraints.clear();
self.joint_constraints.clear();
counters.solver.velocity_update_time.resume();
bodies.foreach_active_island_body_mut_internal(island_id, |_, rb| {
// Since we didn't run the velocity solver we need to integrate the accelerations here
rb.integrate_accelerations(params.dt);
rb.integrate(params.dt);
rb.apply_damping(params.dt);
rb.integrate_next_position(params.dt);
});
counters.solver.velocity_update_time.pause();
}

View File

@@ -114,7 +114,7 @@ impl BallPositionGroundConstraint {
// are the local_anchors. The rb1 and rb2 have
// already been flipped by the caller.
Self {
anchor1: rb1.predicted_position * cparams.local_anchor2,
anchor1: rb1.next_position * cparams.local_anchor2,
im2: rb2.effective_inv_mass,
ii2: rb2.effective_world_inv_inertia_sqrt.squared(),
local_anchor2: cparams.local_anchor1,
@@ -123,7 +123,7 @@ impl BallPositionGroundConstraint {
}
} else {
Self {
anchor1: rb1.predicted_position * cparams.local_anchor1,
anchor1: rb1.next_position * cparams.local_anchor1,
im2: rb2.effective_inv_mass,
ii2: rb2.effective_world_inv_inertia_sqrt.squared(),
local_anchor2: cparams.local_anchor2,

View File

@@ -134,7 +134,7 @@ impl WBallPositionGroundConstraint {
cparams: [&BallJoint; SIMD_WIDTH],
flipped: [bool; SIMD_WIDTH],
) -> Self {
let position1 = Isometry::from(array![|ii| rbs1[ii].predicted_position; SIMD_WIDTH]);
let position1 = Isometry::from(array![|ii| rbs1[ii].next_position; SIMD_WIDTH]);
let anchor1 = position1
* Point::from(array![|ii| if flipped[ii] {
cparams[ii].local_anchor2

View File

@@ -100,10 +100,10 @@ impl FixedPositionGroundConstraint {
let local_anchor2;
if flipped {
anchor1 = rb1.predicted_position * cparams.local_anchor2;
anchor1 = rb1.next_position * cparams.local_anchor2;
local_anchor2 = cparams.local_anchor1;
} else {
anchor1 = rb1.predicted_position * cparams.local_anchor1;
anchor1 = rb1.next_position * cparams.local_anchor1;
local_anchor2 = cparams.local_anchor2;
};

View File

@@ -119,14 +119,14 @@ impl PrismaticPositionGroundConstraint {
let local_axis2;
if flipped {
frame1 = rb1.predicted_position * cparams.local_frame2();
frame1 = rb1.next_position * cparams.local_frame2();
local_frame2 = cparams.local_frame1();
axis1 = rb1.predicted_position * cparams.local_axis2;
axis1 = rb1.next_position * cparams.local_axis2;
local_axis2 = cparams.local_axis1;
} else {
frame1 = rb1.predicted_position * cparams.local_frame1();
frame1 = rb1.next_position * cparams.local_frame1();
local_frame2 = cparams.local_frame2();
axis1 = rb1.predicted_position * cparams.local_axis1;
axis1 = rb1.next_position * cparams.local_axis1;
local_axis2 = cparams.local_axis2;
};

View File

@@ -145,23 +145,23 @@ impl RevolutePositionGroundConstraint {
let local_basis2;
if flipped {
anchor1 = rb1.predicted_position * cparams.local_anchor2;
anchor1 = rb1.next_position * cparams.local_anchor2;
local_anchor2 = cparams.local_anchor1;
axis1 = rb1.predicted_position * cparams.local_axis2;
axis1 = rb1.next_position * cparams.local_axis2;
local_axis2 = cparams.local_axis1;
basis1 = [
rb1.predicted_position * cparams.basis2[0],
rb1.predicted_position * cparams.basis2[1],
rb1.next_position * cparams.basis2[0],
rb1.next_position * cparams.basis2[1],
];
local_basis2 = cparams.basis1;
} else {
anchor1 = rb1.predicted_position * cparams.local_anchor1;
anchor1 = rb1.next_position * cparams.local_anchor1;
local_anchor2 = cparams.local_anchor2;
axis1 = rb1.predicted_position * cparams.local_axis1;
axis1 = rb1.next_position * cparams.local_axis1;
local_axis2 = cparams.local_axis2;
basis1 = [
rb1.predicted_position * cparams.basis1[0],
rb1.predicted_position * cparams.basis1[1],
rb1.next_position * cparams.basis1[0],
rb1.next_position * cparams.basis1[1],
];
local_basis2 = cparams.basis2;
};

View File

@@ -70,6 +70,8 @@ pub(crate) struct ThreadContext {
pub impulse_writeback_index: AtomicUsize,
pub joint_writeback_index: AtomicUsize,
pub body_integration_index: AtomicUsize,
pub body_force_integration_index: AtomicUsize,
pub num_force_integrated_bodies: AtomicUsize,
pub num_integrated_bodies: AtomicUsize,
// Position solver.
pub position_constraint_initialization_index: AtomicUsize,
@@ -97,6 +99,8 @@ impl ThreadContext {
num_solved_interactions: AtomicUsize::new(0),
impulse_writeback_index: AtomicUsize::new(0),
joint_writeback_index: AtomicUsize::new(0),
body_force_integration_index: AtomicUsize::new(0),
num_force_integrated_bodies: AtomicUsize::new(0),
body_integration_index: AtomicUsize::new(0),
num_integrated_bodies: AtomicUsize::new(0),
position_constraint_initialization_index: AtomicUsize::new(0),
@@ -146,7 +150,82 @@ impl ParallelIslandSolver {
}
}
pub fn solve_island<'s>(
pub fn solve_position_constraints<'s>(
&'s mut self,
scope: &Scope<'s>,
island_id: usize,
params: &'s IntegrationParameters,
bodies: &'s mut RigidBodySet,
) {
let num_threads = rayon::current_num_threads();
let num_task_per_island = num_threads; // (num_threads / num_islands).max(1); // TODO: not sure this is the best value. Also, perhaps it is better to interleave tasks of each island?
self.thread = ThreadContext::new(8); // TODO: could we compute some kind of optimal value here?
self.positions.clear();
self.positions
.resize(bodies.active_island(island_id).len(), Isometry::identity());
for _ in 0..num_task_per_island {
// We use AtomicPtr because it is Send+Sync while *mut is not.
// See https://internals.rust-lang.org/t/shouldnt-pointers-be-send-sync-or/8818
let thread = &self.thread;
let positions = std::sync::atomic::AtomicPtr::new(&mut self.positions as *mut _);
let bodies = std::sync::atomic::AtomicPtr::new(bodies as *mut _);
let parallel_contact_constraints =
std::sync::atomic::AtomicPtr::new(&mut self.parallel_contact_constraints as *mut _);
let parallel_joint_constraints =
std::sync::atomic::AtomicPtr::new(&mut self.parallel_joint_constraints as *mut _);
scope.spawn(move |_| {
// Transmute *mut -> &mut
let positions: &mut Vec<Isometry<Real>> =
unsafe { std::mem::transmute(positions.load(Ordering::Relaxed)) };
let bodies: &mut RigidBodySet =
unsafe { std::mem::transmute(bodies.load(Ordering::Relaxed)) };
let parallel_contact_constraints: &mut ParallelSolverConstraints<AnyVelocityConstraint, AnyPositionConstraint> = unsafe {
std::mem::transmute(parallel_contact_constraints.load(Ordering::Relaxed))
};
let parallel_joint_constraints: &mut ParallelSolverConstraints<AnyJointVelocityConstraint, AnyJointPositionConstraint> = unsafe {
std::mem::transmute(parallel_joint_constraints.load(Ordering::Relaxed))
};
enable_flush_to_zero!(); // Ensure this is enabled on each thread.
// Write results back to rigid bodies and integrate velocities.
let island_range = bodies.active_island_range(island_id);
let active_bodies = &bodies.active_dynamic_set[island_range];
let bodies = &mut bodies.bodies;
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.body_integration_index, thread.num_integrated_bodies] {
let rb = &mut bodies[handle.0];
positions[rb.active_set_offset] = rb.next_position;
}
}
ThreadContext::lock_until_ge(&thread.num_integrated_bodies, active_bodies.len());
ParallelPositionSolver::solve(
&thread,
params,
positions,
parallel_contact_constraints,
parallel_joint_constraints
);
// Write results back to rigid bodies.
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.position_writeback_index] {
let rb = &mut bodies[handle.0];
rb.set_next_position(positions[rb.active_set_offset]);
}
}
})
}
}
pub fn init_constraints_and_solve_velocity_constraints<'s>(
&'s mut self,
scope: &Scope<'s>,
island_id: usize,
@@ -184,37 +263,11 @@ impl ParallelIslandSolver {
self.positions
.resize(bodies.active_island(island_id).len(), Isometry::identity());
{
// Initialize `mj_lambdas` (per-body velocity deltas) with external accelerations (gravity etc):
let island_range = bodies.active_island_range(island_id);
let active_bodies = &bodies.active_dynamic_set[island_range];
let bodies = &mut bodies.bodies;
let thread = &self.thread;
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.body_integration_index, thread.num_integrated_bodies] {
let rb = &mut bodies[handle.0];
let dvel = &mut self.mj_lambdas[rb.active_set_offset];
dvel.linear += rb.force * (rb.effective_inv_mass * params.dt);
rb.force = na::zero();
// dvel.angular is actually storing angular velocity delta multiplied by the square root of the inertia tensor:
dvel.angular += rb.effective_world_inv_inertia_sqrt * rb.torque * params.dt;
rb.torque = na::zero();
}
}
}
for _ in 0..num_task_per_island {
// We use AtomicPtr because it is Send+Sync while *mut is not.
// See https://internals.rust-lang.org/t/shouldnt-pointers-be-send-sync-or/8818
let thread = &self.thread;
let mj_lambdas = std::sync::atomic::AtomicPtr::new(&mut self.mj_lambdas as *mut _);
let positions = std::sync::atomic::AtomicPtr::new(&mut self.positions as *mut _);
let bodies = std::sync::atomic::AtomicPtr::new(bodies as *mut _);
let manifolds = std::sync::atomic::AtomicPtr::new(manifolds as *mut _);
let joints = std::sync::atomic::AtomicPtr::new(joints as *mut _);
@@ -227,8 +280,6 @@ impl ParallelIslandSolver {
// Transmute *mut -> &mut
let mj_lambdas: &mut Vec<DeltaVel<Real>> =
unsafe { std::mem::transmute(mj_lambdas.load(Ordering::Relaxed)) };
let positions: &mut Vec<Isometry<Real>> =
unsafe { std::mem::transmute(positions.load(Ordering::Relaxed)) };
let bodies: &mut RigidBodySet =
unsafe { std::mem::transmute(bodies.load(Ordering::Relaxed)) };
let manifolds: &mut Vec<&mut ContactManifold> =
@@ -243,6 +294,32 @@ impl ParallelIslandSolver {
};
enable_flush_to_zero!(); // Ensure this is enabled on each thread.
// Initialize `mj_lambdas` (per-body velocity deltas) with external accelerations (gravity etc):
{
let island_range = bodies.active_island_range(island_id);
let active_bodies = &bodies.active_dynamic_set[island_range];
let bodies = &mut bodies.bodies;
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.body_force_integration_index, thread.num_force_integrated_bodies] {
let rb = &mut bodies[handle.0];
let dvel = &mut mj_lambdas[rb.active_set_offset];
// NOTE: `dvel.angular` is actually storing angular velocity delta multiplied
// by the square root of the inertia tensor:
dvel.angular += rb.effective_world_inv_inertia_sqrt * rb.torque * params.dt;
dvel.linear += rb.force * (rb.effective_inv_mass * params.dt);
}
}
// We need to wait for every body to be force-integrated because their
// angular and linear velocities are needed by the constraints initialization.
ThreadContext::lock_until_ge(&thread.num_force_integrated_bodies, active_bodies.len());
}
parallel_contact_constraints.fill_constraints(&thread, params, bodies, manifolds);
parallel_joint_constraints.fill_constraints(&thread, params, bodies, joints);
ThreadContext::lock_until_ge(
@@ -276,29 +353,8 @@ impl ParallelIslandSolver {
let dvel = mj_lambdas[rb.active_set_offset];
rb.linvel += dvel.linear;
rb.angvel += rb.effective_world_inv_inertia_sqrt.transform_vector(dvel.angular);
rb.integrate(params.dt);
positions[rb.active_set_offset] = rb.position;
}
}
// We need to wait for every body to be integrated because it also
// initializes `positions` to the updated values.
ThreadContext::lock_until_ge(&thread.num_integrated_bodies, active_bodies.len());
ParallelPositionSolver::solve(
&thread,
params,
positions,
parallel_contact_constraints,
parallel_joint_constraints
);
// Write results back to rigid bodies.
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.position_writeback_index] {
let rb = &mut bodies[handle.0];
rb.set_position_internal(positions[rb.active_set_offset]);
rb.apply_damping(params.dt);
rb.integrate_next_position(params.dt);
}
}
})

View File

@@ -21,11 +21,16 @@ impl PositionSolver {
contact_constraints: &[AnyPositionConstraint],
joint_constraints: &[AnyJointPositionConstraint],
) {
if contact_constraints.is_empty() && joint_constraints.is_empty() {
// Nothing to do.
return;
}
self.positions.clear();
self.positions.extend(
bodies
.iter_active_island(island_id)
.map(|(_, b)| b.position),
.map(|(_, b)| b.next_position),
);
for _ in 0..params.max_position_iterations {
@@ -39,7 +44,7 @@ impl PositionSolver {
}
bodies.foreach_active_island_body_mut_internal(island_id, |_, rb| {
rb.set_position_internal(self.positions[rb.active_set_offset])
rb.set_next_position(self.positions[rb.active_set_offset])
});
}
}

View File

@@ -38,6 +38,15 @@ impl<VelocityConstraint, PositionConstraint>
position_constraints: Vec::new(),
}
}
pub fn clear(&mut self) {
self.not_ground_interactions.clear();
self.ground_interactions.clear();
self.interaction_groups.clear();
self.ground_interaction_groups.clear();
self.velocity_constraints.clear();
self.position_constraints.clear();
}
}
impl SolverConstraints<AnyVelocityConstraint, AnyPositionConstraint> {

View File

@@ -208,6 +208,8 @@ impl VelocityConstraint {
let vel1 = rb1.linvel + rb1.angvel.gcross(dp1);
let vel2 = rb2.linvel + rb2.angvel.gcross(dp2);
let warmstart_correction;
constraint.limit = manifold_point.friction;
constraint.manifold_contact_id[k] = manifold_point.contact_id;
@@ -234,12 +236,15 @@ impl VelocityConstraint {
rhs += manifold_point.dist.max(0.0) * inv_dt;
rhs *= is_bouncy + is_resting * params.velocity_solve_fraction;
rhs += is_resting * velocity_based_erp_inv_dt * manifold_point.dist.min(0.0);
warmstart_correction = (params.warmstart_correction_slope
/ (rhs - manifold_point.prev_rhs).abs())
.min(warmstart_coeff);
constraint.elements[k].normal_part = VelocityConstraintNormalPart {
gcross1,
gcross2,
rhs,
impulse: manifold_point.data.impulse * warmstart_coeff,
impulse: manifold_point.warmstart_impulse * warmstart_correction,
r,
};
}
@@ -247,10 +252,12 @@ impl VelocityConstraint {
// Tangent parts.
{
#[cfg(feature = "dim3")]
let impulse =
tangent_rot1 * manifold_points[k].data.tangent_impulse * warmstart_coeff;
let impulse = tangent_rot1
* manifold_points[k].warmstart_tangent_impulse
* warmstart_correction;
#[cfg(feature = "dim2")]
let impulse = [manifold_points[k].data.tangent_impulse * warmstart_coeff];
let impulse =
[manifold_points[k].warmstart_tangent_impulse * warmstart_correction];
constraint.elements[k].tangent_part.impulse = impulse;
for j in 0..DIM - 1 {
@@ -332,6 +339,8 @@ impl VelocityConstraint {
let contact_id = self.manifold_contact_id[k];
let active_contact = &mut manifold.points[contact_id as usize];
active_contact.data.impulse = self.elements[k].normal_part.impulse;
active_contact.data.rhs = self.elements[k].normal_part.rhs;
#[cfg(feature = "dim2")]
{
active_contact.data.tangent_impulse = self.elements[k].tangent_part.impulse[0];

View File

@@ -9,6 +9,7 @@ use crate::math::{
#[cfg(feature = "dim2")]
use crate::utils::WBasis;
use crate::utils::{WAngularInertia, WCross, WDot};
use na::SimdComplexField;
use num::Zero;
use simba::simd::{SimdPartialOrd, SimdValue};
@@ -44,6 +45,7 @@ impl WVelocityConstraint {
}
let inv_dt = SimdReal::splat(params.inv_dt());
let warmstart_correction_slope = SimdReal::splat(params.warmstart_correction_slope);
let velocity_solve_fraction = SimdReal::splat(params.velocity_solve_fraction);
let velocity_based_erp_inv_dt = SimdReal::splat(params.velocity_based_erp_inv_dt());
@@ -123,8 +125,11 @@ impl WVelocityConstraint {
let tangent_velocity =
Vector::from(array![|ii| manifold_points[ii][k].tangent_velocity; SIMD_WIDTH]);
let impulse =
SimdReal::from(array![|ii| manifold_points[ii][k].data.impulse; SIMD_WIDTH]);
let impulse = SimdReal::from(
array![|ii| manifold_points[ii][k].warmstart_impulse; SIMD_WIDTH],
);
let prev_rhs =
SimdReal::from(array![|ii| manifold_points[ii][k].prev_rhs; SIMD_WIDTH]);
let dp1 = point - world_com1;
let dp2 = point - world_com2;
@@ -132,6 +137,8 @@ impl WVelocityConstraint {
let vel1 = linvel1 + angvel1.gcross(dp1);
let vel2 = linvel2 + angvel2.gcross(dp2);
let warmstart_correction;
constraint.limit = friction;
constraint.manifold_contact_id[k] =
array![|ii| manifold_points[ii][k].contact_id; SIMD_WIDTH];
@@ -150,12 +157,15 @@ impl WVelocityConstraint {
rhs *= is_bouncy + is_resting * velocity_solve_fraction;
rhs +=
dist.simd_min(SimdReal::zero()) * (velocity_based_erp_inv_dt * is_resting);
warmstart_correction = (warmstart_correction_slope
/ (rhs - prev_rhs).simd_abs())
.simd_min(warmstart_coeff);
constraint.elements[k].normal_part = VelocityConstraintNormalPart {
gcross1,
gcross2,
rhs,
impulse: impulse * warmstart_coeff,
impulse: impulse * warmstart_correction,
r,
};
}
@@ -163,14 +173,15 @@ impl WVelocityConstraint {
// tangent parts.
#[cfg(feature = "dim2")]
let impulse = [SimdReal::from(
array![|ii| manifold_points[ii][k].data.tangent_impulse; SIMD_WIDTH],
)];
array![|ii| manifold_points[ii][k].warmstart_tangent_impulse; SIMD_WIDTH],
) * warmstart_correction];
#[cfg(feature = "dim3")]
let impulse = tangent_rot1
* na::Vector2::from(
array![|ii| manifold_points[ii][k].data.tangent_impulse; SIMD_WIDTH],
);
array![|ii| manifold_points[ii][k].warmstart_tangent_impulse; SIMD_WIDTH],
)
* warmstart_correction;
constraint.elements[k].tangent_part.impulse = impulse;
@@ -281,6 +292,7 @@ impl WVelocityConstraint {
pub fn writeback_impulses(&self, manifolds_all: &mut [&mut ContactManifold]) {
for k in 0..self.num_contacts as usize {
let impulses: [_; SIMD_WIDTH] = self.elements[k].normal_part.impulse.into();
let rhs: [_; SIMD_WIDTH] = self.elements[k].normal_part.rhs.into();
#[cfg(feature = "dim2")]
let tangent_impulses: [_; SIMD_WIDTH] = self.elements[k].tangent_part.impulse[0].into();
#[cfg(feature = "dim3")]
@@ -292,6 +304,7 @@ impl WVelocityConstraint {
let manifold = &mut manifolds_all[self.manifold_id[ii]];
let contact_id = self.manifold_contact_id[k][ii];
let active_contact = &mut manifold.points[contact_id as usize];
active_contact.data.rhs = rhs[ii];
active_contact.data.impulse = impulses[ii];
#[cfg(feature = "dim2")]

View File

@@ -133,6 +133,7 @@ impl VelocityGroundConstraint {
let dp1 = manifold_point.point - rb1.world_com;
let vel1 = rb1.linvel + rb1.angvel.gcross(dp1);
let vel2 = rb2.linvel + rb2.angvel.gcross(dp2);
let warmstart_correction;
constraint.limit = manifold_point.friction;
constraint.manifold_contact_id[k] = manifold_point.contact_id;
@@ -153,11 +154,14 @@ impl VelocityGroundConstraint {
rhs += manifold_point.dist.max(0.0) * inv_dt;
rhs *= is_bouncy + is_resting * params.velocity_solve_fraction;
rhs += is_resting * velocity_based_erp_inv_dt * manifold_point.dist.min(0.0);
warmstart_correction = (params.warmstart_correction_slope
/ (rhs - manifold_point.prev_rhs).abs())
.min(warmstart_coeff);
constraint.elements[k].normal_part = VelocityGroundConstraintNormalPart {
gcross2,
rhs,
impulse: manifold_point.data.impulse * warmstart_coeff,
impulse: manifold_point.warmstart_impulse * warmstart_correction,
r,
};
}
@@ -165,10 +169,12 @@ impl VelocityGroundConstraint {
// Tangent parts.
{
#[cfg(feature = "dim3")]
let impulse =
tangent_rot1 * manifold_points[k].data.tangent_impulse * warmstart_coeff;
let impulse = tangent_rot1
* manifold_points[k].warmstart_tangent_impulse
* warmstart_correction;
#[cfg(feature = "dim2")]
let impulse = [manifold_points[k].data.tangent_impulse * warmstart_coeff];
let impulse =
[manifold_points[k].warmstart_tangent_impulse * warmstart_correction];
constraint.elements[k].tangent_part.impulse = impulse;
for j in 0..DIM - 1 {
@@ -237,6 +243,8 @@ impl VelocityGroundConstraint {
let contact_id = self.manifold_contact_id[k];
let active_contact = &mut manifold.points[contact_id as usize];
active_contact.data.impulse = self.elements[k].normal_part.impulse;
active_contact.data.rhs = self.elements[k].normal_part.rhs;
#[cfg(feature = "dim2")]
{
active_contact.data.tangent_impulse = self.elements[k].tangent_part.impulse[0];

View File

@@ -10,6 +10,7 @@ use crate::math::{
#[cfg(feature = "dim2")]
use crate::utils::WBasis;
use crate::utils::{WAngularInertia, WCross, WDot};
use na::SimdComplexField;
use num::Zero;
use simba::simd::{SimdPartialOrd, SimdValue};
@@ -77,6 +78,7 @@ impl WVelocityGroundConstraint {
let warmstart_multiplier =
SimdReal::from(array![|ii| manifolds[ii].data.warmstart_multiplier; SIMD_WIDTH]);
let warmstart_coeff = warmstart_multiplier * SimdReal::splat(params.warmstart_coeff);
let warmstart_correction_slope = SimdReal::splat(params.warmstart_correction_slope);
let num_active_contacts = manifolds[0].data.num_active_contacts();
#[cfg(feature = "dim2")]
@@ -118,13 +120,17 @@ impl WVelocityGroundConstraint {
let tangent_velocity =
Vector::from(array![|ii| manifold_points[ii][k].tangent_velocity; SIMD_WIDTH]);
let impulse =
SimdReal::from(array![|ii| manifold_points[ii][k].data.impulse; SIMD_WIDTH]);
let impulse = SimdReal::from(
array![|ii| manifold_points[ii][k].warmstart_impulse; SIMD_WIDTH],
);
let prev_rhs =
SimdReal::from(array![|ii| manifold_points[ii][k].prev_rhs; SIMD_WIDTH]);
let dp1 = point - world_com1;
let dp2 = point - world_com2;
let vel1 = linvel1 + angvel1.gcross(dp1);
let vel2 = linvel2 + angvel2.gcross(dp2);
let warmstart_correction;
constraint.limit = friction;
constraint.manifold_contact_id[k] =
@@ -142,11 +148,14 @@ impl WVelocityGroundConstraint {
rhs *= is_bouncy + is_resting * velocity_solve_fraction;
rhs +=
dist.simd_min(SimdReal::zero()) * (velocity_based_erp_inv_dt * is_resting);
warmstart_correction = (warmstart_correction_slope
/ (rhs - prev_rhs).simd_abs())
.simd_min(warmstart_coeff);
constraint.elements[k].normal_part = VelocityGroundConstraintNormalPart {
gcross2,
rhs,
impulse: impulse * warmstart_coeff,
impulse: impulse * warmstart_correction,
r,
};
}
@@ -154,13 +163,14 @@ impl WVelocityGroundConstraint {
// tangent parts.
#[cfg(feature = "dim2")]
let impulse = [SimdReal::from(
array![|ii| manifold_points[ii][k].data.tangent_impulse; SIMD_WIDTH],
)];
array![|ii| manifold_points[ii][k].warmstart_tangent_impulse; SIMD_WIDTH],
) * warmstart_correction];
#[cfg(feature = "dim3")]
let impulse = tangent_rot1
* na::Vector2::from(
array![|ii| manifold_points[ii][k].data.tangent_impulse; SIMD_WIDTH],
);
array![|ii| manifold_points[ii][k].warmstart_tangent_impulse; SIMD_WIDTH],
)
* warmstart_correction;
constraint.elements[k].tangent_part.impulse = impulse;
for j in 0..DIM - 1 {
@@ -237,6 +247,7 @@ impl WVelocityGroundConstraint {
// FIXME: duplicated code. This is exactly the same as in the non-ground velocity constraint.
pub fn writeback_impulses(&self, manifolds_all: &mut [&mut ContactManifold]) {
for k in 0..self.num_contacts as usize {
let rhs: [_; SIMD_WIDTH] = self.elements[k].normal_part.rhs.into();
let impulses: [_; SIMD_WIDTH] = self.elements[k].normal_part.impulse.into();
#[cfg(feature = "dim2")]
let tangent_impulses: [_; SIMD_WIDTH] = self.elements[k].tangent_part.impulse[0].into();
@@ -249,6 +260,7 @@ impl WVelocityGroundConstraint {
let manifold = &mut manifolds_all[self.manifold_id[ii]];
let contact_id = self.manifold_contact_id[k][ii];
let active_contact = &mut manifold.points[contact_id as usize];
active_contact.data.rhs = rhs[ii];
active_contact.data.impulse = impulses[ii];
#[cfg(feature = "dim2")]

View File

@@ -1,794 +0,0 @@
use crate::data::pubsub::Subscription;
use crate::dynamics::RigidBodySet;
use crate::geometry::{ColliderHandle, ColliderSet, RemovedCollider};
use crate::math::{Point, Real, Vector, DIM};
use bit_vec::BitVec;
use parry::bounding_volume::{BoundingVolume, AABB};
use parry::utils::hashmap::HashMap;
use std::cmp::Ordering;
use std::ops::{Index, IndexMut};
// Number of sentinel endpoints kept at each extremity of a sorted axis.
const NUM_SENTINELS: usize = 1;
// Marker value for "no next free proxy" — presumably terminates a free-list
// of proxy slots; the list itself is not visible in this chunk.
const NEXT_FREE_SENTINEL: u32 = u32::MAX;
// Extreme coordinate given to sentinel endpoints (negated for the start
// sentinel) so they always sort before/after every real endpoint.
const SENTINEL_VALUE: Real = Real::MAX;
// Edge length of one broad-phase grid region (see `point_key`/`region_aabb`).
const CELL_WIDTH: Real = 20.0;
/// An (unordered or ordered, depending on the constructor used) pair of
/// collider handles identifying a broad-phase interaction candidate.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct ColliderPair {
    /// The first collider of this pair.
    pub collider1: ColliderHandle,
    /// The second collider of this pair.
    pub collider2: ColliderHandle,
}
impl ColliderPair {
pub fn new(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
ColliderPair {
collider1,
collider2,
}
}
pub fn new_sorted(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
if collider1.into_raw_parts().0 <= collider2.into_raw_parts().0 {
Self::new(collider1, collider2)
} else {
Self::new(collider2, collider1)
}
}
pub fn swap(self) -> Self {
Self::new(self.collider2, self.collider1)
}
pub fn zero() -> Self {
Self {
collider1: ColliderHandle::from_raw_parts(0, 0),
collider2: ColliderHandle::from_raw_parts(0, 0),
}
}
}
/// An event emitted by the broad-phase when pairs of AABBs start or stop overlapping.
pub enum BroadPhasePairEvent {
    /// The AABBs of two colliders started overlapping: the narrow-phase should track this pair.
    AddPair(ColliderPair),
    /// The AABBs of two colliders stopped overlapping: the pair can be dropped.
    DeletePair(ColliderPair),
}
/// Returns the two given (distinct) values as an ordered `(smaller, larger)` pair.
///
/// Panics if `a == b`.
fn sort2(a: u32, b: u32) -> (u32, u32) {
    assert_ne!(a, b);

    if a > b {
        (b, a)
    } else {
        (a, b)
    }
}
/// Maps a world-space point to the integer key of the grid cell containing it.
fn point_key(point: Point<Real>) -> Point<i32> {
    let scaled = point / CELL_WIDTH;
    scaled.coords.map(|e| e.floor() as i32).into()
}
/// Computes the world-space AABB of the grid cell with the given integer key.
fn region_aabb(index: Point<i32>) -> AABB {
    let mins: Point<Real> = index.coords.map(|i| i as Real * CELL_WIDTH).into();
    let maxs = mins + Vector::repeat(CELL_WIDTH);
    AABB::new(mins, maxs)
}
/// One endpoint (start or end, along one axis) of a proxy's AABB.
///
/// The proxy index and the start/end flag are packed into a single `u32`:
/// the most-significant bit is the "start" flag, the remaining 31 bits are
/// the proxy index. A packed value whose proxy bits are all ones denotes a
/// sentinel.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
struct Endpoint {
    value: Real,
    packed_flag_proxy: u32,
}

const START_FLAG_MASK: u32 = 0b1 << 31;
const PROXY_MASK: u32 = u32::MAX ^ START_FLAG_MASK;
const START_SENTINEL_TAG: u32 = u32::MAX;
const END_SENTINEL_TAG: u32 = u32::MAX ^ START_FLAG_MASK;

impl Endpoint {
    /// A start (min-bound) endpoint for the given proxy.
    pub fn start_endpoint(value: Real, proxy: u32) -> Self {
        Self {
            value,
            packed_flag_proxy: START_FLAG_MASK | proxy,
        }
    }

    /// An end (max-bound) endpoint for the given proxy.
    pub fn end_endpoint(value: Real, proxy: u32) -> Self {
        Self {
            value,
            packed_flag_proxy: PROXY_MASK & proxy,
        }
    }

    /// The sentinel placed at -infinity, before all real endpoints.
    pub fn start_sentinel() -> Self {
        Self {
            value: -SENTINEL_VALUE,
            packed_flag_proxy: START_SENTINEL_TAG,
        }
    }

    /// The sentinel placed at +infinity, after all real endpoints.
    pub fn end_sentinel() -> Self {
        Self {
            value: SENTINEL_VALUE,
            packed_flag_proxy: END_SENTINEL_TAG,
        }
    }

    /// Is this endpoint one of the two sentinels?
    pub fn is_sentinel(self) -> bool {
        (self.packed_flag_proxy & PROXY_MASK) == PROXY_MASK
    }

    /// The index of the proxy this endpoint belongs to.
    pub fn proxy(self) -> u32 {
        self.packed_flag_proxy & PROXY_MASK
    }

    /// Does this endpoint mark the start (min bound) of an AABB?
    pub fn is_start(self) -> bool {
        (self.packed_flag_proxy & START_FLAG_MASK) != 0
    }

    /// Does this endpoint mark the end (max bound) of an AABB?
    pub fn is_end(self) -> bool {
        (self.packed_flag_proxy & START_FLAG_MASK) == 0
    }
}
/// One sweep-and-prune axis of a `SAPRegion`.
///
/// Maintains the sorted list of AABB endpoints projected on this axis,
/// bracketed by two sentinel endpoints at -/+ infinity so the swap loops
/// never need bound checks at the extremities.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
struct SAPAxis {
    min_bound: Real,
    max_bound: Real,
    endpoints: Vec<Endpoint>,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    new_endpoints: Vec<(Endpoint, usize)>, // Workspace
}

impl SAPAxis {
    /// A new axis covering `[min_bound, max_bound]`, containing only the two sentinels.
    fn new(min_bound: Real, max_bound: Real) -> Self {
        assert!(min_bound <= max_bound);

        Self {
            min_bound,
            max_bound,
            endpoints: vec![Endpoint::start_sentinel(), Endpoint::end_sentinel()],
            new_endpoints: Vec::new(),
        }
    }

    /// Inserts the endpoints of all `new_proxies` into this (already sorted) axis
    /// in a single merge pass.
    ///
    /// If `reporting` is `Some`, every overlapping pair discovered during the
    /// insertion is recorded into the map (keyed by sorted proxy indices, with
    /// `true` meaning "now colliding").
    fn batch_insert(
        &mut self,
        dim: usize,
        new_proxies: &[usize],
        proxies: &Proxies,
        reporting: Option<&mut HashMap<(u32, u32), bool>>,
    ) {
        if new_proxies.is_empty() {
            return;
        }

        self.new_endpoints.clear();

        // Collect the start/end endpoints of every new proxy along this dimension.
        for proxy_id in new_proxies {
            let proxy = &proxies[*proxy_id];
            assert!(proxy.aabb.mins[dim] <= self.max_bound);
            assert!(proxy.aabb.maxs[dim] >= self.min_bound);
            let start_endpoint = Endpoint::start_endpoint(proxy.aabb.mins[dim], *proxy_id as u32);
            let end_endpoint = Endpoint::end_endpoint(proxy.aabb.maxs[dim], *proxy_id as u32);

            self.new_endpoints.push((start_endpoint, 0));
            self.new_endpoints.push((end_endpoint, 0));
        }

        self.new_endpoints
            .sort_by(|a, b| a.0.value.partial_cmp(&b.0.value).unwrap_or(Ordering::Equal));

        // Merge the sorted new endpoints into the axis back-to-front, so each
        // existing endpoint is moved at most once.
        let mut curr_existing_index = self.endpoints.len() - NUM_SENTINELS - 1;
        let new_num_endpoints = self.endpoints.len() + self.new_endpoints.len();
        self.endpoints
            .resize(new_num_endpoints, Endpoint::end_sentinel());
        let mut curr_shift_index = new_num_endpoints - NUM_SENTINELS - 1;

        // Sort the endpoints.
        // TODO: specialize for the case where this is the
        // first time we insert endpoints to this axis?
        for new_endpoint in self.new_endpoints.iter_mut().rev() {
            loop {
                let existing_endpoint = self.endpoints[curr_existing_index];
                if existing_endpoint.value <= new_endpoint.0.value {
                    break;
                }

                self.endpoints[curr_shift_index] = existing_endpoint;

                curr_shift_index -= 1;
                curr_existing_index -= 1;
            }

            self.endpoints[curr_shift_index] = new_endpoint.0;
            // Remember the final position of this endpoint for the reporting pass below.
            new_endpoint.1 = curr_shift_index;
            curr_shift_index -= 1;
        }

        // Report pairs using a single mbp pass on each new endpoint.
        let endpoints_wo_last_sentinel = &self.endpoints[..self.endpoints.len() - 1];
        if let Some(reporting) = reporting {
            for (endpoint, endpoint_id) in self.new_endpoints.drain(..).filter(|e| e.0.is_start()) {
                let proxy1 = &proxies[endpoint.proxy() as usize];
                let min = endpoint.value;
                let max = proxy1.aabb.maxs[dim];

                for endpoint2 in &endpoints_wo_last_sentinel[endpoint_id + 1..] {
                    if endpoint2.proxy() == endpoint.proxy() {
                        continue;
                    }

                    let proxy2 = &proxies[endpoint2.proxy() as usize];

                    // NOTE: some pairs with equal aabb.mins[dim] may end up being reported twice.
                    if (endpoint2.is_start() && endpoint2.value < max)
                        || (endpoint2.is_end() && proxy2.aabb.mins[dim] <= min)
                    {
                        // Report pair.
                        if proxy1.aabb.intersects(&proxy2.aabb) {
                            // Report pair.
                            let pair = sort2(endpoint.proxy(), endpoint2.proxy());
                            reporting.insert(pair, true);
                        }
                    }
                }
            }
        }
    }

    /// Clears from `existing_proxies` every proxy lying completely outside of
    /// this axis' bounds, returning how many were cleared.
    fn delete_out_of_bounds_proxies(&self, existing_proxies: &mut BitVec) -> usize {
        let mut deleted = 0;
        // A proxy is fully below the region iff its *end* endpoint is below `min_bound`.
        for endpoint in &self.endpoints {
            if endpoint.value < self.min_bound {
                let proxy_idx = endpoint.proxy() as usize;
                if endpoint.is_end() && existing_proxies[proxy_idx] {
                    existing_proxies.set(proxy_idx, false);
                    deleted += 1;
                }
            } else {
                break;
            }
        }

        // Symmetrically, a proxy is fully above the region iff its *start*
        // endpoint is above `max_bound`.
        for endpoint in self.endpoints.iter().rev() {
            if endpoint.value > self.max_bound {
                let proxy_idx = endpoint.proxy() as usize;
                if endpoint.is_start() && existing_proxies[proxy_idx] {
                    existing_proxies.set(endpoint.proxy() as usize, false);
                    deleted += 1;
                }
            } else {
                break;
            }
        }

        deleted
    }

    /// Drops every endpoint whose proxy is no longer marked as existing.
    fn delete_out_of_bounds_endpoints(&mut self, existing_proxies: &BitVec) {
        self.endpoints
            .retain(|endpt| endpt.is_sentinel() || existing_proxies[endpt.proxy() as usize])
    }

    /// Refreshes every endpoint's coordinate from the proxies' current AABBs and
    /// re-sorts the axis with an insertion sort, recording into `reporting` the
    /// pairs that started (`true`) or stopped (`false`) overlapping.
    fn update_endpoints(
        &mut self,
        dim: usize,
        proxies: &Proxies,
        reporting: &mut HashMap<(u32, u32), bool>,
    ) {
        let last_endpoint = self.endpoints.len() - NUM_SENTINELS;
        for i in NUM_SENTINELS..last_endpoint {
            let mut endpoint_i = self.endpoints[i];
            let aabb_i = proxies[endpoint_i.proxy() as usize].aabb;

            // Refresh the endpoint's coordinate along this axis.
            if endpoint_i.is_start() {
                endpoint_i.value = aabb_i.mins[dim];
            } else {
                endpoint_i.value = aabb_i.maxs[dim];
            }

            // Insertion-sort step: bubble the endpoint down to its place,
            // reporting overlap changes whenever a start crosses an end.
            let mut j = i;

            if endpoint_i.is_start() {
                while endpoint_i.value < self.endpoints[j - 1].value {
                    let endpoint_j = self.endpoints[j - 1];
                    self.endpoints[j] = endpoint_j;

                    if endpoint_j.is_end() {
                        // Report start collision.
                        if aabb_i.intersects(&proxies[endpoint_j.proxy() as usize].aabb) {
                            let pair = sort2(endpoint_i.proxy(), endpoint_j.proxy());
                            reporting.insert(pair, true);
                        }
                    }

                    j -= 1;
                }
            } else {
                while endpoint_i.value < self.endpoints[j - 1].value {
                    let endpoint_j = self.endpoints[j - 1];
                    self.endpoints[j] = endpoint_j;

                    if endpoint_j.is_start() {
                        // Report end collision.
                        if !aabb_i.intersects(&proxies[endpoint_j.proxy() as usize].aabb) {
                            let pair = sort2(endpoint_i.proxy(), endpoint_j.proxy());
                            reporting.insert(pair, false);
                        }
                    }

                    j -= 1;
                }
            }

            self.endpoints[j] = endpoint_i;
        }

        // println!(
        //     "Num start swaps: {}, end swaps: {}, dim: {}",
        //     num_start_swaps, num_end_swaps, dim
        // );
    }
}
/// One region (grid cell) of the broad-phase, running a classical
/// sweep-and-prune on the proxies whose AABB intersects its bounds.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
struct SAPRegion {
    // One SAP axis per spatial dimension.
    axes: [SAPAxis; DIM],
    // Bit `i` is set iff proxy `i` is currently registered in this region.
    existing_proxies: BitVec,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    to_insert: Vec<usize>, // Workspace
    // Number of pending `update` passes still needed by this region.
    update_count: u8,
    // Number of proxies currently registered in this region.
    proxy_count: usize,
}

impl SAPRegion {
    /// A new, empty region covering the given AABB.
    pub fn new(bounds: AABB) -> Self {
        let axes = [
            SAPAxis::new(bounds.mins.x, bounds.maxs.x),
            SAPAxis::new(bounds.mins.y, bounds.maxs.y),
            #[cfg(feature = "dim3")]
            SAPAxis::new(bounds.mins.z, bounds.maxs.z),
        ];
        SAPRegion {
            axes,
            existing_proxies: BitVec::new(),
            to_insert: Vec::new(),
            update_count: 0,
            proxy_count: 0,
        }
    }

    /// Reuses an emptied region's allocations for a region with new bounds.
    pub fn recycle(bounds: AABB, mut old: Self) -> Self {
        // Correct the bounds
        for (axis, &bound) in old.axes.iter_mut().zip(bounds.mins.iter()) {
            axis.min_bound = bound;
        }
        for (axis, &bound) in old.axes.iter_mut().zip(bounds.maxs.iter()) {
            axis.max_bound = bound;
        }

        old.update_count = 0;

        // The rest of the fields should be "empty"
        assert_eq!(old.proxy_count, 0);
        assert!(old.to_insert.is_empty());
        debug_assert!(!old.existing_proxies.any());
        assert!(old.axes.iter().all(|ax| ax.endpoints.len() == 2));
        old
    }

    /// Takes a recycled region from `pool` if available, otherwise allocates a new one.
    pub fn recycle_or_new(bounds: AABB, pool: &mut Vec<Self>) -> Self {
        if let Some(old) = pool.pop() {
            Self::recycle(bounds, old)
        } else {
            Self::new(bounds)
        }
    }

    /// Schedules the deletion of a proxy from this region (effective at the next `update`).
    pub fn predelete_proxy(&mut self, _proxy_id: usize) {
        // We keep the proxy_id as argument for uniformity with the "preupdate"
        // method. However we don't actually need it because the deletion will be
        // handled transparently during the next update.
        self.update_count = 1;
    }

    /// Declares that `proxy_id` intersects this region; returns `true` if it
    /// was already known to this region.
    pub fn preupdate_proxy(&mut self, proxy_id: usize) -> bool {
        let mask_len = self.existing_proxies.len();
        if proxy_id >= mask_len {
            self.existing_proxies.grow(proxy_id + 1 - mask_len, false);
        }

        if !self.existing_proxies[proxy_id] {
            self.to_insert.push(proxy_id);
            self.existing_proxies.set(proxy_id, true);
            self.proxy_count += 1;
            false
        } else {
            // Here we need a second update if all proxies exit this region. In this case, we need
            // to delete the final proxy, but the region may not have AABBs overlapping it, so it
            // wouldn't get an update otherwise.
            self.update_count = 2;
            true
        }
    }

    /// Runs the sweep-and-prune update on this region, recording started
    /// (`true`) and stopped (`false`) overlaps into `reporting`.
    pub fn update(&mut self, proxies: &Proxies, reporting: &mut HashMap<(u32, u32), bool>) {
        if self.update_count > 0 {
            // Update endpoints.
            let mut deleted = 0;

            for dim in 0..DIM {
                self.axes[dim].update_endpoints(dim, proxies, reporting);
                deleted += self.axes[dim].delete_out_of_bounds_proxies(&mut self.existing_proxies);
            }

            if deleted > 0 {
                self.proxy_count -= deleted;
                for dim in 0..DIM {
                    self.axes[dim].delete_out_of_bounds_endpoints(&self.existing_proxies);
                }
            }

            self.update_count -= 1;
        }

        if !self.to_insert.is_empty() {
            // Insert new proxies.
            for dim in 1..DIM {
                self.axes[dim].batch_insert(dim, &self.to_insert, proxies, None);
            }
            self.axes[0].batch_insert(0, &self.to_insert, proxies, Some(reporting));
            self.to_insert.clear();

            // In the rare event that all proxies leave this region in the next step, we need an
            // update to remove them.
            self.update_count = 1;
        }
    }
}
/// A broad-phase based on multiple Sweep-and-Prune instances running on disjoint regions of the 3D world.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct BroadPhase {
    // All the broad-phase proxies, indexed by the colliders' `proxy_index`.
    proxies: Proxies,
    // The grid regions, keyed by their discretized cell coordinates.
    regions: HashMap<Point<i32>, SAPRegion>,
    // Cursor into the collider-removal pub/sub channel; `None` until `maintain` first runs.
    removed_colliders: Option<Subscription<RemovedCollider>>,
    // Set when at least one proxy deletion is pending (see `complete_removals`).
    deleted_any: bool,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    region_pool: Vec<SAPRegion>, // To avoid repeated allocations.
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    regions_to_remove: Vec<Point<i32>>, // Workspace
    // We could think serializing this workspace is useless.
    // It turns out it is important to serialize at least its capacity
    // and restore this capacity when deserializing the hashmap.
    // This is because the order of future elements inserted into the
    // hashmap depends on its capacity (because the internal bucket indices
    // depend on this capacity). So not restoring this capacity may alter
    // the order at which future elements are reported. This will in turn
    // alter the order at which the pairs are registered in the narrow-phase,
    // thus altering the order of the contact manifold. In the end, this
    // alters the order of the resolution of contacts, resulting in
    // diverging simulation after restoration of a snapshot.
    #[cfg_attr(
        feature = "serde-serialize",
        serde(
            serialize_with = "parry::utils::hashmap::serialize_hashmap_capacity",
            deserialize_with = "parry::utils::hashmap::deserialize_hashmap_capacity"
        )
    )]
    reporting: HashMap<(u32, u32), bool>, // Workspace
}
/// The broad-phase's view of one collider: its handle, its (loosened) AABB,
/// and a free-list link used when the slot is unoccupied.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub(crate) struct BroadPhaseProxy {
    handle: ColliderHandle,
    aabb: AABB,
    // Index of the next free slot; only meaningful after `Proxies::remove`.
    next_free: u32,
}
/// A free-list-backed storage of broad-phase proxies.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
struct Proxies {
    elements: Vec<BroadPhaseProxy>,
    first_free: u32,
}

impl Proxies {
    /// An empty proxy storage.
    pub fn new() -> Self {
        Self {
            elements: Vec::new(),
            first_free: NEXT_FREE_SENTINEL,
        }
    }

    /// Stores `proxy`, reusing a freed slot when one exists, and returns its index.
    pub fn insert(&mut self, proxy: BroadPhaseProxy) -> usize {
        if self.first_free == NEXT_FREE_SENTINEL {
            // No hole to fill: append at the end.
            self.elements.push(proxy);
            self.elements.len() - 1
        } else {
            // Pop the head of the free-list and overwrite it.
            let free_slot = self.first_free as usize;
            self.first_free = self.elements[free_slot].next_free;
            self.elements[free_slot] = proxy;
            free_slot
        }
    }

    /// Marks the slot `proxy_id` as free so a future `insert` can reuse it.
    pub fn remove(&mut self, proxy_id: usize) {
        self.elements[proxy_id].next_free = self.first_free;
        self.first_free = proxy_id as u32;
    }

    // // FIXME: take holes into account?
    // pub fn get(&self, i: usize) -> Option<&BroadPhaseProxy> {
    //     self.elements.get(i)
    // }
    // FIXME: take holes into account?
    pub fn get_mut(&mut self, i: usize) -> Option<&mut BroadPhaseProxy> {
        self.elements.get_mut(i)
    }
}
impl Index<usize> for Proxies {
    type Output = BroadPhaseProxy;

    /// Panicking indexed access to a proxy slot.
    fn index(&self, i: usize) -> &BroadPhaseProxy {
        &self.elements[i]
    }
}

impl IndexMut<usize> for Proxies {
    /// Panicking mutable indexed access to a proxy slot.
    fn index_mut(&mut self, i: usize) -> &mut BroadPhaseProxy {
        &mut self.elements[i]
    }
}
impl BroadPhase {
    /// Create a new empty broad-phase.
    pub fn new() -> Self {
        BroadPhase {
            removed_colliders: None,
            proxies: Proxies::new(),
            regions: HashMap::default(),
            region_pool: Vec::new(),
            regions_to_remove: Vec::new(),
            reporting: HashMap::default(),
            deleted_any: false,
        }
    }

    /// Maintain the broad-phase internal state by taking collider removal into account.
    pub fn maintain(&mut self, colliders: &mut ColliderSet) {
        // Ensure we already subscribed.
        if self.removed_colliders.is_none() {
            self.removed_colliders = Some(colliders.removed_colliders.subscribe());
        }

        // Take the cursor out of `self` to avoid borrowing issues, then restore it.
        let cursor = self.removed_colliders.take().unwrap();
        for collider in colliders.removed_colliders.read(&cursor) {
            self.remove_collider(collider.proxy_index);
        }

        colliders.removed_colliders.ack(&cursor);
        self.removed_colliders = Some(cursor);
    }

    /// Schedules the removal of the proxy at `proxy_index` from every region it
    /// touches, then frees its slot.
    fn remove_collider(&mut self, proxy_index: usize) {
        if proxy_index == crate::INVALID_USIZE {
            // This collider has not been added to the broad-phase yet.
            return;
        }

        let proxy = &mut self.proxies[proxy_index];

        // Discretize the AABB to find the regions that need to be invalidated.
        let start = point_key(proxy.aabb.mins);
        let end = point_key(proxy.aabb.maxs);

        #[cfg(feature = "dim2")]
        for i in start.x..=end.x {
            for j in start.y..=end.y {
                if let Some(region) = self.regions.get_mut(&Point::new(i, j)) {
                    region.predelete_proxy(proxy_index);
                    self.deleted_any = true;
                }
            }
        }

        #[cfg(feature = "dim3")]
        for i in start.x..=end.x {
            for j in start.y..=end.y {
                for k in start.z..=end.z {
                    if let Some(region) = self.regions.get_mut(&Point::new(i, j, k)) {
                        region.predelete_proxy(proxy_index);
                        self.deleted_any = true;
                    }
                }
            }
        }

        // Push the proxy to infinity, but not beyond the sentinels.
        proxy.aabb.mins.coords.fill(SENTINEL_VALUE / 2.0);
        proxy.aabb.maxs.coords.fill(SENTINEL_VALUE / 2.0);
        self.proxies.remove(proxy_index);
    }

    /// Recomputes the (loosened) AABB of every active collider and registers it
    /// into all the grid regions it intersects, creating regions on demand.
    pub(crate) fn update_aabbs(
        &mut self,
        prediction_distance: Real,
        bodies: &RigidBodySet,
        colliders: &mut ColliderSet,
    ) {
        // First, if we have any pending removals we have
        // to deal with them now because otherwise we will
        // end up with an ABA problems when reusing proxy
        // ids.
        self.complete_removals();

        for body_handle in bodies
            .modified_inactive_set
            .iter()
            .chain(bodies.active_dynamic_set.iter())
            .chain(bodies.active_kinematic_set.iter())
        {
            for handle in &bodies[*body_handle].colliders {
                let collider = &mut colliders[*handle];
                // Half the prediction distance on each side keeps the total margin equal to it.
                let aabb = collider.compute_aabb().loosened(prediction_distance / 2.0);

                if let Some(proxy) = self.proxies.get_mut(collider.proxy_index) {
                    proxy.aabb = aabb;
                } else {
                    let proxy = BroadPhaseProxy {
                        handle: *handle,
                        aabb,
                        next_free: NEXT_FREE_SENTINEL,
                    };
                    collider.proxy_index = self.proxies.insert(proxy);
                }

                // Discretize the aabb.
                let proxy_id = collider.proxy_index;
                // let start = Point::origin();
                // let end = Point::origin();
                let start = point_key(aabb.mins);
                let end = point_key(aabb.maxs);

                let regions = &mut self.regions;
                let pool = &mut self.region_pool;

                #[cfg(feature = "dim2")]
                for i in start.x..=end.x {
                    for j in start.y..=end.y {
                        let region_key = Point::new(i, j);
                        let region_bounds = region_aabb(region_key);
                        let region = regions
                            .entry(region_key)
                            .or_insert_with(|| SAPRegion::recycle_or_new(region_bounds, pool));
                        let _ = region.preupdate_proxy(proxy_id);
                    }
                }

                #[cfg(feature = "dim3")]
                for i in start.x..=end.x {
                    for j in start.y..=end.y {
                        for k in start.z..=end.z {
                            let region_key = Point::new(i, j, k);
                            let region_bounds = region_aabb(region_key);
                            let region = regions
                                .entry(region_key)
                                .or_insert_with(|| SAPRegion::recycle_or_new(region_bounds, pool));
                            let _ = region.preupdate_proxy(proxy_id);
                        }
                    }
                }
            }
        }
    }

    /// Updates every region, then recycles the regions left without any proxy.
    fn update_regions(&mut self) {
        for (point, region) in &mut self.regions {
            region.update(&self.proxies, &mut self.reporting);
            if region.proxy_count == 0 {
                self.regions_to_remove.push(*point);
            }
        }

        // Remove all the empty regions and store them in the region pool
        let regions = &mut self.regions;
        self.region_pool.extend(
            self.regions_to_remove
                .drain(..)
                .map(|p| regions.remove(&p).unwrap()),
        );
    }

    /// Completes any pending proxy removals, discarding the pair reports they produce.
    pub(crate) fn complete_removals(&mut self) {
        if self.deleted_any {
            self.update_regions();

            // NOTE: we don't care about reporting pairs.
            self.reporting.clear();
            self.deleted_any = false;
        }
    }

    /// Runs the broad-phase and converts the overlap reports into add/delete pair events.
    pub(crate) fn find_pairs(&mut self, out_events: &mut Vec<BroadPhasePairEvent>) {
        // println!("num regions: {}", self.regions.len());
        self.reporting.clear();
        self.update_regions();

        // Convert reports to broad phase events.
        // let t = instant::now();
        // let mut num_add_events = 0;
        // let mut num_delete_events = 0;
        for ((proxy1, proxy2), colliding) in &self.reporting {
            let proxy1 = &self.proxies[*proxy1 as usize];
            let proxy2 = &self.proxies[*proxy2 as usize];
            let handle1 = proxy1.handle;
            let handle2 = proxy2.handle;

            if *colliding {
                out_events.push(BroadPhasePairEvent::AddPair(ColliderPair::new(
                    handle1, handle2,
                )));
            // num_add_events += 1;
            } else {
                out_events.push(BroadPhasePairEvent::DeletePair(ColliderPair::new(
                    handle1, handle2,
                )));
                // num_delete_events += 1;
            }
        }

        // println!(
        //     "Event conversion time: {}, add: {}/{}, delete: {}/{}",
        //     instant::now() - t,
        //     num_add_events,
        //     out_events.len(),
        //     num_delete_events,
        //     out_events.len()
        // );
    }
}
#[cfg(test)]
mod test {
    use crate::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
    use crate::geometry::{BroadPhase, ColliderBuilder, ColliderSet};

    /// Regression test: adding a body, removing it, then adding a new one must
    /// recycle the freed broad-phase proxy slot without panicking.
    #[test]
    fn test_add_update_remove() {
        let mut broad_phase = BroadPhase::new();
        let mut bodies = RigidBodySet::new();
        let mut colliders = ColliderSet::new();
        let mut joints = JointSet::new();

        let rb = RigidBodyBuilder::new_dynamic().build();
        let co = ColliderBuilder::ball(0.5).build();
        let hrb = bodies.insert(rb);
        colliders.insert(co, hrb, &mut bodies);
        broad_phase.update_aabbs(0.0, &bodies, &mut colliders);

        bodies.remove(hrb, &mut colliders, &mut joints);
        broad_phase.maintain(&mut colliders);
        broad_phase.update_aabbs(0.0, &bodies, &mut colliders);

        // Create another body.
        let rb = RigidBodyBuilder::new_dynamic().build();
        let co = ColliderBuilder::ball(0.5).build();
        let hrb = bodies.insert(rb);
        colliders.insert(co, hrb, &mut bodies);

        // Make sure the proxy handle is recycled properly.
        broad_phase.update_aabbs(0.0, &bodies, &mut colliders);
    }
}

View File

@@ -0,0 +1,556 @@
use super::{
BroadPhasePairEvent, ColliderPair, SAPLayer, SAPProxies, SAPProxy, SAPProxyData, SAPRegionPool,
};
use crate::data::pubsub::Subscription;
use crate::geometry::broad_phase_multi_sap::SAPProxyIndex;
use crate::geometry::collider::ColliderChanges;
use crate::geometry::{ColliderSet, RemovedCollider};
use crate::math::Real;
use crate::utils::IndexMut2;
use parry::bounding_volume::BoundingVolume;
use parry::utils::hashmap::HashMap;
/// A broad-phase combining a Hierarchical Grid and Sweep-and-Prune.
///
/// The basic Sweep-and-Prune (SAP) algorithm has one significant flaw:
/// the interactions between far-away objects. This means that objects
/// that are very far away will still have some of their endpoints swapped
/// within the SAP data-structure. This results in poor scaling because this
/// results in lots of swapping between endpoints of AABBs that won't ever
/// actually interact.
///
/// The first optimization to address this problem is to use the Multi-SAP
/// method. This basically combines an SAP with a grid. The grid subdivides
/// the spaces into equally-sized subspaces (grid cells). Each subspace, which we call
/// a "region", contains an SAP instance (i.e. the SAP axes responsible for
/// collecting endpoints and swapping them when they move to detect interaction pairs).
/// Each AABB is inserted in all the regions it intersects.
/// This prevents the far-away problem because two objects that are far away will
/// be located on different regions. So their endpoints will never meet.
///
/// However, the Multi-SAP approach has one notable problem: the region size must
/// be chosen wisely. It could be user-defined, but that makes it more difficult
/// to use (for the end-user). Or it can be given a fixed value. Using a fixed
/// value may result in large objects intersecting lots of regions, resulting in
/// poor performances and very high memory usage.
///
/// So a solution to that large-objects problem in the Multi-SAP approach is to
/// replace the grid by a hierarchical grid. A hierarchical grid is composed of
/// several layers. And each layer has a different region size. For example all
/// the regions on layer 0 will have the size 1x1x1. All the regions on the layer
/// 1 will have the size 10x10x10, etc. That way, a given AABB will be inserted
/// on the layer that has regions big enough to avoid the large-object problem.
/// For example a 20x20x20 object will be inserted in the layer with region
/// of size 10x10x10, resulting in only 8 regions being intersect by the AABB.
/// (If it was inserted in the layer with regions of size 1x1x1, it would have intersected
/// 8000 regions, which is a problem performance-wise.)
///
/// We call this new method the Hierarchical-SAP.
///
/// Now with the Hierarchical-SAP, we can update each layer independently from one another.
/// However, objects belonging to different layers will never be detected as intersecting that
/// way. So we need a way to do inter-layer interference detection. There are a lot of ways of doing
/// this: performing inter-layer Multi-Box-Pruning passes is one example (but this is not what we do).
/// In our implementation, we do the following:
/// - The AABB bounds of each region of the layer `n` are inserted into the corresponding larger region
///   of the layer `n + 1`.
/// - When an AABB in the region of the layer `n + 1` intersects the AABB corresponding to one of the
///   regions at the smaller layer `n`, we add that AABB to that smaller region.
/// So in the end it means that a given AABB will be inserted into all the region it intersects at
/// the layer `n`. And it will also be inserted into all the regions it intersects at the smaller layers
/// (the layers `< n`), but only for the regions that already exist (so we don't have to discretize
/// our AABB into the layers `< n`). This involves a fair amount of bookkeeping unfortunately, but
/// this has the benefit of keeping the overall complexity of the algorithm O(1) in the typical
/// spatially coherent scenario.
///
/// From an implementation point-of-view, our hierarchical SAP is implemented with the following structures:
/// - There is one `SAPLayer` per layer of the hierarchical grid.
/// - Each `SAPLayer` contains multiple `SAPRegion` (each being a region of the grid represented by that layer).
/// - Each `SAPRegion` contains three `SAPAxis`, representing the "classical" SAP algorithm running on this region.
/// - Each `SAPAxis` maintains a sorted list of `SAPEndpoints` representing the endpoints of the AABBs intersecting
///   the bounds on the `SAPRegion` containing this `SAPAxis`.
/// - A set of `SAPProxy` are maintained separately. It contains the AABBs of all the colliders managed by this
///   broad-phase, as well as the AABBs of all the regions part of this broad-phase.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct BroadPhase {
    // The AABB proxies of every collider and every region, shared by all the layers.
    proxies: SAPProxies,
    // The layers of the hierarchical grid; ordering is tracked through each
    // layer's smaller/larger links, not through this vector's order.
    layers: Vec<SAPLayer>,
    // Index (into `layers`) of the layer with the smallest region size.
    smallest_layer: u8,
    // Index (into `layers`) of the layer with the largest region size.
    largest_layer: u8,
    // Cursor into the collider-removal pub/sub channel; `None` until the first update.
    removed_colliders: Option<Subscription<RemovedCollider>>,
    // Set when at least one proxy deletion is pending.
    deleted_any: bool,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    region_pool: SAPRegionPool, // To avoid repeated allocations.
    // We could think serializing this workspace is useless.
    // It turns out it is important to serialize at least its capacity
    // and restore this capacity when deserializing the hashmap.
    // This is because the order of future elements inserted into the
    // hashmap depends on its capacity (because the internal bucket indices
    // depend on this capacity). So not restoring this capacity may alter
    // the order at which future elements are reported. This will in turn
    // alter the order at which the pairs are registered in the narrow-phase,
    // thus altering the order of the contact manifold. In the end, this
    // alters the order of the resolution of contacts, resulting in
    // diverging simulation after restoration of a snapshot.
    #[cfg_attr(
        feature = "serde-serialize",
        serde(
            serialize_with = "parry::utils::hashmap::serialize_hashmap_capacity",
            deserialize_with = "parry::utils::hashmap::deserialize_hashmap_capacity"
        )
    )]
    reporting: HashMap<(u32, u32), bool>, // Workspace
}
impl BroadPhase {
/// Create a new empty broad-phase.
pub fn new() -> Self {
    BroadPhase {
        proxies: SAPProxies::new(),
        layers: Vec::new(),
        smallest_layer: 0,
        largest_layer: 0,
        removed_colliders: None,
        deleted_any: false,
        region_pool: Vec::new(),
        reporting: HashMap::default(),
    }
}
/// Maintains the broad-phase internal state by taking collider removal into account.
///
/// For each collider marked as removed, we make its containing layer mark
/// its proxy as pre-deleted. The actual proxy removal will happen at the end
/// of the `BroadPhase::update`.
fn handle_removed_colliders(&mut self, colliders: &mut ColliderSet) {
    // Ensure we already subscribed the collider-removed events.
    if self.removed_colliders.is_none() {
        self.removed_colliders = Some(colliders.removed_colliders.subscribe());
    }

    // Extract the cursor to avoid borrowing issues.
    let cursor = self.removed_colliders.take().unwrap();

    // Read all the collider-removed events, and remove the corresponding proxy.
    for collider in colliders.removed_colliders.read(&cursor) {
        self.predelete_proxy(collider.proxy_index);
    }

    // NOTE: We don't acknowledge the cursor just yet because we need
    // to traverse the set of removed colliders one more time after
    // the broad-phase update.
    // Re-insert the cursor we extracted to avoid borrowing issues.
    self.removed_colliders = Some(cursor);
}
/// Pre-deletes a proxy from this broad-phase.
///
/// The removal of a proxy is a semi-lazy process: this only marks the
/// proxy as predeleted inside its owning layer. After this method has
/// been called with all the proxies to remove, the `complete_removal`
/// method MUST be called to complete the removal of these proxies, by
/// actually removing them from all the relevant layers/regions/axes.
fn predelete_proxy(&mut self, proxy_index: SAPProxyIndex) {
    if proxy_index == crate::INVALID_U32 {
        // This collider has not been added to the broad-phase yet.
        return;
    }

    // Find which layer owns this proxy, then let that layer know the
    // proxy is being deleted.
    let layer_id = {
        let proxy = &mut self.proxies[proxy_index];
        proxy.layer_id as usize
    };

    self.layers[layer_id].predelete_proxy(&mut self.proxies, proxy_index);
}
/// Completes the removal of the deleted proxies.
///
/// If `self.predelete_proxy` was called, then this `complete_removals`
/// method must be called to complete the removals.
///
/// This method will actually remove from the proxy list all the proxies
/// marked as deletable by `self.predelete_proxy`, making their proxy
/// handles re-usable by new proxies.
fn complete_removals(&mut self, colliders: &mut ColliderSet) {
    // If there is no layer, there is nothing to remove.
    if self.layers.is_empty() {
        return;
    }

    // This is a bottom-up pass:
    // - Complete the removal on the layer `n`. This may cause some regions to be deleted.
    // - Continue with the layer `n + 1`. This will delete from `n + 1` all the proxies
    //   of the regions originating from `n`.
    // This bottom-up approach will propagate region removal from the smallest layer up
    // to the largest layer.
    let mut curr_layer_id = self.smallest_layer;

    loop {
        let curr_layer = &mut self.layers[curr_layer_id as usize];

        if let Some(larger_layer_id) = curr_layer.larger_layer {
            // Split-borrow the current and the larger layer so the current one
            // can propagate its deleted regions into the larger one.
            let (curr_layer, larger_layer) = self
                .layers
                .index_mut2(curr_layer_id as usize, larger_layer_id as usize);
            curr_layer.complete_removals(
                Some(larger_layer),
                &mut self.proxies,
                &mut self.region_pool,
            );

            // NOTE: we don't care about reporting pairs.
            self.reporting.clear();

            curr_layer_id = larger_layer_id;
        } else {
            // Reached the largest layer: no layer above to propagate into.
            curr_layer.complete_removals(None, &mut self.proxies, &mut self.region_pool);

            // NOTE: we don't care about reporting pairs.
            self.reporting.clear();
            break;
        }
    }

    /*
     * Actually remove the colliders proxies.
     */
    let cursor = self.removed_colliders.as_ref().unwrap();
    for collider in colliders.removed_colliders.read(&cursor) {
        if collider.proxy_index != crate::INVALID_U32 {
            self.proxies.remove(collider.proxy_index);
        }
    }

    colliders.removed_colliders.ack(&cursor);
}
/// Finalize the insertion of the layer identified by `layer_id`.
///
/// This will:
/// - Remove all the subregion proxies from the larger layer.
/// - Pre-insert all the smaller layer's region proxies into this layer.
fn finalize_layer_insertion(&mut self, layer_id: u8) {
    // Remove all the region endpoints from the larger layer.
    // They will be automatically replaced by the new layer's regions.
    if let Some(larger_layer) = self.layers[layer_id as usize].larger_layer {
        self.layers[larger_layer as usize].unregister_all_subregions(&mut self.proxies);
    }

    // Add all the regions from the smaller layer to the new layer.
    // This will result in new regions to be created in the new layer.
    // These new regions will automatically propagate to the larger layers in
    // the Phase 3 of `Self::update`.
    if let Some(smaller_layer) = self.layers[layer_id as usize].smaller_layer {
        // Split-borrow the two layers so the smaller one can write into the new one.
        let (smaller_layer, new_layer) = self
            .layers
            .index_mut2(smaller_layer as usize, layer_id as usize);

        smaller_layer.propagate_existing_regions(
            new_layer,
            &mut self.proxies,
            &mut self.region_pool,
        );
    }
}
/// Ensures that a given layer exists.
///
/// If the layer does not exist then:
/// 1. It is created and added to `self.layers`.
/// 2. The smaller/larger layer indices are updated to order them
///    properly depending on their depth.
/// 3. All the subregion proxies from the larger layer are deleted:
///    they will be replaced by this new layer's regions later in
///    the `update` function.
/// 4. All the regions from the smaller layer are added to that new
///    layer.
fn ensure_layer_exists(&mut self, new_depth: i8) -> u8 {
    // Special case: we don't have any layers yet.
    if self.layers.is_empty() {
        let layer_id = self.layers.len() as u8; // TODO: check overflow.
        self.layers
            .push(SAPLayer::new(new_depth, layer_id, None, None));
        return 0;
    }

    // Find the first layer with a depth larger or equal to new_depth,
    // walking the linked list from the smallest layer upwards.
    let mut larger_layer_id = Some(self.smallest_layer);

    while let Some(curr_layer_id) = larger_layer_id {
        if self.layers[curr_layer_id as usize].depth >= new_depth {
            break;
        }
        larger_layer_id = self.layers[curr_layer_id as usize].larger_layer;
    }

    match larger_layer_id {
        None => {
            // The layer we are currently creating is the new largest layer. So
            // we need to update `self.largest_layer` accordingly then call
            // `self.finalize_layer_insertion`.
            assert_ne!(self.layers.len() as u8, u8::MAX, "Not yet implemented.");
            let new_layer_id = self.layers.len() as u8;
            self.layers[self.largest_layer as usize].larger_layer = Some(new_layer_id);
            self.layers.push(SAPLayer::new(
                new_depth,
                new_layer_id,
                Some(self.largest_layer),
                None,
            ));
            self.largest_layer = new_layer_id;
            self.finalize_layer_insertion(new_layer_id);

            new_layer_id
        }
        Some(larger_layer_id) => {
            if self.layers[larger_layer_id as usize].depth == new_depth {
                // Found it! The layer already exists.
                larger_layer_id
            } else {
                // The layer does not exist yet. Create it.
                // And we found another layer that is larger than this one.
                // So we need to adjust the smaller/larger layer indices to
                // keep the list sorted, and then call `self.finalize_layer_insertion`
                // to deal with region propagation.
                let new_layer_id = self.layers.len() as u8;
                let smaller_layer_id = self.layers[larger_layer_id as usize].smaller_layer;
                self.layers[larger_layer_id as usize].smaller_layer = Some(new_layer_id);

                if let Some(smaller_layer_id) = smaller_layer_id {
                    self.layers[smaller_layer_id as usize].larger_layer = Some(new_layer_id);
                } else {
                    // No smaller layer: the new layer becomes the smallest one.
                    self.smallest_layer = new_layer_id;
                }

                self.layers.push(SAPLayer::new(
                    new_depth,
                    new_layer_id,
                    smaller_layer_id,
                    Some(larger_layer_id),
                ));

                self.finalize_layer_insertion(new_layer_id);

                new_layer_id
            }
        }
    }
}
/// Updates the broad-phase, taking into account the new collider positions.
///
/// The detected pair creations/removals are pushed into `events`.
pub fn update(
    &mut self,
    prediction_distance: Real,
    colliders: &mut ColliderSet,
    events: &mut Vec<BroadPhasePairEvent>,
) {
    // Phase 1: pre-delete the collisions that have been deleted.
    self.handle_removed_colliders(colliders);

    let mut need_region_propagation = false;

    // Phase 2: pre-update the modified colliders: refresh their AABB,
    // create/migrate their proxy if needed, and pre-insert them into the
    // regions of the layer they belong to.
    colliders.foreach_modified_colliders_mut_internal(|handle, collider| {
        if !collider.changes.needs_broad_phase_update() {
            return;
        }

        // Loosen the AABB by half the prediction distance on each side, and
        // clamp it so it stays within the broad-phase's representable bounds.
        let mut aabb = collider.compute_aabb().loosened(prediction_distance / 2.0);
        aabb.mins = super::clamp_point(aabb.mins);
        aabb.maxs = super::clamp_point(aabb.maxs);

        let layer_id = if let Some(proxy) = self.proxies.get_mut(collider.proxy_index) {
            // The collider already has a proxy: refresh its AABB in place.
            let mut layer_id = proxy.layer_id;
            proxy.aabb = aabb;

            if collider.changes.contains(ColliderChanges::SHAPE) {
                // If the shape was changed, then we need to see if this proxy should be
                // migrated to a larger layer. Indeed, if the shape was replaced by
                // a much larger shape, we need to promote the proxy to a bigger layer
                // to avoid the O(n²) discretization problem.
                let new_layer_depth = super::layer_containing_aabb(&aabb);
                if new_layer_depth > proxy.layer_depth {
                    self.layers[proxy.layer_id as usize].proper_proxy_moved_to_bigger_layer(
                        &mut self.proxies,
                        collider.proxy_index,
                    );

                    // We need to promote the proxy to the bigger layer.
                    layer_id = self.ensure_layer_exists(new_layer_depth);
                    self.proxies[collider.proxy_index].layer_id = layer_id;
                }
            }

            layer_id
        } else {
            // New collider: pick the layer whose depth matches the AABB size.
            let layer_depth = super::layer_containing_aabb(&aabb);
            let layer_id = self.ensure_layer_exists(layer_depth);

            // Create the proxy.
            let proxy = SAPProxy::collider(handle, aabb, layer_id, layer_depth);
            collider.proxy_index = self.proxies.insert(proxy);
            layer_id
        };

        let layer = &mut self.layers[layer_id as usize];

        // Preupdate the collider in the layer.
        layer.preupdate_collider(collider, &aabb, &mut self.proxies, &mut self.region_pool);
        need_region_propagation = need_region_propagation || !layer.created_regions.is_empty();
    });

    // Phase 3: bottom-up pass to propagate new regions from smaller layers to larger layers.
    if need_region_propagation {
        self.propagate_created_regions();
    }

    // Phase 4: top-down pass to propagate proxies from larger layers to smaller layers.
    self.update_layers_and_find_pairs(events);

    // Phase 5: bottom-up pass to remove proxies, and propagate region removed from smaller
    // layers to possible remove regions from larger layers that would become empty that way.
    self.complete_removals(colliders);
}
/// Propagate regions from the smallest layers up to the larger layers.
///
/// Whenever a region is created on a layer `n`, then its AABB must be
/// added to its larger layer so we can detect when an object
/// in a larger layer may start interacting with objects in a smaller
/// layer.
fn propagate_created_regions(&mut self) {
    // Bottom-up traversal of the layer linked-list.
    let mut curr_layer = Some(self.smallest_layer);

    while let Some(curr_layer_id) = curr_layer {
        let layer = &mut self.layers[curr_layer_id as usize];
        let larger_layer = layer.larger_layer;

        if !layer.created_regions.is_empty() {
            if let Some(larger_layer) = larger_layer {
                // Split-borrow the current layer and its larger layer so we
                // can register the created regions into the latter.
                let (layer, larger_layer) = self
                    .layers
                    .index_mut2(curr_layer_id as usize, larger_layer as usize);
                layer.propagate_created_regions(
                    larger_layer,
                    &mut self.proxies,
                    &mut self.region_pool,
                );
                layer.created_regions.clear();
            } else {
                // Always clear the set of created regions, even if
                // there is no larger layer.
                layer.created_regions.clear();
            }
        }

        curr_layer = larger_layer;
    }
}
/// Updates every layer, from the largest down to the smallest, and pushes
/// the resulting collider pair creation/deletion events into `out_events`.
fn update_layers_and_find_pairs(&mut self, out_events: &mut Vec<BroadPhasePairEvent>) {
    if self.layers.is_empty() {
        return;
    }

    // This is a top-down operation: we start by updating the largest
    // layer, and continue down to the smallest layer.
    //
    // This must be top-down because:
    // 1. If a non-region proxy from the layer `n` interacts with one of
    //    the regions from the layer `n - 1`, it must be added to that
    //    smaller layer before that smaller layer is updated.
    // 2. If a region has been updated, then all its subregions at the
    //    layer `n - 1` must be marked as "needs-to-be-updated" too, in
    //    order to account for the fact that a big proxy moved.
    // NOTE: this 2nd point could probably be improved: instead of updating
    //       all the subregions, we could perhaps just update the subregions
    //       that crosses the boundary of the AABB of the big proxies that
    //       moved in the layer `n`.
    let mut layer_id = Some(self.largest_layer);

    while let Some(curr_layer_id) = layer_id {
        self.layers[curr_layer_id as usize]
            .update_regions(&mut self.proxies, &mut self.reporting);
        layer_id = self.layers[curr_layer_id as usize].smaller_layer;

        // Process everything the regions of this layer reported.
        for ((proxy_id1, proxy_id2), colliding) in &self.reporting {
            let (proxy1, proxy2) = self
                .proxies
                .elements
                .index_mut2(*proxy_id1 as usize, *proxy_id2 as usize);

            match (&mut proxy1.data, &mut proxy2.data) {
                (SAPProxyData::Collider(handle1), SAPProxyData::Collider(handle2)) => {
                    // Collider/collider pair: emit the corresponding event.
                    if *colliding {
                        out_events.push(BroadPhasePairEvent::AddPair(ColliderPair::new(
                            *handle1, *handle2,
                        )));
                    } else {
                        out_events.push(BroadPhasePairEvent::DeletePair(ColliderPair::new(
                            *handle1, *handle2,
                        )));
                    }
                }
                (SAPProxyData::Collider(_), SAPProxyData::Region(_)) => {
                    if *colliding {
                        // Add the collider to the subregion.
                        proxy2
                            .data
                            .as_region_mut()
                            .preupdate_proxy(*proxy_id1, false);
                    }
                }
                (SAPProxyData::Region(_), SAPProxyData::Collider(_)) => {
                    if *colliding {
                        // Add the collider to the subregion.
                        proxy1
                            .data
                            .as_region_mut()
                            .preupdate_proxy(*proxy_id2, false);
                    }
                }
                (SAPProxyData::Region(_), SAPProxyData::Region(_)) => {
                    // This will only happen between two adjacent subregions because
                    // they share some identical bounds. So this case does not matter.
                }
            }
        }

        self.reporting.clear();
    }
}
}
#[cfg(test)]
mod test {
    use crate::dynamics::{JointSet, RigidBodyBuilder, RigidBodySet};
    use crate::geometry::{BroadPhase, ColliderBuilder, ColliderSet};

    /// Inserts a collider, removes it, then inserts another one, updating the
    /// broad-phase after each step to check that proxy slots are recycled
    /// without panicking.
    #[test]
    fn test_add_update_remove() {
        let mut broad_phase = BroadPhase::new();
        let mut bodies = RigidBodySet::new();
        let mut colliders = ColliderSet::new();
        let mut joints = JointSet::new();
        let mut events = Vec::new();

        // First body/collider pair.
        let body = RigidBodyBuilder::new_dynamic().build();
        let collider = ColliderBuilder::ball(0.5).build();
        let body_handle = bodies.insert(body);
        colliders.insert(collider, body_handle, &mut bodies);
        broad_phase.update(0.0, &mut colliders, &mut events);

        // Remove it and let the broad-phase process the removal.
        bodies.remove(body_handle, &mut colliders, &mut joints);
        broad_phase.update(0.0, &mut colliders, &mut events);

        // Create another body.
        let body = RigidBodyBuilder::new_dynamic().build();
        let collider = ColliderBuilder::ball(0.5).build();
        let body_handle = bodies.insert(body);
        colliders.insert(collider, body_handle, &mut bodies);

        // Make sure the proxy handle is recycled properly.
        broad_phase.update(0.0, &mut colliders, &mut events);
    }
}

View File

@@ -0,0 +1,33 @@
use crate::geometry::ColliderHandle;
/// A pair of collider handles, as reported by the broad-phase.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct ColliderPair {
    /// The first collider of the pair.
    pub collider1: ColliderHandle,
    /// The second collider of the pair.
    pub collider2: ColliderHandle,
}
impl ColliderPair {
    /// Creates a pair from two collider handles.
    pub fn new(collider1: ColliderHandle, collider2: ColliderHandle) -> Self {
        Self {
            collider1,
            collider2,
        }
    }

    /// Returns this pair with its two handles exchanged.
    pub fn swap(self) -> Self {
        Self {
            collider1: self.collider2,
            collider2: self.collider1,
        }
    }

    /// A placeholder pair where both handles are built from the raw parts `(0, 0)`.
    pub fn zero() -> Self {
        Self::new(
            ColliderHandle::from_raw_parts(0, 0),
            ColliderHandle::from_raw_parts(0, 0),
        )
    }
}
/// An event emitted by the broad-phase.
pub enum BroadPhasePairEvent {
    /// A new pair of colliders started being in close proximity.
    AddPair(ColliderPair),
    /// A pair of colliders stopped being in close proximity.
    DeletePair(ColliderPair),
}

View File

@@ -0,0 +1,19 @@
pub use self::broad_phase::BroadPhase;
pub use self::broad_phase_pair_event::{BroadPhasePairEvent, ColliderPair};
pub use self::sap_proxy::SAPProxyIndex;
pub(self) use self::sap_axis::*;
pub(self) use self::sap_endpoint::*;
pub(self) use self::sap_layer::*;
pub(self) use self::sap_proxy::*;
pub(self) use self::sap_region::*;
pub(self) use self::sap_utils::*;
mod broad_phase;
mod broad_phase_pair_event;
mod sap_axis;
mod sap_endpoint;
mod sap_layer;
mod sap_proxy;
mod sap_region;
mod sap_utils;

View File

@@ -0,0 +1,274 @@
use super::{SAPEndpoint, SAPProxies, NUM_SENTINELS};
use crate::geometry::broad_phase_multi_sap::DELETED_AABB_VALUE;
use crate::geometry::SAPProxyIndex;
use crate::math::Real;
use bit_vec::BitVec;
use parry::bounding_volume::BoundingVolume;
use parry::utils::hashmap::HashMap;
use std::cmp::Ordering;
/// One axis of the sweep-and-prune executed by a single region.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct SAPAxis {
    /// Lower bound of this axis' interval.
    pub min_bound: Real,
    /// Upper bound of this axis' interval.
    pub max_bound: Real,
    /// The sorted endpoints, always delimited by a start and an end sentinel.
    pub endpoints: Vec<SAPEndpoint>,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    pub new_endpoints: Vec<(SAPEndpoint, usize)>, // Workspace
}
impl SAPAxis {
    /// Creates a new axis with the given bounds, containing only the two sentinels.
    pub fn new(min_bound: Real, max_bound: Real) -> Self {
        assert!(min_bound <= max_bound);

        Self {
            min_bound,
            max_bound,
            endpoints: vec![SAPEndpoint::start_sentinel(), SAPEndpoint::end_sentinel()],
            new_endpoints: Vec::new(),
        }
    }

    /// Removes every endpoint from this axis, keeping only the two sentinels.
    pub fn clear(&mut self) {
        self.new_endpoints.clear();
        self.endpoints.clear();
        self.endpoints.push(SAPEndpoint::start_sentinel());
        self.endpoints.push(SAPEndpoint::end_sentinel());
    }

    /// Inserts a batch of new proxies into this axis, merging their endpoints
    /// into the already-sorted endpoint list.
    ///
    /// If `reporting` is provided, every new pair detected along this axis (and
    /// confirmed by a full AABB intersection test) is inserted into it with the
    /// value `true`.
    pub fn batch_insert(
        &mut self,
        dim: usize,
        new_proxies: &[SAPProxyIndex],
        proxies: &SAPProxies,
        reporting: Option<&mut HashMap<(u32, u32), bool>>,
    ) {
        if new_proxies.is_empty() {
            return;
        }

        self.new_endpoints.clear();

        // Collect the start/end endpoints of every new proxy.
        for proxy_id in new_proxies {
            let proxy = &proxies[*proxy_id];
            assert!(proxy.aabb.mins[dim] <= self.max_bound);
            assert!(proxy.aabb.maxs[dim] >= self.min_bound);
            let start_endpoint =
                SAPEndpoint::start_endpoint(proxy.aabb.mins[dim], *proxy_id as u32);
            let end_endpoint = SAPEndpoint::end_endpoint(proxy.aabb.maxs[dim], *proxy_id as u32);

            self.new_endpoints.push((start_endpoint, 0));
            self.new_endpoints.push((end_endpoint, 0));
        }

        self.new_endpoints
            .sort_by(|a, b| a.0.value.partial_cmp(&b.0.value).unwrap_or(Ordering::Equal));

        // Backward in-place merge of the sorted new endpoints with the existing
        // (sorted) endpoints, after growing the endpoint vector.
        let mut curr_existing_index = self.endpoints.len() - NUM_SENTINELS - 1;
        let new_num_endpoints = self.endpoints.len() + self.new_endpoints.len();
        self.endpoints
            .resize(new_num_endpoints, SAPEndpoint::end_sentinel());
        let mut curr_shift_index = new_num_endpoints - NUM_SENTINELS - 1;

        // Sort the endpoints.
        // TODO: specialize for the case where this is the
        // first time we insert endpoints to this axis?
        for new_endpoint in self.new_endpoints.iter_mut().rev() {
            loop {
                let existing_endpoint = self.endpoints[curr_existing_index];
                if existing_endpoint.value <= new_endpoint.0.value {
                    break;
                }

                self.endpoints[curr_shift_index] = existing_endpoint;

                curr_shift_index -= 1;
                curr_existing_index -= 1;
            }

            // Record where the new endpoint landed: this is reused below when
            // scanning for overlapping pairs.
            self.endpoints[curr_shift_index] = new_endpoint.0;
            new_endpoint.1 = curr_shift_index;
            curr_shift_index -= 1;
        }

        // Report pairs using a single mbp pass on each new endpoint.
        let endpoints_wo_last_sentinel = &self.endpoints[..self.endpoints.len() - 1];
        if let Some(reporting) = reporting {
            for (endpoint, endpoint_id) in self.new_endpoints.drain(..).filter(|e| e.0.is_start()) {
                let proxy1 = &proxies[endpoint.proxy()];
                let min = endpoint.value;
                let max = proxy1.aabb.maxs[dim];

                for endpoint2 in &endpoints_wo_last_sentinel[endpoint_id + 1..] {
                    if endpoint2.proxy() == endpoint.proxy() {
                        continue;
                    }

                    let proxy2 = &proxies[endpoint2.proxy()];

                    // NOTE: some pairs with equal aabb.mins[dim] may end up being reported twice.
                    if (endpoint2.is_start() && endpoint2.value < max)
                        || (endpoint2.is_end() && proxy2.aabb.mins[dim] <= min)
                    {
                        // Report pair.
                        if proxy1.aabb.intersects(&proxy2.aabb) {
                            // Report pair.
                            let pair = super::sort2(endpoint.proxy(), endpoint2.proxy());
                            reporting.insert(pair, true);
                        }
                    }
                }
            }
        }
    }

    /// Removes from this axis all the endpoints that are out of bounds from this axis.
    ///
    /// Returns the number of deleted proxies as well as the number of proxies deleted
    /// such that `proxy.layer_depth <= layer_depth`.
    pub fn delete_out_of_bounds_proxies(
        &self,
        proxies: &SAPProxies,
        existing_proxies: &mut BitVec,
        layer_depth: i8,
    ) -> (usize, usize) {
        let mut num_subproper_proxies_deleted = 0;
        let mut num_proxies_deleted = 0;

        // Left-to-right scan: a proxy whose end endpoint lies below `min_bound`
        // is completely out of this axis' bounds.
        for endpoint in &self.endpoints {
            if endpoint.value < self.min_bound {
                let proxy_id = endpoint.proxy();
                if endpoint.is_end() && existing_proxies[proxy_id as usize] {
                    existing_proxies.set(proxy_id as usize, false);
                    if proxies[proxy_id].layer_depth <= layer_depth {
                        num_subproper_proxies_deleted += 1;
                    }
                    num_proxies_deleted += 1;
                }
            } else {
                // Endpoints are sorted: no further endpoint can be below the bound.
                break;
            }
        }

        // Right-to-left scan: a proxy whose start endpoint lies above `max_bound`
        // is completely out of this axis' bounds.
        for endpoint in self.endpoints.iter().rev() {
            if endpoint.value > self.max_bound {
                let proxy_id = endpoint.proxy();
                if endpoint.is_start() && existing_proxies[proxy_id as usize] {
                    existing_proxies.set(proxy_id as usize, false);
                    if proxies[proxy_id].layer_depth <= layer_depth {
                        num_subproper_proxies_deleted += 1;
                    }
                    num_proxies_deleted += 1;
                }
            } else {
                break;
            }
        }

        (num_proxies_deleted, num_subproper_proxies_deleted)
    }

    /// Removes the endpoints of every proxy no longer marked as existing in
    /// `existing_proxies` (the sentinels are always kept).
    pub fn delete_out_of_bounds_endpoints(&mut self, existing_proxies: &BitVec) {
        self.endpoints
            .retain(|endpt| endpt.is_sentinel() || existing_proxies[endpt.proxy() as usize])
    }

    /// Removes from this axis all the endpoints corresponding to a proxy with an AABB mins/maxs values
    /// equal to DELETED_AABB_VALUE, indicating that the endpoints should be deleted.
    ///
    /// Returns the number of deleted proxies such that `proxy.layer_depth <= layer_depth`.
    pub fn delete_deleted_proxies_and_endpoints_after_subregion_removal(
        &mut self,
        proxies: &SAPProxies,
        existing_proxies: &mut BitVec,
        layer_depth: i8,
    ) -> usize {
        let mut num_subproper_proxies_deleted = 0;
        self.endpoints.retain(|endpt| {
            if !endpt.is_sentinel() {
                let proxy = &proxies[endpt.proxy()];
                if proxy.aabb.mins.x == DELETED_AABB_VALUE {
                    // Only count the proxy once, even though it has two endpoints.
                    if existing_proxies.get(endpt.proxy() as usize) == Some(true) {
                        existing_proxies.set(endpt.proxy() as usize, false);
                        if proxy.layer_depth <= layer_depth {
                            num_subproper_proxies_deleted += 1;
                        }
                    }

                    return false;
                }
            }

            true
        });

        num_subproper_proxies_deleted
    }

    /// Refreshes every endpoint's coordinate from its proxy's current AABB and
    /// re-sorts the endpoints with one incremental insertion-sort pass,
    /// inserting into `reporting` the pairs that started (`true`) or stopped
    /// (`false`) overlapping.
    pub fn update_endpoints(
        &mut self,
        dim: usize,
        proxies: &SAPProxies,
        reporting: &mut HashMap<(u32, u32), bool>,
    ) {
        let last_endpoint = self.endpoints.len() - NUM_SENTINELS;
        for i in NUM_SENTINELS..last_endpoint {
            let mut endpoint_i = self.endpoints[i];
            let aabb_i = proxies[endpoint_i.proxy()].aabb;

            // Refresh the endpoint value from the proxy's current AABB.
            if endpoint_i.is_start() {
                endpoint_i.value = aabb_i.mins[dim];
            } else {
                endpoint_i.value = aabb_i.maxs[dim];
            }

            // Bubble the endpoint to the left until the list is sorted again.
            let mut j = i;

            if endpoint_i.is_start() {
                while endpoint_i.value < self.endpoints[j - 1].value {
                    let endpoint_j = self.endpoints[j - 1];
                    self.endpoints[j] = endpoint_j;

                    // A start endpoint crossing an end endpoint may indicate a
                    // new overlap; confirm with a full AABB test.
                    if endpoint_j.is_end() {
                        // Report start collision.
                        let proxy_j = &proxies[endpoint_j.proxy()];
                        if aabb_i.intersects(&proxy_j.aabb) {
                            let pair = super::sort2(endpoint_i.proxy(), endpoint_j.proxy());
                            reporting.insert(pair, true);
                        }
                    }

                    j -= 1;
                }
            } else {
                while endpoint_i.value < self.endpoints[j - 1].value {
                    let endpoint_j = self.endpoints[j - 1];
                    self.endpoints[j] = endpoint_j;

                    // An end endpoint crossing a start endpoint may indicate
                    // that an overlap stopped; confirm with a full AABB test.
                    if endpoint_j.is_start() {
                        // Report end collision.
                        if !aabb_i.intersects(&proxies[endpoint_j.proxy()].aabb) {
                            let pair = super::sort2(endpoint_i.proxy(), endpoint_j.proxy());
                            reporting.insert(pair, false);
                        }
                    }

                    j -= 1;
                }
            }

            self.endpoints[j] = endpoint_i;
        }

        // println!(
        //     "Num start swaps: {}, end swaps: {}, dim: {}",
        //     num_start_swaps, num_end_swaps, dim
        // );
    }
}

View File

@@ -0,0 +1,60 @@
use super::SENTINEL_VALUE;
use crate::math::Real;
/// One endpoint of a proxy's AABB interval along one sweep-and-prune axis.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct SAPEndpoint {
    /// Coordinate of this endpoint along the axis.
    pub value: Real,
    /// Proxy index stored in the 31 lower bits, with the start/end flag in bit 31.
    pub packed_flag_proxy: u32,
}
/// Mask isolating the "start endpoint" flag stored in bit 31 of `packed_flag_proxy`.
const START_FLAG_MASK: u32 = 0b1 << 31;
/// Mask isolating the proxy index stored in the 31 lower bits of `packed_flag_proxy`.
const PROXY_MASK: u32 = u32::MAX ^ START_FLAG_MASK;
/// `packed_flag_proxy` value of the start sentinel (start flag set, all proxy bits set).
const START_SENTINEL_TAG: u32 = u32::MAX;
/// `packed_flag_proxy` value of the end sentinel (start flag clear, all proxy bits set).
const END_SENTINEL_TAG: u32 = u32::MAX ^ START_FLAG_MASK;
impl SAPEndpoint {
    /// Builds the endpoint marking the start of a proxy's AABB interval.
    pub fn start_endpoint(value: Real, proxy: u32) -> Self {
        Self {
            packed_flag_proxy: START_FLAG_MASK | proxy,
            value,
        }
    }

    /// Builds the endpoint marking the end of a proxy's AABB interval.
    pub fn end_endpoint(value: Real, proxy: u32) -> Self {
        Self {
            packed_flag_proxy: PROXY_MASK & proxy,
            value,
        }
    }

    /// The sentinel endpoint placed before every other endpoint of an axis.
    pub fn start_sentinel() -> Self {
        Self {
            packed_flag_proxy: START_SENTINEL_TAG,
            value: -SENTINEL_VALUE,
        }
    }

    /// The sentinel endpoint placed after every other endpoint of an axis.
    pub fn end_sentinel() -> Self {
        Self {
            packed_flag_proxy: END_SENTINEL_TAG,
            value: SENTINEL_VALUE,
        }
    }

    /// Is this endpoint one of the two sentinels (all proxy bits set)?
    pub fn is_sentinel(self) -> bool {
        self.proxy() == PROXY_MASK
    }

    /// The index of the proxy this endpoint belongs to.
    pub fn proxy(self) -> u32 {
        self.packed_flag_proxy & !START_FLAG_MASK
    }

    /// Is this a start endpoint (flag bit set)?
    pub fn is_start(self) -> bool {
        self.packed_flag_proxy & START_FLAG_MASK == START_FLAG_MASK
    }

    /// Is this an end endpoint (flag bit clear)?
    pub fn is_end(self) -> bool {
        !self.is_start()
    }
}

View File

@@ -0,0 +1,372 @@
use super::{SAPProxies, SAPProxy, SAPRegion, SAPRegionPool};
use crate::geometry::broad_phase_multi_sap::DELETED_AABB_VALUE;
use crate::geometry::{Collider, SAPProxyIndex, AABB};
use crate::math::{Point, Real};
use parry::utils::hashmap::{Entry, HashMap};
/// One layer of the hierarchical sweep-and-prune broad-phase: a set of
/// regions that all share the same region width (determined by `depth`).
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub(crate) struct SAPLayer {
    /// The depth of this layer; the region width is derived from it.
    pub depth: i8,
    /// Index of this layer in the broad-phase's layer array.
    pub layer_id: u8,
    /// Index of the neighbor layer with the closest smaller depth, if any.
    pub smaller_layer: Option<u8>,
    /// Index of the neighbor layer with the closest larger depth, if any.
    pub larger_layer: Option<u8>,
    /// Width of every region of this layer (computed from `depth`).
    region_width: Real,
    /// Maps a region's discretized grid key to the proxy index of that region.
    pub regions: HashMap<Point<i32>, SAPProxyIndex>,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    regions_to_potentially_remove: Vec<Point<i32>>, // Workspace
    /// Proxy indices of regions created since the last propagation to the larger layer.
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    pub created_regions: Vec<SAPProxyIndex>,
}
impl SAPLayer {
    /// Creates an empty layer at the given depth, linked to its optional
    /// smaller/larger neighbor layers (ordered by depth).
    pub fn new(
        depth: i8,
        layer_id: u8,
        smaller_layer: Option<u8>,
        larger_layer: Option<u8>,
    ) -> Self {
        Self {
            depth,
            smaller_layer,
            larger_layer,
            layer_id,
            region_width: super::region_width(depth),
            regions: HashMap::default(),
            regions_to_potentially_remove: vec![],
            created_regions: vec![],
        }
    }

    /// Deletes from all the regions of this layer, all the endpoints corresponding
    /// to subregions. Clears the arrays of subregions indices from all the regions of
    /// this layer.
    pub fn unregister_all_subregions(&mut self, proxies: &mut SAPProxies) {
        for region_id in self.regions.values() {
            // Extract the region to make the borrow-checker happy.
            let mut region = proxies[*region_id]
                .data
                .take_region()
                .expect("Should be a region proxy.");

            // Delete the endpoints.
            region.delete_all_region_endpoints(proxies);

            // Clear the subregions vec and reset the subregions parent ids.
            for subregion in region.subregions.drain(..) {
                proxies[subregion]
                    .data
                    .as_region_mut()
                    .id_in_parent_subregion = crate::INVALID_U32;
            }

            // Re set the region to make the borrow-checker happy.
            proxies[*region_id].data.set_region(region);
        }
    }

    /// Register into `larger_layer` all the region proxies of the recently-created regions
    /// contained by `self`.
    ///
    /// This method must be called in a bottom-up loop, propagating new regions from the
    /// smallest layer, up to the largest layer. That loop is done by the Phase 3 of the
    /// BroadPhase::update.
    pub fn propagate_created_regions(
        &mut self,
        larger_layer: &mut Self,
        proxies: &mut SAPProxies,
        pool: &mut SAPRegionPool,
    ) {
        for proxy_id in self.created_regions.drain(..) {
            larger_layer.register_subregion(proxy_id, proxies, pool)
        }
    }

    /// Register into `larger_layer` all the region proxies of the region contained in `self`.
    pub fn propagate_existing_regions(
        &mut self,
        larger_layer: &mut Self,
        proxies: &mut SAPProxies,
        pool: &mut SAPRegionPool,
    ) {
        for proxy_id in self.regions.values() {
            larger_layer.register_subregion(*proxy_id, proxies, pool)
        }
    }

    /// Registers a subregion of this layer.
    ///
    /// The subregion proxy will be added to the region of `self` that contains
    /// that subregion center. Because the hierarchical grid cells have aligned boundaries
    /// at each depth, we have the guarantee that a given subregion will only be part of
    /// one region on its parent "larger" layer.
    fn register_subregion(
        &mut self,
        proxy_id: SAPProxyIndex,
        proxies: &mut SAPProxies,
        pool: &mut SAPRegionPool,
    ) {
        if let Some(proxy) = proxies.get(proxy_id) {
            let curr_id_in_parent_subregion = proxy.data.as_region().id_in_parent_subregion;

            if curr_id_in_parent_subregion == crate::INVALID_U32 {
                // Not registered into a parent yet: find (or create) the region
                // containing the subregion's center and register into it.
                let region_key = super::point_key(proxy.aabb.center(), self.region_width);
                let region_id = self.ensure_region_exists(region_key, proxies, pool);
                let region = proxies[region_id].data.as_region_mut();
                let id_in_parent_subregion = region.register_subregion(proxy_id);
                proxies[proxy_id]
                    .data
                    .as_region_mut()
                    .id_in_parent_subregion = id_in_parent_subregion as u32;
            } else {
                // NOTE: all the following are just assertions to make sure the
                // region ids are correctly wired. If this piece of code causes
                // any performance problem, it can be deleted completely without
                // hesitation.
                if curr_id_in_parent_subregion != crate::INVALID_U32 {
                    let region_key = super::point_key(proxy.aabb.center(), self.region_width);
                    let region_id = self.regions.get(&region_key).unwrap();
                    let region = proxies[*region_id].data.as_region_mut();
                    assert_eq!(
                        region.subregions[curr_id_in_parent_subregion as usize],
                        proxy_id
                    );
                }
            }
        }
    }

    /// Unregisters the subregion proxy `proxy_id` (whose region data is `proxy_region`)
    /// from the region of this layer that contains it, and marks that region as
    /// needing an update after the subregion removal.
    fn unregister_subregion(
        &mut self,
        proxy_id: SAPProxyIndex,
        proxy_region: &SAPRegion,
        proxies: &mut SAPProxies,
    ) {
        if let Some(proxy) = proxies.get(proxy_id) {
            let id_in_parent_subregion = proxy_region.id_in_parent_subregion;
            let region_key = super::point_key(proxy.aabb.center(), self.region_width);

            if let Some(region_id) = self.regions.get(&region_key) {
                let proxy = &mut proxies[*region_id];
                let region = proxy.data.as_region_mut();

                // Make sure this region is revisited (only pushed once).
                if !region.needs_update_after_subregion_removal {
                    self.regions_to_potentially_remove.push(region_key);
                    region.needs_update_after_subregion_removal = true;
                }

                let removed = region
                    .subregions
                    .swap_remove(id_in_parent_subregion as usize); // Remove the subregion index from the subregion list.
                assert_eq!(removed, proxy_id);

                // Re-adjust the id_in_parent_subregion of the subregion that was swapped in place
                // of the deleted one.
                if let Some(subregion_to_update) = region
                    .subregions
                    .get(id_in_parent_subregion as usize)
                    .copied()
                {
                    proxies[subregion_to_update]
                        .data
                        .as_region_mut()
                        .id_in_parent_subregion = id_in_parent_subregion;
                }
            }
        }
    }

    /// Ensures a given region exists in this layer.
    ///
    /// If the region with the given region key does not exist yet, it is created.
    /// When a region is created, it creates a new proxy for that region, and its
    /// proxy ID is added to `self.created_region` so it can be propagated during
    /// the Phase 3 of `BroadPhase::update`.
    ///
    /// This returns the proxy ID of the already existing region if it existed, or
    /// of the new region if it did not exist and has been created by this method.
    pub fn ensure_region_exists(
        &mut self,
        region_key: Point<i32>,
        proxies: &mut SAPProxies,
        pool: &mut SAPRegionPool,
    ) -> SAPProxyIndex {
        match self.regions.entry(region_key) {
            // Yay, the region already exists!
            Entry::Occupied(occupied) => *occupied.get(),
            // The region does not exist, create it.
            Entry::Vacant(vacant) => {
                let region_bounds = super::region_aabb(region_key, self.region_width);
                let region = SAPRegion::recycle_or_new(region_bounds, pool);
                // Create a new proxy for that region.
                let region_proxy =
                    SAPProxy::subregion(region, region_bounds, self.layer_id, self.depth);
                let region_proxy_id = proxies.insert(region_proxy);
                // Push this region's proxy ID to the set of created regions.
                self.created_regions.push(region_proxy_id as u32);
                // Insert the new region to this layer's region hashmap.
                let _ = vacant.insert(region_proxy_id);
                region_proxy_id
            }
        }
    }

    /// Pre-inserts a collider's proxy into every region of this layer overlapped
    /// by `aabb`, creating these regions if they don't exist yet.
    pub fn preupdate_collider(
        &mut self,
        collider: &Collider,
        aabb: &AABB,
        proxies: &mut SAPProxies,
        pool: &mut SAPRegionPool,
    ) {
        let proxy_id = collider.proxy_index;
        let start = super::point_key(aabb.mins, self.region_width);
        let end = super::point_key(aabb.maxs, self.region_width);

        // Discretize the aabb.
        #[cfg(feature = "dim2")]
        let k_range = 0..1;
        #[cfg(feature = "dim3")]
        let k_range = start.z..=end.z;

        for i in start.x..=end.x {
            for j in start.y..=end.y {
                for _k in k_range.clone() {
                    #[cfg(feature = "dim2")]
                    let region_key = Point::new(i, j);
                    #[cfg(feature = "dim3")]
                    let region_key = Point::new(i, j, _k);
                    let region_id = self.ensure_region_exists(region_key, proxies, pool);
                    let region = proxies[region_id].data.as_region_mut();
                    region.preupdate_proxy(proxy_id, true);
                }
            }
        }
    }

    /// Marks a proxy as deleted: its AABB is filled with `DELETED_AABB_VALUE` and
    /// every existing region of this layer it overlapped is notified.
    pub fn predelete_proxy(&mut self, proxies: &mut SAPProxies, proxy_index: SAPProxyIndex) {
        // Discretize the AABB to find the regions that need to be invalidated.
        let proxy_aabb = &mut proxies[proxy_index].aabb;
        let start = super::point_key(proxy_aabb.mins, self.region_width);
        let end = super::point_key(proxy_aabb.maxs, self.region_width);

        // Set the AABB of the proxy to a very large value.
        proxy_aabb.mins.coords.fill(DELETED_AABB_VALUE);
        proxy_aabb.maxs.coords.fill(DELETED_AABB_VALUE);

        #[cfg(feature = "dim2")]
        let k_range = 0..1;
        #[cfg(feature = "dim3")]
        let k_range = start.z..=end.z;

        for i in start.x..=end.x {
            for j in start.y..=end.y {
                for _k in k_range.clone() {
                    #[cfg(feature = "dim2")]
                    let key = Point::new(i, j);
                    #[cfg(feature = "dim3")]
                    let key = Point::new(i, j, _k);
                    if let Some(region_id) = self.regions.get(&key) {
                        let region = proxies[*region_id].data.as_region_mut();
                        region.predelete_proxy(proxy_index);
                    }
                }
            }
        }
    }

    /// Updates every region of this layer, collecting the reported pairs into
    /// `reporting` and queuing for potential removal the regions left without
    /// subproper proxies.
    pub fn update_regions(
        &mut self,
        proxies: &mut SAPProxies,
        reporting: &mut HashMap<(u32, u32), bool>,
    ) {
        // println!(
        //     "Num regions at depth {}: {}",
        //     self.depth,
        //     self.regions.len(),
        // );
        for (point, region_id) in &self.regions {
            if let Some(mut region) = proxies[*region_id].data.take_region() {
                // Update the region.
                region.update(proxies, self.depth, reporting);

                // Mark all subregions as to-be-updated.
                for subregion_id in &region.subregions {
                    proxies[*subregion_id].data.as_region_mut().mark_as_dirty();
                }

                if !region.contains_subproper_proxies() {
                    self.regions_to_potentially_remove.push(*point);
                }

                proxies[*region_id].data.set_region(region);
            }
        }
    }

    /// Complete the removals of empty regions on this layer.
    ///
    /// This method must be called in a bottom-up fashion, i.e.,
    /// by starting with the smallest layer up to the larger layer.
    /// This is needed in order to properly propagate the removal
    /// of a region (which is a subregion registered into the larger
    /// layer).
    pub fn complete_removals(
        &mut self,
        mut larger_layer: Option<&mut Self>,
        proxies: &mut SAPProxies,
        pool: &mut SAPRegionPool,
    ) {
        // Delete all the empty regions and store them in the region pool.
        for region_to_remove in self.regions_to_potentially_remove.drain(..) {
            if let Entry::Occupied(region_id) = self.regions.entry(region_to_remove) {
                if let Some(proxy) = proxies.get_mut(*region_id.get()) {
                    // First, we need to remove the endpoints of the deleted subregions.
                    let mut region = proxy.data.take_region().unwrap();
                    region.update_after_subregion_removal(proxies, self.depth);

                    // Check if we can actually delete this region.
                    if !region.contains_subproper_proxies() {
                        let region_id = region_id.remove();

                        // We can delete this region. So we need to tell the larger
                        // layer that one of its subregion is being deleted.
                        // The next call to `complete_removals` on the larger layer
                        // will take care of updating its own regions by taking into
                        // account this deleted subregion.
                        if let Some(larger_layer) = &mut larger_layer {
                            larger_layer.unregister_subregion(region_id, &region, proxies);
                        }

                        // Move the proxy to infinity.
                        let proxy = &mut proxies[region_id];
                        proxy.aabb.mins.coords.fill(DELETED_AABB_VALUE);
                        proxy.aabb.maxs.coords.fill(DELETED_AABB_VALUE);

                        // Mark the proxy as deleted.
                        proxies.remove(region_id);
                        pool.push(region);
                    } else {
                        proxies[*region_id.get()].data.set_region(region);
                    }
                }
            }
        }
    }

    /// Notifies every region of this layer that the proper proxy `proxy_id` was
    /// promoted to a bigger layer; regions left without subproper proxies are
    /// queued for potential removal.
    pub fn proper_proxy_moved_to_bigger_layer(
        &mut self,
        proxies: &mut SAPProxies,
        proxy_id: SAPProxyIndex,
    ) {
        for (point, region_id) in &self.regions {
            let region = &mut proxies[*region_id].data.as_region_mut();
            let region_contains_proxy = region.proper_proxy_moved_to_a_bigger_layer(proxy_id);

            // If that proper proxy was the last one keeping that region
            // alive, mark the region as potentially removable.
            if region_contains_proxy && !region.contains_subproper_proxies() {
                self.regions_to_potentially_remove.push(*point);
            }
        }
    }
}

View File

@@ -0,0 +1,139 @@
use super::NEXT_FREE_SENTINEL;
use crate::geometry::broad_phase_multi_sap::SAPRegion;
use crate::geometry::ColliderHandle;
use parry::bounding_volume::AABB;
use std::ops::{Index, IndexMut};
/// The index of a proxy in the broad-phase's `SAPProxies` storage.
pub type SAPProxyIndex = u32;
/// The payload of a broad-phase proxy.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub enum SAPProxyData {
    /// The proxy stands for a collider.
    Collider(ColliderHandle),
    /// The proxy stands for a region. The `Option` is only `None` transiently,
    /// while the region is taken out (see `take_region`) to work around borrowing.
    Region(Option<Box<SAPRegion>>),
}
impl SAPProxyData {
    /// Does this proxy represent a region (rather than a collider)?
    pub fn is_region(&self) -> bool {
        // Idiomatic form of the `match { Region(_) => true, _ => false }` pattern.
        matches!(self, SAPProxyData::Region(_))
    }

    /// Returns the region represented by this proxy.
    ///
    /// # Panics
    /// Panics if this proxy is not a region, or if the region was taken out
    /// (see [`Self::take_region`]) and not set back yet.
    pub fn as_region(&self) -> &SAPRegion {
        match self {
            SAPProxyData::Region(r) => r.as_ref().unwrap(),
            _ => panic!("Invalid proxy type."),
        }
    }

    /// Returns the region represented by this proxy, mutably.
    ///
    /// # Panics
    /// Panics if this proxy is not a region, or if the region was taken out
    /// (see [`Self::take_region`]) and not set back yet.
    pub fn as_region_mut(&mut self) -> &mut SAPRegion {
        match self {
            SAPProxyData::Region(r) => r.as_mut().unwrap(),
            _ => panic!("Invalid proxy type."),
        }
    }

    /// Moves the region out of this proxy, leaving `None` in its place.
    ///
    /// Returns `None` if this proxy is not a region (or if the region was
    /// already taken). Useful to sidestep borrowing issues: take the region,
    /// mutate it, then restore it with [`Self::set_region`].
    pub fn take_region(&mut self) -> Option<Box<SAPRegion>> {
        match self {
            SAPProxyData::Region(r) => r.take(),
            _ => None,
        }
    }

    /// Sets the region represented by this proxy.
    pub fn set_region(&mut self, region: Box<SAPRegion>) {
        *self = SAPProxyData::Region(Some(region));
    }
}
/// A proxy tracked by the sweep-and-prune broad-phase: either a collider or a region.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct SAPProxy {
    /// What this proxy stands for (a collider handle, or a region).
    pub data: SAPProxyData,
    /// The AABB tracked for this proxy.
    pub aabb: AABB,
    /// Next slot of the free-list when this proxy's slot has been freed.
    pub next_free: SAPProxyIndex,
    // TODO: pack the layer_id and layer_depth into a single u16?
    /// Index of the layer this proxy belongs to.
    pub layer_id: u8,
    /// Depth of the layer this proxy belongs to.
    pub layer_depth: i8,
}
impl SAPProxy {
    /// Builds a proxy representing the collider identified by `handle`.
    pub fn collider(handle: ColliderHandle, aabb: AABB, layer_id: u8, layer_depth: i8) -> Self {
        Self {
            aabb,
            layer_id,
            layer_depth,
            next_free: NEXT_FREE_SENTINEL,
            data: SAPProxyData::Collider(handle),
        }
    }

    /// Builds a proxy representing a region of the layer identified by `layer_id`.
    pub fn subregion(subregion: Box<SAPRegion>, aabb: AABB, layer_id: u8, layer_depth: i8) -> Self {
        Self {
            aabb,
            layer_id,
            layer_depth,
            next_free: NEXT_FREE_SENTINEL,
            data: SAPProxyData::Region(Some(subregion)),
        }
    }
}
/// An arena of proxies whose freed slots are recycled through an intrusive free-list.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct SAPProxies {
    /// All the proxy slots, including the freed ones.
    pub elements: Vec<SAPProxy>,
    /// Head of the free-list, or `NEXT_FREE_SENTINEL` if no slot is free.
    pub first_free: SAPProxyIndex,
}
impl SAPProxies {
    /// Creates an empty proxy arena.
    pub fn new() -> Self {
        Self {
            elements: Vec::new(),
            first_free: NEXT_FREE_SENTINEL,
        }
    }

    /// Inserts a new proxy, reusing a freed slot if one is available.
    ///
    /// Returns the index of the slot where the proxy was stored.
    pub fn insert(&mut self, proxy: SAPProxy) -> SAPProxyIndex {
        // NOTE: the redundant `let result = …; result` binding was removed:
        // the `if` expression is returned directly.
        if self.first_free != NEXT_FREE_SENTINEL {
            // Pop the head of the free-list and store the proxy there.
            let proxy_id = self.first_free;
            self.first_free = self.elements[proxy_id as usize].next_free;
            self.elements[proxy_id as usize] = proxy;
            proxy_id
        } else {
            self.elements.push(proxy);
            self.elements.len() as u32 - 1
        }
    }

    /// Pushes the slot at `proxy_id` onto the free-list so `insert` can reuse it.
    ///
    /// The slot's content is not cleared: it must not be read again until it is
    /// overwritten by a future insertion.
    pub fn remove(&mut self, proxy_id: SAPProxyIndex) {
        let proxy = &mut self.elements[proxy_id as usize];
        proxy.next_free = self.first_free;
        // `proxy_id` is already a `u32` (`SAPProxyIndex`); no cast needed.
        self.first_free = proxy_id;
    }

    // NOTE: this must not take holes into account.
    pub fn get_mut(&mut self, i: SAPProxyIndex) -> Option<&mut SAPProxy> {
        self.elements.get_mut(i as usize)
    }
    // NOTE: this must not take holes into account.
    pub fn get(&self, i: SAPProxyIndex) -> Option<&SAPProxy> {
        self.elements.get(i as usize)
    }
}
impl Index<SAPProxyIndex> for SAPProxies {
    type Output = SAPProxy;

    /// Returns the proxy stored at slot `i`; panics if `i` is out of bounds.
    fn index(&self, i: SAPProxyIndex) -> &SAPProxy {
        &self.elements[i as usize]
    }
}
impl IndexMut<SAPProxyIndex> for SAPProxies {
    /// Panicking mutable indexed access to a proxy slot.
    fn index_mut(&mut self, i: SAPProxyIndex) -> &mut SAPProxy {
        &mut self.elements[i as usize]
    }
}

View File

@@ -0,0 +1,231 @@
use super::{SAPAxis, SAPProxies};
use crate::geometry::SAPProxyIndex;
use crate::math::DIM;
use bit_vec::BitVec;
use parry::bounding_volume::AABB;
use parry::utils::hashmap::HashMap;
pub type SAPRegionPool = Vec<Box<SAPRegion>>;
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
/// One region of the multi-SAP broad-phase: a sweep-and-prune instance
/// covering one bounded cell of space.
pub struct SAPRegion {
    /// One sorted endpoint axis per spatial dimension.
    pub axes: [SAPAxis; DIM],
    /// Per-proxy-index bit: is that proxy currently registered in this region?
    pub existing_proxies: BitVec,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    pub to_insert: Vec<SAPProxyIndex>, // Workspace
    /// Proxy indices of the subregions registered in this region.
    pub subregions: Vec<SAPProxyIndex>,
    /// Position of this region inside its parent's subregion list
    /// (`crate::INVALID_U32` when not registered — see `register_subregion`).
    pub id_in_parent_subregion: u32,
    /// Number of broad-phase updates this region still needs to run.
    pub update_count: u8,
    /// Set when a subregion was removed, requiring a cleanup pass
    /// (see `update_after_subregion_removal`).
    pub needs_update_after_subregion_removal: bool,
    // Number of proxies (added to this region) that originate
    // from the layer at depth <= the depth of the layer containing
    // this region.
    subproper_proxy_count: usize,
}
impl SAPRegion {
    /// Creates a new, empty region covering the given bounds.
    pub fn new(bounds: AABB) -> Self {
        let axes = [
            SAPAxis::new(bounds.mins.x, bounds.maxs.x),
            SAPAxis::new(bounds.mins.y, bounds.maxs.y),
            #[cfg(feature = "dim3")]
            SAPAxis::new(bounds.mins.z, bounds.maxs.z),
        ];
        SAPRegion {
            axes,
            existing_proxies: BitVec::new(),
            to_insert: Vec::new(),
            subregions: Vec::new(),
            id_in_parent_subregion: crate::INVALID_U32,
            update_count: 0,
            needs_update_after_subregion_removal: false,
            subproper_proxy_count: 0,
        }
    }

    /// Re-initializes a previously-used region so it can be reused with
    /// the given bounds. Asserts that the region was properly emptied
    /// (no subproper proxies, no pending insertions) before recycling.
    pub fn recycle(bounds: AABB, mut old: Box<Self>) -> Box<Self> {
        // Correct the bounds
        for i in 0..DIM {
            // Make sure the axis is empty (it may still contain
            // some old endpoints from non-proper proxies).
            old.axes[i].clear();
            old.axes[i].min_bound = bounds.mins[i];
            old.axes[i].max_bound = bounds.maxs[i];
        }

        old.update_count = 0;
        old.existing_proxies.clear();
        old.id_in_parent_subregion = crate::INVALID_U32;
        // The rest of the fields should be "empty"
        assert_eq!(old.subproper_proxy_count, 0);
        assert!(old.to_insert.is_empty());
        old
    }

    /// Grabs a region from the pool (recycling it for the given bounds),
    /// or allocates a fresh one if the pool is empty.
    pub fn recycle_or_new(bounds: AABB, pool: &mut Vec<Box<Self>>) -> Box<Self> {
        if let Some(old) = pool.pop() {
            Self::recycle(bounds, old)
        } else {
            Box::new(Self::new(bounds))
        }
    }

    /// Does this region still contain endpoints of subproper proxies?
    pub fn contains_subproper_proxies(&self) -> bool {
        self.subproper_proxy_count > 0
    }

    /// If this region contains the given proxy, this will decrement this region's proxy count.
    ///
    /// Returns `true` if this region contained the proxy. Returns `false` otherwise.
    pub fn proper_proxy_moved_to_a_bigger_layer(&mut self, proxy_id: SAPProxyIndex) -> bool {
        if self.existing_proxies[proxy_id as usize] {
            self.subproper_proxy_count -= 1;
            true
        } else {
            false
        }
    }

    /// Deletes from the axes of this region all the endpoints that point
    /// to a region.
    pub fn delete_all_region_endpoints(&mut self, proxies: &SAPProxies) {
        let mut num_deleted_subregion_endpoints = [0; DIM];

        for (i, axis) in self.axes.iter_mut().enumerate() {
            let existing_proxies = &mut self.existing_proxies;

            axis.endpoints.retain(|e| {
                // NOTE: we use `if let` instead of `unwrap` because no
                // proxy will be found for the sentinels.
                if let Some(proxy) = proxies.get(e.proxy()) {
                    if proxy.data.is_region() {
                        existing_proxies.set(e.proxy() as usize, false);
                        num_deleted_subregion_endpoints[i] += 1;
                        return false;
                    }
                }

                true
            });
        }

        // All axes should have deleted the same number of region endpoints.
        for k in 1..DIM {
            assert_eq!(
                num_deleted_subregion_endpoints[0],
                num_deleted_subregion_endpoints[k]
            );
        }

        // The number of deleted endpoints should be even because there
        // are two endpoints per proxy on each axes.
        assert_eq!(num_deleted_subregion_endpoints[0] % 2, 0);

        // All the region endpoints are subproper proxies.
        // So update the subproper proxy count accordingly.
        self.subproper_proxy_count -= num_deleted_subregion_endpoints[0] / 2;
    }

    /// Prepares this region for the upcoming deletion of the given proxy.
    pub fn predelete_proxy(&mut self, _proxy_id: SAPProxyIndex) {
        // We keep the proxy_id as argument for uniformity with the "preupdate"
        // method. However we don't actually need it because the deletion will be
        // handled transparently during the next update.
        self.update_count = self.update_count.max(1);
    }

    /// Forces at least one update of this region during the next broad-phase update.
    pub fn mark_as_dirty(&mut self) {
        self.update_count = self.update_count.max(1);
    }

    /// Registers the proxy of a subregion into this region, returning the
    /// index at which it was pushed into `self.subregions`.
    pub fn register_subregion(&mut self, proxy_id: SAPProxyIndex) -> usize {
        let subregion_index = self.subregions.len();
        self.subregions.push(proxy_id);
        self.preupdate_proxy(proxy_id, true);
        subregion_index
    }

    /// Queues the given proxy for insertion into this region (unless it is
    /// already present). Returns `true` if the proxy was already present.
    pub fn preupdate_proxy(&mut self, proxy_id: SAPProxyIndex, is_subproper_proxy: bool) -> bool {
        // Grow the presence bitmask so `proxy_id` is addressable.
        let mask_len = self.existing_proxies.len();
        if proxy_id as usize >= mask_len {
            self.existing_proxies
                .grow(proxy_id as usize + 1 - mask_len, false);
        }

        if !self.existing_proxies[proxy_id as usize] {
            self.to_insert.push(proxy_id);
            self.existing_proxies.set(proxy_id as usize, true);
            if is_subproper_proxy {
                self.subproper_proxy_count += 1;
            }
            false
        } else {
            // Here we need a second update if all proxies exit this region. In this case, we need
            // to delete the final proxy, but the region may not have AABBs overlapping it, so it
            // wouldn't get an update otherwise.
            self.update_count = 2;
            true
        }
    }

    /// Runs the endpoint cleanup pass required after a subregion was removed,
    /// updating the subproper proxy count accordingly. No-op unless
    /// `needs_update_after_subregion_removal` is set.
    pub fn update_after_subregion_removal(&mut self, proxies: &SAPProxies, layer_depth: i8) {
        if self.needs_update_after_subregion_removal {
            for axis in &mut self.axes {
                self.subproper_proxy_count -= axis
                    .delete_deleted_proxies_and_endpoints_after_subregion_removal(
                        proxies,
                        &mut self.existing_proxies,
                        layer_depth,
                    );
            }
            self.needs_update_after_subregion_removal = false;
        }
    }

    /// Runs one sweep-and-prune update of this region: re-sorts endpoints,
    /// removes out-of-bounds proxies, then batch-inserts queued proxies,
    /// reporting pair additions/removals into `reporting`.
    pub fn update(
        &mut self,
        proxies: &SAPProxies,
        layer_depth: i8,
        reporting: &mut HashMap<(u32, u32), bool>,
    ) {
        if self.update_count > 0 {
            // Update endpoints.
            let mut total_deleted = 0;
            let mut total_deleted_subproper = 0;

            for dim in 0..DIM {
                self.axes[dim].update_endpoints(dim, proxies, reporting);
                let (num_deleted, num_deleted_subproper) = self.axes[dim]
                    .delete_out_of_bounds_proxies(proxies, &mut self.existing_proxies, layer_depth);
                total_deleted += num_deleted;
                total_deleted_subproper += num_deleted_subproper;
            }

            if total_deleted > 0 {
                self.subproper_proxy_count -= total_deleted_subproper;
                for dim in 0..DIM {
                    self.axes[dim].delete_out_of_bounds_endpoints(&self.existing_proxies);
                }
            }

            self.update_count -= 1;
        }

        if !self.to_insert.is_empty() {
            // Insert new proxies.
            for dim in 1..DIM {
                self.axes[dim].batch_insert(dim, &self.to_insert, proxies, None);
            }
            self.axes[0].batch_insert(0, &self.to_insert, proxies, Some(reporting));
            self.to_insert.clear();

            // In the rare event that all proxies leave this region in the next step, we need an
            // update to remove them.
            self.update_count = 1;
        }
    }
}

View File

@@ -0,0 +1,62 @@
use crate::math::{Point, Real, Vector};
use parry::bounding_volume::AABB;
// NOTE(review): presumably one sentinel endpoint at each end of a SAP axis —
// confirm against the SAPAxis implementation.
pub(crate) const NUM_SENTINELS: usize = 1;
/// Marker terminating the proxies free-list (see `SAPProxies`).
pub(crate) const NEXT_FREE_SENTINEL: u32 = u32::MAX;
/// Coordinate value used for sentinel endpoints; larger than any valid
/// clamped coordinate.
pub(crate) const SENTINEL_VALUE: Real = Real::MAX;
// NOTE(review): presumably the coordinate assigned to endpoints of deleted
// proxies so they sort past all live endpoints but before the sentinels.
pub(crate) const DELETED_AABB_VALUE: Real = SENTINEL_VALUE / 2.0;
/// Maximum absolute coordinate an AABB endpoint may have (enforced by
/// `clamp_point`, and used as the cap in `region_width`).
pub(crate) const MAX_AABB_EXTENT: Real = SENTINEL_VALUE / 4.0;
/// Width of the regions of the layer at depth 0 (see `region_width`).
pub(crate) const REGION_WIDTH_BASE: Real = 1.0;
/// Factor by which region widths grow from one layer depth to the next
/// (see `region_width`).
pub(crate) const REGION_WIDTH_POWER_BASIS: Real = 5.0;
/// Orders two distinct values into a `(smaller, larger)` pair.
///
/// Panics if `a == b`.
pub(crate) fn sort2(a: u32, b: u32) -> (u32, u32) {
    assert_ne!(a, b);
    (a.min(b), a.max(b))
}
/// Clamps every coordinate of `point` into `[-MAX_AABB_EXTENT, MAX_AABB_EXTENT]`
/// so endpoints never collide with the sentinel/deleted special values.
pub(crate) fn clamp_point(point: Point<Real>) -> Point<Real> {
    let limit = MAX_AABB_EXTENT;
    point.map(|coord| na::clamp(coord, -limit, limit))
}
/// Computes the integer grid-cell key containing `point`, for a grid of
/// cells of size `region_width`.
pub(crate) fn point_key(point: Point<Real>, region_width: Real) -> Point<i32> {
    let scaled = point / region_width;
    scaled.coords.map(|coord| coord.floor() as i32).into()
}
/// Builds the AABB of the grid cell at `index`, for a grid of cells of
/// size `region_width` (inverse of `point_key`).
pub(crate) fn region_aabb(index: Point<i32>, region_width: Real) -> AABB {
    let mins = index.coords.map(|i| i as Real * region_width).into();
    let maxs = mins + Vector::repeat(region_width);
    AABB::new(mins, maxs)
}
/// Width of the regions of the layer at the given depth:
/// `REGION_WIDTH_BASE * REGION_WIDTH_POWER_BASIS^depth`, capped at
/// `MAX_AABB_EXTENT`.
pub(crate) fn region_width(depth: i8) -> Real {
    let width = REGION_WIDTH_BASE * REGION_WIDTH_POWER_BASIS.powi(depth as i32);
    width.min(MAX_AABB_EXTENT)
}
/// Computes the depth of the layer the given AABB should be part of.
///
/// The idea here is that an AABB should be part of a layer which has
/// regions large enough so that one AABB doesn't cross too many
/// regions. But the regions must also not be too large, otherwise
/// we are losing the benefits of Multi-SAP.
///
/// In the code below, we select a layer such that each region can
/// contain at least a chain of 10 contiguous objects with that AABB.
pub(crate) fn layer_containing_aabb(aabb: &AABB) -> i8 {
    // Max number of elements of this size we would like one region to be able to contain.
    const NUM_ELEMENTS_PER_DIMENSION: Real = 10.0;
    let target_width = 2.0 * aabb.half_extents().norm() * NUM_ELEMENTS_PER_DIMENSION;
    // Invert width = REGION_WIDTH_BASE * REGION_WIDTH_POWER_BASIS^depth,
    // then clamp the rounded result into the representable i8 range.
    let depth = (target_width / REGION_WIDTH_BASE).log(REGION_WIDTH_POWER_BASIS);
    depth.round().max(i8::MIN as Real).min(i8::MAX as Real) as i8
}

View File

@@ -1,8 +1,9 @@
use crate::dynamics::{CoefficientCombineRule, MassProperties, RigidBodyHandle};
use crate::geometry::{InteractionGroups, SharedShape, SolverFlags};
use crate::geometry::{InteractionGroups, SAPProxyIndex, SharedShape, SolverFlags};
use crate::math::{AngVector, Isometry, Point, Real, Rotation, Vector, DIM};
use crate::parry::transformation::vhacd::VHACDParameters;
use parry::bounding_volume::AABB;
use na::Unit;
use parry::bounding_volume::{BoundingVolume, AABB};
use parry::shape::Shape;
bitflags::bitflags! {
@@ -49,6 +50,34 @@ enum MassInfo {
MassProperties(Box<MassProperties>),
}
bitflags::bitflags! {
    #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
    /// Flags describing how the collider has been modified by the user.
    pub(crate) struct ColliderChanges: u32 {
        /// Generic "dirty" marker, set whenever the collider is accessed
        /// mutably (see `ColliderSet::mark_as_modified`).
        const MODIFIED = 1 << 0;
        const POSITION_WRT_PARENT = 1 << 1; // => BF & NF updates.
        const POSITION = 1 << 2; // => BF & NF updates.
        const COLLISION_GROUPS = 1 << 3; // => NF update.
        const SOLVER_GROUPS = 1 << 4; // => NF update.
        const SHAPE = 1 << 5; // => BF & NF update. NF pair workspace invalidation.
        const SENSOR = 1 << 6; // => NF update. NF pair invalidation.
    }
}
impl ColliderChanges {
    /// Does this set of changes require a broad-phase update?
    pub fn needs_broad_phase_update(self) -> bool {
        self.intersects(
            ColliderChanges::POSITION_WRT_PARENT
                | ColliderChanges::POSITION
                | ColliderChanges::SHAPE,
        )
    }

    /// Does this set of changes require a narrow-phase update?
    ///
    /// True whenever any flag other than `MODIFIED` (bit 0) is set: any
    /// higher bit makes the raw bit value strictly greater than 1.
    pub fn needs_narrow_phase_update(self) -> bool {
        self.bits() > 1
    }
}
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
/// A geometric entity that can be attached to a body so it can be affected by contacts and proximity queries.
@@ -59,17 +88,17 @@ pub struct Collider {
mass_info: MassInfo,
pub(crate) flags: ColliderFlags,
pub(crate) solver_flags: SolverFlags,
pub(crate) changes: ColliderChanges,
pub(crate) parent: RigidBodyHandle,
pub(crate) delta: Isometry<Real>,
pub(crate) position: Isometry<Real>,
pub(crate) predicted_position: Isometry<Real>,
/// The friction coefficient of this collider.
pub friction: Real,
/// The restitution coefficient of this collider.
pub restitution: Real,
pub(crate) collision_groups: InteractionGroups,
pub(crate) solver_groups: InteractionGroups,
pub(crate) proxy_index: usize,
pub(crate) proxy_index: SAPProxyIndex,
/// User-defined data associated to this rigid-body.
pub user_data: u128,
}
@@ -77,7 +106,8 @@ pub struct Collider {
impl Collider {
pub(crate) fn reset_internal_references(&mut self) {
self.parent = RigidBodyHandle::invalid();
self.proxy_index = crate::INVALID_USIZE;
self.proxy_index = crate::INVALID_U32;
self.changes = ColliderChanges::empty();
}
/// The rigid body this collider is attached to.
@@ -90,6 +120,42 @@ impl Collider {
self.flags.is_sensor()
}
/// The combine rule used by this collider to combine its friction
/// coefficient with the friction coefficient of the other collider it
/// is in contact with.
pub fn friction_combine_rule(&self) -> CoefficientCombineRule {
CoefficientCombineRule::from_value(self.flags.friction_combine_rule_value())
}
/// Sets the combine rule used by this collider to combine its friction
/// coefficient with the friction coefficient of the other collider it
/// is in contact with.
pub fn set_friction_combine_rule(&mut self, rule: CoefficientCombineRule) {
self.flags = self.flags.with_friction_combine_rule(rule);
}
/// The combine rule used by this collider to combine its restitution
/// coefficient with the restitution coefficient of the other collider it
/// is in contact with.
pub fn restitution_combine_rule(&self) -> CoefficientCombineRule {
CoefficientCombineRule::from_value(self.flags.restitution_combine_rule_value())
}
/// Sets the combine rule used by this collider to combine its restitution
/// coefficient with the restitution coefficient of the other collider it
/// is in contact with.
pub fn set_restitution_combine_rule(&mut self, rule: CoefficientCombineRule) {
self.flags = self.flags.with_restitution_combine_rule(rule)
}
/// Sets whether or not this is a sensor collider.
pub fn set_sensor(&mut self, is_sensor: bool) {
if is_sensor != self.is_sensor() {
self.changes.insert(ColliderChanges::SENSOR);
self.flags.set(ColliderFlags::SENSOR, is_sensor);
}
}
#[doc(hidden)]
pub fn set_position_debug(&mut self, position: Isometry<Real>) {
self.position = position;
@@ -106,21 +172,49 @@ impl Collider {
&self.position
}
/// Sets the position of this collider wrt. its parent rigid-body.
pub(crate) fn set_position(&mut self, position: Isometry<Real>) {
self.changes.insert(ColliderChanges::POSITION);
self.position = position;
}
/// The position of this collider wrt the body it is attached to.
pub fn position_wrt_parent(&self) -> &Isometry<Real> {
&self.delta
}
/// Sets the position of this collider wrt. its parent rigid-body.
pub fn set_position_wrt_parent(&mut self, position: Isometry<Real>) {
self.changes.insert(ColliderChanges::POSITION_WRT_PARENT);
self.delta = position;
}
/// The collision groups used by this collider.
pub fn collision_groups(&self) -> InteractionGroups {
self.collision_groups
}
/// Sets the collision groups of this collider.
pub fn set_collision_groups(&mut self, groups: InteractionGroups) {
if self.collision_groups != groups {
self.changes.insert(ColliderChanges::COLLISION_GROUPS);
self.collision_groups = groups;
}
}
/// The solver groups used by this collider.
pub fn solver_groups(&self) -> InteractionGroups {
self.solver_groups
}
/// Sets the solver groups of this collider.
pub fn set_solver_groups(&mut self, groups: InteractionGroups) {
if self.solver_groups != groups {
self.changes.insert(ColliderChanges::SOLVER_GROUPS);
self.solver_groups = groups;
}
}
/// The density of this collider, if set.
pub fn density(&self) -> Option<Real> {
match &self.mass_info {
@@ -134,16 +228,32 @@ impl Collider {
&*self.shape.0
}
/// A mutable reference to the geometric shape of this collider.
///
/// If that shape is shared by multiple colliders, it will be
/// cloned first so that `self` contains a unique copy of that
/// shape that you can modify.
pub fn shape_mut(&mut self) -> &mut dyn Shape {
self.shape.make_mut()
}
/// Sets the shape of this collider.
pub fn set_shape(&mut self, shape: SharedShape) {
self.changes.insert(ColliderChanges::SHAPE);
self.shape = shape;
}
/// Compute the axis-aligned bounding box of this collider.
pub fn compute_aabb(&self) -> AABB {
self.shape.compute_aabb(&self.position)
}
// pub(crate) fn compute_aabb_with_prediction(&self) -> AABB {
// let aabb1 = self.shape.compute_aabb(&self.position);
// let aabb2 = self.shape.compute_aabb(&self.predicted_position);
// aabb1.merged(&aabb2)
// }
/// Compute the axis-aligned bounding box of this collider.
pub fn compute_swept_aabb(&self, next_position: &Isometry<Real>) -> AABB {
let aabb1 = self.shape.compute_aabb(&self.position);
let aabb2 = self.shape.compute_aabb(next_position);
aabb1.merged(&aabb2)
}
/// Compute the local-space mass properties of this collider.
pub fn mass_properties(&self) -> MassProperties {
@@ -218,6 +328,12 @@ impl ColliderBuilder {
Self::new(SharedShape::ball(radius))
}
/// Initialize a new collider build with a half-space shape defined by the outward normal
/// of its planar boundary.
pub fn halfspace(outward_normal: Unit<Vector<Real>>) -> Self {
Self::new(SharedShape::halfspace(outward_normal))
}
/// Initialize a new collider builder with a cylindrical shape defined by its half-height
/// (along along the y axis) and its radius.
#[cfg(feature = "dim3")]
@@ -553,6 +669,14 @@ impl ColliderBuilder {
/// Sets the initial position (translation and orientation) of the collider to be created,
/// relative to the rigid-body it is attached to.
pub fn position_wrt_parent(mut self, pos: Isometry<Real>) -> Self {
self.delta = pos;
self
}
/// Sets the initial position (translation and orientation) of the collider to be created,
/// relative to the rigid-body it is attached to.
#[deprecated(note = "Use `.position_wrt_parent` instead.")]
pub fn position(mut self, pos: Isometry<Real>) -> Self {
self.delta = pos;
self
@@ -594,10 +718,10 @@ impl ColliderBuilder {
delta: self.delta,
flags,
solver_flags,
changes: ColliderChanges::all(),
parent: RigidBodyHandle::invalid(),
position: Isometry::identity(),
predicted_position: Isometry::identity(),
proxy_index: crate::INVALID_USIZE,
proxy_index: crate::INVALID_U32,
collision_groups: self.collision_groups,
solver_groups: self.solver_groups,
user_data: self.user_data,

View File

@@ -1,7 +1,8 @@
use crate::data::arena::Arena;
use crate::data::pubsub::PubSub;
use crate::dynamics::{RigidBodyHandle, RigidBodySet};
use crate::geometry::Collider;
use crate::geometry::collider::ColliderChanges;
use crate::geometry::{Collider, SAPProxyIndex};
use parry::partitioning::IndexedData;
use std::ops::{Index, IndexMut};
@@ -45,7 +46,7 @@ impl IndexedData for ColliderHandle {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub(crate) struct RemovedCollider {
pub handle: ColliderHandle,
pub(crate) proxy_index: usize,
pub(crate) proxy_index: SAPProxyIndex,
}
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
@@ -54,6 +55,8 @@ pub(crate) struct RemovedCollider {
pub struct ColliderSet {
pub(crate) removed_colliders: PubSub<RemovedCollider>,
pub(crate) colliders: Arena<Collider>,
pub(crate) modified_colliders: Vec<ColliderHandle>,
pub(crate) modified_all_colliders: bool,
}
impl ColliderSet {
@@ -62,6 +65,8 @@ impl ColliderSet {
ColliderSet {
removed_colliders: PubSub::new(),
colliders: Arena::new(),
modified_colliders: Vec::new(),
modified_all_colliders: false,
}
}
@@ -75,6 +80,37 @@ impl ColliderSet {
self.colliders.iter().map(|(h, c)| (ColliderHandle(h), c))
}
/// Iterates mutably through all the colliders on this set.
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn iter_mut(&mut self) -> impl Iterator<Item = (ColliderHandle, &mut Collider)> {
self.modified_colliders.clear();
self.modified_all_colliders = true;
self.colliders
.iter_mut()
.map(|(h, b)| (ColliderHandle(h), b))
}
#[inline(always)]
pub(crate) fn foreach_modified_colliders(&self, mut f: impl FnMut(ColliderHandle, &Collider)) {
for handle in &self.modified_colliders {
if let Some(rb) = self.colliders.get(handle.0) {
f(*handle, rb)
}
}
}
#[inline(always)]
pub(crate) fn foreach_modified_colliders_mut_internal(
&mut self,
mut f: impl FnMut(ColliderHandle, &mut Collider),
) {
for handle in &self.modified_colliders {
if let Some(rb) = self.colliders.get_mut(handle.0) {
f(*handle, rb)
}
}
}
/// The number of colliders on this set.
pub fn len(&self) -> usize {
self.colliders.len()
@@ -90,6 +126,24 @@ impl ColliderSet {
self.colliders.contains(handle.0)
}
pub(crate) fn contains_any_modified_collider(&self) -> bool {
self.modified_all_colliders || !self.modified_colliders.is_empty()
}
pub(crate) fn clear_modified_colliders(&mut self) {
if self.modified_all_colliders {
for collider in self.colliders.iter_mut() {
collider.1.changes = ColliderChanges::empty();
}
self.modified_colliders.clear();
self.modified_all_colliders = false;
} else {
for handle in self.modified_colliders.drain(..) {
self.colliders[handle.0].changes = ColliderChanges::empty();
}
}
}
/// Inserts a new collider to this set and retrieve its handle.
pub fn insert(
&mut self,
@@ -106,11 +160,12 @@ impl ColliderSet {
// NOTE: we use `get_mut` instead of `get_mut_internal` so that the
// modification flag is updated properly.
let parent = bodies
.get_mut(parent_handle)
.get_mut_internal_with_modification_tracking(parent_handle)
.expect("Parent rigid body not found.");
coll.position = parent.position * coll.delta;
coll.predicted_position = parent.predicted_position * coll.delta;
let handle = ColliderHandle(self.colliders.insert(coll));
self.modified_colliders.push(handle);
let coll = self.colliders.get(handle.0).unwrap();
parent.add_collider(handle, &coll);
handle
@@ -133,7 +188,7 @@ impl ColliderSet {
*/
// NOTE: we use `get_mut` instead of `get_mut_internal` so that the
// modification flag is updated properly.
if let Some(parent) = bodies.get_mut(collider.parent) {
if let Some(parent) = bodies.get_mut_internal_with_modification_tracking(collider.parent) {
parent.remove_collider_internal(handle, &collider);
if wake_up {
@@ -178,10 +233,17 @@ impl ColliderSet {
///
/// Using this is discouraged in favor of `self.get_mut(handle)` which does not
/// suffer from the ABA problem.
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn get_unknown_gen_mut(&mut self, i: usize) -> Option<(&mut Collider, ColliderHandle)> {
self.colliders
.get_unknown_gen_mut(i)
.map(|(c, h)| (c, ColliderHandle(h)))
let (collider, handle) = self.colliders.get_unknown_gen_mut(i)?;
let handle = ColliderHandle(handle);
Self::mark_as_modified(
handle,
collider,
&mut self.modified_colliders,
self.modified_all_colliders,
);
Some((collider, handle))
}
/// Get the collider with the given handle.
@@ -189,31 +251,79 @@ impl ColliderSet {
self.colliders.get(handle.0)
}
fn mark_as_modified(
handle: ColliderHandle,
collider: &mut Collider,
modified_colliders: &mut Vec<ColliderHandle>,
modified_all_colliders: bool,
) {
if !modified_all_colliders && !collider.changes.contains(ColliderChanges::MODIFIED) {
collider.changes = ColliderChanges::MODIFIED;
modified_colliders.push(handle);
}
}
/// Gets a mutable reference to the collider with the given handle.
#[cfg(not(feature = "dev-remove-slow-accessors"))]
pub fn get_mut(&mut self, handle: ColliderHandle) -> Option<&mut Collider> {
let result = self.colliders.get_mut(handle.0)?;
Self::mark_as_modified(
handle,
result,
&mut self.modified_colliders,
self.modified_all_colliders,
);
Some(result)
}
pub(crate) fn get_mut_internal(&mut self, handle: ColliderHandle) -> Option<&mut Collider> {
self.colliders.get_mut(handle.0)
}
// pub(crate) fn get2_mut_internal(
// &mut self,
// h1: ColliderHandle,
// h2: ColliderHandle,
// ) -> (Option<&mut Collider>, Option<&mut Collider>) {
// self.colliders.get2_mut(h1, h2)
// }
// Just a very long name instead of `.get_mut` to make sure
// this is really the method we wanted to use instead of `get_mut_internal`.
pub(crate) fn get_mut_internal_with_modification_tracking(
&mut self,
handle: ColliderHandle,
) -> Option<&mut Collider> {
let result = self.colliders.get_mut(handle.0)?;
Self::mark_as_modified(
handle,
result,
&mut self.modified_colliders,
self.modified_all_colliders,
);
Some(result)
}
// pub fn iter_mut(&mut self) -> impl Iterator<Item = (ColliderHandle, ColliderMut)> {
// // let sender = &self.activation_channel_sender;
// self.colliders.iter_mut().map(move |(h, rb)| {
// (h, ColliderMut::new(h, rb /*sender.clone()*/))
// })
// }
// Utility function to avoid some borrowing issue in the `maintain` method.
fn maintain_one(bodies: &mut RigidBodySet, collider: &mut Collider) {
if collider
.changes
.contains(ColliderChanges::POSITION_WRT_PARENT)
{
if let Some(parent) = bodies.get_mut_internal(collider.parent()) {
let position = parent.position * collider.position_wrt_parent();
// NOTE: the set_position method will add the ColliderChanges::POSITION flag,
// which is needed for the broad-phase/narrow-phase to detect the change.
collider.set_position(position);
}
}
}
// pub(crate) fn iter_mut_internal(
// &mut self,
// ) -> impl Iterator<Item = (ColliderHandle, &mut Collider)> {
// self.colliders.iter_mut()
// }
pub(crate) fn handle_user_changes(&mut self, bodies: &mut RigidBodySet) {
if self.modified_all_colliders {
for (_, rb) in self.colliders.iter_mut() {
Self::maintain_one(bodies, rb)
}
} else {
for handle in self.modified_colliders.iter() {
if let Some(rb) = self.colliders.get_mut(handle.0) {
Self::maintain_one(bodies, rb)
}
}
}
}
}
impl Index<ColliderHandle> for ColliderSet {
@@ -224,8 +334,16 @@ impl Index<ColliderHandle> for ColliderSet {
}
}
#[cfg(not(feature = "dev-remove-slow-accessors"))]
impl IndexMut<ColliderHandle> for ColliderSet {
fn index_mut(&mut self, index: ColliderHandle) -> &mut Collider {
&mut self.colliders[index.0]
fn index_mut(&mut self, handle: ColliderHandle) -> &mut Collider {
let collider = &mut self.colliders[handle.0];
Self::mark_as_modified(
handle,
collider,
&mut self.modified_colliders,
self.modified_all_colliders,
);
collider
}
}

View File

@@ -1,5 +1,5 @@
use crate::dynamics::{BodyPair, RigidBodyHandle};
use crate::geometry::{ColliderPair, ContactManifold};
use crate::geometry::{ColliderPair, Contact, ContactManifold};
use crate::math::{Point, Real, Vector};
use parry::query::ContactManifoldsWorkspace;
@@ -38,6 +38,8 @@ pub struct ContactData {
/// collider's rigid-body.
#[cfg(feature = "dim3")]
pub tangent_impulse: na::Vector2<Real>,
/// The target velocity correction at the contact point.
pub rhs: Real,
}
impl Default for ContactData {
@@ -45,6 +47,7 @@ impl Default for ContactData {
Self {
impulse: 0.0,
tangent_impulse: na::zero(),
rhs: 0.0,
}
}
}
@@ -73,6 +76,35 @@ impl ContactPair {
workspace: None,
}
}
/// Finds the contact with the smallest signed distance.
///
/// If the colliders involved in this contact pair are penetrating, then
/// this returns the contact with the largest penetration depth.
///
/// Returns a reference to the contact, as well as the contact manifold
/// it is part of.
pub fn find_deepest_contact(&self) -> Option<(&ContactManifold, &Contact)> {
    let mut best: Option<(&ContactManifold, &Contact)> = None;

    for manifold in &self.manifolds {
        if let Some(candidate) = manifold.find_deepest_contact() {
            // Keep the current best when its distance compares <= to the
            // candidate's (same tie-breaking as the original fold).
            let keep_current = match best {
                Some((_, current)) => current.dist <= candidate.dist,
                None => false,
            };
            if !keep_current {
                best = Some((manifold, candidate));
            }
        }
    }

    best
}
}
#[derive(Clone, Debug)]
@@ -143,16 +175,25 @@ pub struct SolverContact {
/// This is set to zero by default. Set to a non-zero value to
/// simulate, e.g., conveyor belts.
pub tangent_velocity: Vector<Real>,
/// Associated contact data used to warm-start the constraints
/// solver.
pub data: ContactData,
/// The warmstart impulse, along the contact normal, applied by this contact to the first collider's rigid-body.
pub warmstart_impulse: Real,
/// The warmstart friction impulse along the vector orthonormal to the contact normal, applied to the first
/// collider's rigid-body.
#[cfg(feature = "dim2")]
pub warmstart_tangent_impulse: Real,
/// The warmstart friction impulses along the basis orthonormal to the contact normal, applied to the first
/// collider's rigid-body.
#[cfg(feature = "dim3")]
pub warmstart_tangent_impulse: na::Vector2<Real>,
/// The last velocity correction targeted by this contact.
pub prev_rhs: Real,
}
impl SolverContact {
/// Should we treat this contact as a bouncy contact?
/// If `true`, use [`Self::restitution`].
pub fn is_bouncy(&self) -> bool {
let is_new = self.data.impulse == 0.0;
let is_new = self.warmstart_impulse == 0.0;
if is_new {
// Treat new collisions as bouncing at first, unless we have zero restitution.
self.restitution > 0.0

View File

@@ -84,7 +84,7 @@ impl IntersectionEvent {
}
}
pub(crate) use self::broad_phase_multi_sap::{BroadPhasePairEvent, ColliderPair};
pub(crate) use self::broad_phase_multi_sap::{BroadPhasePairEvent, ColliderPair, SAPProxyIndex};
pub(crate) use self::collider_set::RemovedCollider;
pub(crate) use self::narrow_phase::ContactManifoldIndex;
pub(crate) use parry::partitioning::SimdQuadTree;

View File

@@ -4,9 +4,10 @@ use rayon::prelude::*;
use crate::data::pubsub::Subscription;
use crate::data::Coarena;
use crate::dynamics::{BodyPair, CoefficientCombineRule, RigidBodySet};
use crate::geometry::collider::ColliderChanges;
use crate::geometry::{
BroadPhasePairEvent, ColliderGraphIndex, ColliderHandle, ColliderSet, ContactData,
ContactEvent, ContactManifold, ContactManifoldData, ContactPair, InteractionGraph,
BroadPhasePairEvent, ColliderGraphIndex, ColliderHandle, ColliderPair, ColliderSet,
ContactData, ContactEvent, ContactManifold, ContactManifoldData, ContactPair, InteractionGraph,
IntersectionEvent, RemovedCollider, SolverContact, SolverFlags,
};
use crate::math::{Real, Vector};
@@ -34,6 +35,13 @@ impl ColliderGraphIndices {
}
}
#[derive(Copy, Clone, PartialEq, Eq)]
enum PairRemovalMode {
FromContactGraph,
FromIntersectionGraph,
Auto,
}
/// The narrow-phase responsible for computing precise contact information between colliders.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
@@ -71,6 +79,14 @@ impl NarrowPhase {
}
}
/// The query dispatcher used by this narrow-phase to select the right collision-detection
/// algorithms depending of the shape types.
pub fn query_dispatcher(
&self,
) -> &dyn PersistentQueryDispatcher<ContactManifoldData, ContactData> {
&*self.query_dispatcher
}
/// The contact graph containing all contact pairs and their contact information.
pub fn contact_graph(&self) -> &InteractionGraph<ColliderHandle, ContactPair> {
&self.contact_graph
@@ -156,7 +172,12 @@ impl NarrowPhase {
// }
/// Maintain the narrow-phase internal state by taking collider removal into account.
pub fn maintain(&mut self, colliders: &mut ColliderSet, bodies: &mut RigidBodySet) {
pub fn handle_user_changes(
&mut self,
colliders: &mut ColliderSet,
bodies: &mut RigidBodySet,
events: &dyn EventHandler,
) {
// Ensure we already subscribed.
if self.removed_colliders.is_none() {
self.removed_colliders = Some(colliders.removed_colliders.subscribe());
@@ -199,6 +220,8 @@ impl NarrowPhase {
colliders.removed_colliders.ack(&cursor);
self.removed_colliders = Some(cursor);
self.handle_modified_colliders(colliders, bodies, events);
}
pub(crate) fn remove_collider(
@@ -240,6 +263,212 @@ impl NarrowPhase {
}
}
/// Updates the narrow-phase graphs to reflect user modifications made to
/// colliders since the last step: wakes up touched bodies, and migrates
/// contact/intersection pairs when a collider's sensor status changed.
pub(crate) fn handle_modified_colliders(
    &mut self,
    colliders: &mut ColliderSet,
    bodies: &mut RigidBodySet,
    events: &dyn EventHandler,
) {
    let mut pairs_to_remove = vec![];

    colliders.foreach_modified_colliders(|handle, collider| {
        // BUGFIX: the early-return must fire when the collider does NOT need
        // a narrow-phase update. The original condition was missing the
        // negation, which skipped exactly the colliders that needed handling.
        if !collider.changes.needs_narrow_phase_update() {
            // No flag relevant to the narrow-phase is enabled for this collider.
            return;
        }

        if let Some(gid) = self.graph_indices.get(handle.0) {
            // For each modified colliders, we need to wake-up the bodies it is in contact with
            // so that the narrow-phase properly takes into account the change in, e.g.,
            // collision groups. Waking up the modified collider's parent isn't enough because
            // it could be a static or kinematic body which don't propagate the wake-up state.
            bodies.wake_up(collider.parent, true);

            for inter in self
                .contact_graph
                .interactions_with(gid.contact_graph_index)
            {
                let other_handle = if handle == inter.0 { inter.1 } else { inter.0 };
                if let Some(other_collider) = colliders.get(other_handle) {
                    bodies.wake_up(other_collider.parent, true);
                }
            }

            // For each collider which had their sensor status modified, we need
            // to transfer their contact/intersection graph edges to the intersection/contact graph.
            // To achieve this we will remove the relevant contact/intersection pairs from the
            // contact/intersection graphs, and then add them into the other graph.
            if collider.changes.contains(ColliderChanges::SENSOR) {
                if collider.is_sensor() {
                    // Find the contact pairs for this collider and
                    // push them to `pairs_to_remove`.
                    for inter in self
                        .contact_graph
                        .interactions_with(gid.contact_graph_index)
                    {
                        pairs_to_remove.push((
                            ColliderPair::new(inter.0, inter.1),
                            PairRemovalMode::FromContactGraph,
                        ));
                    }
                } else {
                    // Find the contact pairs for this collider and
                    // push them to `pairs_to_remove` if both involved
                    // colliders are not sensors.
                    for inter in self
                        .intersection_graph
                        .interactions_with(gid.intersection_graph_index)
                        .filter(|(h1, h2, _)| {
                            !colliders[*h1].is_sensor() && !colliders[*h2].is_sensor()
                        })
                    {
                        pairs_to_remove.push((
                            ColliderPair::new(inter.0, inter.1),
                            PairRemovalMode::FromIntersectionGraph,
                        ));
                    }
                }
            }
        }
    });

    // Remove each migrating pair from the graph it currently lives in...
    for pair in &pairs_to_remove {
        self.remove_pair(colliders, bodies, &pair.0, events, pair.1);
    }

    // ...then re-add it so it lands in the graph matching its new sensor status.
    for pair in pairs_to_remove {
        self.add_pair(colliders, &pair.0);
    }
}
/// Removes the interaction between the two colliders of `pair`.
///
/// Depending on `mode` (or on the colliders' sensor status when `mode` is
/// `Auto`), the edge is removed from either the intersection graph or the
/// contact graph. When an active intersection or contact is removed, the
/// corresponding lost-intersection / stopped-contact event is emitted and the
/// involved bodies are woken up.
fn remove_pair(
    &mut self,
    colliders: &mut ColliderSet,
    bodies: &mut RigidBodySet,
    pair: &ColliderPair,
    events: &dyn EventHandler,
    mode: PairRemovalMode,
) {
    // Nothing to do if either collider has already been removed.
    let (co1, co2) = match (colliders.get(pair.collider1), colliders.get(pair.collider2)) {
        (Some(co1), Some(co2)) => (co1, co2),
        _ => return,
    };

    // TODO: could we just unwrap here?
    // Don't we have the guarantee that we will get a `AddPair` before a `DeletePair`?
    let (gid1, gid2) = match (
        self.graph_indices.get(pair.collider1.0),
        self.graph_indices.get(pair.collider2.0),
    ) {
        (Some(gid1), Some(gid2)) => (gid1, gid2),
        _ => return,
    };

    let use_intersection_graph = mode == PairRemovalMode::FromIntersectionGraph
        || (mode == PairRemovalMode::Auto && (co1.is_sensor() || co2.is_sensor()));

    if use_intersection_graph {
        let was_intersecting = self
            .intersection_graph
            .remove_edge(gid1.intersection_graph_index, gid2.intersection_graph_index);

        // Emit an intersection lost event if we had an intersection before removing the edge.
        if was_intersecting == Some(true) {
            events.handle_intersection_event(IntersectionEvent::new(
                pair.collider1,
                pair.collider2,
                false,
            ))
        }
    } else {
        let removed_pair = self
            .contact_graph
            .remove_edge(gid1.contact_graph_index, gid2.contact_graph_index);

        // Emit a contact stopped event if we had a contact before removing the edge.
        // Also wake up the dynamic bodies that were in contact.
        if let Some(ctct) = removed_pair {
            if ctct.has_any_active_contact {
                bodies.wake_up(co1.parent, true);
                bodies.wake_up(co2.parent, true);
                events.handle_contact_event(ContactEvent::Stopped(
                    pair.collider1,
                    pair.collider2,
                ))
            }
        }
    }
}
/// Registers a new interaction between the two colliders of `pair`.
///
/// Pairs involving at least one sensor go to the intersection graph; all
/// other pairs go to the contact graph. Pairs referencing a removed collider,
/// colliders attached to the same body, and already-registered pairs are
/// ignored.
fn add_pair(&mut self, colliders: &mut ColliderSet, pair: &ColliderPair) {
    let (co1, co2) = match (colliders.get(pair.collider1), colliders.get(pair.collider2)) {
        (Some(co1), Some(co2)) => (co1, co2),
        _ => return,
    };

    // Same parents. Ignore collisions.
    if co1.parent == co2.parent {
        return;
    }

    let (gid1, gid2) = self.graph_indices.ensure_pair_exists(
        pair.collider1.0,
        pair.collider2.0,
        ColliderGraphIndices::invalid(),
    );

    if co1.is_sensor() || co2.is_sensor() {
        // NOTE: a collider won't have a valid graph index as long
        // as it does not interact with anything.
        if !InteractionGraph::<(), ()>::is_graph_index_valid(gid1.intersection_graph_index) {
            gid1.intersection_graph_index =
                self.intersection_graph.graph.add_node(pair.collider1);
        }
        if !InteractionGraph::<(), ()>::is_graph_index_valid(gid2.intersection_graph_index) {
            gid2.intersection_graph_index =
                self.intersection_graph.graph.add_node(pair.collider2);
        }

        let existing_edge = self
            .intersection_graph
            .graph
            .find_edge(gid1.intersection_graph_index, gid2.intersection_graph_index);
        if existing_edge.is_none() {
            let _ = self.intersection_graph.add_edge(
                gid1.intersection_graph_index,
                gid2.intersection_graph_index,
                false,
            );
        }
    } else {
        // NOTE: same logic as above, but targeting the contact graph.
        // TODO: refactor both pieces of code somehow?
        if !InteractionGraph::<(), ()>::is_graph_index_valid(gid1.contact_graph_index) {
            gid1.contact_graph_index = self.contact_graph.graph.add_node(pair.collider1);
        }
        if !InteractionGraph::<(), ()>::is_graph_index_valid(gid2.contact_graph_index) {
            gid2.contact_graph_index = self.contact_graph.graph.add_node(pair.collider2);
        }

        let existing_edge = self
            .contact_graph
            .graph
            .find_edge(gid1.contact_graph_index, gid2.contact_graph_index);
        if existing_edge.is_none() {
            let _ = self.contact_graph.add_edge(
                gid1.contact_graph_index,
                gid2.contact_graph_index,
                ContactPair::new(*pair),
            );
        }
    }
}
pub(crate) fn register_pairs(
&mut self,
colliders: &mut ColliderSet,
@@ -250,135 +479,10 @@ impl NarrowPhase {
for event in broad_phase_events {
match event {
BroadPhasePairEvent::AddPair(pair) => {
if let (Some(co1), Some(co2)) =
(colliders.get(pair.collider1), colliders.get(pair.collider2))
{
if co1.parent == co2.parent {
// Same parents. Ignore collisions.
continue;
}
let (gid1, gid2) = self.graph_indices.ensure_pair_exists(
pair.collider1.0,
pair.collider2.0,
ColliderGraphIndices::invalid(),
);
if co1.is_sensor() || co2.is_sensor() {
// NOTE: the collider won't have a graph index as long
// as it does not interact with anything.
if !InteractionGraph::<(), ()>::is_graph_index_valid(
gid1.intersection_graph_index,
) {
gid1.intersection_graph_index =
self.intersection_graph.graph.add_node(pair.collider1);
}
if !InteractionGraph::<(), ()>::is_graph_index_valid(
gid2.intersection_graph_index,
) {
gid2.intersection_graph_index =
self.intersection_graph.graph.add_node(pair.collider2);
}
if self
.intersection_graph
.graph
.find_edge(
gid1.intersection_graph_index,
gid2.intersection_graph_index,
)
.is_none()
{
let _ = self.intersection_graph.add_edge(
gid1.intersection_graph_index,
gid2.intersection_graph_index,
false,
);
}
} else {
// NOTE: same code as above, but for the contact graph.
// TODO: refactor both pieces of code somehow?
// NOTE: the collider won't have a graph index as long
// as it does not interact with anything.
if !InteractionGraph::<(), ()>::is_graph_index_valid(
gid1.contact_graph_index,
) {
gid1.contact_graph_index =
self.contact_graph.graph.add_node(pair.collider1);
}
if !InteractionGraph::<(), ()>::is_graph_index_valid(
gid2.contact_graph_index,
) {
gid2.contact_graph_index =
self.contact_graph.graph.add_node(pair.collider2);
}
if self
.contact_graph
.graph
.find_edge(gid1.contact_graph_index, gid2.contact_graph_index)
.is_none()
{
let interaction = ContactPair::new(*pair);
let _ = self.contact_graph.add_edge(
gid1.contact_graph_index,
gid2.contact_graph_index,
interaction,
);
}
}
}
self.add_pair(colliders, pair);
}
BroadPhasePairEvent::DeletePair(pair) => {
if let (Some(co1), Some(co2)) =
(colliders.get(pair.collider1), colliders.get(pair.collider2))
{
// TODO: could we just unwrap here?
// Don't we have the guarantee that we will get a `AddPair` before a `DeletePair`?
if let (Some(gid1), Some(gid2)) = (
self.graph_indices.get(pair.collider1.0),
self.graph_indices.get(pair.collider2.0),
) {
if co1.is_sensor() || co2.is_sensor() {
let was_intersecting = self.intersection_graph.remove_edge(
gid1.intersection_graph_index,
gid2.intersection_graph_index,
);
// Emit an intersection lost event if we had an intersection before removing the edge.
if Some(true) == was_intersecting {
let prox_event = IntersectionEvent::new(
pair.collider1,
pair.collider2,
false,
);
events.handle_intersection_event(prox_event)
}
} else {
let contact_pair = self.contact_graph.remove_edge(
gid1.contact_graph_index,
gid2.contact_graph_index,
);
// Emit a contact stopped event if we had a contact before removing the edge.
// Also wake up the dynamic bodies that were in contact.
if let Some(ctct) = contact_pair {
if ctct.has_any_active_contact {
bodies.wake_up(co1.parent, true);
bodies.wake_up(co2.parent, true);
events.handle_contact_event(ContactEvent::Stopped(
pair.collider1,
pair.collider2,
))
}
}
}
}
}
self.remove_pair(colliders, bodies, pair, events, PairRemovalMode::Auto);
}
}
}
@@ -391,17 +495,28 @@ impl NarrowPhase {
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
) {
if !colliders.contains_any_modified_collider() {
return;
}
let nodes = &self.intersection_graph.graph.nodes;
let query_dispatcher = &*self.query_dispatcher;
let active_hooks = hooks.active_hooks();
// TODO: don't iterate on all the edges.
par_iter_mut!(&mut self.intersection_graph.graph.edges).for_each(|edge| {
let handle1 = nodes[edge.source().index()].weight;
let handle2 = nodes[edge.target().index()].weight;
let co1 = &colliders[handle1];
let co2 = &colliders[handle2];
// FIXME: avoid lookup into bodies.
if !co1.changes.needs_narrow_phase_update() && !co2.changes.needs_narrow_phase_update()
{
// No update needed for these colliders.
return;
}
// TODO: avoid lookup into bodies.
let rb1 = &bodies[co1.parent];
let rb2 = &bodies[co2.parent];
@@ -467,15 +582,26 @@ impl NarrowPhase {
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
) {
if !colliders.contains_any_modified_collider() {
return;
}
let query_dispatcher = &*self.query_dispatcher;
let active_hooks = hooks.active_hooks();
// TODO: don't iterate on all the edges.
par_iter_mut!(&mut self.contact_graph.graph.edges).for_each(|edge| {
let pair = &mut edge.weight;
let co1 = &colliders[pair.pair.collider1];
let co2 = &colliders[pair.pair.collider2];
// FIXME: avoid lookup into bodies.
if !co1.changes.needs_narrow_phase_update() && !co2.changes.needs_narrow_phase_update()
{
// No update needed for these colliders.
return;
}
// TODO: avoid lookup into bodies.
let rb1 = &bodies[co1.parent];
let rb2 = &bodies[co2.parent];
@@ -525,6 +651,13 @@ impl NarrowPhase {
solver_flags.remove(SolverFlags::COMPUTE_IMPULSES);
}
if co1.changes.contains(ColliderChanges::SHAPE)
|| co2.changes.contains(ColliderChanges::SHAPE)
{
// The shape changed so the workspace is no longer valid.
pair.workspace = None;
}
let pos12 = co1.position().inv_mul(co2.position());
let _ = query_dispatcher.contact_manifolds(
&pos12,
@@ -576,7 +709,9 @@ impl NarrowPhase {
friction,
restitution,
tangent_velocity: Vector::zeros(),
data: contact.data,
warmstart_impulse: contact.data.impulse,
warmstart_tangent_impulse: contact.data.tangent_impulse,
prev_rhs: contact.data.rhs,
};
manifold.data.solver_contacts.push(solver_contact);
@@ -637,7 +772,7 @@ impl NarrowPhase {
/// Retrieve all the interactions with at least one contact point, happening between two active bodies.
// NOTE: this is very similar to the code from JointSet::select_active_interactions.
pub(crate) fn sort_and_select_active_contacts<'a>(
pub(crate) fn select_active_contacts<'a>(
&'a mut self,
bodies: &RigidBodySet,
out_manifolds: &mut Vec<&'a mut ContactManifold>,
@@ -647,7 +782,7 @@ impl NarrowPhase {
out_island.clear();
}
// FIXME: don't iterate through all the interactions.
// TODO: don't iterate through all the interactions.
for inter in self.contact_graph.graph.edges.iter_mut() {
for manifold in &mut inter.weight.manifolds {
let rb1 = &bodies[manifold.data.body_pair.body1];

View File

@@ -44,16 +44,15 @@ impl CollisionPipeline {
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
) {
bodies.maintain(colliders);
colliders.handle_user_changes(bodies);
bodies.handle_user_changes(colliders);
self.broadphase_collider_pairs.clear();
broad_phase.update_aabbs(prediction_distance, bodies, colliders);
self.broad_phase_events.clear();
broad_phase.find_pairs(&mut self.broad_phase_events);
broad_phase.update(prediction_distance, colliders, &mut self.broad_phase_events);
narrow_phase.handle_user_changes(colliders, bodies, events);
narrow_phase.register_pairs(colliders, bodies, &self.broad_phase_events, events);
narrow_phase.compute_contacts(prediction_distance, bodies, colliders, hooks, events);
narrow_phase.compute_intersections(bodies, colliders, hooks, events);
@@ -61,26 +60,17 @@ impl CollisionPipeline {
colliders,
narrow_phase,
self.empty_joints.joint_graph(),
0,
128,
);
// // Update kinematic bodies velocities.
// bodies.foreach_active_kinematic_body_mut_internal(|_, body| {
// body.compute_velocity_from_predicted_position(integration_parameters.inv_dt());
// });
// Update colliders positions and kinematic bodies positions.
bodies.foreach_active_body_mut_internal(|_, rb| {
if rb.is_kinematic() {
rb.position = rb.predicted_position;
} else {
rb.update_predicted_position(0.0);
}
rb.position = rb.next_position;
rb.update_colliders_positions(colliders);
for handle in &rb.colliders {
let collider = &mut colliders[*handle];
let collider = colliders.get_mut_internal(*handle).unwrap();
collider.position = rb.position * collider.delta;
collider.predicted_position = rb.predicted_position * collider.delta;
}
});

View File

@@ -6,7 +6,7 @@ pub use physics_hooks::{
ContactModificationContext, PairFilterContext, PhysicsHooks, PhysicsHooksFlags,
};
pub use physics_pipeline::PhysicsPipeline;
pub use query_pipeline::QueryPipeline;
pub use query_pipeline::{QueryPipeline, QueryPipelineMode};
mod collision_pipeline;
mod event_handler;

View File

@@ -3,7 +3,7 @@
use crate::counters::Counters;
#[cfg(not(feature = "parallel"))]
use crate::dynamics::IslandSolver;
use crate::dynamics::{IntegrationParameters, JointSet, RigidBodySet};
use crate::dynamics::{CCDSolver, IntegrationParameters, JointSet, RigidBodySet};
#[cfg(feature = "parallel")]
use crate::dynamics::{JointGraphEdge, ParallelIslandSolver as IslandSolver};
use crate::geometry::{
@@ -58,57 +58,37 @@ impl PhysicsPipeline {
}
}
/// Executes one timestep of the physics simulation.
pub fn step(
fn detect_collisions(
&mut self,
gravity: &Vector<Real>,
integration_parameters: &IntegrationParameters,
broad_phase: &mut BroadPhase,
narrow_phase: &mut NarrowPhase,
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
joints: &mut JointSet,
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
handle_user_changes: bool,
) {
self.counters.step_started();
bodies.maintain(colliders);
broad_phase.maintain(colliders);
narrow_phase.maintain(colliders, bodies);
self.counters.stages.collision_detection_time.resume();
self.counters.cd.broad_phase_time.resume();
// Update kinematic bodies velocities.
// TODO: what is the best place for this? It should at least be
// located before the island computation because we test the velocity
// there to determine if this kinematic body should wake-up dynamic
// bodies it is touching.
bodies.foreach_active_kinematic_body_mut_internal(|_, body| {
body.compute_velocity_from_predicted_position(integration_parameters.inv_dt());
});
self.counters.stages.collision_detection_time.start();
self.counters.cd.broad_phase_time.start();
self.broadphase_collider_pairs.clear();
// let t = instant::now();
broad_phase.update_aabbs(
integration_parameters.prediction_distance,
bodies,
colliders,
);
// println!("Update AABBs time: {}", instant::now() - t);
// let t = instant::now();
// Update broad-phase.
self.broad_phase_events.clear();
broad_phase.find_pairs(&mut self.broad_phase_events);
// println!("Find pairs time: {}", instant::now() - t);
self.broadphase_collider_pairs.clear();
broad_phase.update(
integration_parameters.prediction_distance,
colliders,
&mut self.broad_phase_events,
);
narrow_phase.register_pairs(colliders, bodies, &self.broad_phase_events, events);
self.counters.cd.broad_phase_time.pause();
self.counters.cd.narrow_phase_time.resume();
// println!("Num contact pairs: {}", pairs.len());
self.counters.cd.narrow_phase_time.start();
// let t = instant::now();
// Update narrow-phase.
if handle_user_changes {
narrow_phase.handle_user_changes(colliders, bodies, events);
}
narrow_phase.register_pairs(colliders, bodies, &self.broad_phase_events, events);
narrow_phase.compute_contacts(
integration_parameters.prediction_distance,
bodies,
@@ -117,9 +97,73 @@ impl PhysicsPipeline {
events,
);
narrow_phase.compute_intersections(bodies, colliders, hooks, events);
// println!("Compute contact time: {}", instant::now() - t);
self.counters.stages.island_construction_time.start();
// Clear colliders modification flags.
colliders.clear_modified_colliders();
self.counters.cd.narrow_phase_time.pause();
self.counters.stages.collision_detection_time.pause();
}
/// Runs the position-correction (depenetration) solver on every simulation island.
fn solve_position_constraints(
    &mut self,
    integration_parameters: &IntegrationParameters,
    bodies: &mut RigidBodySet,
) {
    #[cfg(not(feature = "parallel"))]
    {
        enable_flush_to_zero!();

        // Sequential path: solve each island one after the other.
        for island_id in 0..bodies.num_islands() {
            self.solvers[island_id].solve_position_constraints(
                island_id,
                &mut self.counters,
                integration_parameters,
                bodies,
            )
        }
    }

    #[cfg(feature = "parallel")]
    {
        use rayon::prelude::*;
        use std::sync::atomic::Ordering;

        let num_islands = bodies.num_islands();
        let solvers = &mut self.solvers[..num_islands];
        // The `AtomicPtr` is used to smuggle the `&mut RigidBodySet` across the
        // rayon closure boundary (one aliasing `&mut` is materialized per task).
        let bodies = &std::sync::atomic::AtomicPtr::new(bodies as *mut _);

        rayon::scope(|scope| {
            enable_flush_to_zero!();

            solvers
                .par_iter_mut()
                .enumerate()
                .for_each(|(island_id, solver)| {
                    // NOTE(review): this relies on each island touching a
                    // disjoint set of bodies so the aliasing `&mut RigidBodySet`
                    // references never actually overlap — TODO confirm invariant.
                    let bodies: &mut RigidBodySet =
                        unsafe { std::mem::transmute(bodies.load(Ordering::Relaxed)) };
                    solver.solve_position_constraints(
                        scope,
                        island_id,
                        integration_parameters,
                        bodies,
                    )
                });
        });
    }
}
fn build_islands_and_solve_velocity_constraints(
&mut self,
gravity: &Vector<Real>,
integration_parameters: &IntegrationParameters,
narrow_phase: &mut NarrowPhase,
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
joints: &mut JointSet,
) {
self.counters.stages.island_construction_time.resume();
bodies.update_active_set_with_contacts(
colliders,
narrow_phase,
@@ -139,25 +183,17 @@ impl PhysicsPipeline {
}
let mut manifolds = Vec::new();
narrow_phase.sort_and_select_active_contacts(
bodies,
&mut manifolds,
&mut self.manifold_indices,
);
narrow_phase.select_active_contacts(bodies, &mut manifolds, &mut self.manifold_indices);
joints.select_active_interactions(bodies, &mut self.joint_constraint_indices);
self.counters.cd.narrow_phase_time.pause();
self.counters.stages.collision_detection_time.pause();
self.counters.stages.update_time.start();
self.counters.stages.update_time.resume();
bodies.foreach_active_dynamic_body_mut_internal(|_, b| {
b.update_world_mass_properties();
b.add_gravity(*gravity)
});
self.counters.stages.update_time.pause();
self.counters.solver.reset();
self.counters.stages.solver_time.start();
self.counters.stages.solver_time.resume();
if self.solvers.len() < bodies.num_islands() {
self.solvers
.resize_with(bodies.num_islands(), IslandSolver::new);
@@ -168,7 +204,7 @@ impl PhysicsPipeline {
enable_flush_to_zero!();
for island_id in 0..bodies.num_islands() {
self.solvers[island_id].solve_island(
self.solvers[island_id].init_constraints_and_solve_velocity_constraints(
island_id,
&mut self.counters,
integration_parameters,
@@ -209,7 +245,7 @@ impl PhysicsPipeline {
let joints: &mut Vec<JointGraphEdge> =
unsafe { std::mem::transmute(joints.load(Ordering::Relaxed)) };
solver.solve_island(
solver.init_constraints_and_solve_velocity_constraints(
scope,
island_id,
integration_parameters,
@@ -222,31 +258,224 @@ impl PhysicsPipeline {
});
});
}
self.counters.stages.solver_time.pause();
}
// Update colliders positions and kinematic bodies positions.
// FIXME: do this in the solver?
/// Runs the CCD motion-clamping stage.
///
/// Predicts the first impacts the CCD-enabled bodies would experience while
/// moving toward their next positions during this timestep, then clamps their
/// motions so they stop at those impacts instead of tunneling through them.
fn run_ccd_motion_clamping(
    &mut self,
    integration_parameters: &IntegrationParameters,
    bodies: &mut RigidBodySet,
    colliders: &mut ColliderSet,
    narrow_phase: &NarrowPhase,
    ccd_solver: &mut CCDSolver,
    events: &dyn EventHandler,
) {
    self.counters.ccd.toi_computation_time.start();

    // Handle CCD: compute the impacts, then clamp the bodies' motions accordingly.
    let dt = integration_parameters.dt;
    let predicted_impacts =
        ccd_solver.predict_impacts_at_next_positions(dt, bodies, colliders, narrow_phase, events);
    ccd_solver.clamp_motions(dt, bodies, &predicted_impacts);

    self.counters.ccd.toi_computation_time.pause();
}
fn advance_to_final_positions(
&mut self,
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
clear_forces: bool,
) {
// Set the rigid-bodies and kinematic bodies to their final position.
bodies.foreach_active_body_mut_internal(|_, rb| {
if rb.is_kinematic() {
rb.position = rb.predicted_position;
rb.linvel = na::zero();
rb.angvel = na::zero();
} else {
rb.update_predicted_position(integration_parameters.dt);
}
if clear_forces {
rb.force = na::zero();
rb.torque = na::zero();
}
rb.position = rb.next_position;
rb.update_colliders_positions(colliders);
});
}
self.counters.stages.solver_time.pause();
/// Deduces the velocity of every active kinematic body from its user-set next position.
fn interpolate_kinematic_velocities(
    &mut self,
    integration_parameters: &IntegrationParameters,
    bodies: &mut RigidBodySet,
) {
    // Update kinematic bodies velocities.
    // TODO: what is the best place for this? It should at least happen before
    // the island computation because the velocity is tested there to decide
    // whether a kinematic body should wake up the dynamic bodies it touches.
    let inv_dt = integration_parameters.inv_dt();
    bodies.foreach_active_kinematic_body_mut_internal(|_, rb| {
        rb.compute_velocity_from_next_position(inv_dt)
    });
}
/// Executes one timestep of the physics simulation.
pub fn step(
&mut self,
gravity: &Vector<Real>,
integration_parameters: &IntegrationParameters,
broad_phase: &mut BroadPhase,
narrow_phase: &mut NarrowPhase,
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
joints: &mut JointSet,
ccd_solver: &mut CCDSolver,
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
) {
self.counters.reset();
self.counters.step_started();
colliders.handle_user_changes(bodies);
bodies.handle_user_changes(colliders);
self.detect_collisions(
integration_parameters,
broad_phase,
narrow_phase,
bodies,
colliders,
hooks,
events,
true,
);
let mut remaining_time = integration_parameters.dt;
let mut integration_parameters = *integration_parameters;
let (ccd_is_enabled, mut remaining_substeps) =
if integration_parameters.max_ccd_substeps == 0 {
(false, 1)
} else {
(true, integration_parameters.max_ccd_substeps)
};
while remaining_substeps > 0 {
// If there are more than one CCD substep, we need to split
// the timestep into multiple intervals. First, estimate the
// size of the time slice we will integrate for this substep.
//
// Note that we must do this now, before the constraints resolution
// because we need to use the correct timestep length for the
// integration of external forces.
//
// If there is only one or zero CCD substep, there is no need
// to split the timestep interval. So we can just skip this part.
if ccd_is_enabled && remaining_substeps > 1 {
// NOTE: Take forces into account when updating the bodies CCD activation flags
// these forces have not been integrated to the body's velocity yet.
let ccd_active = ccd_solver.update_ccd_active_flags(bodies, remaining_time, true);
let first_impact = if ccd_active {
ccd_solver.find_first_impact(remaining_time, bodies, colliders, narrow_phase)
} else {
None
};
if let Some(toi) = first_impact {
let original_interval = remaining_time / (remaining_substeps as Real);
if toi < original_interval {
integration_parameters.dt = original_interval;
} else {
integration_parameters.dt =
toi + (remaining_time - toi) / (remaining_substeps as Real);
}
remaining_substeps -= 1;
} else {
// No impact, don't do any other substep after this one.
integration_parameters.dt = remaining_time;
remaining_substeps = 0;
}
remaining_time -= integration_parameters.dt;
// Avoid substep length that are too small.
if remaining_time <= integration_parameters.min_ccd_dt {
integration_parameters.dt += remaining_time;
remaining_substeps = 0;
}
} else {
integration_parameters.dt = remaining_time;
remaining_time = 0.0;
remaining_substeps = 0;
}
self.counters.ccd.num_substeps += 1;
self.interpolate_kinematic_velocities(&integration_parameters, bodies);
self.build_islands_and_solve_velocity_constraints(
gravity,
&integration_parameters,
narrow_phase,
bodies,
colliders,
joints,
);
// If CCD is enabled, execute the CCD motion clamping.
if ccd_is_enabled {
// NOTE: don't take the forces into account when updating the CCD active flags because
// they have already been integrated into the velocities by the solver.
let ccd_active =
ccd_solver.update_ccd_active_flags(bodies, integration_parameters.dt, false);
if ccd_active {
self.run_ccd_motion_clamping(
&integration_parameters,
bodies,
colliders,
narrow_phase,
ccd_solver,
events,
);
}
}
// NOTE: we need to run the position solver **after** the
// CCD motion clamping because otherwise the clamping
// would undo the depenetration done by the position
// solver.
// This happens because our CCD use the real rigid-body
// velocities instead of just interpolating between
// isometries.
self.solve_position_constraints(&integration_parameters, bodies);
let clear_forces = remaining_substeps == 0;
self.advance_to_final_positions(bodies, colliders, clear_forces);
self.detect_collisions(
&integration_parameters,
broad_phase,
narrow_phase,
bodies,
colliders,
hooks,
events,
false,
);
bodies.modified_inactive_set.clear();
}
bodies.modified_inactive_set.clear();
self.counters.step_completed();
}
}
#[cfg(test)]
mod test {
use crate::dynamics::{IntegrationParameters, JointSet, RigidBodyBuilder, RigidBodySet};
use crate::dynamics::{
CCDSolver, IntegrationParameters, JointSet, RigidBodyBuilder, RigidBodySet,
};
use crate::geometry::{BroadPhase, ColliderBuilder, ColliderSet, NarrowPhase};
use crate::math::Vector;
use crate::pipeline::PhysicsPipeline;
@@ -278,6 +507,7 @@ mod test {
&mut bodies,
&mut colliders,
&mut joints,
&mut CCDSolver::new(),
&(),
&(),
);
@@ -321,6 +551,7 @@ mod test {
&mut bodies,
&mut colliders,
&mut joints,
&mut CCDSolver::new(),
&(),
&(),
);

View File

@@ -1,10 +1,9 @@
use crate::dynamics::RigidBodySet;
use crate::geometry::{
Collider, ColliderHandle, ColliderSet, InteractionGroups, PointProjection, Ray,
RayIntersection, SimdQuadTree,
RayIntersection, SimdQuadTree, AABB,
};
use crate::math::{Isometry, Point, Real, Vector};
use crate::parry::motion::RigidMotion;
use parry::query::details::{
IntersectionCompositeShapeShapeBestFirstVisitor,
NonlinearTOICompositeShapeShapeBestFirstVisitor, PointCompositeShapeProjBestFirstVisitor,
@@ -15,7 +14,7 @@ use parry::query::details::{
use parry::query::visitors::{
BoundingVolumeIntersectionsVisitor, PointIntersectionsVisitor, RayIntersectionsVisitor,
};
use parry::query::{DefaultQueryDispatcher, QueryDispatcher, TOI};
use parry::query::{DefaultQueryDispatcher, NonlinearRigidMotion, QueryDispatcher, TOI};
use parry::shape::{FeatureId, Shape, TypedSimdCompositeShape};
use std::sync::Arc;
@@ -39,6 +38,22 @@ struct QueryPipelineAsCompositeShape<'a> {
groups: InteractionGroups,
}
/// Indicates how the colliders position should be taken into account when
/// updating the query pipeline.
///
/// The two sweep-test modes build the acceleration structure from swept AABBs
/// (AABBs enclosing both the collider's current position and its anticipated
/// position), which is what sweep/CCD-style queries need.
pub enum QueryPipelineMode {
    /// The `Collider::position` is taken into account.
    CurrentPosition,
    /// The `RigidBody::next_position * Collider::position_wrt_parent` is taken into account for
    /// the colliders positions.
    SweepTestWithNextPosition,
    /// The `RigidBody::predict_position_using_velocity_and_forces * Collider::position_wrt_parent`
    /// is taken into account for the colliders position.
    SweepTestWithPredictedPosition {
        /// The time used to integrate the rigid-body's velocity and acceleration.
        dt: Real,
    },
}
impl<'a> TypedSimdCompositeShape for QueryPipelineAsCompositeShape<'a> {
type PartShape = dyn Shape;
type PartId = ColliderHandle;
@@ -95,7 +110,7 @@ impl QueryPipeline {
/// Initializes an empty query pipeline with a custom `QueryDispatcher`.
///
/// Use this constructor in order to use a custom `QueryDispatcher` that is
/// awary of your own user-defined shapes.
/// aware of your own user-defined shapes.
pub fn with_query_dispatcher<D>(d: D) -> Self
where
D: 'static + QueryDispatcher,
@@ -108,11 +123,48 @@ impl QueryPipeline {
}
}
/// The query dispatcher used by this query pipeline for running scene queries
/// (the default one, or the custom dispatcher passed to `with_query_dispatcher`).
pub fn query_dispatcher(&self) -> &dyn QueryDispatcher {
    &*self.query_dispatcher
}
/// Update the acceleration structure on the query pipeline.
///
/// Equivalent to `update_with_mode` with `QueryPipelineMode::CurrentPosition`.
pub fn update(&mut self, bodies: &RigidBodySet, colliders: &ColliderSet) {
    self.update_with_mode(bodies, colliders, QueryPipelineMode::CurrentPosition)
}
/// Update the acceleration structure on the query pipeline.
pub fn update_with_mode(
&mut self,
bodies: &RigidBodySet,
colliders: &ColliderSet,
mode: QueryPipelineMode,
) {
if !self.tree_built {
let data = colliders.iter().map(|(h, c)| (h, c.compute_aabb()));
self.quadtree.clear_and_rebuild(data, self.dilation_factor);
match mode {
QueryPipelineMode::CurrentPosition => {
let data = colliders.iter().map(|(h, c)| (h, c.compute_aabb()));
self.quadtree.clear_and_rebuild(data, self.dilation_factor);
}
QueryPipelineMode::SweepTestWithNextPosition => {
let data = colliders.iter().map(|(h, c)| {
let next_position =
bodies[c.parent()].next_position * c.position_wrt_parent();
(h, c.compute_swept_aabb(&next_position))
});
self.quadtree.clear_and_rebuild(data, self.dilation_factor);
}
QueryPipelineMode::SweepTestWithPredictedPosition { dt } => {
let data = colliders.iter().map(|(h, c)| {
let next_position = bodies[c.parent()]
.predict_position_using_velocity_and_forces(dt)
* c.position_wrt_parent();
(h, c.compute_swept_aabb(&next_position))
});
self.quadtree.clear_and_rebuild(data, self.dilation_factor);
}
}
// FIXME: uncomment this once we handle insertion/removals properly.
// self.tree_built = true;
return;
@@ -127,10 +179,37 @@ impl QueryPipeline {
}
}
self.quadtree.update(
|handle| colliders[*handle].compute_aabb(),
self.dilation_factor,
);
match mode {
QueryPipelineMode::CurrentPosition => {
self.quadtree.update(
|handle| colliders[*handle].compute_aabb(),
self.dilation_factor,
);
}
QueryPipelineMode::SweepTestWithNextPosition => {
self.quadtree.update(
|handle| {
let co = &colliders[*handle];
let next_position =
bodies[co.parent()].next_position * co.position_wrt_parent();
co.compute_swept_aabb(&next_position)
},
self.dilation_factor,
);
}
QueryPipelineMode::SweepTestWithPredictedPosition { dt } => {
self.quadtree.update(
|handle| {
let co = &colliders[*handle];
let next_position = bodies[co.parent()]
.predict_position_using_velocity_and_forces(dt)
* co.position_wrt_parent();
co.compute_swept_aabb(&next_position)
},
self.dilation_factor,
);
}
}
}
/// Find the closest intersection between a ray and a set of collider.
@@ -336,6 +415,16 @@ impl QueryPipeline {
.map(|h| (h.1 .1 .0, h.1 .0, h.1 .1 .1))
}
/// Finds all handles of all the colliders with an AABB intersecting the given AABB.
///
/// The `callback` is invoked once per matching collider handle; its return
/// value presumably controls whether the traversal continues — TODO confirm
/// against `BoundingVolumeIntersectionsVisitor`.
pub fn colliders_with_aabb_intersecting_aabb(
    &self,
    aabb: &AABB,
    mut callback: impl FnMut(&ColliderHandle) -> bool,
) {
    let mut aabb_visitor = BoundingVolumeIntersectionsVisitor::new(aabb, &mut callback);
    self.quadtree.traverse_depth_first(&mut aabb_visitor);
}
/// Casts a shape at a constant linear velocity and retrieve the first collider it hits.
///
/// This is similar to ray-casting except that we are casting a whole shape instead of
@@ -386,20 +475,24 @@ impl QueryPipeline {
pub fn nonlinear_cast_shape(
&self,
colliders: &ColliderSet,
shape_motion: &dyn RigidMotion,
shape_motion: &NonlinearRigidMotion,
shape: &dyn Shape,
max_toi: Real,
target_distance: Real,
start_time: Real,
end_time: Real,
stop_at_penetration: bool,
groups: InteractionGroups,
) -> Option<(ColliderHandle, TOI)> {
let pipeline_shape = self.as_composite_shape(colliders, groups);
let pipeline_motion = NonlinearRigidMotion::identity();
let mut visitor = NonlinearTOICompositeShapeShapeBestFirstVisitor::new(
&*self.query_dispatcher,
shape_motion,
&pipeline_motion,
&pipeline_shape,
shape_motion,
shape,
max_toi,
target_distance,
start_time,
end_time,
stop_at_penetration,
);
self.quadtree.traverse_best_first(&mut visitor).map(|h| h.1)
}

View File

@@ -3,6 +3,7 @@
use na::{Matrix3, Point2, Point3, Scalar, SimdRealField, Vector2, Vector3};
use num::Zero;
use simba::simd::SimdValue;
use std::ops::IndexMut;
use parry::utils::SdpMatrix3;
use {
@@ -676,3 +677,34 @@ pub(crate) fn select_other<T: PartialEq>(pair: (T, T), elt: T) -> T {
pair.0
}
}
/// Methods for simultaneously indexing a container with two distinct indices.
pub trait IndexMut2<I>: IndexMut<I> {
    /// Gets mutable references to two distinct elements of the container.
    ///
    /// Panics if `i == j` or if either index is out of bounds.
    fn index_mut2(&mut self, i: usize, j: usize) -> (&mut Self::Output, &mut Self::Output);

    /// Gets a mutable reference to one element, and immutable reference to a second one.
    ///
    /// Panics if `i == j` or if either index is out of bounds.
    #[inline]
    fn index_mut_const(&mut self, i: usize, j: usize) -> (&mut Self::Output, &Self::Output) {
        let (a, b) = self.index_mut2(i, j);
        (a, &*b)
    }
}

impl<T> IndexMut2<usize> for Vec<T> {
    #[inline]
    fn index_mut2(&mut self, i: usize, j: usize) -> (&mut T, &mut T) {
        assert!(i != j, "Unable to index the same element twice.");
        assert!(i < self.len() && j < self.len(), "Index out of bounds.");

        // `split_at_mut` lets the borrow-checker prove the two references are
        // disjoint, so no `unsafe` raw-pointer aliasing is needed here.
        if i < j {
            let (left, right) = self.split_at_mut(j);
            (&mut left[i], &mut right[0])
        } else {
            let (left, right) = self.split_at_mut(i);
            (&mut right[0], &mut left[j])
        }
    }
}

View File

@@ -37,7 +37,7 @@ impl Box2dWorld {
joints: &JointSet,
) -> Self {
let mut world = b2::World::new(&na_vec_to_b2_vec(gravity));
world.set_continuous_physics(false);
world.set_continuous_physics(bodies.iter().any(|b| b.1.is_ccd_enabled()));
let mut res = Box2dWorld {
world,
@@ -77,14 +77,11 @@ impl Box2dWorld {
angular_velocity: body.angvel(),
linear_damping,
angular_damping,
bullet: body.is_ccd_enabled(),
..b2::BodyDef::new()
};
let b2_handle = self.world.create_body(&def);
self.rapier2box2d.insert(handle, b2_handle);
// Collider.
let mut b2_body = self.world.body_mut(b2_handle);
b2_body.set_bullet(false /* collider.is_ccd_enabled() */);
}
}
@@ -163,7 +160,7 @@ impl Box2dWorld {
fixture_def.restitution = collider.restitution;
fixture_def.friction = collider.friction;
fixture_def.density = collider.density();
fixture_def.density = collider.density().unwrap_or(1.0);
fixture_def.is_sensor = collider.is_sensor();
fixture_def.filter = b2::Filter::new();
@@ -215,8 +212,6 @@ impl Box2dWorld {
}
pub fn step(&mut self, counters: &mut Counters, params: &IntegrationParameters) {
// self.world.set_continuous_physics(world.integration_parameters.max_ccd_substeps != 0);
counters.step_started();
self.world.step(
params.dt,

View File

@@ -60,7 +60,6 @@ pub struct GraphicsManager {
b2wireframe: HashMap<RigidBodyHandle, bool>,
ground_color: Point3<f32>,
camera: Camera,
ground_handle: Option<RigidBodyHandle>,
}
impl GraphicsManager {
@@ -87,14 +86,9 @@ impl GraphicsManager {
c2color: HashMap::new(),
ground_color: Point3::new(0.5, 0.5, 0.5),
b2wireframe: HashMap::new(),
ground_handle: None,
}
}
pub fn set_ground_handle(&mut self, handle: Option<RigidBodyHandle>) {
self.ground_handle = handle
}
pub fn clear(&mut self, window: &mut Window) {
for sns in self.b2sn.values_mut() {
for sn in sns.iter_mut() {
@@ -630,19 +624,17 @@ impl GraphicsManager {
// );
for (_, ns) in self.b2sn.iter_mut() {
for n in ns.iter_mut() {
/*
if let Some(co) = colliders.get(n.collider()) {
let bo = &bodies[co.parent()];
if bo.is_dynamic() {
if bo.is_sleeping() {
n.set_color(Point3::new(1.0, 0.0, 0.0));
} else {
n.set_color(Point3::new(0.0, 1.0, 0.0));
}
}
}
*/
// if let Some(co) = colliders.get(n.collider()) {
// let bo = &_bodies[co.parent()];
//
// if bo.is_dynamic() {
// if bo.is_ccd_active() {
// n.set_color(Point3::new(1.0, 0.0, 0.0));
// } else {
// n.set_color(Point3::new(0.0, 1.0, 0.0));
// }
// }
// }
n.update(colliders);
n.draw(window);

View File

@@ -4,7 +4,7 @@ use crate::{
};
use kiss3d::window::Window;
use plugin::HarnessPlugin;
use rapier::dynamics::{IntegrationParameters, JointSet, RigidBodySet};
use rapier::dynamics::{CCDSolver, IntegrationParameters, JointSet, RigidBodySet};
use rapier::geometry::{BroadPhase, ColliderSet, NarrowPhase};
use rapier::math::Vector;
use rapier::pipeline::{ChannelEventCollector, PhysicsHooks, PhysicsPipeline, QueryPipeline};
@@ -133,6 +133,7 @@ impl Harness {
self.physics.broad_phase = BroadPhase::new();
self.physics.narrow_phase = NarrowPhase::new();
self.state.timestep_id = 0;
self.physics.ccd_solver = CCDSolver::new();
self.physics.query_pipeline = QueryPipeline::new();
self.physics.pipeline = PhysicsPipeline::new();
self.physics.pipeline.counters.enable();
@@ -179,6 +180,7 @@ impl Harness {
&mut physics.bodies,
&mut physics.colliders,
&mut physics.joints,
&mut physics.ccd_solver,
&*physics.hooks,
event_handler,
);
@@ -194,6 +196,7 @@ impl Harness {
&mut self.physics.bodies,
&mut self.physics.colliders,
&mut self.physics.joints,
&mut self.physics.ccd_solver,
&*self.physics.hooks,
&self.event_handler,
);

View File

@@ -228,7 +228,11 @@ fn nphysics_collider_from_rapier_collider(
}
};
let density = if is_dynamic { collider.density() } else { 0.0 };
let density = if is_dynamic {
collider.density().unwrap_or(0.0)
} else {
0.0
};
Some(
ColliderDesc::new(shape)

View File

@@ -1,5 +1,5 @@
use crossbeam::channel::Receiver;
use rapier::dynamics::{IntegrationParameters, JointSet, RigidBodySet};
use rapier::dynamics::{CCDSolver, IntegrationParameters, JointSet, RigidBodySet};
use rapier::geometry::{BroadPhase, ColliderSet, ContactEvent, IntersectionEvent, NarrowPhase};
use rapier::math::Vector;
use rapier::pipeline::{PhysicsHooks, PhysicsPipeline, QueryPipeline};
@@ -73,6 +73,7 @@ pub struct PhysicsState {
pub bodies: RigidBodySet,
pub colliders: ColliderSet,
pub joints: JointSet,
pub ccd_solver: CCDSolver,
pub pipeline: PhysicsPipeline,
pub query_pipeline: QueryPipeline,
pub integration_parameters: IntegrationParameters,
@@ -88,6 +89,7 @@ impl PhysicsState {
bodies: RigidBodySet::new(),
colliders: ColliderSet::new(),
joints: JointSet::new(),
ccd_solver: CCDSolver::new(),
pipeline: PhysicsPipeline::new(),
query_pipeline: QueryPipeline::new(),
integration_parameters: IntegrationParameters::default(),

View File

@@ -13,8 +13,8 @@ use physx::prelude::*;
use physx::scene::FrictionType;
use physx::traits::Class;
use physx_sys::{
PxBitAndByte, PxConvexFlags, PxConvexMeshGeometryFlags, PxHeightFieldSample,
PxMeshGeometryFlags, PxMeshScale_new, PxRigidActor,
FilterShaderCallbackInfo, PxBitAndByte, PxConvexFlags, PxConvexMeshGeometryFlags,
PxHeightFieldSample, PxMeshGeometryFlags, PxMeshScale_new, PxRigidActor,
};
use rapier::counters::Counters;
use rapier::dynamics::{
@@ -160,15 +160,24 @@ impl PhysxWorld {
FrictionType::Patch
};
let scene_desc = SceneDescriptor {
let mut scene_desc = SceneDescriptor {
gravity: gravity.into_physx(),
thread_count: num_threads as u32,
broad_phase_type: BroadPhaseType::AutomaticBoxPruning,
solver_type: SolverType::PGS,
friction_type,
ccd_max_passes: integration_parameters.max_ccd_substeps as u32,
..SceneDescriptor::new(())
};
let ccd_enabled = bodies.iter().any(|(_, rb)| rb.is_ccd_enabled());
if ccd_enabled {
scene_desc.simulation_filter_shader =
FilterShaderDescriptor::CallDefaultFirst(ccd_filter_shader);
scene_desc.flags.insert(SceneFlag::EnableCcd);
}
let mut scene: Owner<PxScene> = physics.create(scene_desc).unwrap();
let mut rapier2dynamic = HashMap::new();
let mut rapier2static = HashMap::new();
@@ -231,13 +240,13 @@ impl PhysxWorld {
}
}
// Update mass properties.
// Update mass properties and CCD flags.
for (rapier_handle, actor) in rapier2dynamic.iter_mut() {
let rb = &bodies[*rapier_handle];
let densities: Vec<_> = rb
.colliders()
.iter()
.map(|h| colliders[*h].density())
.map(|h| colliders[*h].density().unwrap_or(0.0))
.collect();
unsafe {
@@ -248,6 +257,23 @@ impl PhysxWorld {
std::ptr::null(),
false,
);
if rb.is_ccd_enabled() {
physx_sys::PxRigidBody_setRigidBodyFlag_mut(
std::mem::transmute(actor.as_mut()),
RigidBodyFlag::EnableCCD as u32,
true,
);
// physx_sys::PxRigidBody_setMinCCDAdvanceCoefficient_mut(
// std::mem::transmute(actor.as_mut()),
// 0.0,
// );
// physx_sys::PxRigidBody_setRigidBodyFlag_mut(
// std::mem::transmute(actor.as_mut()),
// RigidBodyFlag::EnableCCDFriction as u32,
// true,
// );
}
}
}
@@ -699,3 +725,8 @@ impl AdvanceCallback<PxArticulationLink, PxRigidDynamic> for OnAdvance {
) {
}
}
/// PhysX simulation filter shader that force-enables CCD contact detection.
///
/// ORs the `eDETECT_CCD_CONTACT` pair flag into the flags of every collider
/// pair passed through the filter, then returns `0`.
/// NOTE(review): registered via `FilterShaderDescriptor::CallDefaultFirst`, so
/// it presumably runs after PhysX's default filtering — confirm against the
/// physx-sys binding docs.
unsafe extern "C" fn ccd_filter_shader(data: *mut FilterShaderCallbackInfo) -> u16 {
    // SAFETY assumption: PhysX guarantees `data` and its `pairFlags` pointer
    // are valid for the duration of this callback — TODO confirm in physx-sys.
    (*(*data).pairFlags).mBits |= physx_sys::PxPairFlag::eDETECT_CCD_CONTACT as u16;
    0
}

View File

@@ -24,7 +24,7 @@ use rapier::dynamics::{
use rapier::geometry::{ColliderHandle, ColliderSet, NarrowPhase};
#[cfg(feature = "dim3")]
use rapier::geometry::{InteractionGroups, Ray};
use rapier::math::{Isometry, Vector};
use rapier::math::Vector;
use rapier::pipeline::PhysicsHooks;
#[cfg(all(feature = "dim2", feature = "other-backends"))]
@@ -125,7 +125,6 @@ pub struct Testbed {
nsteps: usize,
camera_locked: bool, // Used so that the camera can remain the same before and after we change backend or press the restart button.
plugins: Vec<Box<dyn TestbedPlugin>>,
hide_counters: bool,
// persistant_contacts: HashMap<ContactId, bool>,
font: Rc<Font>,
cursor_pos: Point2<f32>,
@@ -185,7 +184,6 @@ impl Testbed {
graphics,
nsteps: 1,
camera_locked: false,
hide_counters: true,
// persistant_contacts: HashMap::new(),
font: Font::default(),
cursor_pos: Point2::new(0.0f32, 0.0),
@@ -225,14 +223,6 @@ impl Testbed {
self.state.can_grab_behind_ground = allow;
}
pub fn hide_performance_counters(&mut self) {
self.hide_counters = true;
}
pub fn show_performance_counters(&mut self) {
self.hide_counters = false;
}
pub fn integration_parameters_mut(&mut self) -> &mut IntegrationParameters {
&mut self.harness.physics.integration_parameters
}
@@ -769,11 +759,38 @@ impl Testbed {
}
#[cfg(feature = "dim3")]
fn handle_special_event(&mut self, window: &mut Window, _event: Event) {
fn handle_special_event(&mut self, window: &mut Window, event: Event) {
use rapier::dynamics::RigidBodyBuilder;
use rapier::geometry::ColliderBuilder;
if window.is_conrod_ui_capturing_mouse() {
return;
}
match event.value {
WindowEvent::Key(Key::Space, Action::Release, _) => {
let cam_pos = self.graphics.camera().view_transform().inverse();
let forward = cam_pos * -Vector::z();
let vel = forward * 1000.0;
let bodies = &mut self.harness.physics.bodies;
let colliders = &mut self.harness.physics.colliders;
let collider = ColliderBuilder::cuboid(4.0, 2.0, 0.4).density(20.0).build();
// let collider = ColliderBuilder::ball(2.0).density(1.0).build();
let body = RigidBodyBuilder::new_dynamic()
.position(cam_pos)
.linvel(vel.x, vel.y, vel.z)
.ccd_enabled(true)
.build();
let body_handle = bodies.insert(body);
colliders.insert(collider, body_handle, bodies);
self.graphics.add(window, body_handle, bodies, colliders);
}
_ => {}
}
/*
match event.value {
WindowEvent::MouseButton(MouseButton::Button1, Action::Press, modifier) => {
@@ -1198,18 +1215,18 @@ impl State for Testbed {
}
}
if self
.state
.prev_flags
.contains(TestbedStateFlags::SUB_STEPPING)
!= self.state.flags.contains(TestbedStateFlags::SUB_STEPPING)
{
self.harness
.physics
.integration_parameters
.return_after_ccd_substep =
self.state.flags.contains(TestbedStateFlags::SUB_STEPPING);
}
// if self
// .state
// .prev_flags
// .contains(TestbedStateFlags::SUB_STEPPING)
// != self.state.flags.contains(TestbedStateFlags::SUB_STEPPING)
// {
// self.harness
// .physics
// .integration_parameters
// .return_after_ccd_substep =
// self.state.flags.contains(TestbedStateFlags::SUB_STEPPING);
// }
if self.state.prev_flags.contains(TestbedStateFlags::SHAPES)
!= self.state.flags.contains(TestbedStateFlags::SHAPES)
@@ -1315,14 +1332,6 @@ impl State for Testbed {
for plugin in &mut self.plugins {
plugin.run_callbacks(window, &mut self.harness.physics, &self.harness.state);
}
// if true {
// // !self.hide_counters {
// #[cfg(not(feature = "log"))]
// println!("{}", self.world.counters);
// #[cfg(feature = "log")]
// debug!("{}", self.world.counters);
// }
}
}
@@ -1452,8 +1461,19 @@ Hashes at frame: {}
}
fn draw_contacts(window: &mut Window, nf: &NarrowPhase, colliders: &ColliderSet) {
use rapier::math::Isometry;
for pair in nf.contact_pairs() {
for manifold in &pair.manifolds {
/*
for contact in &manifold.data.solver_contacts {
let p = contact.point;
let n = manifold.data.normal;
use crate::engine::GraphicsWindow;
window.draw_graphics_line(&p, &(p + n * 0.4), &Point3::new(0.5, 1.0, 0.5));
}
*/
for pt in manifold.contacts() {
let color = if pt.dist > 0.0 {
Point3::new(0.0, 0.0, 1.0)