Fix the parallel solver to work properly with CCD.

This commit is contained in:
Crozet Sébastien
2021-03-31 10:53:44 +02:00
parent 88933bd431
commit e9f6384081
6 changed files with 148 additions and 53 deletions

View File

@@ -3,6 +3,7 @@ name = "rapier-examples-2d"
version = "0.1.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
default-run = "all_examples2"
[features]
parallel = [ "rapier2d/parallel", "rapier_testbed2d/parallel" ]
@@ -26,4 +27,4 @@ path = "../build/rapier2d"
[[bin]]
name = "all_examples2"
path = "./all_examples2.rs"
path = "./all_examples2.rs"

View File

@@ -3,6 +3,7 @@ name = "rapier-examples-3d"
version = "0.1.0"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
edition = "2018"
default-run = "all_examples3"
[features]
parallel = [ "rapier3d/parallel", "rapier_testbed3d/parallel" ]

View File

@@ -158,9 +158,12 @@ impl InteractionGroups {
}
pub fn clear(&mut self) {
self.buckets.clear();
self.body_masks.clear();
self.grouped_interactions.clear();
#[cfg(feature = "simd-is-enabled")]
{
self.buckets.clear();
self.body_masks.clear();
self.grouped_interactions.clear();
}
self.nongrouped_interactions.clear();
}

View File

@@ -70,6 +70,8 @@ pub(crate) struct ThreadContext {
pub impulse_writeback_index: AtomicUsize,
pub joint_writeback_index: AtomicUsize,
pub body_integration_index: AtomicUsize,
pub body_force_integration_index: AtomicUsize,
pub num_force_integrated_bodies: AtomicUsize,
pub num_integrated_bodies: AtomicUsize,
// Position solver.
pub position_constraint_initialization_index: AtomicUsize,
@@ -97,6 +99,8 @@ impl ThreadContext {
num_solved_interactions: AtomicUsize::new(0),
impulse_writeback_index: AtomicUsize::new(0),
joint_writeback_index: AtomicUsize::new(0),
body_force_integration_index: AtomicUsize::new(0),
num_force_integrated_bodies: AtomicUsize::new(0),
body_integration_index: AtomicUsize::new(0),
num_integrated_bodies: AtomicUsize::new(0),
position_constraint_initialization_index: AtomicUsize::new(0),
@@ -146,7 +150,82 @@ impl ParallelIslandSolver {
}
}
pub fn solve_island<'s>(
pub fn solve_position_constraints<'s>(
&'s mut self,
scope: &Scope<'s>,
island_id: usize,
params: &'s IntegrationParameters,
bodies: &'s mut RigidBodySet,
) {
let num_threads = rayon::current_num_threads();
let num_task_per_island = num_threads; // (num_threads / num_islands).max(1); // TODO: not sure this is the best value. Also, perhaps it is better to interleave tasks of each island?
self.thread = ThreadContext::new(8); // TODO: could we compute some kind of optimal value here?
self.positions.clear();
self.positions
.resize(bodies.active_island(island_id).len(), Isometry::identity());
for _ in 0..num_task_per_island {
// We use AtomicPtr because it is Send+Sync while *mut is not.
// See https://internals.rust-lang.org/t/shouldnt-pointers-be-send-sync-or/8818
let thread = &self.thread;
let positions = std::sync::atomic::AtomicPtr::new(&mut self.positions as *mut _);
let bodies = std::sync::atomic::AtomicPtr::new(bodies as *mut _);
let parallel_contact_constraints =
std::sync::atomic::AtomicPtr::new(&mut self.parallel_contact_constraints as *mut _);
let parallel_joint_constraints =
std::sync::atomic::AtomicPtr::new(&mut self.parallel_joint_constraints as *mut _);
scope.spawn(move |_| {
// Transmute *mut -> &mut
let positions: &mut Vec<Isometry<Real>> =
unsafe { std::mem::transmute(positions.load(Ordering::Relaxed)) };
let bodies: &mut RigidBodySet =
unsafe { std::mem::transmute(bodies.load(Ordering::Relaxed)) };
let parallel_contact_constraints: &mut ParallelSolverConstraints<AnyVelocityConstraint, AnyPositionConstraint> = unsafe {
std::mem::transmute(parallel_contact_constraints.load(Ordering::Relaxed))
};
let parallel_joint_constraints: &mut ParallelSolverConstraints<AnyJointVelocityConstraint, AnyJointPositionConstraint> = unsafe {
std::mem::transmute(parallel_joint_constraints.load(Ordering::Relaxed))
};
enable_flush_to_zero!(); // Ensure this is enabled on each thread.
// Write results back to rigid bodies and integrate velocities.
let island_range = bodies.active_island_range(island_id);
let active_bodies = &bodies.active_dynamic_set[island_range];
let bodies = &mut bodies.bodies;
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.body_integration_index, thread.num_integrated_bodies] {
let rb = &mut bodies[handle.0];
positions[rb.active_set_offset] = rb.next_position;
}
}
ThreadContext::lock_until_ge(&thread.num_integrated_bodies, active_bodies.len());
ParallelPositionSolver::solve(
&thread,
params,
positions,
parallel_contact_constraints,
parallel_joint_constraints
);
// Write results back to rigid bodies.
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.position_writeback_index] {
let rb = &mut bodies[handle.0];
rb.set_next_position(positions[rb.active_set_offset]);
}
}
})
}
}
pub fn init_constraints_and_solve_velocity_constraints<'s>(
&'s mut self,
scope: &Scope<'s>,
island_id: usize,
@@ -184,29 +263,6 @@ impl ParallelIslandSolver {
self.positions
.resize(bodies.active_island(island_id).len(), Isometry::identity());
{
// Initialize `mj_lambdas` (per-body velocity deltas) with external accelerations (gravity etc):
let island_range = bodies.active_island_range(island_id);
let active_bodies = &bodies.active_dynamic_set[island_range];
let bodies = &mut bodies.bodies;
let thread = &self.thread;
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.body_integration_index, thread.num_integrated_bodies] {
let rb = &mut bodies[handle.0];
let dvel = &mut self.mj_lambdas[rb.active_set_offset];
dvel.linear += rb.force * (rb.effective_inv_mass * params.dt);
// dvel.angular is actually storing angular velocity delta multiplied by the square root of the inertia tensor:
dvel.angular += rb.effective_world_inv_inertia_sqrt * rb.torque * params.dt;
}
}
}
for _ in 0..num_task_per_island {
// We use AtomicPtr because it is Send+Sync while *mut is not.
// See https://internals.rust-lang.org/t/shouldnt-pointers-be-send-sync-or/8818
@@ -241,6 +297,32 @@ impl ParallelIslandSolver {
};
enable_flush_to_zero!(); // Ensure this is enabled on each thread.
// Initialize `mj_lambdas` (per-body velocity deltas) with external accelerations (gravity etc):
{
let island_range = bodies.active_island_range(island_id);
let active_bodies = &bodies.active_dynamic_set[island_range];
let bodies = &mut bodies.bodies;
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.body_force_integration_index, thread.num_force_integrated_bodies] {
let rb = &mut bodies[handle.0];
let dvel = &mut mj_lambdas[rb.active_set_offset];
// NOTE: `dvel.angular` is actually storing angular velocity delta multiplied
// by the square root of the inertia tensor:
dvel.angular += rb.effective_world_inv_inertia_sqrt * rb.torque * params.dt;
dvel.linear += rb.force * (rb.effective_inv_mass * params.dt);
}
}
// We need to wait for every body to be force-integrated because their
// angular and linear velocities are needed by the constraints initialization.
ThreadContext::lock_until_ge(&thread.num_force_integrated_bodies, active_bodies.len());
}
parallel_contact_constraints.fill_constraints(&thread, params, bodies, manifolds);
parallel_joint_constraints.fill_constraints(&thread, params, bodies, joints);
ThreadContext::lock_until_ge(
@@ -274,29 +356,7 @@ impl ParallelIslandSolver {
let dvel = mj_lambdas[rb.active_set_offset];
rb.linvel += dvel.linear;
rb.angvel += rb.effective_world_inv_inertia_sqrt.transform_vector(dvel.angular);
rb.integrate(params.dt);
positions[rb.active_set_offset] = rb.next_position;
}
}
// We need to wait for every body to be integrated because it also
// initializes `positions` to the updated values.
ThreadContext::lock_until_ge(&thread.num_integrated_bodies, active_bodies.len());
ParallelPositionSolver::solve(
&thread,
params,
positions,
parallel_contact_constraints,
parallel_joint_constraints
);
// Write results back to rigid bodies.
concurrent_loop! {
let batch_size = thread.batch_size;
for handle in active_bodies[thread.position_writeback_index] {
let rb = &mut bodies[handle.0];
rb.set_next_position(positions[rb.active_set_offset]);
rb.integrate_next_position(params.dt, true);
}
}
})

View File

@@ -123,6 +123,36 @@ impl PhysicsPipeline {
)
}
}
#[cfg(feature = "parallel")]
{
use crate::geometry::ContactManifold;
use rayon::prelude::*;
use std::sync::atomic::Ordering;
let num_islands = bodies.num_islands();
let solvers = &mut self.solvers[..num_islands];
let bodies = &std::sync::atomic::AtomicPtr::new(bodies as *mut _);
rayon::scope(|scope| {
enable_flush_to_zero!();
solvers
.par_iter_mut()
.enumerate()
.for_each(|(island_id, solver)| {
let bodies: &mut RigidBodySet =
unsafe { std::mem::transmute(bodies.load(Ordering::Relaxed)) };
solver.solve_position_constraints(
scope,
island_id,
integration_parameters,
bodies,
)
});
});
}
}
fn build_islands_and_solve_velocity_constraints(
@@ -216,7 +246,7 @@ impl PhysicsPipeline {
let joints: &mut Vec<JointGraphEdge> =
unsafe { std::mem::transmute(joints.load(Ordering::Relaxed)) };
solver.solve_island(
solver.init_constraints_and_solve_velocity_constraints(
scope,
island_id,
integration_parameters,
@@ -225,7 +255,6 @@ impl PhysicsPipeline {
&manifold_indices[island_id],
joints,
&joint_constraint_indices[island_id],
is_last_substep,
)
});
});

View File

@@ -180,6 +180,7 @@ impl Harness {
&mut physics.bodies,
&mut physics.colliders,
&mut physics.joints,
&mut physics.ccd_solver,
&*physics.hooks,
event_handler,
);