Verified Commit 9df73032 authored by Justin Carpentier's avatar Justin Carpentier
Browse files

python: expose // algos

parent a2564620
......@@ -84,6 +84,9 @@ ENDIF(BUILD_WITH_HPP_FCL_PYTHON_BINDINGS)
# Pre-create the binary-tree directories that will receive the symlinked/installed
# Python-binding headers (one directory per binding submodule).
MAKE_DIRECTORY("${${PROJECT_NAME}_BINARY_DIR}/include/pinocchio/bindings/python/parsers")
MAKE_DIRECTORY("${${PROJECT_NAME}_BINARY_DIR}/include/pinocchio/bindings/python/serialization")
MAKE_DIRECTORY("${${PROJECT_NAME}_BINARY_DIR}/include/pinocchio/bindings/python/algorithm")
# The parallel-algorithm headers are only generated when OpenMP support is enabled.
IF(BUILD_WITH_OPENMP_SUPPORT)
MAKE_DIRECTORY("${${PROJECT_NAME}_BINARY_DIR}/include/pinocchio/bindings/python/algorithm/parallel")
ENDIF(BUILD_WITH_OPENMP_SUPPORT)
MAKE_DIRECTORY("${${PROJECT_NAME}_BINARY_DIR}/include/pinocchio/bindings/python/utils")
SYMLINK_AND_INSTALL_HEADERS("${${PROJECT_NAME}_PYTHON_HEADERS}" "bindings/python")
......
//
// Copyright (c) 2021 INRIA
//
#include "pinocchio/bindings/python/algorithm/algorithms.hpp"
#include "pinocchio/algorithm/parallel/aba.hpp"
#include <eigenpy/eigen-from-python.hpp>
namespace pinocchio
{
namespace python
{
// In-place parallel-ABA wrapper: forwards to pinocchio::aba and writes the
// resulting joint accelerations into the caller-provided matrix `a`
// (one column per batch element).
static void aba_proxy_res(const int num_thread, ModelPool & pool,
                          const Eigen::MatrixXd & q, const Eigen::MatrixXd & v, const Eigen::MatrixXd & tau,
                          Eigen::Ref<Eigen::MatrixXd> a)
{
  ::pinocchio::aba(num_thread, pool, q, v, tau, a);
}
// Allocating parallel-ABA wrapper: creates the output matrix (same layout as
// the velocity batch v), runs the parallel ABA into it, and returns it by value.
static Eigen::MatrixXd aba_proxy(const int num_thread, ModelPool & pool,
                                 const Eigen::MatrixXd & q, const Eigen::MatrixXd & v, const Eigen::MatrixXd & tau)
{
  Eigen::MatrixXd res(v.rows(), v.cols());
  ::pinocchio::aba(num_thread, pool, q, v, tau, res);
  return res;
}
/// Expose the parallel ABA algorithm to Python as two overloads of "aba":
/// one that allocates and returns the acceleration batch, and one that fills
/// a caller-provided output matrix.
void exposeParallelABA()
{
  namespace bp = boost::python;
  using namespace Eigen;

  // Allocating overload.
  // BUGFIX: the keyword list must match the C++ signature of aba_proxy
  // (num_thread, pool, q, v, tau) — the last keyword is "tau", not "a";
  // otherwise Python keyword calls (tau=...) would fail or bind wrongly.
  bp::def("aba",
          aba_proxy,
          bp::args("num_thread","pool","q","v","tau"),
          "Computes in parallel the ABA and returns the result.\n\n"
          "Parameters:\n"
          "\tnum_thread: number of threads used for the computation\n"
          "\tpool: pool of model/data\n"
          "\tq: the joint configuration vector (size model.nq x batch_size)\n"
          "\tv: the joint velocity vector (size model.nv x batch_size)\n"
          "\ttau: the joint torque vector (size model.nv x batch_size)\n");

  // In-place overload.
  // BUGFIX: aba_proxy_res takes (…, tau, a) in that order — the keywords
  // "tau" and "a" were swapped relative to the signature.
  bp::def("aba",
          aba_proxy_res,
          bp::args("num_thread","pool","q","v","tau","a"),
          "Computes in parallel the ABA, store the result in a.\n\n"
          "Parameters:\n"
          "\tnum_thread: number of threads used for the computation\n"
          "\tpool: pool of model/data\n"
          "\tq: the joint configuration vector (size model.nq x batch_size)\n"
          "\tv: the joint velocity vector (size model.nv x batch_size)\n"
          "\ttau: the joint torque vector (size model.nv x batch_size)\n"
          "\ta: the resulting joint acceleration vectors (size model.nv x batch_size)\n");
}
} // namespace python
} // namespace pinocchio
//
// Copyright (c) 2021 INRIA
//
#include <omp.h>
#include "pinocchio/bindings/python/fwd.hpp"
namespace pinocchio
{
namespace python
{
// Forward declarations of the per-algorithm exposure functions, each defined
// in its own translation unit.
void exposeParallelRNEA();
void exposeParallelABA();
void exposeParallelGeometry();
// Registers all the OpenMP-parallel algorithm bindings with the current
// Python module, plus a helper to query the available thread budget.
void exposeParallelAlgorithms()
{
namespace bp = boost::python;
exposeParallelRNEA();
exposeParallelABA();
// Parallel collision checking needs the hpp-fcl geometry support.
#ifdef PINOCCHIO_WITH_HPP_FCL
exposeParallelGeometry();
#endif
// Expose OpenMP's thread-count query directly so users can size num_thread.
bp::def("omp_get_max_threads",&omp_get_max_threads,
"Returns an upper bound on the number of threads that could be used.");
}
} // namespace python
} // namespace pinocchio
//
// Copyright (c) 2021 INRIA
//
#include "pinocchio/bindings/python/algorithm/algorithms.hpp"
#include "pinocchio/algorithm/parallel/geometry.hpp"
#include <eigenpy/eigen-from-python.hpp>
namespace pinocchio
{
namespace python
{
using namespace Eigen;
typedef Eigen::Matrix<bool,Eigen::Dynamic,1> VectorXb;
// Single-data wrapper: runs the parallel collision checks on one
// GeometryModel/GeometryData pair and reports whether any pair collides.
static bool computeCollisions_proxy(const int num_threads,
                                    const GeometryModel & geom_model,
                                    GeometryData & geom_data,
                                    const bool stopAtFirstCollision = false)
{
  const bool in_collision
    = computeCollisions(num_threads, geom_model, geom_data, stopAtFirstCollision);
  return in_collision;
}
// Full wrapper: updates the kinematics for configuration q, then runs the
// parallel collision checks; reports whether any pair collides.
static bool computeCollisions_full_proxy(const int num_threads,
                                         const Model & model,
                                         Data & data,
                                         const GeometryModel & geom_model,
                                         GeometryData & geom_data,
                                         const Eigen::VectorXd & q,
                                         const bool stopAtFirstCollision = false)
{
  const bool in_collision
    = computeCollisions(num_threads, model, data, geom_model, geom_data, q, stopAtFirstCollision);
  return in_collision;
}
// Pool-based, in-place wrapper: evaluates collisions for a batch of
// configurations (one per column of q) and writes one boolean per batch
// element into the caller-provided vector `res`.
static void computeCollisions_pool_proxy_res(const int num_thread, GeometryPool & pool,
                                             const Eigen::MatrixXd & q, Eigen::Ref<VectorXb> res,
                                             bool stop_at_first_collision = false)
{
  computeCollisions(num_thread, pool, q, res, stop_at_first_collision);
}
// Pool-based, allocating wrapper: evaluates collisions for a batch of
// configurations and returns one boolean per batch element (q column).
static VectorXb computeCollisions_pool_proxy(const int num_thread, GeometryPool & pool,
                                             const Eigen::MatrixXd & q,
                                             bool stop_at_first_collision = false)
{
  VectorXb collision_flags(q.cols());
  computeCollisions(num_thread, pool, q, collision_flags, stop_at_first_collision);
  return collision_flags;
}
// Generate boost.python overload dispatchers so the trailing default argument
// (stop_at_first_collision / stopAtFirstCollision) is optional from Python:
// each macro covers arities min..max of the wrapped proxy function.
BOOST_PYTHON_FUNCTION_OVERLOADS(computeCollisions_pool_proxy_res_overload,computeCollisions_pool_proxy_res,4,5)
BOOST_PYTHON_FUNCTION_OVERLOADS(computeCollisions_pool_proxy_overload,computeCollisions_pool_proxy,3,4)
BOOST_PYTHON_FUNCTION_OVERLOADS(computeCollisions_overload,computeCollisions_proxy,3,4)
BOOST_PYTHON_FUNCTION_OVERLOADS(computeCollisions_full_overload,computeCollisions_full_proxy,6,7)
/// Expose the parallel collision-checking algorithms to Python as four
/// overloads of "computeCollisions": single data (with or without a
/// kinematics update) and pool/batch variants (allocating or in-place).
void exposeParallelGeometry()
{
  namespace bp = boost::python;
  using namespace Eigen;

  // Single-data overload: assumes the geometry placements are up to date.
  bp::def("computeCollisions",
          computeCollisions_proxy,
          computeCollisions_overload(bp::args("num_thread","geometry_model","geometry_data","stop_at_first_collision"),
          "Evaluates in parallel the collisions for a single data and returns the result.\n\n"
          "Parameters:\n"
          "\tnum_thread: number of threads used for the computation\n"
          "\tgeometry_model: the geometry model\n"
          "\tgeometry_data: the geometry data\n"
          "\tstop_at_first_collision: if set to true, stops when encountering the first collision.\n"));

  // Single-data overload that also updates the kinematics from q.
  // BUGFIX: help text said "tgeometry_model" (typo) — now "geometry_model".
  bp::def("computeCollisions",
          computeCollisions_full_proxy,
          computeCollisions_full_overload(bp::args("num_thread","model","data","geometry_model","geometry_data","q","stop_at_first_collision"),
          "Evaluates in parallel the collisions for a single data and returns the result.\n\n"
          "Parameters:\n"
          "\tnum_thread: number of threads used for the computation\n"
          "\tmodel: the kinematic model\n"
          "\tdata: the data associated to the model\n"
          "\tgeometry_model: the geometry model\n"
          "\tgeometry_data: the geometry data associated to the geometry_model\n"
          "\tq: the joint configuration vector (size model.nq)\n"
          "\tstop_at_first_collision: if set to true, stops when encountering the first collision.\n"));

  // Pool/batch overload returning one boolean per configuration.
  bp::def("computeCollisions",
          computeCollisions_pool_proxy,
          computeCollisions_pool_proxy_overload(bp::args("num_thread","pool","q","stop_at_first_collision"),
          "Evaluates in parallel the collisions and returns the result.\n\n"
          "Parameters:\n"
          "\tnum_thread: number of threads used for the computation\n"
          "\tpool: pool of geometry model/ geometry data\n"
          "\tq: the joint configuration vector (size model.nq x batch_size)\n"
          "\tstop_at_first_collision: if set to true, stop when encountering the first collision in a batch element.\n"));

  // Pool/batch overload writing into a pre-allocated result vector.
  bp::def("computeCollisions",
          computeCollisions_pool_proxy_res,
          computeCollisions_pool_proxy_res_overload(bp::args("num_thread","pool","q","res","stop_at_first_collision"),
          "Evaluates in parallel the collisions and stores the result in res.\n\n"
          "Parameters:\n"
          "\tnum_thread: number of threads used for the computation\n"
          "\tpool: pool of geometry model/ geometry data\n"
          "\tq: the joint configuration vector (size model.nq x batch_size)\n"
          "\tres: the resulting collision vector (batch_size)\n"
          "\tstop_at_first_collision: if set to true, stop when encountering the first collision in a batch element.\n"));
}
} // namespace python
} // namespace pinocchio
//
// Copyright (c) 2021 INRIA
//
#include "pinocchio/bindings/python/algorithm/algorithms.hpp"
#include "pinocchio/algorithm/parallel/rnea.hpp"
#include <eigenpy/eigen-from-python.hpp>
namespace pinocchio
{
namespace python
{
// In-place parallel-RNEA wrapper: forwards to pinocchio::rnea and writes the
// resulting joint torques into the caller-provided matrix `tau`
// (one column per batch element).
static void rnea_proxy_res(const int num_thread, ModelPool & pool,
                           const Eigen::MatrixXd & q, const Eigen::MatrixXd & v, const Eigen::MatrixXd & a,
                           Eigen::Ref<Eigen::MatrixXd> tau)
{
  ::pinocchio::rnea(num_thread, pool, q, v, a, tau);
}
// Allocating parallel-RNEA wrapper: creates the output matrix (same layout as
// the velocity batch v), runs the parallel RNEA into it, and returns it by value.
static Eigen::MatrixXd rnea_proxy(const int num_thread, ModelPool & pool,
                                  const Eigen::MatrixXd & q, const Eigen::MatrixXd & v, const Eigen::MatrixXd & a)
{
  Eigen::MatrixXd res(v.rows(), v.cols());
  ::pinocchio::rnea(num_thread, pool, q, v, a, res);
  return res;
}
/// Expose the parallel RNEA algorithm to Python as two overloads of "rnea":
/// one that allocates and returns the torque batch, and one that fills a
/// caller-provided output matrix.
void exposeParallelRNEA()
{
  namespace bp = boost::python;
  using namespace Eigen;

  // Allocating overload.
  const char * const doc_returning =
    "Computes in parallel the RNEA and returns the result.\n\n"
    "Parameters:\n"
    "\tnum_thread: number of threads used for the computation\n"
    "\tpool: pool of model/data\n"
    "\tq: the joint configuration vector (size model.nq x batch_size)\n"
    "\tv: the joint velocity vector (size model.nv x batch_size)\n"
    "\ta: the joint acceleration vector (size model.nv x batch_size)\n";
  bp::def("rnea", rnea_proxy,
          bp::args("num_thread","pool","q","v","a"),
          doc_returning);

  // In-place overload.
  const char * const doc_in_place =
    "Computes in parallel the RNEA and stores the result in tau.\n\n"
    "Parameters:\n"
    "\tnum_thread: number of threads used for the computation\n"
    "\tpool: pool of model/data\n"
    "\tq: the joint configuration vector (size model.nq x batch_size)\n"
    "\tv: the joint velocity vector (size model.nv x batch_size)\n"
    "\ta: the joint acceleration vector (size model.nv x batch_size)\n"
    "\ttau: the resulting joint torque vectors (size model.nv x batch_size)\n";
  bp::def("rnea", rnea_proxy_res,
          bp::args("num_thread","pool","q","v","a","tau"),
          doc_in_place);
}
} // namespace python
} // namespace pinocchio
......@@ -46,6 +46,7 @@ namespace pinocchio
#ifdef PINOCCHIO_PYTHON_INTERFACE_WITH_OPENMP
void exposePool();
void exposeParallelAlgorithms();
#endif
} // namespace python
......
......@@ -98,6 +98,7 @@ BOOST_PYTHON_MODULE(pinocchio_pywrap)
#ifdef PINOCCHIO_PYTHON_INTERFACE_WITH_OPENMP
exposePool();
exposeParallelAlgorithms();
#endif
exposeVersion();
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment