From 0048ee99c766fd7c5bf4c2599ec6099789bba589 Mon Sep 17 00:00:00 2001
From: Guilhem Saurel <guilhem.saurel@laas.fr>
Date: Tue, 17 Apr 2018 16:50:14 +0200
Subject: [PATCH] [Doc] add labs, rework structure

---
 doc/{maths => a-maths}/dynamics.md            |   0
 doc/{maths => a-maths}/geometry.md            |   0
 doc/{maths => a-maths}/intro.md               |   0
 doc/{maths => a-maths}/kinematics.md          |   0
 doc/{maths => a-maths}/se3.md                 |   0
 doc/{usage => b-usage}/creating-models.md     |   0
 doc/{usage => b-usage}/crtp.md                |   0
 doc/b-usage/intro.md                          |   1 +
 .../direct-geometry.md                        |   0
 doc/c-tutorials/intro.md                      |   4 +
 .../inverse-kinematics.md                     |   0
 doc/d-labs/1-directgeom.md                    | 303 ++++++++++++++++++
 doc/d-labs/2-invgeom.md                       | 270 ++++++++++++++++
 doc/d-labs/3-invkine.md                       | 139 ++++++++
 doc/d-labs/4-dyn.md                           | 224 +++++++++++++
 doc/d-labs/5-planner.md                       | 199 ++++++++++++
 doc/d-labs/6-wpg.md                           | 193 +++++++++++
 doc/d-labs/7-learn.md                         | 209 ++++++++++++
 doc/d-labs/intro.md                           |   3 +
 doc/treeview.dox                              |  32 +-
 doc/tutorials/intro.md                        |   4 -
 21 files changed, 1566 insertions(+), 15 deletions(-)
 rename doc/{maths => a-maths}/dynamics.md (100%)
 rename doc/{maths => a-maths}/geometry.md (100%)
 rename doc/{maths => a-maths}/intro.md (100%)
 rename doc/{maths => a-maths}/kinematics.md (100%)
 rename doc/{maths => a-maths}/se3.md (100%)
 rename doc/{usage => b-usage}/creating-models.md (100%)
 rename doc/{usage => b-usage}/crtp.md (100%)
 create mode 100644 doc/b-usage/intro.md
 rename doc/{tutorials => c-tutorials}/direct-geometry.md (100%)
 create mode 100644 doc/c-tutorials/intro.md
 rename doc/{tutorials => c-tutorials}/inverse-kinematics.md (100%)
 create mode 100644 doc/d-labs/1-directgeom.md
 create mode 100644 doc/d-labs/2-invgeom.md
 create mode 100644 doc/d-labs/3-invkine.md
 create mode 100644 doc/d-labs/4-dyn.md
 create mode 100644 doc/d-labs/5-planner.md
 create mode 100644 doc/d-labs/6-wpg.md
 create mode 100644 doc/d-labs/7-learn.md
 create mode 100644 doc/d-labs/intro.md
 delete mode 100644 doc/tutorials/intro.md

diff --git a/doc/maths/dynamics.md b/doc/a-maths/dynamics.md
similarity index 100%
rename from doc/maths/dynamics.md
rename to doc/a-maths/dynamics.md
diff --git a/doc/maths/geometry.md b/doc/a-maths/geometry.md
similarity index 100%
rename from doc/maths/geometry.md
rename to doc/a-maths/geometry.md
diff --git a/doc/maths/intro.md b/doc/a-maths/intro.md
similarity index 100%
rename from doc/maths/intro.md
rename to doc/a-maths/intro.md
diff --git a/doc/maths/kinematics.md b/doc/a-maths/kinematics.md
similarity index 100%
rename from doc/maths/kinematics.md
rename to doc/a-maths/kinematics.md
diff --git a/doc/maths/se3.md b/doc/a-maths/se3.md
similarity index 100%
rename from doc/maths/se3.md
rename to doc/a-maths/se3.md
diff --git a/doc/usage/creating-models.md b/doc/b-usage/creating-models.md
similarity index 100%
rename from doc/usage/creating-models.md
rename to doc/b-usage/creating-models.md
diff --git a/doc/usage/crtp.md b/doc/b-usage/crtp.md
similarity index 100%
rename from doc/usage/crtp.md
rename to doc/b-usage/crtp.md
diff --git a/doc/b-usage/intro.md b/doc/b-usage/intro.md
new file mode 100644
index 000000000..8f04b05ad
--- /dev/null
+++ b/doc/b-usage/intro.md
@@ -0,0 +1 @@
+# Usage
diff --git a/doc/tutorials/direct-geometry.md b/doc/c-tutorials/direct-geometry.md
similarity index 100%
rename from doc/tutorials/direct-geometry.md
rename to doc/c-tutorials/direct-geometry.md
diff --git a/doc/c-tutorials/intro.md b/doc/c-tutorials/intro.md
new file mode 100644
index 000000000..15f02f5a3
--- /dev/null
+++ b/doc/c-tutorials/intro.md
@@ -0,0 +1,4 @@
+# Tutorials
+
+These tutorials contain quick examples showing how Pinocchio can be used.
+If you need more detailed exercises, go to the [Labs](md_doc_labs_intro).
diff --git a/doc/tutorials/inverse-kinematics.md b/doc/c-tutorials/inverse-kinematics.md
similarity index 100%
rename from doc/tutorials/inverse-kinematics.md
rename to doc/c-tutorials/inverse-kinematics.md
diff --git a/doc/d-labs/1-directgeom.md b/doc/d-labs/1-directgeom.md
new file mode 100644
index 000000000..07c86fac2
--- /dev/null
+++ b/doc/d-labs/1-directgeom.md
@@ -0,0 +1,303 @@
+1. Move your body (aka direct geometry)
+=======================================
+
+Objective
+---------
+
+In this first series of exercises, we are going to start using the
+library Pinocchio. We will load in the simulator the model of a simple
+manipulator arm, the Universal Robot 5, or UR5. This model will be used
+for a positioning task using simple methods. This set of exercises
+emphasizes the first part of the class, about direct geometry.
+
+The software Pinocchio is a C++ library provided with a python wrapping
+that allows us to control it with a python terminal. Let's see how it
+works.
+
+Tutorial 1.0. Tips
+------------------
+
+### Setup
+
+For all the exercises, we suppose that Pinocchio is installed in a
+VirtualBox and that the user ID is "student" (password "student"). The
+VirtualBox contains all the software to install. Alternatively,
+Pinocchio can be installed directly on any Ubuntu 14.04 / 16.04, after
+some work on other Linux distributions or OSX, and likely on Windows (if
+you like to gamble).
+
+Two directories have been created at the root of the homedir. The
+directory "student" is where you should develop your scripts. The
+directory "models" contains the models of the robot that we will use
+during the class.
+
+Pinocchio has been installed using a tool for "installation from
+sources" named robotpkg. The sources of Pinocchio are available in
+~/robotpkg/wip/pinocchio/work.robotics/pinocchio-1.2.2. After
+compilation, the software has been installed in /opt/openrobots. See
+for example the python scripts in /opt/openrobots/lib/python2.7.
+
+### Python
+
+We remind you that you can open a python terminal in your own shell.
+Simply type:
+
+```bash
+student@student-virtualbox:~$ ipython
+>>>
+```
+
+Afterwards you'll just have to type your commands in this newly opened
+terminal.
+
+To close the python terminal, just type CTRL-D (CTRL-C first to
+interrupt any on-going execution).
+
+You can also write your command lines in a file and launch this script
+without entering the interactive mode (i.e. without starting a new python
+terminal). In your shell, just type:
+
+```bash
+student@student-virtualbox:~$ ipython script.py
+```
+
+### Pinocchio
+
+*Basic mathematical objects:*
+
+In the following, we will use the numpy Matrix class to represent matrices
+and vectors. In numpy, vectors simply are matrices with one column. See
+the following example.
+
+```python
+import numpy as np
+A = np.matrix([[1, 2, 3, 4], [5, 6, 7, 8]])  # Define a 2x4 matrix
+b = np.zeros([4, 1])                         # Define a 4 vector (i.e. a 4x1 matrix) initialized with 0
+c = A * b                                    # Obtain c by multiplying A by b
+```
+
+A bunch of useful functions are packaged in the utils of pinocchio.
+
+```python
+from pinocchio.utils import *
+
+eye(6)                      # Return a 6x6 identity matrix
+zero(6)                     # Return a zero 6x1 vector
+zero([6, 4])                # Return a zero 6x4 matrix
+rand(6)                     # Random 6x1 vector
+isapprox(zero(6), rand(6))  # Test epsilon equality
+mprint(rand([6, 6]))        # Matlab-style print
+skew(rand(3))               # Skew "cross-product" 3x3 matrix from a 3x1 vector
+cross(rand(3), rand(3))     # Cross product of R^3
+rotate('x', 0.4)            # Build a rotation matrix of 0.4rad around X
+```
+
+Specific classes are defined to represent objects of SE(3), se(3) and
+se(3)^*. Rigid displacements, elements of SE(3), are represented by
+the class SE3.
+
+```python
+import pinocchio as se3
+R = eye(3)
+p = zero(3)
+M0 = se3.SE3(R, p)
+M = se3.SE3.Random()
+M.translation = p
+M.rotation = R
+```
+
+Spatial velocities, elements of se(3) = M^6, are represented by the
+class Motion.
+
+```python
+v = zero(3)
+w = zero(3)
+nu0 = se3.Motion(v, w)
+nu = se3.Motion.Random()
+nu.linear = v
+nu.angular = w
+```
+
+Spatial forces, elements of se(3)^* = F^6, are represented by the
+class Force.
+
+```python
+f = zero(3)
+tau = zero(3)
+phi0 = se3.Force(f, tau)
+phi = se3.Force.Random()
+phi.linear = f
+phi.angular = tau
+```
+
+Tutorial 1.1. Creating and displaying the robot
+-----------------------------------------------
+
+### Robot kinematic tree
+
+The kinematic tree is represented by two C++ objects called Model (which
+contains the model constants: lengths, masses, names, etc) and Data
+(which contains the working memory used by the model algorithms). Both
+C++ objects are contained in a unique Python class. The first class is
+called RobotWrapper and is generic.
+
+For the next steps, we are going to work with the RobotWrapper.
+
+Import the class RobotWrapper and create an instance of this class in
+the python terminal. At initialization, RobotWrapper will read the model
+description in the URDF file given as argument. In the following, we
+will use the model of the UR5 robot, available in the directory "models"
+of pinocchio (available in the homedir of the VBox).
+
+```python
+from pinocchio.robot_wrapper import RobotWrapper
+
+pkg = '/home/student/models/'
+urdf = pkg + 'ur_description/urdf/ur5_gripper.urdf'
+robot = RobotWrapper(urdf, [pkg])
+```
+
+The code of the RobotWrapper class is in
+/opt/openrobots/lib/python2.7/site-packages/pinocchio/robot_wrapper.py.
+Do not hesitate to have a look at it and to take inspiration from the
+implementation of the class functions.
+
+UR5 is a fixed robot with one 6-DOF arm developed by the Danish company
+Universal Robots. All its 6 joints are revolute joints. Its configuration
+is in R^6 and is not subject to any constraint. The model of UR5 is
+described in a URDF file, with the visuals of the bodies of the robot
+being described as meshes (i.e. polygon soups) using the Collada format
+".dae". Both the URDF and the DAE files are available in the attached
+ZIP archive. Uncompress it in the VirtualBox, for example in the
+directory "/home/student/models".
+
+-   UR5 model and code:
+    [**ur5.zip**](http://homepages.laas.fr/nmansard/teach/supaero2017/ur5.zip)
+
+### Exploring the model
+
+The robot model is available in robot.model. It contains the names of
+all the robot joints [names], the kinematic tree [parents] (i.e. the
+graph of parents, 0 being the root and having no parent), the position
+of each joint in the parent coordinate frame [jointPosition],
+the mass, inertia and center-of-gravity position of all the bodies
+(condensed in a spatial inertia 6x6 matrix) [inertias] and the gravity
+of the associated world [gravity]. All these fields are documented
+and are available in the corresponding class dictionary.
+
+```python
+for name, function in robot.model.__class__.__dict__.items():
+    print " **** ", name, ": ", function.__doc__
+```
+
+Similarly, the robot data are available in robot.data. All the variables
+allocated by the classical rigid-body dynamics algorithms are stored in
+robot.data and are available through the python wrapping. Similarly to
+the model object, the functions are documented and available from the
+class dictionary. The most useful in the following will be the
+placement of the frame associated with each joint output, stored in
+robot.data.oMi.
+
+For example, the robot end effector corresponds to the output of the
+last joint, called wrist_3_joint. The ID of the joint in the joint
+list can be recovered from its name, and then used to access its
+placement:
+
+```python
+# Get index of end effector
+idx = robot.index('wrist_3_joint')
+
+# Compute and get the placement of joint number idx
+placement = robot.position(q, idx)
+# Be careful, Python always returns references to values.
+# You can often .copy() the object to avoid side effects.
+
+# Only get the placement
+placement = robot.data.oMi[idx].copy()
+```
+
+Finally, some recurring data (used in Model and Data) have been wrapped
+as python shortcuts, also available in RobotWrapper:
+
+-   The size of the robot configuration is given by nq.
+-   The dimension of its tangent space (velocity) is nv.
+-   The index of a joint in the tree can be accessed from its name by
+    index (see above).
+-   The classical algorithms are also binded: com, Jcom, mass, biais,
+    joint gravity, position and velocity of each joint.
+
+```python
+q = zero(robot.nq)
+v = rand(robot.nv)
+robot.com(q)          # Compute the robot center of mass
+robot.position(q, 3)  # Compute the placement of joint 3
+```
+
+### Display the robot
+
+To display the robot, we need an external program called Gepetto
+Viewer. If you completed the installation in the previous page, you can
+launch this program by opening a new terminal in an empty workspace:
+
+```bash
+student@student-virtualbox:~$ gepetto-gui
+```
+
+This will start a server waiting for instructions. We will now create a
+client that will ask the server to perform some requests (such as
+creating a window or displaying our robot).
+
+In a python terminal you can now load the visual model of the robot in
+the viewer:
+
+```python
+robot.initDisplay(loadModel=True)
+```
+
+This will flush the robot model inside the GUI. The argument
+"loadModel=True" is mandatory when you start or restart the GUI. In
+later calls to your scripts, you can set the argument to "False". A side
+effect of "=True" is that it will move the viewpoint inside the GUI to
+a reference zero position.
+
+### More details about loading the model (optional)
+
+You can access the visual objects composing the robot model through
+robot.visual_model.geometryObjects.
+
+```python
+visualObj = robot.visual_model.geometryObjects[4]  # 3D object representing the robot forearm
+visualName = visualObj.name                        # Name associated to this object
+visualRef = robot.viewerNodeNames(visualObj)       # Viewer reference (string) representing this object
+```
+
+Moving one object:
+
+```python
+q1 = [1, 1, 1, 1, 0, 0, 0]  # x, y, z, quaternion
+robot.viewer.gui.applyConfiguration(visualRef, q1)
+robot.viewer.gui.refresh()  # Refresh the window
+```
+
+Additional objects can be created, like a sphere as follows.
+
+```python
+rgbt = [1.0, 0.2, 0.2, 1.0]  # red-green-blue-transparency
+robot.viewer.gui.addSphere("world/sphere", .1, rgbt)  # .1 is the radius
+```
+
+The exhaustive list of the objects that can be created is available in
+the IDL of the GUI:
+/opt/openrobots/share/idl/gepetto/corbaserver/graphical-interface.idl
+
+Tutorial 1.2. Simple pick and place
+-----------------------------------
+
+*Objectives:* Display the robot at a given configuration or along a
+given trajectory
+
+### Pick
+
+Say we have a target at position [.5, .1, .2] and we would like the
+robot to grasp it.
+
+```python
+robot.viewer.gui.applyConfiguration("world/sphere", [.5, .1, .2, 1., 0., 0., 0.])
+robot.viewer.gui.refresh()  # Refresh the window
+```
+
+First display a small sphere at this position to visualize it.
+
+Then decide by any means you want a configuration of the robot so that
+the end effector is touching the sphere.
+
+At the reference position you built, the end effector placement can be
+obtained by robot.position(q, 6). Only the translation part of the
+placement is considered; the rotation is free.
+
+\[Optional\] Say now that the object is a rectangle and not a sphere.
+Pick the object at a reference position with the rotation that is
+imposed, so that the end effector is aligned with one of the faces of
+the rectangle.
+
+### Place
+
+Choose any trajectory you want in the configuration space, starting from
+the reference position built in the previous exercise (it can be
+sine-cosine waves, polynomials, splines, straight lines).
+
+Make a for loop to display the robot at sampling positions along this
+trajectory. The function sleep in module time (from time import sleep)
+can be used to slow down the loop.
+
+At each instant of your loop, recompute the position of the ball and
+display it so that it always "sticks" to the robot end effector.
+
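+A minimal sketch of such a loop is given below. It assumes the robot, the
+"world/sphere" object created above and a reference configuration q0 built
+in the Pick section; the sinusoidal joint trajectory is an arbitrary
+choice, not part of the exercise statement.
+
+```python
+import numpy as np
+from time import sleep
+
+idx = robot.index('wrist_3_joint')
+for t in np.arange(0, 10, 0.05):
+    q = q0 + 0.3 * np.sin(t) * np.ones([robot.nq, 1])   # any trajectory starting from q0
+    robot.display(q)
+    p = robot.position(q, idx).translation               # end-effector position
+    robot.viewer.gui.applyConfiguration("world/sphere",
+                                        [p[0, 0], p[1, 0], p[2, 0], 1., 0., 0., 0.])
+    robot.viewer.gui.refresh()
+    sleep(0.05)
+```
+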
+////
+Homework
+--------
+
+Send an email to nmansard@laas.fr containing a single python file. The
+subject of the email should start with [SUPAERO] TP1. When executed,
+the script should place the robot at the initial starting position where
+the end effector touches the sphere (optionally the rectangle) and move
+the robot with the sphere attached to the end effector.
+////
diff --git a/doc/d-labs/2-invgeom.md b/doc/d-labs/2-invgeom.md
new file mode 100644
index 000000000..dfc4dd783
--- /dev/null
+++ b/doc/d-labs/2-invgeom.md
@@ -0,0 +1,270 @@
+2. Grasp an object (aka inverse Geometry)
+=========================================
+
+Objectives
+----------
+
+The main objective of this tutorial is to compute a configuration
+of the robot minimizing a cost (maximizing a reward) and respecting some
+given constraints. Additionally, the objective is to get a first hands-on
+experience of the difficulties of working outside of real vector spaces,
+and to consider what initial guesses are taken by an optimization solver.
+
+Tutorial 2.0. Technical prerequisites
+-------------------------------------
+
+### Python SciPy and MatplotLib
+
+You will need the two Python libraries SciPy (scientific Python) and
+MatPlotLib (plot mathematical data).
+
+SciPy can be installed by "sudo apt-get install python-scipy". It is
+already installed if you are using the VirtualBox. Examples of calls to
+these two libraries are given below. We will use both solvers with
+numerical (finite-difference) differentiation, to avoid the extra work
+of differentiating the cost and constraint functions by hand. In general,
+it is strongly advised to first test a numerical program with finite
+differences, before implementing the true derivatives only if needed.
+In any case, the true derivatives must always be checked by comparing
+the results with the finite differences.
+
+Additionally, the provided implementation of BFGS allows the user to
+provide a callback function and track the path taken by the solver, but
+does not provide the possibility to specify constraints (constraints can
+be added as penalty functions in the cost, but this requires additional
+work). The constrained least-square implementation allows the user to
+specify equality and inequality constraints, but not the callback. In
+the following, start with BFGS and move to the constrained
+least-square only when constraints are really needed.
+
+```python
+# Example of use of the optimization toolbox of SciPy.
+import numpy as np
+from scipy.optimize import fmin_bfgs, fmin_slsqp
+
+def cost(x):
+    '''Cost f(x,y) = x^2 + 2y^2 - 2xy - 2x '''
+    x0 = x[0]
+    x1 = x[1]
+    return -1 * (2 * x0 * x1 + 2 * x0 - x0 ** 2 - 2 * x1 ** 2)
+
+def constraint_eq(x):
+    '''Constraint x^3 = y'''
+    return np.array([x[0] ** 3 - x[1]])
+
+def constraint_ineq(x):
+    '''Constraint x >= 2, y >= 2'''
+    return np.array([x[0] - 2, x[1] - 2])
+
+class CallbackLogger:
+    def __init__(self):
+        self.nfeval = 1
+    def __call__(self, x):
+        print '===CBK=== {0:4d} {1: 3.6f} {2: 3.6f} {3: 3.6f}'.format(self.nfeval, x[0], x[1], cost(x))
+        self.nfeval += 1
+
+x0 = np.array([0.0, 0.0])
+# Optimize cost without any constraints in BFGS, with traces.
+xopt_bfgs = fmin_bfgs(cost, x0, callback=CallbackLogger())
+print '\n *** Xopt in BFGS = ', xopt_bfgs, '\n\n\n\n'
+
+# Optimize cost without any constraints in CLSQ
+xopt_lsq = fmin_slsqp(cost, [-1.0, 1.0], iprint=2, full_output=1)
+print '\n *** Xopt in LSQ = ', xopt_lsq, '\n\n\n\n'
+
+# Optimize cost with equality and inequality constraints in CLSQ
+xopt_clsq = fmin_slsqp(cost, [-1.0, 1.0], f_eqcons=constraint_eq,
+                       f_ieqcons=constraint_ineq, iprint=2, full_output=1)
+print '\n *** Xopt in c-lsq = ', xopt_clsq, '\n\n\n\n'
+```
+
+Take care that SciPy always works with vectors represented as
+1-dimensional arrays, while Pinocchio works with vectors represented as
+matrices (which are in fact two-dimensional arrays, with the second
+dimension being 1). You can pass from a SciPy-like vector to a
+Pinocchio-like vector using:
+
+```python
+import numpy as np
+x = np.array([1.0, 2.0, 3.0])
+q = np.matrix(x).T
+x = q.getA()[:, 0]
+```
+
+The second library, MatPlotLib, plots values on a 2D graph. Once more,
+documentation is available here:
+https://www.labri.fr/perso/nrougier/teaching/matplotlib/ An example is
+provided below.
+
+```python
+import numpy as np
+import matplotlib.pyplot as plt
+# In plt, the following functions are the most useful:
+# ion, plot, draw, show, subplot, figure, title, savefig
+
+# For use in interactive python mode (ipython -i)
+interactivePlot = False
+
+if interactivePlot:
+    plt.ion()  # Plot functions now instantaneously display, shell is not blocked
+
+# Build numpy array for x axis
+x = 1e-3 * np.array(range(100))
+# Build numpy array for y axis
+y = x ** 2
+
+fig = plt.figure()
+ax = fig.add_subplot('111')
+ax.plot(x, y)
+ax.legend(("x^2",))
+
+if not interactivePlot:
+    # Display all the plots and block the shell.
+    # The script will only end when all windows are closed.
+    plt.show()
+```
+
+### Robots
+
+We mostly use here the UR5 model, used in the first tutorial. Refer to
+the instructions of Tutorial 1 to load it.
+
+Optionally, we might want to use a more complex robot model. Romeo is a
+humanoid robot developed by the French-Japanese company Aldebaran
+Robotics. It has two legs, two arms and a head, for a total of 31 joints
+(plus 6 DOF on the free flyer). Its model is natively in Pinocchio (no
+additional download needed). Romeo can be loaded with:
+
+```python
+import pinocchio as se3
+from pinocchio.romeo_wrapper import RomeoWrapper
+
+path = '/home/nmansard/src/pinocchio/pinocchio/models/romeo/'
+urdf = path + 'urdf/romeo.urdf'
+# The wrapper explicitly specifies that the first joint is a free flyer.
+robot = RomeoWrapper(urdf, [path])  # Load urdf model
+robot.initDisplay(loadModel=True)
+```
+
+Additionally, the indexes of the right and left hands and feet are
+stored in robot.rh, robot.lh, robot.rf and robot.lf.
+
+Tutorial 2.1. Position the end effector
+---------------------------------------
+
+The first tutorial is to position (i.e. translation only) the end
+effector of a manipulator robot to a given position. For this first
+part, we will use the fixed serial-chain robot model.
+
+Recall first that the position (3D) of the joint with index "i" at
+configuration "q" can be accessed by the following two lines of code:
+
+```python
+# Compute all joint placements and put the position of joint "i" in variable "p".
+import pinocchio as se3
+se3.forwardKinematics(robot.model, robot.data, q)
+p = robot.data.oMi[i].translation
+```
+
+*Question 1* Using this, build a cost function computing the norm of the
+difference between the end-effector position p and a desired position
+pdes. The cost function is a function that accepts as input a
+1-dimensional array and returns a float, as in the sketch below.
+
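+A minimal sketch of such a cost, assuming that the robot is loaded as in
+Tutorial 1 and that pdes is a 3x1 numpy matrix (joint index 6 is used for
+the end effector, as above):
+
+```python
+import numpy as np
+import pinocchio as se3
+
+def cost(x):
+    '''Squared norm of the end-effector position error for the configuration x (1D array).'''
+    q = np.matrix(x).T                                 # SciPy-like vector to Pinocchio-like vector
+    se3.forwardKinematics(robot.model, robot.data, q)
+    p = robot.data.oMi[6].translation
+    return np.linalg.norm(p - pdes) ** 2
+```
+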
+*Question 2* Then use fmin_bfgs to find a configuration q with the
+end effector at position pdes.
+
+*Question 3* Finally, implement a callback function that displays in
+Gepetto-Viewer every candidate configuration tried by the solver.
+
+Tutorial 2.2. Approaching the redundancy (optional)
+----------------------------------------------------
+
+The manipulator arm has 6 DOF, while the cost function only constrains
+3 of them (the position of the end effector). A continuum of solutions
+then exists. The next two questions aim at giving an intuition of
+this continuum.
+
+*Question 4* Sample several configurations respecting pdes by giving
+various initial guesses to the solver. Store this sampling of solutions
+in a list, then display this list in Gepetto-Viewer, each configuration
+being displayed for 1 second (a pause of 1 second can be obtained
+using: import time; time.sleep(1.0)).
+
+A configuration in this continuum can then be selected with particular
+properties, like for example being the closest to a reference
+configuration, or using some joints more than the others, or any other
+criterion that you can imagine.
+
+*Question 5* Add a secondary cost term to the first positioning cost, to
+select the posture that maximizes the similarity (minimizes the norm of
+the difference) to a reference posture. The relative importance of the
+two cost terms can be adjusted by weighting the sum: find a weight such
+that the reference position is obtained with a negligible error (below a
+millimeter) while the posture is properly taken into account.
+
+Tutorial 2.3. Placing the end-effector
+--------------------------------------
+
+The next step is to find a configuration of the robot so that the end
+effector respects a reference placement, i.e. position and orientation.
+The stake is to find a metric in SE(3) to continuously quantify the
+distance between two placements. There is no canonical metric in SE(3),
+i.e. no absolute way of weighting the position with respect to the
+orientation. Two metrics can be considered, namely the log in SE(3) or
+in R^3 x SO(3). The tutorial will guide you through the first choice.
+
+The SE(3) and SO(3) logarithms are implemented in Pinocchio in the explog
+module.
+
+```python
+from pinocchio.explog import log
+from pinocchio import SE3
+nu = log(SE3.Random())
+nu_vec = nu.vector
+```
+
+*Question 6* Solve for the configuration that minimizes the norm of the
+logarithm of the difference between the end effector placement and the
+desired placement.
+
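+A possible sketch of the corresponding cost, assuming a desired placement
+Mdes (an SE3 object) and the same joint index as before:
+
+```python
+import numpy as np
+import pinocchio as se3
+from pinocchio.explog import log
+
+def cost(x):
+    '''Squared norm of the log of the placement error.'''
+    q = np.matrix(x).T
+    se3.forwardKinematics(robot.model, robot.data, q)
+    M = robot.data.oMi[6]
+    return np.linalg.norm(log(M.inverse() * Mdes).vector) ** 2
+```
+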
+Optionally, try other metrics, like the log metric of R^3 x SO(3), or
+the Frobenius norm of the homogeneous matrix.
+
+Tutorial 2.4. Working with a mobile robot (optional)
+-----------------------------------------------------
+
+Until now, the tutorial only worked with a simple manipulator robot,
+i.e. whose configuration space is a real vector space. Consider now the
+humanoid robot, whose first joint is a free joint: it has 6 degrees of
+freedom (3 rotations, 3 translations) but its configuration vector has
+dimension 7. You can check it with robot.model.nq, which stores the
+dimension of the configuration, and robot.model.nv, which stores the
+dimension of the configuration velocity, i.e. the number of degrees of
+freedom. For the humanoid, nq = nv+1.
+
+The configuration coefficients 3 to 7 indeed represent a
+quaternion. The additional constraint is that these 4 coefficients must
+be normalized.
+
+*Question 7* Display a configuration of the robot for which the norm of
+the quaternion is bigger than one (e.g. 2.0). What happens?
+
+During the search, the solver must respect this constraint. A solution
+is to make this constraint explicit in the numerical program. However,
+we will start with an easier quick-and-dirty trick. With quaternions, the
+trick is simply to normalize any invalid quaternion. In the cost
+function, first normalize the quaternion before computing the cost due
+to the end-effector placement. An additional term should also be added
+to the cost function to avoid excessive drift of the quaternion norm, in
+particular with the norm going to 0.
+
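+A possible sketch of this trick inside the cost function, assuming the
+humanoid model loaded above, a desired placement Mdes for the right hand,
+and the free-flyer quaternion stored in coefficients 3 to 6 of q:
+
+```python
+import numpy as np
+import pinocchio as se3
+from pinocchio.explog import log
+
+def cost(x):
+    q = np.matrix(x).T
+    quat_norm = np.linalg.norm(q[3:7])
+    q[3:7] /= quat_norm                      # normalize the quaternion before evaluating
+    se3.forwardKinematics(robot.model, robot.data, q)
+    M = robot.data.oMi[robot.rh]             # right-hand placement
+    err = np.linalg.norm(log(M.inverse() * Mdes).vector) ** 2
+    return err + (quat_norm - 1.) ** 2       # penalize drift of the quaternion norm
+```
+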
+*Question 8* Use fmin_bfgs to compute a configuration respecting a
+given placement with the humanoid model, by normalizing the quaternion
+at each step.
+
+*Question 9* (harder) Do the same with the C-LSQ solver fmin_slsqp,
+with the explicit constraint that the norm of the quaternion must be 1.
+
+Tutorial 2.5. Configuration of a parallel robot
+-----------------------------------------------
+
+A parallel robot is composed of several kinematic chains (called the
+robot legs) that are all attached to the same end effector. This imposes
+strict constraints on the configuration space of the robot: a
+configuration is valid iff all the legs meet the same end-effector
+placement. We consider here only the geometry aspect of parallel robots
+(additionally, some joints are not actuated, which causes additional
+problems).
+
+The kinematic structure of a parallel robot indeed induces loops in the
+joint connection graph. In Pinocchio, we can only represent (one of)
+the underlying kinematic trees. The loop constraints have to be handled
+separately. An example that loads 4 manipulator arms is
+[available here](tp2_ur5x4_py.html). Each leg i (for i=0,1,2,3)
+of the robot is loaded in the list robots[i]. The loop constraints are
+that the relative placement of every leg end-effector must stay the same
+as in the initial configuration given as example in the above file.
+
+*Question 10* Consider now that the orientation of the tool plate is
+given by the quaternion Quaternion(0.7,0.2,0.2,0.6), with the
+translation that you like. Using the above optimization routines, find
+the configuration of each robot leg so that the loop constraints are all
+met.
+
+////
+Homework
+--------
+
+Send an email to nmansard@laas.fr containing a single python file. The
+subject of the email should start with [SUPAERO] TP2. When executed,
+the script should place the parallel robot tool plate at the reference
+placement (whose rotation is Quaternion(0.7,0.2,0.2,0.6)) and then move
+the legs to meet the loop constraints.
+////
diff --git a/doc/d-labs/3-invkine.md b/doc/d-labs/3-invkine.md
new file mode 100644
index 000000000..1891d17fb
--- /dev/null
+++ b/doc/d-labs/3-invkine.md
@@ -0,0 +1,139 @@
+3. Drag and Drop (aka Inverse kinematics)
+=========================================
+
+Objectives
+----------
+
+The main objective of the tutorial is to perform one or several tasks by
+inverse kinematics, i.e. by iteratively pseudo-inverting a jacobian until
+convergence of the task error.
+
+Tutorial 3.0. Technical prerequisites
+-------------------------------------
+
+### Robots
+
+We are going to use again the UR5 robot model, however this time mounted
+as a mobile robot. The source code of the mobile robot is available
+[here](tp3_mobilerobot_py.html). The robot has 3+6 DOF and can
+move (2 translations + 1 rotation) freely on the plane. Two operational
+frames have been defined: at the front of the basis, and at the tip of
+the tool. They are displayed when the robot moves.
+
+An example of how to use the robot is given below.
+
+```python
+import pinocchio as se3
+from mobilerobot import MobileRobotWrapper
+from pinocchio.utils import *
+
+pkg = '/home/student/models/'
+urdf = pkg + 'ur_description/urdf/ur5_gripper.urdf'
+
+robot = MobileRobotWrapper(urdf, [pkg])
+robot.initDisplay(loadModel=True)
+
+robot.viewer.gui.addFloor('world/floor')
+
+NQ = robot.model.nq
+NV = robot.model.nv
+
+q = rand(NQ)
+robot.display(q)
+
+IDX_TOOL = 24
+IDX_BASIS = 23
+
+se3.framesKinematics(robot.model, robot.data)
+Mtool = robot.data.oMf[IDX_TOOL]
+Mbasis = robot.data.oMf[IDX_BASIS]
+```
+
+Tutorial 3.1. Position the end effector
+---------------------------------------
+
+The first task will be concerned with the end effector. First define a
+goal placement.
+
+```python
+from mobilerobot import M2v
+
+def place(name, M):
+    robot.viewer.gui.applyConfiguration(name, M2v(M))
+    robot.viewer.gui.refresh()
+
+def Rquat(x, y, z, w):
+    q = se3.Quaternion(x, y, z, w)
+    q.normalize()
+    return q.matrix()
+
+Mgoal = se3.SE3(Rquat(0.4, 0.02, -0.5, 0.7), np.matrix([.2, -.4, .7]).T)
+robot.viewer.gui.addXYZaxis('world/framegoal', [1., 0., 0., 1.], .015, 4)
+place('world/framegoal', Mgoal)
+```
+
+The current placement of the tool at configuration q is available as
+follows:
+
+```python
+IDX_TOOL = 24
+se3.forwardKinematics(robot.model, robot.data, q)  # Compute joint placements
+se3.framesKinematics(robot.model, robot.data)      # Also compute operational frame placements
+Mtool = robot.data.oMf[IDX_TOOL]                   # Get placement from world frame o to frame f, oMf
+```
+
+The desired velocity of the tool in tool frame is given by the log:
+
+```python
+nu = se3.log(Mtool.inverse() * Mgoal).vector
+```
+
+The tool Jacobian, also in tool frame, is available as follows:
+
+```python
+J = se3.frameJacobian(robot.model, robot.data, q, IDX_TOOL, True, True)
+```
+
+The pseudoinverse operator is available in the numpy.linalg toolbox.
+
+```python
+from numpy.linalg import pinv
+```
+
+The integration of a joint velocity vq from configuration q can be done
+directly (q += vq * dt). More generically, the se3 method integrate can
+be used:
+
+```python
+q = se3.integrate(robot.model, q, vq * dt)
+```
+
+*Question 1* Implement a for-loop that computes the jacobian and the
+desired velocity in tool frame, and deduces the joint velocity using the
+pseudoinverse. At each iteration, also integrate the current velocity
+and display the robot configuration, as in the sketch below.
+
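+A minimal sketch of such a loop, using only the calls introduced above
+(the time step DT and the number of iterations are arbitrary choices):
+
+```python
+from numpy.linalg import pinv
+
+DT = 1e-2
+for i in range(1000):
+    se3.forwardKinematics(robot.model, robot.data, q)
+    se3.framesKinematics(robot.model, robot.data)
+    Mtool = robot.data.oMf[IDX_TOOL]
+    nu = se3.log(Mtool.inverse() * Mgoal).vector                        # desired tool velocity, tool frame
+    J = se3.frameJacobian(robot.model, robot.data, q, IDX_TOOL, True, True)
+    vq = pinv(J) * nu                                                   # least-squares joint velocity
+    q = se3.integrate(robot.model, q, vq * DT)
+    robot.display(q)
+```
+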
+Tutorial 3.2. Position the basis on a line
+------------------------------------------
+
+A line displaying "x=0" is also shown in Gepetto viewer. The next step
+is to servo the front of the basis on this line.
+
+Similarly, the distance of the basis frame to the line, with the
+corresponding jacobian, are:
+
+```python
+error = Mbasis.translation[0]
+J = se3.frameJacobian(robot.model, robot.data, q, IDX_BASIS, False, True)[0, :]
+```
+
+Implement a second loop to servo the basis on the line. It becomes
+interesting when both tasks are performed together. We can do that
+simply by stacking both tasks. For that, the numpy method vstack can be
+used to make a single error vector stacking the errors of the tool and
+basis tasks, and similarly for the jacobians.
+
+```python
+nu = np.vstack([nu1, nu2])
+J = np.vstack([J1, J2])
+```
+
+However, it is stronger to move the basis only in the null space of the
+tool task. The null space projector of J1 can be computed using the
+pseudoinverse. The control law performing task 1, and task 2 in
+the null space of task 1, is:
+
+$vq_1 = J_1^+ v_1^*$
+
+$P_1 = I_9 - J_1^+ J_1$
+
+$vq_2 = vq_1 + (J_2 P_1)^+ ( v_2^* - J_2 vq_1)$
+
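+A possible sketch of this hierarchy, assuming nu1, J1 (tool task) and
+nu2, J2 (basis task) have been computed as above at the current
+configuration:
+
+```python
+import numpy as np
+from numpy.linalg import pinv
+
+vq1 = pinv(J1) * nu1                              # solve task 1
+P1 = np.eye(robot.model.nv) - pinv(J1) * J1       # null-space projector of task 1
+vq = vq1 + pinv(J2 * P1) * (nu2 - J2 * vq1)       # solve task 2 in the null space of task 1
+```
+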
+*Question 2* Implement two loops: the first one regulates the tool
+placement alone. When the tool is properly placed, the second one regulates
+the tool placement and the basis position in the null space of the tool.
+
+////
+Homework
+--------
+
+Send an email to nmansard@laas.fr containing a single python file. The
+subject of the email should start with [SUPAERO] TP3. When executed,
+the script should execute question 2.
+////
diff --git a/doc/d-labs/4-dyn.md b/doc/d-labs/4-dyn.md
new file mode 100644
index 000000000..b9d1f1f77
--- /dev/null
+++ b/doc/d-labs/4-dyn.md
@@ -0,0 +1,224 @@
+4. Snap your fingers (aka direct and inverse dynamics)
+======================================================
+
+Objectives
+----------
+
+The main objective of the tutorial is to implement a simple torque
+control inside a home-made contact simulator.
+
+Tutorial 4.0. Technical prerequisites
+-------------------------------------
+
+### Robots
+
+We are going to use a 4-finger hand, whose model is defined in Python
+(no urdf model) using capsule volumes. The code of the robot is
+available in the [ZIP archive](hand_robot.zip). It contains a
+display class wrapping the Gepetto-viewer client, a robot class
+implementing the robot hand and a simple example hand_example.py.
+
+```python
+from robot_hand import Robot
+
+robot = Robot()
+robot.display(robot.q0)
+```
+
+Take care that the hand is small: zoom in to see it in the window (or
+press the space bar).
+
+### Solver
+
+We will need a proper QP solver handling inequalities. QuadProg is a Python
+wrapper of a nice Goldfarb-Idnani solver. Install it with pip:
+
+```bash
+pip install quadprog
+```
+
+QuadProg's main function is solve_qp. You can get a bit of documentation
+using the Python help command help(solve_qp). A simple example
+follows:
+
+```python
+from quadprog import solve_qp
+
+# Solve min_x 1/2 x^T H x - g^T x  s.t.  C^T x >= d
+x, _, _, _, _, _ = solve_qp(H, g, C, d)
+```
+
+A more complete example is also in the archive given above.
+
+Tutorial 4.1. Direct dynamics
+-----------------------------
+
+Choosing an arbitrary joint torque tauq, we now compute the robot
+acceleration and integrate it.
+
+The dynamic equation of the robot is M aq + b = tauq, with M the mass
+matrix, aq the joint acceleration and b the drift. The mass matrix can be
+computed using the CRBA algorithm (function of q). The drift is computed
+using the RNEA algorithm (function of q, vq and aq with aq=0).
+
+```python
+import pinocchio as se3
+q = rand(robot.model.nq)
+vq = rand(robot.model.nv)
+aq0 = zero(robot.model.nv)
+b = se3.rnea(robot.model, robot.data, q, vq, aq0)  # compute dynamic drift -- Coriolis, centrifugal, gravity
+M = se3.crba(robot.model, robot.data, q)           # compute mass matrix M
+```
+
+These terms correspond to the inverse dynamics. They can be numerically
+inverted to compute the direct dynamics.
+
+*Question 1* Using M and b computed by the above algorithms, and knowing
+a given set of joint torques tauq, compute aq so that M aq + b = tauq.
+
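+A minimal sketch, directly inverting the equation above (tauq is assumed
+to be given as an nv x 1 matrix):
+
+```python
+import numpy as np
+
+aq = np.linalg.inv(M) * (tauq - b)   # aq such that M * aq + b = tauq
+```
+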
+Once aq has been computed, it is straightforward to integrate it to the
+velocity using vq += aq * dt. Integration to the joint position is more
+complex in general. It is implemented in pinocchio:
+
+```python
+q = se3.integrate(robot.model, q, vq * dt)
+```
+
+In the particular case of only simple joints (like the robot hand), the
+direct integration q += vq * dt also holds.
+
+*Question 2* Implement the simulation of the robot hand moving freely
+with constant (possibly 0) torques. Implement a variation where the
+torques are only joint friction (tauq = -Kf vq at each iteration).
+
+Tutorial 4.2. PD and computed torques
+-------------------------------------
+
+Now choose a reference joint position (possibly time varying, like in
+the hand example). The joint torques can then be computed to track the
+desired position, with tauq = -Kp (q-qdes) - Kv vq. Both gains Kp and Kv
+should be properly chosen. Optimal tracking is obtained with Kv = 2
+sqrt(Kp). In general, a desired velocity is also tracked to avoid
+tracking errors.
+
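+A minimal sketch of this control law at one simulation step (the gain
+values are indicative only):
+
+```python
+from numpy import sqrt
+
+Kp = 50.                                 # proportional gain
+Kv = 2 * sqrt(Kp)                        # derivative gain, critical damping
+tauq = -Kp * (q - qdes) - Kv * vq        # PD torque, qdes is the reference posture
+```
+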
+*Question 3* Implement and simulate a PD controller: compute the torques
+from a PD law, then integrate them using the simulator of question 2.
+
+Here, there is a strong coupling between joints, due to the mass matrix
+that is not compensated in the simple PD law. In theory, the computed
+torque control computes the joint torques by inverse dynamics from a
+reference joint acceleration. This boils down to canceling the
+simulation equation by choosing the proper terms in the control law. It
+is not very interesting to implement in case of perfect dynamics
+knowledge. It might be more interesting to study the case where the
+simulation is done with the perfect M, while the control is computed with
+an approximate M (for example, using only the diagonal terms of the mass
+matrix). Let's rather simulate contact.
+
+Tutorial 4.3. Collision checking
+--------------------------------
+
+The robot hand is composed of capsules, i.e. level-sets of constant
+distance to a segment. Collision checking and distances are then easy to
+implement. The source code of the collision checking is available in
+robot_hand.py. Pinocchio also implements a complete and efficient
+collision checking based on FCL, but it is not used in this tutorial.
+
+Collision checks are done for a set of collision pairs that must be
+specified to the robot. The collision checking method indeed computes the
+distance between the two objects, along with the so-called witness
+points. A method can also be used to display them.
+
+```python
+from robot_hand import Robot
+robot = Robot()
+robot.display(robot.q0)
+
+# Create 10 witness points in the rendering window
+for i in range(10):
+    robot.viewer.viewer.gui.addCylinder('world/wa' + str(i), .01, .003, [1.0, 0, 0, 1])
+    robot.viewer.viewer.gui.addCylinder('world/wb' + str(i), .01, .003, [1.0, 0, 0, 1])
+    robot.viewer.viewer.gui.setVisibility('world/wa' + str(i), 'OFF')
+    robot.viewer.viewer.gui.setVisibility('world/wb' + str(i), 'OFF')
+
+# Add 4 pairs between finger tips and palm
+robot.collisionPairs.append([2, 8])
+robot.collisionPairs.append([2, 11])
+robot.collisionPairs.append([2, 14])
+robot.collisionPairs.append([2, 16])
+
+# Compute distance between objects 2 and 8, i.e. the first collision pair
+idx = 0
+dist = robot.checkCollision(idx)
+
+# Display the collision pair by adding two disks at the witness points
+robot.displayCollision(idx, 0)
+```
+
+The Jacobian of the corresponding pair can be computed using the
+collisionJacobian method:
+
+```python
+J = robot.collisionJacobian(idx, q)
+```
+
+The jacobian is a 1xN matrix (row matrix) corresponding to the contact
+normal. Take care that some information is stored in the visual objects
+when calling checkCollision, and is later used by collisionJacobian. You
+have to call collisionJacobian right after checkCollision, or the
+resulting jacobian might not be coherent.
+
+For all collision pairs in contact (distance below 1e-3), the Jacobian
+must be collected and stacked in a single J matrix (which has as many
+rows as active constraints). Similarly, distances must be stacked in a
+vector (same number of rows as the jacobian).
+
+Now, the joint acceleration is constrained by the contact constraint. It
+can be written as a minimization problem using Gauss principle:
+
+$\min \quad \frac{1}{2} (\ddot{q} - \ddot{q}_0 )^T M (\ddot{q} - \ddot{q}_0 )$
+
+$s.t. \quad J \ddot{q} \geq 0$
+
+where qddot_0 is the free acceleration, i.e. the acceleration obtained
+in Question 2 where no constraint is active.
+
+In theory, the acceleration should be above the "centrifugal"
+acceleration (i.e. the acceleration caused by joint velocity only, often
+written Jdot qdot) but we neglect it here.
+
+In case of penetration or negative velocity, having only a positive
+acceleration is not enough. A "trick" is often to require the contact
+acceleration to be above a value proportional to the penetration
+distance: J qddot >= -dist, with dist the vector of stacked
+distances.
+
+*Question 4* Implement a contact simulator using QuadProg, the results
+of Question 2 and the jacobian matrix of constraints whose distance is
+below 1e-3.
+
+A better solution to avoid penetration is to implement an impact model.
+The simplest one is the inelastic impact, where normal velocity is
+simply canceled at impact. For that, remember inactive contact (i.e.
+those that were not in collision at previous simulation step). When a
+collision pair is detected that was not previously active, project the
+current velocity on the null space of all contacts:
+
+$\dot q = \dot q - J^+ J \dot q$
+
+*Question 5* The complete loop should be as follows: tauq is computed
+from a PD tracking a time-varying joint position (question 3). After
+computing tauq, all collision pairs must be checked to find those with
+a distance below 1e-3. The corresponding Jacobians must be computed and
+stacked. If a new collision has appeared, the joint velocity must be
+projected to nullify it. If no collision is active, the joint
+acceleration is computed by inverting the mass matrix (question 2).
+Otherwise, it is computed using QuadProg (question 4). The resulting
+acceleration is integrated twice (question 1) before displaying the
+robot and starting a new simulation iteration.
+
+Homework
+--------
+
+Send an email to nmansard@laas.fr containing a single python file. The
+subject of the email should start with [SUPAERO] TP4. When executed,
+the script should execute question 5.
diff --git a/doc/d-labs/5-planner.md b/doc/d-labs/5-planner.md
new file mode 100644
index 000000000..97d85f599
--- /dev/null
+++ b/doc/d-labs/5-planner.md
@@ -0,0 +1,199 @@
+5. Look ahead (aka motion planning)
+===================================
+
+Objective
+---------
+
+The objective of this work is to introduce some key algorithms of motion
+planning: collision checking, probabilistic roadmaps, visibility PRM,
+A\* (a-star), random shortcut.
+
+Tutorial 5.0: prerequisites
+---------------------------
+
+*Prerequisite \#1* A robot model with simple (static actuated) dynamics,
+like a UR5 manipulator robot. See Tutorial 1 for loading the UR5 robot.
+
+*Prerequisite \#2* A collision checking library, provided by Pinocchio.
+
+Pinocchio provides collision checking for a large class of 3D objects
+(sphere, capsule, box, triangle soup) using the FCL library. Any 3D object
+of FCL can be loaded using the C++ interface or the URDF model.
+However, only capsules can yet be added through the Python API, and
+only URDF-loaded meshes and Python-loaded capsules can be easily
+connected with Gepetto viewer (help is welcome to correct this, it would
+require about 4 hours of work).
+
+An example of loading a capsule in Pinocchio and Gepetto viewer is
+below:
+
+```python
+obs = se3.GeometryObject.CreateCapsule(rad, length)  # Pinocchio obstacle object
+obs.name = "obs"                                     # Set object name
+obs.parentJoint = 0                                  # Set object parent = 0 = universe
+obs.placement = se3.SE3(rotate('x', 0.1) * rotate('y', 0.1) * rotate('z', 0.1),
+                        np.matrix([0.1] * 3).T)      # Set object placement wrt parent
+robot.collision_model.addGeometryObject(obs, robot.model, False)  # Add object to collision model
+robot.visual_model.addGeometryObject(obs, robot.model, False)     # Add object to visual model
+# Also create a geometric object in gepetto viewer, with matching name.
+robot.viewer.gui.addCapsule("world/pinocchio/" + obs.name, rad, length, [1.0, 0.2, 0.2, 1.0])
+```
+
+The URDF specification does not allow defining which collision pairs
+should be tested. By default, Pinocchio does not load any collision
+pair. A simple strategy is to add all pairs but, often, some meshes of
+the models induce spurious collisions. Then remove them manually by
+testing valid configurations. To be clean, you can store the valid
+collision pairs in an SRDF file. For UR5:
+
+```python
+robot.collision_model.addAllCollisionPairs()
+for idx in [56, 35, 23]:
+    robot.collision_model.removeCollisionPair(robot.collision_model.collisionPairs[idx])
+```
+
+Collision checks are done through the following algorithms:
+
+```python
+se3.updateGeometryPlacements(robot.model, robot.data, robot.collision_model, robot.collision_data, q)
+se3.computeCollision(robot.collision_model, robot.collision_data, pairId)
+se3.computeCollisions(robot.collision_model, robot.collision_data, False)  # last arg to stop early
+```
+
+Both collision algorithms require a preliminary update of the placements
+and return True if the configuration is in collision (False otherwise).
+
+Tutorial 5.1: Testing collision
+-------------------------------
+
+We need to define a simple function to check whether a configuration is
+respecting the robot constraints (joint limits and collision, plus any
+other inequality-defined constraints you might want).
+
+*Question 1* Implement a function check taking a configuration q as
+argument and returning True if and only if q is acceptable -- The solution
+only uses the 2 collision algorithms of Pinocchio listed above and
+standard python otherwise. A possible sketch is given below.
+
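+A minimal sketch, assuming the joint limits are stored in
+robot.model.lowerPositionLimit and robot.model.upperPositionLimit:
+
+```python
+def check(q):
+    '''Return True iff q respects the joint limits and is collision free.'''
+    if (q < robot.model.lowerPositionLimit).any() or (q > robot.model.upperPositionLimit).any():
+        return False
+    se3.updateGeometryPlacements(robot.model, robot.data,
+                                 robot.collision_model, robot.collision_data, q)
+    return not se3.computeCollisions(robot.collision_model, robot.collision_data, False)
+```
+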
+Tutorial 5.2: Steering method
+-----------------------------
+
+We need to define a local controller, aka a steering method, to define
+the behavior of the robot when it tries to connect two configurations
+together. Here we will simply use linear interpolation. More complex
+controllers (like optimal trajectories) might be preferred if more
+knowledge about the system is available.
+
+In the meantime, we will also need a method to check whether a local
+path is collision free. We will do that by simply regularly sampling the
+path with a given step length. Continuous collision detection might be
+implemented for better and safer performance.
+
+Here we propose to implement the steering method and the path validation
+in a single connect method. A more versatile implementation would be
+obtained by defining two different functions.
+
+*Question 2* Implement a connect function that takes as arguments an
+initial configuration q1 and a final configuration q2, and returns True
+if it is possible to connect both using linear interpolation while
+avoiding collision. Optionally, the function should also return the
+sampling of the path as a list of intermediate configurations -- The
+solution does not need any new Pinocchio calls. A possible sketch is
+given below.
+
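+A possible sketch, reusing the check function of the previous question
+(the sampling step is an arbitrary choice):
+
+```python
+import numpy as np
+
+def connect(q1, q2, step=0.1):
+    '''Return (True, path) if the linear interpolation from q1 to q2 is collision free.'''
+    n = max(2, int(np.linalg.norm(q2 - q1) / step))
+    path = []
+    for t in np.linspace(0., 1., n):
+        q = q1 * (1 - t) + q2 * t
+        if not check(q):
+            return False, []
+        path.append(q)
+    return True, path
+```
+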
+Tutorial 5.3: Nearest neighbors
+-------------------------------
+
+Finally, we need a k-nearest-neighbors algorithm.
+
+*Question 3* Implement a function nearestNeighbors that takes as
+arguments a new configuration q, a list of candidates qs, and the number
+of requested neighbors k, and returns the list of the k nearest
+neighbors of q in qs. Optionally, a distance function that scores how
+close a configuration q1 is to a configuration q2 might also be
+provided. If qs contains fewer than k elements, simply return them all
+-- no new Pinocchio method is needed for the solution. A possible sketch
+is given below.
+
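+A possible sketch, with the Euclidean distance as default metric:
+
+```python
+import numpy as np
+
+def nearestNeighbors(q, qs, k, dist=lambda q1, q2: np.linalg.norm(q2 - q1)):
+    '''Return the k elements of qs closest to q (all of them if qs has fewer than k elements).'''
+    return sorted(qs, key=lambda qc: dist(q, qc))[:k]
+```
+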
+Tutorial 5.4: Probabilistic roadmap
+-----------------------------------
+
+Basically, probabilistic roadmaps are built by maintaining a graph of
+configurations. At each iteration, a new configuration is randomly
+sampled and connected to the nearest configurations already in the
+graph. The algorithm stops when both the start configuration qstart and
+the goal configuration qgoal can be directly connected to some elements
+of the graph.
+
+We propose here to implement the visibility PRM algorithm. This
+algorithm also maintains the connected components of the graph. When a
+new configuration qnew is sampled, we try to connect it to its nearest
+neighbor in each of the already-sampled connected components.
+Configuration qnew is added to the graph only if one of the two
+following conditions is respected:
+
+-   if qnew cannot be connected to any existing connected component
+
+-   or if it can be connected to at least two connected components.
+
+In the second case, the connected components that can be connected are
+also merged.
+
+A graph structure with connected components is
+[provided here](tp5_graph_py.html).
+
+*Question 4* Implement a visibilityPRM function that takes as arguments
+the start and goal configurations qstart and qgoal, and the number of
+random samples that must be drawn before failing. It returns True if
+qgoal can be connected to qstart. The graph must also be returned -- no
+fancy Pinocchio algorithm is needed here.
+
+The PRM can be visualized in Gepetto-viewer using the function
+displayPRM [provided here](tp5_prm_display_py.html).
+
+Tutorial 5.5: Searching a path in the roadmap
+---------------------------------------------
+
+A\* is an algorithm to find the shortest path in a graph (discrete
+problem). A\* iteratively explores the nodes of the graph starting from
+the given start node. Once a node gets explored, its exact cost from the
+start (cost to here) is exactly known. Then, all its children are added
+to the "frontier" of the set of already-explored nodes. The cost from
+nodes of the frontier to the goal is not (yet) exactly known but is
+anticipated through a heuristic function (also provided). At the next
+iteration, the algorithm examines a node of the frontier, looking first
+at the node that is the most likely to be on the shortest path according
+to the heuristic distance.
+
+See the fairly complete description of A\*
+[provided here](http://theory.stanford.edu/~amitp/GameProgramming/AStarComparison.html#the-a-star-algorithm).
+
+*Question 5* Implement the A\* algorithm. The A\* returns a sequence of
+node IDs from start to goal. We only work here with the existing graph of
+configurations, meaning that no new nodes are sampled, and no new collision
+tests are computed. Pinocchio is not necessary here. A\* returns a list
+containing the indexes of the nodes of the PRM graph that one should
+cross to reach qgoal from qstart.
+
+Tutorial 5.6: Shortcut
+----------------------
+
+Given a list of configurations, a random shortcut simply randomly
+tries to shortcut some subpart of the list: it randomly selects the
+start and end of the subpart, and tries to directly connect them (using
+the above-defined steering method). In case of success, it skips the
+unnecessary subpart. The algorithm iterates a given number of times.
+
+The shortcut can be run either on the list of configurations output by
+the A\*, or on a sampling of the trajectory connecting the nodes of the
+A\*. We propose here the second version.
+
+*Question 6* Define a function samplePath to uniformly sample the
+trajectory connecting the nodes selected by A\*: for each edge of the A\*
+optimal sequence, call connect and add the resulting sampling to the
+previously-computed samples. It takes as arguments the PRM graph and the
+list of indexes computed by A\* and returns a list of robot
+configurations starting with qstart and ending with qgoal -- no Pinocchio
+method is needed here.
+
+The sampled path can be displayed in Gepetto-viewer using the function
+displayPath [provided here](prm_display).
+
+*Question 7* Implement the shortcut algorithm that tries to randomly
+connect two configurations of the sampled path. It takes the list of
+configurations output by samplePath and the number of random shortcuts
+it should try. It returns an optimized list of configurations -- no
+Pinocchio method is needed here. A possible sketch is given below.
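+
+A possible sketch, reusing the connect function defined in Tutorial 5.2
+(the number of trials is passed as an argument):
+
+```python
+import random
+
+def shortcut(path, ntrial):
+    '''Randomly try to bypass sub-parts of the configuration list.'''
+    for _ in range(ntrial):
+        i, j = sorted(random.sample(range(len(path)), 2))
+        if j - i < 2:
+            continue
+        ok, segment = connect(path[i], path[j])
+        if ok:
+            path = path[:i] + segment + path[j + 1:]
+    return path
+```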
diff --git a/doc/d-labs/6-wpg.md b/doc/d-labs/6-wpg.md
new file mode 100644
index 000000000..9702f0a39
--- /dev/null
+++ b/doc/d-labs/6-wpg.md
@@ -0,0 +1,193 @@
+6. Take a walk (aka optimal control)
+====================================
+
+Objective
+---------
+
+The objective of this work is to get a first touch of optimal control.
+The tutorial will guide you to generate a dynamically balanced walk
+motion for a humanoid robot, using a LQR to compute the robot
+center-of-mass trajectory.
+
+Tutorial 6.0: prerequisites
+---------------------------
+
+*Prerequisite #1* A humanoid robot model, with at least two legs.
+
+*Prerequisite #2* An inverse geometry solver based on BFGS.
+
+Yet, the inverse geometry only solves the motion of the robot for a
+constant task, like reaching a specific position of the hand, or a
+constant position of the center of mass.
+
+It is possible to modify the BFGS call to perform an inverse kinematics
+by (i) limiting the number of iterations of BFGS to a small value, e.g. 10
+iterations maximum, (ii) initializing the non-linear search from the
+previous configuration of the robot, and (iii) turning off the default
+verbose output of BFGS. For example, the robot can track a target moving
+vertically using the following example:
+
+```python
+cost.Mdes = se3.SE3(eye(3), np.matrix([0.2, 0, 0.1]).T)  # Reference target at time 0
+q = np.copy(robot.q0)
+for t in range(100):
+    cost.Mdes.translation = np.matrix([0.2, 0, 0.1 + t / 100.]).T
+    q = fmin_bfgs(cost, q, maxiter=10, disp=False)
+    robot.display(q)
+```
+
+Implement a motion of the right foot of the robot tracking a straight
+line from the initial position of the robot to a position 10cm forward,
+while keeping a constant rotation of the foot.
+
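+A possible sketch, assuming the same cost object as above now penalizes
+the placement of the right foot, and that Mrf0 denotes its initial
+placement (both are assumptions, not part of the provided code):
+
+```python
+import numpy as np
+from scipy.optimize import fmin_bfgs
+
+q = np.copy(robot.q0)
+for t in range(100):
+    # Straight line: 10cm forward in 100 steps, constant rotation.
+    cost.Mdes.translation = Mrf0.translation + np.matrix([0.1 * t / 100., 0., 0.]).T
+    q = fmin_bfgs(cost, q, maxiter=10, disp=False)
+    robot.display(q)
+```
+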
+Tutorial 6.1: defining input
+----------------------------
+
+The input of the walk generation algorithm is a sequence of steps with a
+given timing. This input will be represented by two sequences as in the
+examples below. The class FootSteps ([see code link](footsteps.html))
+can be used to define, store and access the footstep plan.
+
+\[source, python\]
+------------------
+# Define 6 steps forward, starting with the left foot and stopping at the same forward position.
+footsteps = FootSteps( [0.0, -0.1], [0.0, 0.1] )
+footsteps.addPhase( .3, 'none' )
+footsteps.addPhase( .7, 'left',  [0.1, +0.1] )
+footsteps.addPhase( .1, 'none' )
+footsteps.addPhase( .7, 'right', [0.2, -0.1] )
+footsteps.addPhase( .1, 'none' )
+footsteps.addPhase( .7, 'left',  [0.3, +0.1] )
+footsteps.addPhase( .1, 'none' )
+footsteps.addPhase( .7, 'right', [0.4, -0.1] )
+footsteps.addPhase( .1, 'none' )
+footsteps.addPhase( .7, 'left',  [0.5, +0.1] )
+footsteps.addPhase( .1, 'none' )
+footsteps.addPhase( .7, 'right', [0.5, -0.1] )
+footsteps.addPhase( .5, 'none' )
+----
+
+The class stores three functions of time: left, right and flyingFoot.
+Each function is piecewise constant. For each function, the user can ask
+for the value of this function at any time t.
+
+The storage is composed of three lists for left, right and flyingFoot,
+and a list for time. The list of times stores the time intervals, i.e.
+each element of the list is the start of a time interval. The first
+element of the list is 0. The value of the functions left, right and
+flyingFoot on this time interval is stored at the same position in
+their respective list (i.e. the value of left on the interval
+\[ time\[i\], time\[i+1\] \] is stored in left\[i\]).
+
+The four lists are set up using the function addPhase(). The values of
+the functions left, right and flyingFoot can be accessed through the
+functions getPhaseType(t), getLeftPosition(t) and getRightPosition(t).
+The phase type is 'left' (meaning the left foot is flying and the right
+foot is fixed), 'right' (i.e. the opposite) or 'none' (meaning no foot
+is flying, both are fixed on the ground).
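+
+All these accessors boil down to finding the interval that contains t.
+A minimal sketch of this lookup, assuming that +time+, +left+, +right+
+and +flyingFoot+ are the four lists just described, could be:
+
+\[source,python\]
+-----------------
+import bisect
+
+def phaseIndex(footsteps, t):
+    '''Index i such that time[i] <= t < time[i+1], i.e. the interval containing t.'''
+    return bisect.bisect_right(footsteps.time, t) - 1
+
+# getPhaseType(t) then simply returns footsteps.flyingFoot[phaseIndex(footsteps, t)],
+# and getLeftPosition(t) returns footsteps.left[phaseIndex(footsteps, t)].
+----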
+
+Additionally, the functions getLeftNextPosition(t) and
+getRightNextPosition(t) can be used to get the next position of the
+flying foot (in that case, additional work is needed to compute the
+position of the flying foot at time t by interpolating between
+getLeftPosition(t) and getLeftNextPosition(t)).
+
+Functions getPhaseStart(t), getPhaseDuration(t) and getPhaseRemaining(t)
+can be used to get the starting time, the duration and the remaining
+time of the current phase at time t.
+
+A phase 'none' defines a double support phase (no foot moving). A phase
+'left' (resp. 'right') defines a single support phase and indicates the
+flying foot. The time argument is a duration; the position is absolute.
+
+Each interval corresponds to the following constant support phases:
+
+\["latex"\]
+\begin{tabular}{lll}
+$\left[t_0,t_1\right]$ &double support phase& left foot in steps [0], right foot in steps [1] \\
+$\left[t_1,t_2\right]$ &left foot support phase& right foot moving to steps [2]\\
+$\left[t_2,t_3\right]$ &double support phase,& \\
+$\left[t_3,t_4\right]$ &right foot support phase,& left foot moving to steps [3]\\
+&\vdots&\\
+$\left[t_{m-2}, t_{m-1}\right]$ &double support phase,& left foot in steps [p-2], right foot in steps [p-1]
+\end{tabular}
+\[source,python\]
+-----------------
+# Example of use
+footsteps.getPhaseType(.4)              # return 'left'
+footsteps.getLeftPosition(0.4)          # return 0,0.1
+footsteps.getLeftNextPosition(0.4)      # return 0.1,0.1
+footsteps.getPhaseStart(0.4)            # return 0.3
+footsteps.getPhaseDuration(0.4)         # return 0.7
+footsteps.getPhaseRemaining(0.4)        # return 0.6
+footsteps.isDoubleFromLeftToRight(0)    # return False
+footsteps.isDoubleFromLeftToRight(1)    # return True
+----
+
+Tutorial 6.2: computing reference ZMP
+-------------------------------------
+
+Implement a Python class called +ZmpRef+ that takes as input a sequence
+of times and a sequence of steps (i.e. a +FootSteps+ object). Objects of
+this class behave as a function of time that returns a two-dimensional
+vector:
+
+\[source, python\]
+------------------
+>>> zmp = ZmpRef(footsteps)
+>>> zmp(2.5)
+array([ 0.41 , 0.096])
+----
+
+The function should be a piecewise affine function:
+
+-   starting at the middle of the ankles of the first two steps,
+-   finishing at the middle of the ankles of the last two steps,
+-   constant under the support foot during single support phases.
+
+You can use the template below.
+
+\[source, python\]
+------------------
+class ZmpRef(object):
+    def __init__(self, footsteps):
+        self.footsteps = footsteps
+    # Operator ()
+    def __call__(self, t):
+        return array(self.footsteps[0])
+----
+
+For the inputs provided above, the graph of +zmp+ is given below.
+
+image::images/zmp-ref.png\[width="100%",alt="zmp\_ref against time"\]
+
+Tutorial 6.3: reference trajectory of the center of mass
+--------------------------------------------------------
+
+Using the reference zmp trajectory implemented in Tutorial 6.2,
+implement a class +ComRef+ that computes the reference trajectory of the
+center of mass by optimal control.
+
+To write the underlying optimization problem, you can use a factor
+graph. A simple implementation is available in
+link:factor\_graph.html\[(*see the source code*)\]. An example of use is
+the following. Try to guess the solution before executing it.
+
+\[source,python\]
+-----------------
+f = FactorGraph(1, 5)  # Define a factor graph over 5 variables of dimension 1
+
+M = eye(1)             # M is simply 1 written as a 1x1 matrix.
+for i in range(4):
+    f.addFactorConstraint( [ Factor(i, M), Factor(i + 1, -M) ], zero(1) )
+
+f.addFactor( [ Factor(0, M) ], M * 10 )
+f.addFactor( [ Factor(4, M) ], M * 20 )
+
+x = f.solve()
+----
+
+Tutorial 6.4: reference trajectories of the feet
+------------------------------------------------
+
+Using the same method as in Tutorial 6.2, implement two classes
++RightAnkleRef+ and +LeftAnkleRef+ that return the reference positions of
+the ankles as homogeneous matrices. Unlike the zmp reference, the
+trajectories of the feet should be continuously differentiable.
+
+Tutorial 6.5: generate walk motion
+----------------------------------
+
+Use the classes defined in the previous sections to generate a walk
+motion using the inverse kinematics solver of Tutorial 2.
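+
+A possible structure of the main loop is sketched below. It assumes that
++comref+, +rightankleref+ and +leftankleref+ are instances of the classes
+above, that +robot+ and +footsteps+ are the model and footstep plan
+defined earlier, and that +solveIK+ stands for whatever inverse-kinematics
+routine you built in Tutorial 2 (here taking the current configuration,
+the three task references and the time step); adapt the names to your
+own code.
+
+\[source,python\]
+-----------------
+import numpy as np
+
+dt = 5e-3                        # integration step of the walk (illustrative value)
+q = np.copy(robot.q0)
+for i in range(int(footsteps.time[-1] / dt)):
+    t = i * dt
+    # Evaluate the three reference tasks at time t ...
+    com_t    = comref(t)         # reference position of the center of mass
+    Mright_t = rightankleref(t)  # reference placement of the right ankle
+    Mleft_t  = leftankleref(t)   # reference placement of the left ankle
+    # ... and track them with the inverse-kinematics solver of Tutorial 2.
+    q = solveIK(q, com_t, Mright_t, Mleft_t, dt)
+    robot.display(q)
+----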
diff --git a/doc/d-labs/7-learn.md b/doc/d-labs/7-learn.md
new file mode 100644
index 000000000..989c96b44
--- /dev/null
+++ b/doc/d-labs/7-learn.md
@@ -0,0 +1,209 @@
+7. Learning to fly (aka policy learning)
+===========================================
+
+Objective
+---------
+
+The objective of this tutorial is to study how to directly solve an
+optimal control problem, either by computing a trajectory from the
+current state to the goal, or by computing a policy. To keep decent
+computation times with simple Python code, we will only work with a
+simple inverted pendulum with limited torque, which must swing up to
+reach the standing configuration. The presented algorithms successively
+compute an optimal trajectory, a discretized policy with a Q-table and
+with a linear network, and a continuous policy with a deep neural
+network.
+
+Tutorial 7.0: prerequisites
+---------------------------
+
+We need a pendulum model and a neural network. All the source code for
+this tutorial is gathered in the archive (link:tuto7.zip\[*available at
+this url*\]).
+
+*Prerequisite \#1* Inverted pendulum model
+
+Two models are provided. The first one is continuous, implemented with
+Pinocchio, and available in pendulum.py as the class Pendulum. The code
+is generic for an N-pendulum; we will use the 1-DOF model. The state is
+\[q,v\], the angle and angular velocity of the pendulum, and the control
+is the joint torque. The pendulum weighs 1 kg and measures 1 m, with its
+COM at 0.5 m from the joint. Do not forget to start
++gepetto-viewer-server+ before rendering the model. Each time the
+simulator is integrated, it returns the new state and the reward for the
+previous action (implemented as the weighted sum of squared position,
+velocity and control). The state is stored as a member of the class and
+can be accessed through env.x; do not forget to copy it before modifying
+it.
+
+\[source, python\]
+------------------
+from pendulum import Pendulum
+from pinocchio.utils import *
+
+env = Pendulum(1)        # Continuous pendulum
+NX  = env.nobs           # ... training converges with q,qdot with 2x more neurons.
+NU  = env.nu             # Control is dim-1: joint torque
+
+x = env.reset()          # Sample an initial state
+u = rand(NU)             # Sample a control
+x, reward = env.step(u)  # Integrate simulator for control u and get the reward.
+env.render()             # Display the model at state env.x
+----
+
+A second version of the same model is provided with discrete dynamics,
+in dpendulum.py. The state is again \[q,v\], but discretized into NQ
+positions and NV velocities. The state is then an integer equal to
+iq*NV+iv, ranging from 0 to NQ*NV=NX. Controls are also discretized from
+0 to NU.
+
+\[source, python\]
+------------------
+from dpendulum import DPendulum
+
+env = DPendulum()
+NX = env.nx   # Number of (discrete) states
+NU = env.nu   # Number of (discrete) controls
+env.reset()
+env.step(5)
+env.render()
+----
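+
+The mapping between the grid indices (iq, iv) and the scalar state x
+described above is simply:
+
+\[source, python\]
+------------------
+NQ, NV = 21, 21              # illustrative discretization sizes
+
+def encode(iq, iv):
+    return iq * NV + iv      # scalar state in [0, NQ*NV)
+
+def decode(x):
+    return divmod(x, NV)     # back to (iq, iv)
+----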
+
+Other models could be used. In particular, the API is similar to the one
+of the Gym library from OpenAI, which you might want to browse and
+possibly try with the following algorithms.
+
+*Prerequisite \#2* A neural network with optimizers
+
+We will use TensorFlow from Google, available through pip.
+
+\[source,bash\]
+-----------------
+sudo apt-get install python-pip cython python-h5py
+export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp27-none-linux_x86_64.whl
+pip install --upgrade $TF_BINARY_URL
+pip install tflearn==0.2.1
+----
+
+Tutorial 7.1. Optimizing an optimal trajectory
+----------------------------------------------
+
+For this first tutorial, we implement a nonlinear optimization program
+optimizing the cost of a single trajectory for the continuous pendulum.
+The trajectory is represented by its initial state x0 and the vector of
+piecewise-constant controls U=\[u0 ... uT-1\], with T the number of
+timesteps. The cost is simply the integral of the cost function l(x,u)
+returned by the pendulum environment.
+
+The integral cost is then optimized starting from a zero control
+trajectory, until the pendulum finally reaches the standing state.
+Observe that the number of swings may be sub-optimal.
+
+The code to optimize the trajectory is available in ocp.py in the ZIP
+archive.
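+
+A minimal sketch of the underlying cost evaluation is given below. It is
+only an illustration: it assumes the +Pendulum+ API and the +env+ and
++NU+ variables of Tutorial 7.0, with +env.reset(x0)+ accepting an
+initial state (which may differ in your version), and it simply sums the
+per-step cost returned by +env.step+ before feeding the result to
++fmin\_bfgs+. The actual implementation is the one of ocp.py.
+
+\[source,python\]
+-----------------
+import numpy as np
+from scipy.optimize import fmin_bfgs
+
+T  = 50                    # number of timesteps (illustrative value)
+x0 = env.reset()           # fixed initial state of the trajectory
+
+def rollout_cost(U):
+    '''Integral cost of the rollout applying the piecewise-constant controls
+    U = [u0 ... uT-1] from the initial state x0.'''
+    env.reset(x0)
+    total = 0.
+    for t in range(T):
+        u = U[t * NU:(t + 1) * NU]  # control of dimension NU at timestep t
+        _, cost_t = env.step(u)     # the value returned here is the running cost l(x,u)
+        total += cost_t
+    return total
+
+U0   = np.zeros(T * NU)             # start from a zero control trajectory
+Uopt = fmin_bfgs(rollout_cost, U0, maxiter=100)
+----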
+
+Tutorial 7.2. Q-table resolution for discrete pendulum
+------------------------------------------------------
+
+We now consider the discrete model, with NX states and NU controls.
+Imagine this model as a chess board with a maze (possibly nonplanar)
+drawn on it, on which the system has to discover a path from an initial
+state to the final state at the center of the board. When performing a
+trial, the system is informed of success or failure by receiving reward
+1 when reaching the goal, or otherwise 0 after 100 moves. To record the
+already-explored paths, we can store a table of NX-by-NU values, each
+giving how likely we are to be rewarded if taking action U at state X.
+This table is named the Q-table, and corresponds to the Hamiltonian
+(Q-value) of the discrete system. The Q-values can be back-propagated
+along the table using the Dijkstra algorithm. Since we do not know the
+goal state(s), the back-propagation is done along random roll-outs
+inside the maze, which likely converges to an approximation of the exact
+Hamiltonian. Once the Q-table is computed, the optimal policy is simply
+obtained by maximizing the vector of Q-values corresponding to the row
+of state X.
+
+This algorithm is available in the file qtable.py of the ZIP archive.
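+
+A minimal sketch of the Q-table update along roll-outs is given below,
+assuming the +DPendulum+ API of Tutorial 7.0, and assuming that
++env.reset()+ returns the integer state index and that +env.step(u)+
+returns the new state index and the reward; the learning rate, discount
+factor and episode lengths are illustrative values, and the complete
+algorithm is the one of qtable.py.
+
+\[source,python\]
+-----------------
+import numpy as np
+from dpendulum import DPendulum
+
+env = DPendulum()
+Q = np.zeros([env.nx, env.nu])   # Q-table: one row per state, one column per control
+
+GAMMA, ALPHA, EPISODES, MAXSTEPS = .99, .1, 500, 100   # illustrative hyperparameters
+
+for episode in range(EPISODES):
+    x = env.reset()
+    for step in range(MAXSTEPS):
+        # Greedy action with some exploration noise, decreasing with the episodes.
+        u = np.argmax(Q[x, :] + np.random.randn(env.nu) / (1 + episode))
+        x2, reward = env.step(u)
+        # Back-propagate the value of the best next action (Bellman/HJB update).
+        Q[x, u] += ALPHA * (reward + GAMMA * np.max(Q[x2, :]) - Q[x, u])
+        x = x2
+----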
+
+Tutorial 7.3. Q-table using a linear net
+----------------------------------------
+
+The idea is to similarly approximate the Q-value for the continuous
+model. Since the continuous model has both infinitely many states and
+controls, a table cannot be used. We will instead use a function basis
+to approximate the Q-value. For the tutorial, we have chosen to use a
+deep neural net. First, let's use a simple net for storing the Q-table.
+
+Basically, the vector of Q-values for all possible controls u is
+obtained by multiplying the Q-table by a one-hot vector (0 everywhere
+except a single 1) corresponding to the state. The optimal policy is
+then the maximum of this vector: iu\^\* = argmax( Q\*h(ix) ), with h(ix)
+= \[ 0 0 ... 0 1 0 ... 0 \], ix and iu being the indexes of the state
+and of the control. We use TensorFlow to store the array Q. The Q-value
+net is simply the multiplication of Q by the one-hot encoding of x, and
+the policy is the argmax of the result.
+
+Now, the coefficients of Q are the parameters defining the Q-value (and
+hence the policy). They must be optimized to fit the cost function. From
+the Hamilton-Jacobi-Bellman equation, we know that Q(x,u) = l(x,u) +
+max\_u2 Q(f(x,u),u2). We optimize the Q-value so that this residual is
+minimized along the samples collected from successive roll-outs inside
+the maze.
+
+The implementation of this algorithm is available in qnet.py in the ZIP
+archive. Observe that the convergence is not as fast as with the Q-Table
+algorithm.
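+
+A minimal sketch of such a linear Q-value net is given below. It is only
+an illustration, written against the TensorFlow 1.x-style API (minor
+renamings may be needed with the 0.12 version pinned above); the sizes
+are illustrative and the complete implementation is the one of qnet.py.
+
+\[source,python\]
+-----------------
+import tensorflow as tf
+
+NX, NU = 400, 11   # illustrative sizes of the discretized state and control spaces
+
+x    = tf.placeholder(tf.int32,   [None])        # batch of state indexes
+qref = tf.placeholder(tf.float32, [None, NU])    # HJB right-hand side, computed outside
+
+Q      = tf.Variable(tf.random_uniform([NX, NU], 0., .01))  # the "table" as a TF variable
+qvalue = tf.matmul(tf.one_hot(x, NX), Q)         # Q-values of the batch: h(ix) * Q
+policy = tf.argmax(qvalue, axis=1)               # greedy policy iu* = argmax_u Q[ix, u]
+
+loss  = tf.reduce_sum(tf.square(qref - qvalue))  # squared HJB residual
+optim = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
+----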
+
+Tutorial 7.4. Actor-critic network
+----------------------------------
+
+We will now optimize a continuous policy and the corresponding
+Q-function, using an "Actor-Critic" method proposed in "Continuous
+control with deep reinforcement learning", by Lillicrap et al,
+arXiv:1509.02971.
+
+Two networks are used to represent the Q-function and the policy. The
+first network has two inputs: the state x and the control u. Its output
+is a scalar. It is optimized to minimize the residual of the HJB
+equation along a batch of sample points collected along the previous
+roll-outs.
+
+The policy network has a single input: the state x. Its output is a
+control vector u (of dimension 1 for the pendulum). It is optimized to
+maximize the Q-function, i.e. at each state, Pi(x) corresponds to the
+maximum over all possible controls u of Q(x,u).
+
+Two critical aspects are reported in the paper and implemented in the
+tutorial. First, we learn over a batch of random samples collected from
+many previous roll-outs, in order to break the temporal dependency
+inside the batch. Second, we regularize the optimization of both the
+Q-value (critic) and the policy (actor) networks by storing a copy of
+both networks, and only slightly modifying these copies at each step.
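+
+The second trick (the slowly-updated copies, often called target
+networks) amounts to the soft update sketched below, where +TAU+ is a
+small assumed gain and the variable lists are hypothetical placeholders
+for the actual TensorFlow variables used in continuous.py.
+
+\[source,python\]
+-----------------
+import tensorflow as tf
+
+TAU = 1e-3   # small update gain (illustrative value)
+
+def soft_update(main_vars, target_vars, tau=TAU):
+    '''Return the ops nudging each target variable toward its main counterpart.'''
+    return [t.assign(tau * m + (1 - tau) * t) for m, t in zip(main_vars, target_vars)]
+
+# Typical use after each optimization step (sess and the variable lists are assumed):
+# sess.run(soft_update(critic_vars, critic_target_vars)
+#          + soft_update(actor_vars, actor_target_vars))
+----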
+
+The corresponding algorithm is implemented in the file continuous.py of
+the ZIP archive. The training phase requires 100 roll-outs and a few
+minutes (maybe more on the virtual machine). To make the training
+faster, backups of the network weights are available
+(link:tuto6\_backup.zip\[*following this link*\]). You can restore the
+network weights by uncommenting the following code lines:
+
+\[source,python\]
+-----------------
+tf.train.Saver().restore(sess, "netvalues/actorcritic.pre.ckpt")
+tf.train.Saver().restore(sess, "netvalues/actorcritic.full.ckpt")
+----
+
+The "pre" backup contains partially-optimized weights (after 30
+roll-outs). The "full" backup corresponds to fully-optimized weights
+after 100 roll-outs.
+
+Tutorial 7.5. Training the network with the OCP solver
+------------------------------------------------------
+
+Using the OCP solver, you can compute a few optimal trajectories (say
+10) starting from various initial conditions. Initialize the replay
+memory with the 10x50 points composing the 10 optimal trajectories and
+optimize the networks from this replay memory only (without additional
+roll-outs, but using the same small batch size). Play with the learning
+parameters until the network converges.
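+
+A minimal sketch of this initialization is given below. It assumes the
++env+ object of Tutorial 7.0, a hypothetical helper +solveOCP(x0)+
+wrapping the solver of Tutorial 7.1 and returning the visited states,
+controls and running costs of the optimal trajectory, and a +replay+
+deque of transition tuples as used in continuous.py; adapt the names to
+your own code.
+
+\[source,python\]
+-----------------
+from collections import deque
+
+replay = deque(maxlen=10000)   # replay memory of (x, u, reward, x_next) transitions
+
+for _ in range(10):
+    x0 = env.reset()           # random initial condition
+    X, U, R = solveOCP(x0)     # states, controls and rewards of one optimal trajectory
+    for t in range(len(U)):
+        replay.append((X[t], U[t], R[t], X[t + 1]))
+
+# The actor-critic networks are then trained by sampling small batches from replay only.
+----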
+
+When properly implemented, the OCP produces better accuracy than the
+policy. However, at run-time, the policy is much cheaper to evaluate
+than solving a new OCP. I am currently considering how to use the
+network to warm-start or guide the OCP solver at run-time.
+
+The provided solvers (trajectory and policy) run reasonably well for the
+1-pendulum. They are more difficult to tune for more complex dynamics,
+such as a 2-pendulum. You may want to try on a quadcopter robot (hence
+the title of the tutorial) but I expect it to be a serious job. A model
+of the quadcopter link:quadcopter.py\[*is available here*\].
+
+Homework
+--------
+
+Send a mail to nmansard@laas.fr containing a single Python file. The
+subject of the mail should start with +\[SUPAERO\] TP6+. When executed,
+the script should compute 5 optimal trajectories, feed them to the
+replay deque and train the actor-critic networks from scratch using
+batches from this replay deque.
diff --git a/doc/d-labs/intro.md b/doc/d-labs/intro.md
new file mode 100644
index 000000000..9808be485
--- /dev/null
+++ b/doc/d-labs/intro.md
@@ -0,0 +1,3 @@
+# Labs
+
+This section contains complete exercises to understand Pinocchio.
diff --git a/doc/treeview.dox b/doc/treeview.dox
index 133e80379..58d7614e5 100644
--- a/doc/treeview.dox
+++ b/doc/treeview.dox
@@ -27,21 +27,31 @@ namespace pinocchio {
   // Pages/ tutorials organization
   //
 
-  /** \page md_doc_maths_intro Mathematical Formulation
-       - \subpage md_doc_maths_geometry
-       - \subpage md_doc_maths_kinematics
-       - \subpage md_doc_maths_dynamics
-       - \subpage md_doc_maths_se3
+  /** \page md_doc_a-maths_intro Mathematical Formulation
+       - \subpage md_doc_a-maths_geometry
+       - \subpage md_doc_a-maths_kinematics
+       - \subpage md_doc_a-maths_dynamics
+       - \subpage md_doc_a-maths_se3
   */
 
-  /** \page Usage Usage
-       - \subpage md_doc_usage_creating-models
-       - \subpage md_doc_usage_crtp
+  /** \page md_doc_b-usage_intro Usage
+       - \subpage md_doc_b-usage_creating-models
+       - \subpage md_doc_b-usage_crtp
   */
 
-  /** \page md_doc_tutorials_intro Tutorials
-       - \subpage md_doc_tutorials_direct-geometry
-       - \subpage md_doc_tutorials_inverse-kinematics
+  /** \page md_doc_c-tutorials_intro Tutorials
+       - \subpage md_doc_c-tutorials_direct-geometry
+       - \subpage md_doc_c-tutorials_inverse-kinematics
+  */
+
+  /** \page md_doc_d-labs_intro Labs
+       - \subpage md_doc_d-labs_1-directgeom
+       - \subpage md_doc_d-labs_2-invgeom
+       - \subpage md_doc_d-labs_3-invkine
+       - \subpage md_doc_d-labs_4-dyn
+       - \subpage md_doc_d-labs_5-planner
+       - \subpage md_doc_d-labs_6-wpg
+       - \subpage md_doc_d-labs_7-learn
   */
 
   //
diff --git a/doc/tutorials/intro.md b/doc/tutorials/intro.md
deleted file mode 100644
index 61dfd9284..000000000
--- a/doc/tutorials/intro.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Tutorials
-
-These tutorials contains quick exemples showing how Pinocchio can be used.
-If you need more detailed exercises, go to https://github.com/stack-of-tasks/pinocchio-tutorials
-- 
GitLab