Commit b4cc8c8a authored by Pierre Fernbach's avatar Pierre Fernbach
Browse files

[script] update memmo/talos_platform_random to sample init/end goal on flat platforms

parent f6e987ae
......@@ -6,23 +6,7 @@ print "Plan guide trajectory ..."
import scenarios.memmo.talos_platform_random_path as tp
#Robot.urdfSuffix += "_safeFeet"
pId = 0
"""
print "Done."
import time
statusFilename = tp.statusFilename
f = open(statusFilename,"a")
if tp.ps.numberPaths() > 0 :
print "Path planning OK."
f.write("Planning_success: True"+"\n")
f.close()
else :
print "Error during path planning"
f.write("Planning_success: False"+"\n")
f.close()
import sys
sys.exit(1)
"""
fullBody = Robot ()
......@@ -64,25 +48,6 @@ q_init = q_ref[::]
fullBody.setReferenceConfig(q_ref)
fullBody.setPostureWeights(fullBody.postureWeights[::]+[0]*6)
"""
if abs(tp.q_goal[1]) <= abs(tp.q_goal[0]) :
fullBody.setPostureWeights(fullBody.postureWeights[::]+[0]*6)
heuristicR = "fixedStep08"
heuristicL = "fixedStep08"
print "Use weight for straight walk"
fullBody.usePosturalTaskContactCreation(True)
else :
fullBody.setPostureWeights(fullBody.postureWeights_straff[::]+[0]*6)
print "Use weight for straff walk"
if tp.q_goal[1] < 0 :
print "start with right leg"
heuristicL = "static"
heuristicR = "fixedStep06"
else:
print "start with left leg"
heuristicR = "static"
heuristicL = "fixedStep06"
"""
fullBody.setCurrentConfig (q_init)
......@@ -92,9 +57,9 @@ tStart = time.time()
nbSamples = 100000
fullBody.addLimb(fullBody.rLegId,fullBody.rleg,fullBody.rfoot,fullBody.rLegOffset,fullBody.rLegNormal, fullBody.rLegx, fullBody.rLegy, nbSamples, "fixedStep06", 0.01,kinematicConstraintsPath=fullBody.rLegKinematicConstraints,kinematicConstraintsMin = 0.85)
fullBody.runLimbSampleAnalysis(fullBody.rLegId, "ReferenceConfiguration", True)
#fullBody.runLimbSampleAnalysis(fullBody.rLegId, "ReferenceConfiguration", True)
fullBody.addLimb(fullBody.lLegId,fullBody.lleg,fullBody.lfoot,fullBody.lLegOffset,fullBody.rLegNormal, fullBody.lLegx, fullBody.lLegy, nbSamples, "fixedStep06", 0.01,kinematicConstraintsPath=fullBody.lLegKinematicConstraints,kinematicConstraintsMin = 0.85)
fullBody.runLimbSampleAnalysis(fullBody.lLegId, "ReferenceConfiguration", True)
#fullBody.runLimbSampleAnalysis(fullBody.lLegId, "ReferenceConfiguration", True)
tGenerate = time.time() - tStart
......@@ -122,14 +87,14 @@ q_goal[configSize+3:configSize+6] = [0,0,0]
state_id = fullBody.generateStateInContact(q_init, vel_init,robustnessThreshold=5)
assert fullBody.rLegId in fullBody.getAllLimbsInContact(state_id), "Right leg not in contact in initial state"
assert fullBody.lLegId in fullBody.getAllLimbsInContact(state_id), "Left leg not in contact in initial state"
q0 = fullBody.getConfigAtState(state_id)
v(q0)
q_init = fullBody.getConfigAtState(state_id)
v(q_init)
state_id = fullBody.generateStateInContact(q_goal, vel_goal,robustnessThreshold=5)
assert fullBody.rLegId in fullBody.getAllLimbsInContact(state_id), "Right leg not in contact in final state"
assert fullBody.lLegId in fullBody.getAllLimbsInContact(state_id), "Left leg not in contact in final state"
q1 = fullBody.getConfigAtState(state_id)
v(q1)
q_goal = fullBody.getConfigAtState(state_id)
v(q_goal)
'''
......@@ -137,18 +102,28 @@ fullBody.setStaticStability(True)
v.addLandmark('talos/base_link',0.3)
# FOR EASY SCENARIOS ?
q0 = q_init[::]
q1 = q_goal[::]
q0[2]=q_ref[2]+0.02
q1[2]=q_ref[2]+0.02
# specify the full body configurations as start and goal state of the problem
fullBody.setStartState(q0,[fullBody.rLegId,fullBody.lLegId])
fullBody.setEndState(q1,[fullBody.rLegId,fullBody.lLegId])
q_init[2]=q_ref[2]
q_goal[2]=q_ref[2]
# define the initial gait according to the direction of motion; try to move first the leg on the outside of the turn:
if q_goal[0] > q_init[0] : # go toward positive x
if q_goal[1] > q_init[1]: # turn left
gait = [fullBody.rLegId,fullBody.lLegId]
else : # turn right
gait = [fullBody.lLegId,fullBody.rLegId]
else : # go toward negative x
if q_goal[1] > q_init[1]: # turn right
gait = [fullBody.lLegId,fullBody.rLegId]
else : # turn left
gait = [fullBody.rLegId,fullBody.lLegId]
fullBody.setStartState(q_init,gait)
fullBody.setEndState(q_goal,gait)
v(q_init)
print "Generate contact plan ..."
tStart = time.time()
configs = fullBody.interpolate(0.001,pathId=pId,robustnessTreshold = 3, filterStates = True,testReachability=True,quasiStatic=True)
configs = fullBody.interpolate(0.005,pathId=pId,robustnessTreshold = 1, filterStates = True,testReachability=True,quasiStatic=True)
tInterpolateConfigs = time.time() - tStart
print "Done."
print "Contact plan generated in : "+str(tInterpolateConfigs)+" s"
......@@ -156,36 +131,6 @@ print "number of configs :", len(configs)
#raw_input("Press Enter to display the contact sequence ...")
#displayContactSequence(v,configs)
"""
if len(configs) < 2 :
cg_success = False
print "Error during contact generation."
else:
cg_success = True
print "Contact generation Done."
if abs(configs[-1][0] - tp.q_goal[0]) < 0.01 and abs(configs[-1][1]- tp.q_goal[1]) < 0.01 and (len(fullBody.getContactsVariations(len(configs)-2,len(configs)-1))==1):
print "Contact generation successful."
cg_reach_goal = True
else:
print "Contact generation failed to reach the goal."
cg_reach_goal = False
if len(configs) > 5 :
cg_too_many_states = True
cg_success = False
print "Discarded contact sequence because it was too long."
else:
cg_too_many_states = False
f = open(statusFilename,"a")
f.write("cg_success: "+str(cg_success)+"\n")
f.write("cg_reach_goal: "+str(cg_reach_goal)+"\n")
f.write("cg_too_many_states: "+str(cg_too_many_states)+"\n")
f.close()
if (not cg_success) or cg_too_many_states or (not cg_reach_goal):
import sys
sys.exit(1)
"""
# put back original bounds for wholebody methods
fullBody.resetJointsBounds()
......
......@@ -17,7 +17,7 @@ mu=0.5# coefficient of friction
# Creating an instance of the helper class, and loading the robot
rbprmBuilder = Robot ()
# Define bounds for the root : bounding box of the scenario
rootBounds = [0.2,3.9, 0.2, 2.2, 0.95, 1.05]
rootBounds = [0.1,4., 0.2, 2.2, 0.95, 1.05]
rbprmBuilder.setJointBounds ("root_joint", rootBounds)
# The following lines set constraint on the valid configurations:
......@@ -26,7 +26,7 @@ rbprmBuilder.setFilter([])#'talos_lleg_rom','talos_rleg_rom'])
rbprmBuilder.setAffordanceFilter('talos_lleg_rom', ['Support',])
rbprmBuilder.setAffordanceFilter('talos_rleg_rom', ['Support'])
# We also bound the rotations of the torso. (z, y, x)
rbprmBuilder.boundSO3([-1.7,1.7,-0.1,0.1,-0.1,0.1])
rbprmBuilder.boundSO3([-3.14,3.14,-0.1,0.1,-0.1,0.1])
# Add 6 extraDOF to the problem, used to store the linear velocity and acceleration of the root
rbprmBuilder.client.robot.setDimensionExtraConfigSpace(extraDof)
# We set the bounds of this extraDof with velocity and acceleration bounds (expect on z axis)
......@@ -52,7 +52,7 @@ vf = ViewerFactory (ps)
from hpp.corbaserver.affordance.affordance import AffordanceTool
afftool = AffordanceTool ()
afftool.setAffordanceConfig('Support', [0.5, 0.03, 0.00005])
afftool.loadObstacleModel ("hpp_environments", "multicontact/plateforme_not_flat", "planning", vf, reduceSizes=[0.15,0,0])
afftool.loadObstacleModel ("hpp_environments", "multicontact/plateforme_not_flat", "planning", vf, reduceSizes=[0.16,0,0])
try :
v = vf.createViewer(displayArrows = True)
except Exception:
......@@ -68,74 +68,53 @@ except Exception:
v.addLandmark(v.sceneName,1)
q_init = rbprmBuilder.getCurrentConfig ();
q_init[0:3] = [0.20,1.15,0.99]
#q_init[0:3] = [0.20,1.15,1.0]
q_init[3:7] = [0,0,0,1]
print "q_init",q_init[1]
# Q init => Set position and orientation
# X will be between [groundMinX, groundMaxX]
# Y will be between [groundMinY, groundMaxY]
lengthPath = 0.9
marginX = 1.7
marginY = 0.8
groundMinX = marginX
groundMaxX = 4.0 - marginX
groundMinY = marginY
groundMaxY = 2.4 - marginY
minAngleDegree = 80
maxAngleDegree = 100
positionIsRandomOnFlatGround = True
# INIT
radius = 0.01
# Generate random init and goal position.
# These positions will be on the flat part of the environment, with an orientation such that they can be connected by a straight line, at an angle within +/- 25 degrees of the x axis.
Y_BOUNDS=[0.3,2.1]
Z_VALUE = 0.98
MAX_ANGLE = 0.4363323129985824 # 25 degree
import random
random.seed()
alpha = 0.0
if random.uniform(0.,1.) > 0.5:
alpha = random.uniform(minAngleDegree,maxAngleDegree)
else:
alpha = random.uniform(minAngleDegree+180,maxAngleDegree+180)
print "Test on a circle, alpha deg = ",alpha
alpha = alpha*np.pi/180.0
move_Y = random.uniform(-0.2,0.2)
q_init_random= q_init[::]
q_init_random [0:3] = [radius*np.sin(alpha), -radius*np.cos(alpha), 1.]
# randomly select the initial and final platforms; they cannot be the same
# the end platform is always after the init platform on the x axis
init_plateform_id = random.randint(0,3)
end_plateform_id = random.randint(init_plateform_id+1,4)
#if end_plateform_id >= init_plateform_id:
# end_plateform_id+=1
# set the x position from the chosen platform:
x_init = 0.16 + 0.92*init_plateform_id
x_goal = 0.16 + 0.92*end_plateform_id
# uniformly sample y position
y_init = random.uniform(Y_BOUNDS[0],Y_BOUNDS[1])
q_init[0:3] = [x_init,y_init, Z_VALUE]
# y_goal must be random inside Y_BOUNDS, but such that the line between q_init and q_goal is within +/- MAX_ANGLE radians of the x axis
y_bound_goal = Y_BOUNDS[::]
y_angle_max = math.sin(MAX_ANGLE)*abs(x_init-x_goal) + y_init
y_angle_min = math.sin(-MAX_ANGLE)*abs(x_init-x_goal) + y_init
y_bound_goal[0] = max(y_angle_min,y_bound_goal[0])
y_bound_goal[1] = min(y_angle_max,y_bound_goal[1])
y_goal = random.uniform(y_bound_goal[0],y_bound_goal[1])
# compute the orientation such that q_init faces q_goal:
# set final orientation to be along the circle :
vx = np.matrix([1,0,0]).T
v_init = np.matrix([q_init_random[0],q_init_random[1],0]).T
v_init = np.matrix([x_goal-x_init,y_goal-y_init,0]).T
quat = Quaternion.FromTwoVectors(vx,v_init)
q_init_random[3:7] = quat.coeffs().T.tolist()[0]
# set initial velocity to fix the orientation
q_init_random[-6] = vInit*np.sin(alpha)
q_init_random[-5] = -vInit*np.cos(alpha)
if positionIsRandomOnFlatGround :
# Set robot on flat ground
q_init_random[0] = 2.0
q_init_random[1] = 1.2 + move_Y
else:
# Set robot at random position on platform
q_init_random[0] = random.uniform(groundMinX,groundMaxX)
q_init_random[1] = random.uniform(groundMinY,groundMaxY)
v(q_init_random)
# GOAL
# Q goal => Set straight position in square of size (4,2)
# Orientation is the vector between position init and goal
q_goal_random = q_init_random[::]
# Set robot goal at random position on platform
q_goal_random[0] = q_init_random[0] + np.sin(alpha)*lengthPath
q_goal_random[1] = q_init_random[1] - np.cos(alpha)*lengthPath
v(q_goal_random)
# Set new q_init and q_goal
q_init = q_init_random[::]
q_goal = q_goal_random[::]
print "initial root position/velocity : ",q_init
print "final root position/velocity : ",q_goal
q_init[3:7] = quat.coeffs().T.tolist()[0]
q_goal=q_init[::]
q_goal[0:2] = [x_goal,y_goal]
print "initial root position : ",q_init
print "final root position : ",q_goal
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment