Skip to content

Commit

Permalink
Merge pull request #16 from sunava/suturo
Browse files Browse the repository at this point in the history
[detect-actions] you now get a list of objects back you can decide if…
  • Loading branch information
sunava authored Dec 10, 2023
2 parents 684adbe + f05a069 commit ca34049
Show file tree
Hide file tree
Showing 4 changed files with 62 additions and 22 deletions.
9 changes: 9 additions & 0 deletions src/pycram/bullet_world.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,15 @@ def get_objects_by_type(self, obj_type: str) -> List[Object]:
"""
return list(filter(lambda obj: obj.type == obj_type, self.objects))

def get_all_objets_not_robot(self) -> List[Object]:
    """
    Return every Object in the world except the robot and the environment.

    NOTE(review): the method name contains a typo ("objets"); it is kept
    unchanged here for backward compatibility with existing callers.

    :return: A list of all Objects that are neither the robot nor the environment.
    """
    excluded = (ObjectType.ROBOT, ObjectType.ENVIRONMENT)
    return [obj for obj in self.objects if obj.type not in excluded]

def get_object_by_id(self, id: int) -> Object:
"""
Returns the single Object that has the unique id.
Expand Down
19 changes: 15 additions & 4 deletions src/pycram/designators/action_designator.py
Original file line number Diff line number Diff line change
Expand Up @@ -697,7 +697,10 @@ def ground(self) -> Action:

class DetectAction(ActionDesignatorDescription):
"""
Detects an object that fits the object description and returns an object designator describing the object.
Detects an object that fits the object description and returns a list of an object designator describing the object.
If you choose technique=all, it will return a list of all objects in the field of view.
returns a dict of perceived_objects. You can enter it by either perceived_object_dict["cereal"].name or
perceived_object_dict.values())[0].name..
"""

@dataclasses.dataclass
Expand All @@ -707,17 +710,24 @@ class Action(ActionDesignatorDescription.Action):
Object designator loosely describing the object, e.g. only type.
"""

technique: str
"""
How the object should be detected, e.g. 'color' or 'shape';
use 'all' to detect every object in the field of view.
"""

@with_tree
def perform(self) -> Any:
    """
    Perform the detection by resolving and executing a DetectingMotion.

    The chosen detection technique is forwarded to the motion designator so
    the process module knows whether to detect one object type or all
    objects in the field of view.

    :return: Whatever the resolved DetectingMotion returns (the perceived objects)
    """
    # Stale pre-diff call without `technique` removed — it returned early
    # and the configured detection technique was never applied.
    return DetectingMotion(object_type=self.object_designator.type,
                           technique=self.technique).resolve().perform()

def to_sql(self) -> Base:
    # Database serialization is not implemented for DetectAction yet.
    raise NotImplementedError()

def insert(self, session: sqlalchemy.orm.session.Session, *args, **kwargs) -> Base:
    # Database insertion is not implemented for DetectAction yet.
    raise NotImplementedError()

def __init__(self, object_designator_description: ObjectDesignatorDescription, resolver=None):
def __init__(self, object_designator_description: ObjectDesignatorDescription, technique='default', resolver=None):
    """
    Tries to detect an object in the field of view (FOV) of the robot.

    :param object_designator_description: Object designator loosely describing the object to detect
    :param technique: Detection technique, e.g. 'all' to detect every object in the FOV
    :param resolver: An alternative resolver producing a performable designator
    """
    super().__init__(resolver)
    self.object_designator_description: ObjectDesignatorDescription = object_designator_description
    self.technique: str = technique

def ground(self) -> Action:
    """
    Default resolver that returns a performable designator with the resolved
    object description and the chosen detection technique.

    :return: A performable designator
    """
    # Stale pre-diff return without `self.technique` removed — it made the
    # resolved Action ignore the configured detection technique.
    return self.Action(self.object_designator_description.resolve(), self.technique)


class OpenAction(ActionDesignatorDescription):
Expand Down
34 changes: 21 additions & 13 deletions src/pycram/designators/motion_designator.py
Original file line number Diff line number Diff line change
Expand Up @@ -314,20 +314,27 @@ class Motion(MotionDesignatorDescription.Motion):
Type of the object that should be detected
"""

technique: str
"""
Technique means how the object should be detected, e.g. 'color', 'shape', 'all', etc.
"""

def perform(self):
    """
    Execute the detecting motion via the active process module.

    Queries the perception process module and returns everything it
    perceived — a matching object for a specific technique, or all objects
    in the field of view for technique='all'.

    :return: The perceived objects as returned by the process module
    :raises PerceptionObjectNotFound: If nothing matching was perceived
    """
    pm_manager = ProcessModuleManager.get_manager()
    bullet_world_objects = pm_manager.detecting().execute(self)
    if not bullet_world_objects:
        raise PerceptionObjectNotFound(
            f"Could not find an object with the type {self.object_type} in the FOV of the robot")
    # Commented-out dead code (old single-object RealObject/ObjectDesignator
    # return path) removed; the motion now always returns the full result.
    return bullet_world_objects

def __init__(self, object_type: str, technique: str, resolver: Optional[Callable] = None):
"""
Checks for every object in the FOV of the robot if it fits the given object type. If the types match an object
designator describing the object will be returned.
Expand All @@ -337,6 +344,7 @@ def __init__(self, object_type: str, resolver: Optional[Callable] = None):
"""
super().__init__(resolver)
self.cmd: str = 'detecting'
self.technique: str = technique
self.object_type: str = object_type

def ground(self) -> Motion:
    """
    Default resolver for the detecting motion.

    :return: A resolved motion designator carrying the command, object type
             and detection technique
    """
    # Stale pre-diff return removed — it constructed the Motion without
    # `self.technique`, which no longer matches the Motion dataclass fields.
    return self.Motion(self.cmd, self.object_type, self.technique)


class MoveArmJointsMotion(MotionDesignatorDescription):
Expand Down
22 changes: 17 additions & 5 deletions src/pycram/process_modules/hsrb_process_modules.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,11 +149,23 @@ def _execute(self, desig: DetectingMotion.Motion):
cam_frame_name = robot_description.get_camera_frame()
# should be [0, 0, 1]
front_facing_axis = robot_description.front_facing_axis

objects = BulletWorld.current_bullet_world.get_objects_by_type(object_type)
if desig.technique == 'all':
rospy.loginfo("Detecting all generic objects")
objects = BulletWorld.current_bullet_world.get_all_objets_not_robot()
else:
rospy.loginfo("Detecting specific object type")
objects = BulletWorld.current_bullet_world.get_objects_by_type(object_type)
object_dict = {}

perceived_objects = []
for obj in objects:
if btr.visible(obj, robot.get_link_pose(cam_frame_name), front_facing_axis):
return obj
perceived_objects.append(ObjectDesignatorDescription.Object(obj.name, obj.type,
obj))
# Iterate over the list of objects and store each one in the dictionary
for i, obj in enumerate(perceived_objects):
object_dict[obj.name] = obj
return object_dict


class HSRBMoveTCP(ProcessModule):
Expand Down Expand Up @@ -294,8 +306,7 @@ def _execute(self, desig: LookingMotion.Motion):
pose_in_tilt = local_transformer.transform_pose(target, robot.get_link_tf_frame("head_tilt_link"))

new_pan = np.arctan2(pose_in_pan.position.y, pose_in_pan.position.x)
new_tilt = np.arctan2(pose_in_tilt.position.z, pose_in_tilt.position.x + pose_in_tilt.position.y )

new_tilt = np.arctan2(pose_in_tilt.position.z, pose_in_tilt.position.x + pose_in_tilt.position.y)

current_pan = robot.get_joint_state("head_pan_joint")
current_tilt = robot.get_joint_state("head_tilt_joint")
Expand All @@ -306,6 +317,7 @@ def _execute(self, desig: LookingMotion.Motion):
giskard.achieve_joint_goal({"head_pan_joint": new_pan + current_pan,
"head_tilt_joint": new_tilt + current_tilt})


class HSRBDetectingReal(ProcessModule):
"""
Process Module for the real HSRB that tries to detect an object fitting the given object description. Uses Robokudo
Expand Down

0 comments on commit ca34049

Please sign in to comment.