Commit

Merge pull request #8 from pge23-24/main
add Launch files for vision
Arthuino authored Feb 8, 2024
2 parents 12c33a7 + 81a9449 commit ce3438e
Showing 11 changed files with 125 additions and 182 deletions.
57 changes: 28 additions & 29 deletions vision_ws/README.md
@@ -1,56 +1,55 @@
# VISION_WS

This documentation does not cover Docker.

## Build
## Usage

First build:
### Build

```bash
colcon build --packages-select py_pubsub_msgs --cmake-args -DPYTHON_EXECUTABLE=/usr/bin/python3
source scripts/build_vision_ws.sh
```

Afterwards, this one is enough:
### Launch

#### YOLO processing on all cameras

```bash
colcon build --packages-select py_pubsub_msgs
bash scripts/run_all_camera_yolo.sh
# ou
ros2 launch py_pubsub launch_all_camera.py
```

## Usage
#### Display all cameras

Activate the venv:
```bash
source .venv/bin/activate
bash scripts/run_all_display.sh
# ou
ros2 launch py_pubsub launch_all_camera.py
```

Deactivate the venv:
```bash
deactivate
```
#### YOLO processing on a single camera

Run the camera processing script (camera number between 1 and 4):
```bash
bash run_camera_yolo.sh <num_cam>
bash scripts/run_camera_yolo.sh <id>
ros2 run py_pubsub camera --ros-args -p image_topic_id:=<id>
```

Run the camera display script (camera number between 1 and 4):
#### Display a single camera

```bash
bash run_display.sh <num_cam>
bash scripts/run_display.sh <id>
ros2 run py_pubsub display --ros-args -p image_topic_id:=<id>
```

## Communication

The messages are (see the subscriber sketch after this file's diff):
* angles and classes + header: py_pubsub_msgs.msg ClassCoordinates
* the image in the standard ROS format: sensor_msgs.msg Image
## Prototype

# Load the ROS environment
source /opt/ros/foxy/setup.bash
Activate the venv:

# Build the specified package
```bash
colcon build --packages-select py_pubsub_msgs --cmake-args -DPYTHON_EXECUTABLE=/usr/bin/python3
colcon build --packages-select py_pubsub --cmake-args -DPYTHON_EXECUTABLE=/usr/bin/python3
source .venv/bin/activate
```

source install/setup.bash
Deactivate the venv:

```bash
deactivate
```
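Not part of this commit: the Communication section above lists the two message flows, and the sketch below shows what a minimal consumer of the annotated image topic could look like. The topic name `annotated_images_<id>`, the `image_topic_id` parameter, and the `rgb8` encoding are taken from the diffs in this commit; the node and class names here are illustrative only.

```python
# Illustrative sketch: subscribe to the annotated image stream published by
# camera_yolo on "annotated_images_<id>" and show it with OpenCV.
import cv2
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge


class AnnotatedImageViewer(Node):
    def __init__(self):
        super().__init__("annotated_image_viewer")  # hypothetical node name
        self.declare_parameter("image_topic_id", 1)
        topic_id = self.get_parameter("image_topic_id").get_parameter_value().integer_value
        self._bridge = CvBridge()
        self.create_subscription(
            Image, f"annotated_images_{topic_id}", self.on_image, 10
        )

    def on_image(self, msg):
        # Convert the ROS Image back to an OpenCV array and display it.
        frame = self._bridge.imgmsg_to_cv2(msg, "rgb8")
        cv2.imshow("annotated", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        cv2.waitKey(1)


def main():
    rclpy.init()
    node = AnnotatedImageViewer()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
```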
3 changes: 3 additions & 0 deletions vision_ws/scripts/run_all_camera_yolo.sh
@@ -0,0 +1,3 @@
source install/setup.bash

ros2 launch py_pubsub launch_all_camera.py
3 changes: 3 additions & 0 deletions vision_ws/scripts/run_all_display.sh
@@ -0,0 +1,3 @@
source install/setup.bash

ros2 launch py_pubsub launch_all_display.py
8 changes: 1 addition & 7 deletions vision_ws/scripts/run_camera_yolo.sh
@@ -7,11 +7,5 @@ source install/setup.bash
cam_id=${1:-1}
echo "Cam : $cam_id"

yolo_version=${2:-v8}
echo "Yolo : $yolo_version"

tracker_enabled=${3:-False}
echo "Tracker : $tracker_enabled"

# Run camera_yolo with the camera identifier
ros2 run py_pubsub camera_yolo --cam $cam_id --yolo $yolo_version --tracker $tracker_enabled
ros2 run py_pubsub camera_yolo --ros-args -p cam_id:=$cam_id
5 changes: 2 additions & 3 deletions vision_ws/scripts/run_display.sh
@@ -4,8 +4,7 @@
source install/setup.bash

# Run the ROS node with the camera argument passed to the script
cam_id=${1:-1}
topic_name="annotated_images_$cam_id"
topic_id=${1:-1}

# Run display with the camera identifier
ros2 run py_pubsub display --topic "$topic_name"
ros2 run py_pubsub display --ros-args -p image_topic_id:=$topic_id
22 changes: 22 additions & 0 deletions vision_ws/src/py_pubsub/launch/launch_all_camera.py
@@ -0,0 +1,22 @@
import launch
from launch import LaunchDescription
from launch_ros.actions import Node

def generate_launch_description():
ld = LaunchDescription()

for cam_id in range(1, 5):
node = Node(
package='py_pubsub',
executable='camera_yolo',
name=f'camera_yolo_{cam_id}',
parameters=[
{"cam_id": cam_id}
]
)
ld.add_action(node)

return ld

if __name__ == '__main__':
generate_launch_description()
22 changes: 22 additions & 0 deletions vision_ws/src/py_pubsub/launch/launch_all_display.py
@@ -0,0 +1,22 @@
import launch
from launch import LaunchDescription
from launch_ros.actions import Node

def generate_launch_description():
ld = LaunchDescription()

for cam_id in range(1, 5):
node = Node(
package='py_pubsub',
executable='display',
name=f'display_{cam_id}',
parameters=[
{"image_topic_id": cam_id}
]
)
ld.add_action(node)

return ld

if __name__ == '__main__':
generate_launch_description()
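Not part of this commit: the two launch files above each spawn four nodes (one per camera). If both pipelines should come up together, a combined launch file could include them, as sketched below. This assumes the launch files are installed under the package's `share/py_pubsub/launch` directory; the combined file name and install path are assumptions, not something shown in this diff.

```python
# Illustrative only: start launch_all_camera.py and launch_all_display.py
# from a single launch description.
import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource


def generate_launch_description():
    # Assumed install location of the launch files from this commit.
    launch_dir = os.path.join(get_package_share_directory('py_pubsub'), 'launch')
    return LaunchDescription([
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(os.path.join(launch_dir, 'launch_all_camera.py'))
        ),
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(os.path.join(launch_dir, 'launch_all_display.py'))
        ),
    ])
```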
2 changes: 2 additions & 0 deletions vision_ws/src/py_pubsub/package.xml
@@ -16,6 +16,8 @@
<test_depend>ament_pep257</test_depend>
<test_depend>python3-pytest</test_depend>

<exec_depend>ros2launch</exec_depend>

<export>
<build_type>ament_python</build_type>
</export>
152 changes: 27 additions & 125 deletions vision_ws/src/py_pubsub/py_pubsub/camera_yolo.py
@@ -3,12 +3,11 @@
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
import argparse

from ultralytics import YOLO # YOLOv8 import
from ultralytics.utils import LOGGER # LOGGER import
import torch
import time
from deep_sort_realtime.deepsort_tracker import DeepSort

from py_pubsub_msgs.msg import CameraDetectionStamped
from py_pubsub_msgs.msg import CameraDetectionStampedArray
@@ -22,7 +21,6 @@

class YOLOv8Detector:
type = "YOLOv8"

def __init__(self):
try:
self.model = YOLO(f"{models_path}yolov8s.pt")
@@ -35,80 +33,36 @@ def compute(self, image):
return results


class YOLOv5Detector:
type = "YOLOv5"

def __init__(self, model_name=f"{models_path}/yolov5l.pt"):
# Load YOLOv5 model
try:
self.model = torch.hub.load(
"ultralytics/yolov5",
"custom",
path=f"{models_path}yolv5l_custom.pt",
)
except Exception as e:
LOGGER.error(
f"Error loading custom YOLOv5 model. {e} \nLoading default model."
)
try:
self.model = YOLO(model_name)
except Exception as e:
LOGGER.error(f"Error loading default YOLOv5 model. {e}")
self.model = None

def compute(self, image):
"""Process a single image"""
if image is not None:
results = self.model(image)
return results


class MinimalPublisher(Node):
def __init__(self, camera_id, yolo_version, tracker_enabled):
def __init__(self):
super().__init__("minimal_publisher")
self.camera_id = camera_id
self.declare_parameter('cam_id', 1)

self.camera_id = self.get_parameter('cam_id').get_parameter_value().integer_value

self.tracker_enabled = tracker_enabled

self.topic_name_image = f"annotated_images_{camera_id}"
# Publishers
self.topic_name_image = f"annotated_images_{self.camera_id}"
self.publisher_annotated_image = self.create_publisher(
Image, self.topic_name_image, 10
)

self.topic_name_information = f"camera_detection_{camera_id}"
self.topic_name_information = f"camera_detection_{self.camera_id}"
self.publisher_information = self.create_publisher(
CameraDetectionStampedArray, self.topic_name_information, 10
)

# Subscribers
self.subscription = self.create_subscription(
Image, f"Cam{camera_id}/image_raw", self.listener_callback, 10
Image, f"Cam{self.camera_id}/image_raw", self.listener_callback, 10
)
self._cv_bridge = CvBridge()
if yolo_version == "v8":
self.detector = YOLOv8Detector()
elif yolo_version == "v5":
self.detector = YOLOv5Detector()

if tracker_enabled:
self.tracker = DeepSort(
max_age=5,
n_init=2,
nms_max_overlap=1.0,
max_cosine_distance=0.3,
nn_budget=None,
override_track_class=None,
embedder="mobilenet",
half=True,
bgr=True,
embedder_gpu=True,
embedder_model_name=None,
embedder_wts=None,
polygon=False,
today=None,
)

self.detector = YOLOv8Detector()

self.get_logger().info(
f"Camera {camera_id} YOLO{yolo_version} Tracker {tracker_enabled} Node has started"
f"Camera { self.camera_id} Node has started"
)


def toData(self, result, classes):
bounding_box_height = result[3] - result[1]
@@ -158,49 +112,15 @@ def listener_callback(self, image):
results = self.detector.compute(cv_image)
if results:
# Annotated image part
# check type of results
# YOLOv8-v5
if isinstance(results, list):
annotated_image = results[0].plot()
new_results = results[0].boxes.data.cpu().tolist()
class_names = results[0].names

# yoloV5
else:
annotated_image = results.render()[0]
# For information datas
new_results = results.xyxy[0].cpu().numpy().tolist()
class_names = results.names

if self.tracker_enabled:
# For tracker
res = results.xyxyn[0][:, -1], results.xyxyn[0][:, :-1]
detections = self.formalize_detection(
res, height=cv_image.shape[0], width=cv_image.shape[1]
)
tracks = self.tracker.update_tracks(detections, frame=cv_image)
for track in tracks:
if not track.is_confirmed():
continue
track_id = track.track_id
ltrb = track.to_ltrb()
bbox = ltrb
cv2.putText(
cv_image,
"ID : " + str(track_id),
(int(bbox[0]) + 10, int(bbox[1] + 30)),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 0),
thickness=2,
)
encoded_annotated_image = self._cv_bridge.cv2_to_imgmsg(
cv_image, "rgb8"
)
else:
encoded_annotated_image = self._cv_bridge.cv2_to_imgmsg(
annotated_image, "rgb8"
)
# YOLOv8

annotated_image = results[0].plot()
new_results = results[0].boxes.data.cpu().tolist()
class_names = results[0].names

encoded_annotated_image = self._cv_bridge.cv2_to_imgmsg(
annotated_image, "rgb8"
)

self.publisher_annotated_image.publish(encoded_annotated_image)

@@ -236,29 +156,11 @@ def listener_callback(self, image):


def main(args=None):
# Initialize ROS without passing args
rclpy.init()

# Create an argument parser for your script
parser = argparse.ArgumentParser(description="ROS 2 YOLO Object Detection Node")

# Add your custom argument
parser.add_argument("--cam", type=str, default="1", help="Camera identifier")
parser.add_argument("--yolo", type=str, default="v8", help="Yolo version")
parser.add_argument(
"--tracker", type=str, default="false", help="Enable tracker (false | true)"
)

# Parse the command line arguments
custom_args = parser.parse_args()

if custom_args.tracker == "true" or custom_args.tracker == "True":
tracker = True
else:
tracker = False
# Initialize ROS with args
rclpy.init(args=args)

# Create and spin your node
minimal_publisher = MinimalPublisher(custom_args.cam, custom_args.yolo, tracker)
minimal_publisher = MinimalPublisher()
rclpy.spin(minimal_publisher)

# Shutdown and cleanup
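Not part of this commit: since the node now reads `cam_id` as a ROS parameter and subscribes to `Cam<cam_id>/image_raw` (see the subscription above), one could exercise it without real cameras by publishing synthetic frames on that topic. A minimal sketch; the node name, frame size, and `rgb8` encoding of the synthetic frames are assumptions.

```python
# Illustrative only: publish synthetic rgb8 frames on Cam1/image_raw so that
# camera_yolo (started with --ros-args -p cam_id:=1) has input to process.
import numpy as np
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge


class FakeCamera(Node):
    def __init__(self):
        super().__init__("fake_camera")  # hypothetical node name
        self.declare_parameter("cam_id", 1)
        cam_id = self.get_parameter("cam_id").get_parameter_value().integer_value
        self._pub = self.create_publisher(Image, f"Cam{cam_id}/image_raw", 10)
        self._bridge = CvBridge()
        self.create_timer(0.1, self.tick)  # ~10 frames per second

    def tick(self):
        # Random noise frame; swap in cv2.VideoCapture frames for a real test.
        frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
        self._pub.publish(self._bridge.cv2_to_imgmsg(frame, "rgb8"))


def main():
    rclpy.init()
    node = FakeCamera()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
```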
