diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..ee0aed5
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,13 @@
+name: run unittests
+on: ["push"]
+jobs:
+ test:
+ runs-on: "ubuntu-latest"
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v4
+ with:
+ python-version: 3.11
+ - name: Install dependencies
+ run: pip install -r prototyping/setup/requirements.txt
+ - run: python -m unittest discover -v -s prototyping/test
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0fffdc0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,71 @@
+# Prerequisites
+*.d
+
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+
+# Fortran module files
+*.mod
+*.smod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+
+# Virtual environment
+.venv
+
+# Models
+*.ckpt
+*.pth
+*.pt
+
+# Pycache
+*.pyc
+
+# Log folder
+*log*
+
+# COCO dataset
+COCO_2017/
+COCO2017/
+
+# Surround dataset
+*/DATA*
+
+# ZIP folders
+*.zip
+
+# USE CASES
+*USE_CASE*
+
+# Vscode cache
+*.vscode
+
+# Irrelevant assets
+localization
+bird_view
+
+# Mac folders
+.DS_Store
+*/.DS_Store
+*DS_Store
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cf00710..089a75c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,3 +8,54 @@ repos:
language: python
files: \.(h\+\+|h|hh|hxx|hpp|cuh|c|cc|cpp|cu|c\+\+|cxx|tpp|txx)$
entry: cpplint
+
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ args:
+ - --filter-files
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.1.1
+ hooks:
+ - id: ruff
+ args:
+ - --fix
+ - --exit-non-zero-on-fix
+
+ - repo: https://github.com/cheshirekow/cmake-format-precommit
+ rev: v0.6.13
+ hooks:
+ - id: cmake-format
+
+ - repo: https://github.com/pre-commit/mirrors-clang-format
+ rev: v17.0.3
+ hooks:
+ - id: clang-format
+ args:
+ - --style=Google
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-ast
+ - id: check-executables-have-shebangs
+ - id: check-json
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-toml
+ - id: check-yaml
+ - id: debug-statements
+ - id: destroyed-symlinks
+ - id: detect-private-key
+ - id: end-of-file-fixer
+ - id: fix-byte-order-marker
+ - id: mixed-line-ending
+ - id: trailing-whitespace
+
+ - repo: https://github.com/psf/black
+ rev: 23.10.0
+ hooks:
+ - id: black
\ No newline at end of file
diff --git a/README.md b/README.md
index dc6f64e..e14ce13 100644
--- a/README.md
+++ b/README.md
@@ -74,3 +74,21 @@ Cpplint est executé automatiquement avant chaque commit. Si des erreurs sont d
- **Google Test** : framework de test unitaire pour c++
Les tests unitaires seront tous executé avant de merge une pull request sur le repo principal. Le taux de couverture de ces tests sera vérifié.
La compilation sera également vérifiée à ce moment là.
+
+## Python prototyping
+
+### VIRTUAL ENVIRONMENT INSTALLATION
+
+To install the virtual environment required by the project, run the following command from the `prototyping` folder of the project directory:
+
+```bash
+bash ./setup/venv_install.sh
+```
+
+To uninstall the venv, the procedure is identical, but the script to run is `venv_uninstall.sh`, again from the `prototyping` folder.
+
+```bash
+bash ./setup/venv_uninstall.sh
+```
+
+The current purpose of this venv is not to run the ROS workspace, but to provide a common environment for the prototyping work.
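+
+To run the prototype test suite inside this venv, here is a minimal sketch (assuming the venv was created with the script above and the commands are run from the repository root, mirroring the CI workflow):
+
+```bash
+source prototyping/.venv/bin/activate
+python -m unittest discover -v -s prototyping/test
+deactivate
+```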
diff --git a/prototyping/setup/requirements.txt b/prototyping/setup/requirements.txt
new file mode 100644
index 0000000..ae52b03
--- /dev/null
+++ b/prototyping/setup/requirements.txt
@@ -0,0 +1,8 @@
+ultralytics
+opencv-python
+pillow
+pywavefront
+transformers
+accelerate
+numpy
+natsort
\ No newline at end of file
diff --git a/prototyping/setup/venv_install.sh b/prototyping/setup/venv_install.sh
new file mode 100644
index 0000000..9cbae0a
--- /dev/null
+++ b/prototyping/setup/venv_install.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Define the name of the virtual environment directory
+VENV_DIR=".venv"
+
+# Check if the virtual environment directory exists
+if [ -d "$VENV_DIR" ]; then
+ echo "Virtual environment already exists"
+else
+ # Create the virtual environment
+ python3 -m venv $VENV_DIR
+ echo "Virtual environment created"
+fi
+
+# Activate the virtual environment
+source $VENV_DIR/bin/activate
+
+# Upgrade pip
+pip install --upgrade pip
+
+# Check if requirements.txt exists
+if [ -f "setup/requirements.txt" ]; then
+ # Install requirements
+ pip install -r setup/requirements.txt
+ echo "Requirements installed"
+else
+ echo "requirements.txt not found"
+fi
+
+# Deactivate the virtual environment
+deactivate
diff --git a/prototyping/setup/venv_uninstall.sh b/prototyping/setup/venv_uninstall.sh
new file mode 100644
index 0000000..70dfd4f
--- /dev/null
+++ b/prototyping/setup/venv_uninstall.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+
+# Define the name of the virtual environment directory
+VENV_DIR=".venv"
+
+# Function to ask for confirmation
+confirm_deletion() {
+ read -p "Are you sure you want to delete the virtual environment? (y/n): " -n 1 -r
+ echo # Move to a new line
+ if [[ $REPLY =~ ^[Yy]$ ]]; then
+ # Confirmation received, proceed with deletion
+ return 0
+ else
+ # Confirmation not received, do not delete
+ return 1
+ fi
+}
+
+# Check if the virtual environment directory exists
+if [ -d "$VENV_DIR" ]; then
+ echo "Virtual environment directory detected."
+
+ # Ask for confirmation
+ if confirm_deletion; then
+ # Remove the virtual environment directory
+ rm -rf $VENV_DIR
+ echo "Virtual environment deleted."
+ else
+ echo "Deletion cancelled."
+ fi
+else
+ echo "Virtual environment does not exist."
+fi
\ No newline at end of file
diff --git a/prototyping/src/__init__.py b/prototyping/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/prototyping/src/bird_view.py b/prototyping/src/bird_view.py
new file mode 100644
index 0000000..919f668
--- /dev/null
+++ b/prototyping/src/bird_view.py
@@ -0,0 +1,130 @@
+import cv2
+import numpy as np
+
+
+def read_images(image_paths):
+ """Read images from the given paths."""
+ return [cv2.imread(path) for path in image_paths]
+
+
+def find_keypoints_and_descriptors(images):
+ """Find keypoints and descriptors in all images."""
+ orb = cv2.ORB_create()
+ keypoints, descriptors = [], []
+ for image in images:
+ kp, desc = orb.detectAndCompute(image, None)
+ keypoints.append(kp)
+ descriptors.append(desc)
+ return keypoints, descriptors
+
+
+def match_keypoints(descriptors):
+ """Match keypoints between all image pairs."""
+ bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+ matches = []
+ for i in range(len(descriptors) - 1):
+ matches.append(bf.match(descriptors[i], descriptors[i + 1]))
+ return matches
+
+
+def stitch_images(images, keypoints, matches):
+ """Stitch images together using the matches."""
+ # Start with the first image
+ result = images[0]
+ result_kp = keypoints[0]
+
+ for i in range(1, len(images)):
+ # Retrieve keypoints, descriptors, and matches
+ kp1, kp2 = result_kp, keypoints[i]
+ good_matches = matches[i - 1]
+
+ # Extract location of good matches
+ points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
+ points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
+
+ for j, match in enumerate(good_matches):
+ points1[j, :] = kp1[match.queryIdx].pt
+ points2[j, :] = kp2[match.trainIdx].pt
+
+ # Find homography
+ H, _ = cv2.findHomography(points1, points2, cv2.RANSAC)
+
+ # Warp image
+ height, width = images[0].shape[:2]
+ result = cv2.warpPerspective(result, H, (width, height))
+
+ # Stitch the image with the result
+ result = cv2.addWeighted(result, 0.5, images[i], 0.5, 0)
+
+ # Update keypoints for the next iteration
+ result_kp = kp2
+
+ return result
+
+
+def transform_to_birds_eye_view(image, src_points, dest_size=(300, 300)):
+ """Transform the stitched image to a bird's eye view.
+
+ Args:
+ - image: The input stitched image.
+ - src_points: Four source points in the image defining the area to transform.
+ - dest_size: Size of the output bird's eye view image.
+
+ Returns:
+ - Bird's eye view of the selected portion of the input image.
+ """
+ # Destination points are the corners of a rectangle with the given size
+ dst_points = np.array([
+ [0, 0],
+ [dest_size[0] - 1, 0],
+ [dest_size[0] - 1, dest_size[1] - 1],
+ [0, dest_size[1] - 1]
+ ], dtype=np.float32)
+
+ # Compute the perspective transform matrix and apply it
+ M = cv2.getPerspectiveTransform(src_points, dst_points)
+ birds_eye_view = cv2.warpPerspective(image, M, dest_size)
+
+ return birds_eye_view
+
+
+def visualize_src_points(image, src_points):
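+    """Draw the source points as red circles on the image and display it until a key is pressed."""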
+ for point in src_points:
+ int_point = (int(point[0]), int(point[1])) # Convert to integer
+ cv2.circle(image, int_point, 5, (0, 0, 255), -1) # Red circles
+ cv2.imshow("Source Points on Panorama", image)
+ cv2.waitKey(0)
+ cv2.destroyAllWindows()
+
+
+def visualize_matches(img1, kp1, img2, kp2, matches):
+ """Visualize matches between two images."""
+ match_img = cv2.drawMatches(
+ img1, kp1, img2, kp2, matches[:50], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS
+ )
+ cv2.imshow('Matches', match_img)
+ cv2.waitKey(0)
+ cv2.destroyAllWindows()
+
+
+# Example usage
+image_paths = ['assets/bird_view/front.png', 'assets/bird_view/left.png',
+ 'assets/bird_view/rear.png', 'assets/bird_view/right.png']
+images = read_images(image_paths)
+keypoints, descriptors = find_keypoints_and_descriptors(images)
+matches = match_keypoints(descriptors)
+panorama = stitch_images(images, keypoints, matches)
+
+
+# Define source points (these should be adjusted based on your specific image)
+src_points = np.float32([[100, 100], [400, 100], [400, 400], [100, 400]])
+
+# Visualize src_points on the panorama
+visualize_src_points(panorama.copy(), src_points)
+
+# Generate the bird's eye view
+birds_eye_view = transform_to_birds_eye_view(panorama, src_points)
+
+cv2.imshow("Bird's Eye View", birds_eye_view)
+cv2.waitKey(0)
+cv2.destroyAllWindows()
diff --git a/prototyping/src/communication/__init__.py b/prototyping/src/communication/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/prototyping/src/communication/network/__init__.py b/prototyping/src/communication/network/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/prototyping/src/communication/network/receiver.py b/prototyping/src/communication/network/receiver.py
new file mode 100644
index 0000000..4ba0eb2
--- /dev/null
+++ b/prototyping/src/communication/network/receiver.py
@@ -0,0 +1,23 @@
+import socket
+
+
+class Receiver:
+ def __init__(self, host="127.0.0.1", port=65432):
+ self.host = host
+ self.port = port
+ self.last_received_data = None # New attribute to store the last received data
+
+ def start_server(self):
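+        """Listen for a single connection and store each decoded message in last_received_data."""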
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ s.bind((self.host, self.port))
+ s.listen()
+ print(f"Server started. Listening on {self.host}:{self.port}")
+ conn, addr = s.accept()
+ with conn:
+ print(f"Connected by {addr}")
+ while True:
+ data = conn.recv(1024)
+ if not data:
+ break
+ self.last_received_data = data.decode() # Store the received data
+ print(f"Received data: {self.last_received_data}")
\ No newline at end of file
diff --git a/prototyping/src/communication/network/sender.py b/prototyping/src/communication/network/sender.py
new file mode 100644
index 0000000..b25cdd3
--- /dev/null
+++ b/prototyping/src/communication/network/sender.py
@@ -0,0 +1,12 @@
+import socket
+
+
+class Sender:
+ def __init__(self, host="127.0.0.1", port=65432):
+ self.host = host
+ self.port = port
+
+ def send_data(self, data):
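+        """Open a TCP connection to the receiver and send the given string encoded as bytes."""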
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ s.connect((self.host, self.port))
+ s.sendall(data.encode())
\ No newline at end of file
diff --git a/prototyping/src/communication/shared_memory/__init__.py b/prototyping/src/communication/shared_memory/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/prototyping/src/communication/shared_memory/receiver.py b/prototyping/src/communication/shared_memory/receiver.py
new file mode 100644
index 0000000..5d237ed
--- /dev/null
+++ b/prototyping/src/communication/shared_memory/receiver.py
@@ -0,0 +1,16 @@
+import numpy as np
+from multiprocessing.shared_memory import SharedMemory
+from multiprocessing import Process
+import matplotlib.pyplot as plt
+
+
+class Receiver:
+ def __init__(self):
+ self.data = None
+ self.sm = SharedMemory("MyMemory")
+ self.last_received_data = None # New attribute to store the last received data
+
+ def receive(self, size):
+ self.data = [int(self.sm.buf[i]) for i in range(size)]
+        # Reshape the raw bytes back into an image and keep it on the instance
+        self.last_received_data = np.array(self.data).reshape(968, 1226, 3)
+        self.sm.close()
+        return self.last_received_data
diff --git a/prototyping/src/communication/shared_memory/sender.py b/prototyping/src/communication/shared_memory/sender.py
new file mode 100644
index 0000000..c04ca5a
--- /dev/null
+++ b/prototyping/src/communication/shared_memory/sender.py
@@ -0,0 +1,45 @@
+import numpy as np
+from PIL import Image
+import time
+from multiprocessing.shared_memory import SharedMemory
+from multiprocessing import Process
+
+
+class Sender:
+ def __init__(self, image):
+ self.image = image
+ self.shared_mem = None
+ self.img_array = None
+ self.size = None
+
+ def prepare_image(self):
+ self.img_array = np.array(self.image).flatten()
+ self.img_array = self.img_array.reshape(-1, 1, 1)
+ self.size = len(self.img_array)
+
+ def create_shared_memory(self):
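+        # The block name must match the one the shared-memory Receiver opens ("MyMemory").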
+ self.shared_mem = SharedMemory(name="MyMemory", create=True, size=self.size)
+
+ def send_data(self):
+ self.shared_mem.buf[: self.size] = bytearray(self.img_array)
+ self.shared_mem.close()
+
+ def get_shared_memory(self):
+ return self.shared_mem
+
+ def get_size(self):
+ return self.size
+
+
+if __name__ == "__main__":
+    # Sender requires an image; a blank 1226x968 RGB frame is used here as a placeholder.
+    sender = Sender(Image.new("RGB", (1226, 968)))
+ sender.prepare_image()
+ sender.create_shared_memory()
+
+ process = Process(target=sender.send_data)
+ process.start()
+ process.join()
+
+ shared_mem = sender.get_shared_memory()
+ shared_mem.close()
+ shared_mem.unlink()
diff --git a/prototyping/src/reconstruction_video.py b/prototyping/src/reconstruction_video.py
new file mode 100644
index 0000000..af61782
--- /dev/null
+++ b/prototyping/src/reconstruction_video.py
@@ -0,0 +1,71 @@
+import os
+import cv2
+from natsort import natsorted
+import logging
+
+PATH_USE_CASES = "./assets/USE_CASES"
+PATH_VIDEO_USE_CASES = "./assets/VIDEO_USE_CASES"
+
+# Initialize logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class VideoReconstruction:
+ def __init__(
+ self,
+ frame_width=1226,
+ frame_height=968,
+ ):
+ self.frame_width = frame_width
+ self.frame_height = frame_height
+
+ def video_detection(
+ self,
+ path_folder_image=PATH_USE_CASES,
+ path_save=PATH_VIDEO_USE_CASES,
+ ):
+ path_sorted = natsorted(os.listdir(path_folder_image))
+
+ out = cv2.VideoWriter(
+ path_save,
+ cv2.VideoWriter_fourcc(*"mp4v"),
+ 24,
+ (self.frame_width, self.frame_height),
+ )
+ for file in path_sorted:
+ path_file = path_folder_image + "/" + file
+ img = cv2.imread(path_file)
+
+ if img is not None and img.shape[:2] == (
+ self.frame_height,
+ self.frame_width,
+ ):
+ out.write(img)
+ else:
+ logger.info(f"Error: Invalid image size or format - {path_file}")
+ out.release()
+
+ def use_case_reconstruction(self, nb_use_case: int):
+ for i in range(1, nb_use_case + 1):
+ prefix = PATH_USE_CASES + "/USE_CASE_" + str(i)
+ save_prefix = PATH_VIDEO_USE_CASES + "/VIDEO_USE_CASE_" + str(i)
+ self.video_detection(
+ prefix + "/CAMERA_1", save_prefix + "/CAMERA_1/vid.mp4"
+ )
+ self.video_detection(
+ prefix + "/CAMERA_2", save_prefix + "/CAMERA_2/vid.mp4"
+ )
+ self.video_detection(
+ prefix + "/CAMERA_3", save_prefix + "/CAMERA_3/vid.mp4"
+ )
+ self.video_detection(
+ prefix + "/CAMERA_4", save_prefix + "/CAMERA_4/vid.mp4"
+ )
+            print(f"The video for UseCase {i} is saved under: {save_prefix}")
+
+
+if __name__ == "__main__":
+ use_case = VideoReconstruction()
+ nb_use_case = 1
+ use_case.use_case_reconstruction(nb_use_case)
diff --git a/prototyping/src/reconstruction_video_V2.py b/prototyping/src/reconstruction_video_V2.py
new file mode 100644
index 0000000..a110fb6
--- /dev/null
+++ b/prototyping/src/reconstruction_video_V2.py
@@ -0,0 +1,112 @@
+import os
+import cv2
+from natsort import natsorted
+from ultralytics import YOLO
+
+
+class Yolo:
+ def __init__(self, model, confidence=0.60):
+ # Initialize a YOLO model with the specified weights file
+ self.model = YOLO(model)
+ self.confidence_threshold = confidence
+
+ def compute(self, image):
+ # This function processes an image using the YOLO model
+
+ # Check if the image input is valid
+ if image is not None:
+ # Run YOLOv8 pose estimation on the provided image
+ # 'persist=True' enables persistent tracking across frames
+ # 'tracker="bytetrack.yaml"' specifies the tracking configuration to use
+ results = self.model.track(
+ image,
+ persist=True,
+ tracker="bytetrack.yaml",
+ conf=self.confidence_threshold,
+ verbose=False,
+ )
+
+ # Return the filtered results
+ return results
+
+
+class Reconstruction:
+ def video_detection(
+ self, path_folder_image, path_save, frame_width=1226, frame_height=968
+ ):
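+        """Rebuild a video at path_save from the frames found in path_folder_image."""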
+ path_sorted = natsorted(os.listdir(path_folder_image))
+ out = cv2.VideoWriter(
+ path_save, cv2.VideoWriter_fourcc(*"mp4v"), 24, (frame_width, frame_height)
+ )
+ for file in path_sorted:
+            path_file = path_folder_image + "/" + file
+ img = cv2.imread(path_file)
+ if img is not None and img.shape[:2] == (frame_height, frame_width):
+ out.write(img)
+ else:
+ print(f"Error: Invalid image size or format - {path_file}")
+ out.release()
+
+ def video_detection_yolov8(
+ self, nb_use_case, frame_width=1226, frame_height=968, threshold=0.60
+ ):
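+        """Run each YOLOv8 variant on every use case and camera, saving the annotated videos."""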
+        models = ["yolov8n", "yolov8s", "yolov8m", "yolov8l", "yolov8x"]
+
+ for m in models:
+ yolov8 = Yolo(m + ".pt", threshold)
+ for i in range(1, nb_use_case + 1):
+ for c in range(1, 5):
+ path_folder_image = (
+ "./assets/USE_CASES/USE_CASE_" + str(i) + "/CAMERA_" + str(c)
+ )
+ path_save = (
+ "./assets/VIDEOS/VIDEO_"
+ + m
+ + "_USE_CASES/VIDEO_"
+ + m
+ + "_USE_CASE_"
+ + str(i)
+ + "/CAMERA_"
+ + str(c)
+ )
+ if not os.path.exists(path_save):
+ os.makedirs(path_save)
+ path_save = path_save + "/vid.mp4"
+ path_sorted = natsorted(os.listdir(path_folder_image))
+ out = cv2.VideoWriter(
+ path_save,
+ cv2.VideoWriter_fourcc(*"mp4v"),
+ 24,
+ (frame_width, frame_height),
+ )
+ for file in path_sorted:
+ path_file = path_folder_image + "/" + file
+ img = cv2.imread(path_file)
+ results = yolov8.compute(image=img)
+ if results:
+ annotated_image = results[0].plot()
+ if img is not None and img.shape[:2] == (
+ frame_height,
+ frame_width,
+ ):
+ out.write(annotated_image)
+ else:
+ print(
+ f"Error: Invalid image size or format - {path_file}"
+ )
+ out.release()
+                print(f"The video for UseCase {i} is saved under: {path_save}")
+
+
+if __name__ == "__main__":
+ rec = Reconstruction()
+ """for i in range(1, nb_use_case + 1):
+ prefix = "./USE_CASES/USE_CASE_" + str(i)
+ save_prefix = "./VIDEO_USE_CASES/VIDEO_USE_CASE_" + str(i)
+ video_detection(prefix + "/CAMERA_1", save_prefix + "/CAMERA_1/vid.mp4")
+ video_detection(prefix + "/CAMERA_2", save_prefix + "/CAMERA_2/vid.mp4")
+ video_detection(prefix + "/CAMERA_3", save_prefix + "/CAMERA_3/vid.mp4")
+ video_detection(prefix + "/CAMERA_4", save_prefix + "/CAMERA_4/vid.mp4")
+ print(f"La vidéo du UseCase {i} est enregistrée sous : {save_prefix}")"""
+
+ rec.video_detection_yolov8(21)
diff --git a/prototyping/src/surround_yolo/4to1image.py b/prototyping/src/surround_yolo/4to1image.py
new file mode 100644
index 0000000..009a202
--- /dev/null
+++ b/prototyping/src/surround_yolo/4to1image.py
@@ -0,0 +1,120 @@
+import numpy as np
+import cv2 as cv
+import os
+
+
+class CvFunction:
+ def calibrate(self, folderImages):
+ CHECKERBOARD = (6, 9)
+ subpix_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.1)
+ calibration_flags = cv.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv.fisheye.CALIB_CHECK_COND + cv.fisheye.CALIB_FIX_SKEW
+ objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
+ objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
+ _img_shape = None
+ objpoints = [] # 3d point in real world space
+ imgpoints = [] # 2d points in image plane.
+ for fname in os.listdir(folderImages):
+ img = cv.imread(folderImages + "/" + fname)
+            if _img_shape is None:
+ _img_shape = img.shape[:2]
+ else:
+ assert _img_shape == img.shape[:2], "All images must share the same size."
+ gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+ # Chess board corners
+ ret, corners = cv.findChessboardCorners(gray, CHECKERBOARD,
+ cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE)
+            # Image points (after refining them)
+ if ret == True:
+ objpoints.append(objp)
+ cv.cornerSubPix(gray, corners, (3, 3), (-1, -1), subpix_criteria)
+ imgpoints.append(corners)
+ # Draw and display the corners
+ cv.drawChessboardCorners(img, (9, 6), corners, ret)
+ cv.imshow('img', img)
+ cv.waitKey(0)
+ cv.destroyAllWindows()
+ N_OK = len(objpoints)
+ K = np.zeros((3, 3))
+ D = np.zeros((4, 1))
+ rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
+ tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
+ rms, _, _, _, _ = cv.fisheye.calibrate(
+ objpoints,
+ imgpoints,
+ gray.shape[::-1],
+ K,
+ D,
+ rvecs,
+ tvecs,
+ calibration_flags,
+ (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
+ )
+ print("Found " + str(N_OK) + " valid images for calibration")
+ print("DIM=" + str(_img_shape[::-1]))
+ print("K=np.array(" + str(K.tolist()) + ")")
+ print("D=np.array(" + str(D.tolist()) + ")")
+ DIM = _img_shape[::-1]
+
+ return K, D, DIM
+
+ def undistort(self, img, K, D, DIM):
+ balance = 1
+ dim2 = None
+ dim3 = None
+ dim1 = img.shape[:2][::-1] # dim1 is the dimension of input image to un-distort
+ assert dim1[0] / dim1[1] == DIM[0] / DIM[
+ 1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
+ if not dim2:
+ dim2 = dim1
+ if not dim3:
+ dim3 = dim1
+ scaled_K = K * dim1[0] / DIM[0] # The values of K is to scale with image dimension.
+ scaled_K[2][2] = 1.0 # Except that K[2][2] is always 1.0
+ # This is how scaled_K, dim2 and balance are used to determine the final K used to un-distort image. OpenCV document failed to make this clear!
+ new_K = cv.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
+ map1, map2 = cv.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv.CV_16SC2)
+ undistorted_img = cv.remap(img, map1, map2, interpolation=cv.INTER_LINEAR, borderMode=cv.BORDER_CONSTANT)
+
+ return undistorted_img
+
+
+class Fuze:
+ def concatener_images_horizontalement(self, listImgCorrected):
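+        """Concatenate the four corrected images horizontally after checking they share the same height."""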
+ assert listImgCorrected[0].shape[0] == listImgCorrected[1].shape[0] == listImgCorrected[2].shape[0] == \
+               listImgCorrected[3].shape[0], "All images must have the same height."
+
+ result = np.concatenate((listImgCorrected[0], listImgCorrected[2], listImgCorrected[1], listImgCorrected[3]),
+ axis=1)
+ return result
+
+
+# Example usage
+if __name__ == "__main__":
+    folderImagesMires = "selected_img_mires"  # To fill in on the Conti computer
+    test_img_undistort = "selected_img_mires/image_1124.png"  # To fill in on the Conti computer
+    path_use_cases = "USE_CASE_1"  # To fill in on the Conti computer
+ folder_save_undistort_and_stich = "undistort_and_stich"
+
+ c = CvFunction()
+    # Get the camera matrix, the distortion coefficients and the image dimensions
+ k, d, dim = c.calibrate(folderImagesMires)
+
+ f = Fuze()
+    nbImg = 2  # To fill in
+ directories = [d for d in os.listdir(path_use_cases) if os.path.isdir(os.path.join(path_use_cases, d))]
+ print(directories)
+ for i in range(nbImg):
+ strImg = f"/image_{i}.png"
+ img1 = cv.imread(path_use_cases + "/" + directories[0] + strImg)
+ undistort_img1 = c.undistort(img1, k, d, dim)
+ img2 = cv.imread(path_use_cases + "/" + directories[1] + strImg)
+ undistort_img2 = c.undistort(img2, k, d, dim)
+ img3 = cv.imread(path_use_cases + "/" + directories[3] + strImg)
+ undistort_img3 = c.undistort(img3, k, d, dim)
+ img4 = cv.imread(path_use_cases + "/" + directories[2] + strImg)
+ undistort_img4 = c.undistort(img4, k, d, dim)
+ list4images = [undistort_img1, undistort_img2, undistort_img3, undistort_img4]
+ concat = f.concatener_images_horizontalement(list4images)
+ cv.imwrite(folder_save_undistort_and_stich + f"/concat{i}.png", concat)
diff --git a/prototyping/src/surround_yolo/image4to1.py b/prototyping/src/surround_yolo/image4to1.py
new file mode 100644
index 0000000..c9c34ad
--- /dev/null
+++ b/prototyping/src/surround_yolo/image4to1.py
@@ -0,0 +1,93 @@
+import cv2
+import numpy as np
+import cv2 as cv
+import os
+
+
+class CvFunction:
+ def calibrate(self, folderImages):
+ # termination criteria
+ criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+ # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
+ objp = np.zeros((6 * 7, 3), np.float32)
+ objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
+ # Arrays to store object points and image points from all the images.
+ objpoints = [] # 3d point in real world space
+ imgpoints = [] # 2d points in image plane.
+ for fname in os.listdir(folderImages):
+            img = cv.imread(os.path.join(folderImages, fname))
+ gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+ # Find the chess board corners
+ ret, corners = cv.findChessboardCorners(gray, (7, 6), None)
+ # If found, add object points, image points (after refining them)
+ if ret is True:
+ objpoints.append(objp)
+ corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
+ imgpoints.append(corners2)
+ # Draw and display the corners
+ cv.drawChessboardCorners(img, (7, 6), corners2, ret)
+ cv.waitKey(500)
+ cv.destroyAllWindows()
+        K = np.zeros((3, 3))
+        D = np.zeros((4, 1))
+        ret, mtx, dist, rvecs, tvecs = cv.fisheye.calibrate(
+            objpoints, imgpoints, gray.shape[::-1], K, D
+        )
+        return ret, mtx, dist
+
+ def undistort(self, image, ret, mtx, dist):
+ h, w = image.shape[:2]
+ newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
+ # undistort
+ dst = cv.undistort(image, mtx, dist, None, newcameramtx)
+ # crop the image
+ x, y, w, h = roi
+ dst = dst[y : y + h, x : x + w]
+        cv.imwrite("calibresult.png", dst)
+        return dst
+
+
+class Fuze:
+ def concatener_images_horizontalement(self, listImgCorrected):
+ assert (
+ listImgCorrected[0].shape[0]
+ == listImgCorrected[1].shape[0]
+ == listImgCorrected[2].shape[0]
+ == listImgCorrected[3].shape[0]
+ ), "Les hauteurs des images doivent être les mêmes."
+
+ result = np.concatenate(
+ (
+ listImgCorrected[0],
+ listImgCorrected[2],
+ listImgCorrected[1],
+ listImgCorrected[3],
+ ),
+ axis=1,
+ )
+
+ return result
+
+
+# Example usage
+if __name__ == "__main__":
+    pth1 = "USE_CASES/USE_CASE_1/CAMERA_1/image_NO_0.png"
+    pth2 = "USE_CASES/USE_CASE_1/CAMERA_2/image_NO_0.png"
+    pth3 = "USE_CASES/USE_CASE_1/CAMERA_3/image_NO_0.png"
+    pth4 = "USE_CASES/USE_CASE_1/CAMERA_4/image_NO_0.png"
+
+    folderImagesMires = "vis2-tiago/folder_mire_images"
+
+    c = CvFunction()
+    ret, mtx, dist = c.calibrate(folderImagesMires)
+
+    f = Fuze()
+
+    # Undistort each camera image before concatenating them
+    list_corrected_image = [
+        c.undistort(cv2.imread(p), ret, mtx, dist) for p in (pth1, pth2, pth3, pth4)
+    ]
+
+    resultat_concatene = f.concatener_images_horizontalement(list_corrected_image)
+    cv2.imwrite("USE_CASES/USE_CASE_1/stich.png", resultat_concatene)
diff --git a/prototyping/src/surround_yolo/undistort.py b/prototyping/src/surround_yolo/undistort.py
new file mode 100644
index 0000000..7c8d161
--- /dev/null
+++ b/prototyping/src/surround_yolo/undistort.py
@@ -0,0 +1,29 @@
+import cv2
+
+
+class ImageUndistorter:
+ def __init__(self, camera_matrix, dist_coeffs):
+ """
+        Initialize the class with the camera intrinsic parameters and the distortion coefficients.
+        :param camera_matrix: Camera matrix (3x3 numpy array).
+        :param dist_coeffs: Distortion coefficients (numpy array).
+ """
+ self.camera_matrix = camera_matrix
+ self.dist_coeffs = dist_coeffs
+
+ def undistort(self, image):
+ """
+        Correct the distortion of the supplied image.
+        :param image: Image to correct (numpy array).
+        :return: Corrected image (numpy array).
+ """
+ h, w = image.shape[:2]
+ new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
+ self.camera_matrix, self.dist_coeffs, (w, h), 1, (w, h)
+ )
+ undistorted_image = cv2.undistort(
+ image, self.camera_matrix, self.dist_coeffs, None, new_camera_matrix
+ )
+ x, y, w, h = roi
+ undistorted_image = undistorted_image[y : y + h, x : x + w]
+ return undistorted_image
diff --git a/prototyping/src/surround_yolo/video_reconstruction_before_yolo.py b/prototyping/src/surround_yolo/video_reconstruction_before_yolo.py
new file mode 100644
index 0000000..b9711d0
--- /dev/null
+++ b/prototyping/src/surround_yolo/video_reconstruction_before_yolo.py
@@ -0,0 +1,62 @@
+import os
+import cv2
+from natsort import natsorted
+from src.surround_yolo.undistort import ImageUndistorter
+import numpy as np
+
+
+class Reconstruction:
+ def __init__(self):
+ self.undistorter = ImageUndistorter(
+ np.array([[1000, 0, 320], [0, 1000, 240], [0, 0, 1]]),
+ np.array([0.1, -0.25, 0.001, 0.001, 0.0]),
+ )
+
+ def video_reconstruction(self, nb_use_case, frame_width=1226, frame_height=968):
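+        """Build one side-by-side video per use case by horizontally concatenating the four camera streams."""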
+ for i in range(1, nb_use_case + 1):
+ video_streams = []
+ for c in [4, 1, 3, 2]:
+ path_folder_image = (
+ "./assets/USE_CASES/USE_CASE_" + str(i) + "/CAMERA_" + str(c)
+ )
+ path_sorted = natsorted(os.listdir(path_folder_image))
+ video_frames = [
+ cv2.imread(path_folder_image + "/" + file)
+ for file in path_sorted
+ if file.endswith(".jpg") or file.endswith(".png")
+ ]
+ """video_frames = [self.undistorter.undistort(
+ cv2.imread(path_folder_image + "/" + file))
+ for file in path_sorted
+ if file.endswith(".jpg") or file.endswith(".png")
+ ]"""
+ video_streams.append(video_frames)
+
+ # Ensure all streams have the same number of frames
+ min_length = min([len(stream) for stream in video_streams])
+ video_streams = [stream[:min_length] for stream in video_streams]
+
+ # Create a VideoWriter for the output video
+ path_save = f"./assets/VIDEOS/COMBINED_VIDEO_USE_CASE_{i}.mp4"
+ out = cv2.VideoWriter(
+ path_save,
+ cv2.VideoWriter_fourcc(*"mp4v"),
+ 24,
+ (frame_width * 4, frame_height),
+ )
+
+ for frame_idx in range(min_length):
+ combined_frame = cv2.hconcat(
+ [stream[frame_idx] for stream in video_streams]
+ )
+ out.write(combined_frame)
+
+ out.release()
+ print(
+                f"The combined video for UseCase {i} is saved under: {path_save}"
+ )
+
+
+if __name__ == "__main__":
+ rec = Reconstruction()
+ rec.video_reconstruction(2)
diff --git a/prototyping/src/surround_yolo/yolov8_benchmark_surround.py b/prototyping/src/surround_yolo/yolov8_benchmark_surround.py
new file mode 100644
index 0000000..8b2becb
--- /dev/null
+++ b/prototyping/src/surround_yolo/yolov8_benchmark_surround.py
@@ -0,0 +1,85 @@
+import cv2
+from ultralytics import YOLO # YOLOv8 import
+from PIL import Image
+import numpy as np
+import time
+
+
+class YOLOv8Detector:
+ def __init__(self, model_name):
+ # Load YOLOv8 model
+ self.model = YOLO(model_name)
+
+ def process_and_save_video(self, input_video_path, output_video_path, model_name):
+ # Open the input video
+ cap = cv2.VideoCapture(input_video_path)
+ if not cap.isOpened():
+ print("Error opening video file")
+ return
+
+ # Get video properties
+ frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ fps = cap.get(cv2.CAP_PROP_FPS)
+
+ # Define the codec and initialize the video writer
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+ out = cv2.VideoWriter(
+ output_video_path, fourcc, fps, (frame_width, frame_height)
+ )
+
+ frame_times = []
+
+ while cap.isOpened():
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ # Start time
+ start = time.time()
+
+ # Process the frame (detection and drawing bounding boxes)
+ annotated_frame = self.process_frame(frame)
+
+ # End time
+ end = time.time()
+ frame_times.append(end - start)
+
+ # Write the frame into the output video file
+ out.write(annotated_frame)
+
+ # Release everything when job is finished
+ cap.release()
+ out.release()
+
+ avg_time_per_frame = sum(frame_times) / len(frame_times)
+ print(
+ f"Average time per frame for {model_name}: {avg_time_per_frame:.4f} seconds"
+ )
+
+ def process_frame(self, frame):
+ """Process a single frame for object detection and return annotated frame"""
+ frame_pil = Image.fromarray(frame)
+ results = self.model.track(
+ source=frame_pil,
+ persist=True,
+ tracker="bytetrack.yaml",
+ conf=0.5,
+ verbose=False,
+ )
+
+ # Convert PIL image back to OpenCV format
+ annotated_frame = cv2.cvtColor(np.array(results[0].plot()), cv2.COLOR_RGB2BGR)
+ return annotated_frame
+
+
+if __name__ == "__main__":
+ models = ["yolov8n.pt", "yolov8s.pt", "yolov8m.pt", "yolov8l.pt", "yolov8x.pt"]
+
+ # Define input and output video paths
+ input_video_path = "assets/VIDEOS/COMBINED_VIDEO_USE_CASE_2.mp4"
+
+ for model in models:
+ output_video_path = f"assets/VIDEOS/COMBINED_VIDEO_USE_CASE_2_YOLO_{model}.mp4"
+ detector = YOLOv8Detector(model)
+ detector.process_and_save_video(input_video_path, output_video_path, model)
diff --git a/prototyping/src/yolo.py b/prototyping/src/yolo.py
new file mode 100644
index 0000000..ccec624
--- /dev/null
+++ b/prototyping/src/yolo.py
@@ -0,0 +1,25 @@
+# Import the YOLO class from the ultralytics library
+from ultralytics import YOLO
+
+# Global variable for the name of the model
+# "yolov8s-pose.pt" is a pre-trained model for pose estimation
+MODEL = "yolov8s-pose.pt"
+
+# Define a class named 'yolo'
+class yolo:
+ def __init__(self):
+ # Initialize a YOLO model with the specified weights file
+ self.model = YOLO(MODEL)
+
+ def compute(self, image):
+ # This function processes an image using the YOLO model
+
+ # Check if the image input is valid
+ if image is not None:
+ # Run YOLOv8 pose estimation on the provided image
+ # 'persist=True' enables persistent tracking across frames
+ # 'tracker="bytetrack.yaml"' specifies the tracking configuration to use
+ results = self.model.track(image, persist=True, tracker="bytetrack.yaml")
+
+ # Return the results of the model
+ return results
\ No newline at end of file
diff --git a/prototyping/src/yolov5_benchmark.py b/prototyping/src/yolov5_benchmark.py
new file mode 100644
index 0000000..6ed2188
--- /dev/null
+++ b/prototyping/src/yolov5_benchmark.py
@@ -0,0 +1,103 @@
+import cv2
+import torch
+import os
+import time
+import numpy as np
+import pandas as pd
+
+
+class YOLOv5Detector:
+ def __init__(self, model_name="yolov5s"):
+ # Load YOLOv5 model
+ self.model = torch.hub.load("ultralytics/yolov5", model_name, pretrained=True)
+
+ def process_images_from_folder(self, folder_path):
+ """Process all images in the specified folder and calculate timing statistics"""
+ processing_times = []
+
+ for image_file in os.listdir(folder_path):
+ image_path = os.path.join(folder_path, image_file)
+ if os.path.isfile(image_path) and image_file.lower().endswith(
+ (".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".gif")
+ ):
+ start_time = time.time()
+ self.process_image(image_path)
+ end_time = time.time()
+ processing_times.append(end_time - start_time)
+
+ if not processing_times:
+ return np.nan, np.nan, [np.nan, np.nan]
+
+ return (
+ np.mean(processing_times),
+ np.median(processing_times),
+ np.percentile(processing_times, [25, 75]),
+ )
+
+ def process_image(self, image_path):
+ """Process a single image"""
+ frame = cv2.imread(image_path)
+ if frame is not None:
+ results = self.model(frame)
+
+
+if __name__ == "__main__":
+ # Define the models to iterate over
+ models = ["yolov5n", "yolov5s", "yolov5m", "yolov5l", "yolov5x"]
+
+ # Define the folders containing images
+ folders = [
+ "assets/DATA/CAMERA_1",
+ "assets/DATA/CAMERA_2",
+ "assets/DATA/CAMERA_3",
+ "assets/DATA/CAMERA_4",
+ "assets/DATA_2/CAMERA_1",
+ "assets/DATA_2/CAMERA_2",
+ "assets/DATA_2/CAMERA_3",
+ "assets/DATA_2/CAMERA_4",
+ ]
+ # Initialize a DataFrame to store the results
+ results_df = pd.DataFrame(
+ columns=[
+ "Model",
+ "Mean Time",
+ "Median Time",
+ "25th Percentile",
+ "75th Percentile",
+ ]
+ )
+
+ # Initialize a list to store dictionaries
+ results_list = []
+
+ # Iterate over each model and process images from all folders
+ for model in models:
+ detector = YOLOv5Detector(model)
+ all_times = []
+ for folder in folders:
+ mean_time, median_time, quartiles = detector.process_images_from_folder(
+ folder
+ )
+ all_times.extend(quartiles) # Extend the list with quartile times
+
+ # Compute overall statistics across all folders
+ overall_mean = np.mean(all_times)
+ overall_median = np.median(all_times)
+ overall_quartiles = np.percentile(all_times, [25, 75])
+
+ # Append dictionary to the list
+ results_list.append(
+ {
+ "Model": model,
+ "Mean Time": overall_mean,
+ "Median Time": overall_median,
+ "25th Percentile": overall_quartiles[0],
+ "75th Percentile": overall_quartiles[1],
+ }
+ )
+
+ # Concatenate the list of dictionaries to form a DataFrame
+ results_df = pd.concat([results_df, pd.DataFrame(results_list)], ignore_index=True)
+
+ # Save results to CSV
+ results_df.to_csv("yolov5_timing_statistics_overall.csv", index=False)
diff --git a/prototyping/src/yolov8_benchmark.py b/prototyping/src/yolov8_benchmark.py
new file mode 100644
index 0000000..55f5262
--- /dev/null
+++ b/prototyping/src/yolov8_benchmark.py
@@ -0,0 +1,104 @@
+import cv2
+from ultralytics import YOLO # YOLOv8 import
+from PIL import Image
+import os
+import time
+import numpy as np
+import pandas as pd
+
+
+class YOLOv8Detector:
+ def __init__(self, model_name):
+ # Load YOLOv8 model
+ self.model = YOLO(model_name)
+
+ def process_images_from_folder(self, folder_path):
+ """Process all images in the specified folder and calculate timing statistics"""
+ processing_times = []
+
+ for image_file in os.listdir(folder_path):
+ image_path = os.path.join(folder_path, image_file)
+ if os.path.isfile(image_path) and image_file.lower().endswith(
+ (".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".gif")
+ ):
+ start_time = time.time()
+ self.process_image(image_path)
+ end_time = time.time()
+ processing_times.append(end_time - start_time)
+
+ if not processing_times:
+ return np.nan, np.nan, [np.nan, np.nan]
+
+ return (
+ np.mean(processing_times),
+ np.median(processing_times),
+ np.percentile(processing_times, [25, 75]),
+ )
+
+ def process_image(self, image_path):
+ """Process a single image"""
+ frame = cv2.imread(image_path)
+ frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+ results = self.model.predict(source=frame_pil, verbose=False)
+
+
+if __name__ == "__main__":
+ # Define the models to iterate over
+ models = ["yolov8n.pt", "yolov8s.pt", "yolov8m.pt", "yolov8l.pt", "yolov8x.pt"]
+
+ # Define the folders containing images
+ folders = [
+ "assets/DATA/CAMERA_1",
+ "assets/DATA/CAMERA_2",
+ "assets/DATA/CAMERA_3",
+ "assets/DATA/CAMERA_4",
+ "assets/DATA_2/CAMERA_1",
+ "assets/DATA_2/CAMERA_2",
+ "assets/DATA_2/CAMERA_3",
+ "assets/DATA_2/CAMERA_4",
+ ]
+
+ # Initialize a DataFrame to store the results
+ results_df = pd.DataFrame(
+ columns=[
+ "Model",
+ "Mean Time",
+ "Median Time",
+ "25th Percentile",
+ "75th Percentile",
+ ]
+ )
+ # Initialize a list to store dictionaries
+ results_list = []
+
+ # Iterate over each model and process images from all folders
+ for model in models:
+ detector = YOLOv8Detector(model)
+ all_times = []
+ for folder in folders:
+ mean_time, median_time, quartiles = detector.process_images_from_folder(
+ folder
+ )
+ all_times.extend(quartiles) # Extend the list with quartile times
+
+ # Compute overall statistics across all folders
+ overall_mean = np.mean(all_times)
+ overall_median = np.median(all_times)
+ overall_quartiles = np.percentile(all_times, [25, 75])
+
+ # Append dictionary to the list
+ results_list.append(
+ {
+ "Model": model,
+ "Mean Time": overall_mean,
+ "Median Time": overall_median,
+ "25th Percentile": overall_quartiles[0],
+ "75th Percentile": overall_quartiles[1],
+ }
+ )
+
+ # Concatenate the list of dictionaries to form a DataFrame
+ results_df = pd.concat([results_df, pd.DataFrame(results_list)], ignore_index=True)
+
+ # Save results to CSV
+ results_df.to_csv("yolov8_timing_statistics_overall.csv", index=False)
diff --git a/prototyping/test/test4to1.py b/prototyping/test/test4to1.py
new file mode 100644
index 0000000..c900c81
--- /dev/null
+++ b/prototyping/test/test4to1.py
@@ -0,0 +1,103 @@
+from prototyping.src.surround_yolo.image4to1 import CvFunction, Fuze
+import unittest
+from unittest.mock import patch, MagicMock
+import cv2 as cv
+import numpy as np
+import os
+
+
+class TestCvFunction(unittest.TestCase):
+ def setUp(self):
+ self.cv_function = CvFunction()
+
+ def tearDown(self):
+ # Remove the calibresult.png file if it exists
+ if os.path.exists("calibresult.png"):
+ os.remove("calibresult.png")
+
+ @patch("cv2.imread")
+ @patch("os.listdir")
+ def test_calibrate(self, mock_listdir, mock_imread):
+ # Setup mock responses
+ mock_listdir.return_value = ["image1.jpg", "image2.jpg"]
+ mock_imread.return_value = np.zeros(
+ (480, 640, 3), dtype=np.uint8
+ ) # Dummy image
+
+ # Mock cv functions
+ with patch(
+ "cv2.findChessboardCorners", return_value=(True, np.zeros((42, 1, 2)))
+ ), patch("cv2.cornerSubPix"), patch("cv2.drawChessboardCorners"), patch(
+ "cv2.fisheye.calibrate", return_value=(True, np.eye(3), np.zeros(4), [], [])
+ ):
+ # Test the method
+ folder_images = "test_folder"
+ self.cv_function.calibrate(folder_images)
+
+ # Assertions can include checks for calls to specific cv2 functions, file reading operations, etc.
+ self.assertTrue(mock_listdir.called)
+ self.assertTrue(mock_imread.called)
+
+ @patch("cv2.getOptimalNewCameraMatrix", return_value=(np.eye(3), (0, 0, 640, 480)))
+ @patch("cv2.undistort", return_value=np.zeros((480, 640, 3), dtype=np.uint8))
+ def test_undistort(self, mock_undistort, mock_getOptimalNewCameraMatrix):
+ # Dummy parameters
+ image = np.zeros((480, 640, 3), dtype=np.uint8)
+ ret = True
+ mtx = np.eye(3)
+ dist = np.zeros(4)
+
+ # Test the method
+ self.cv_function.undistort(image, ret, mtx, dist)
+
+ # Assertions
+ mock_getOptimalNewCameraMatrix.assert_called_once()
+ mock_undistort.assert_called_once()
+ args, kwargs = mock_undistort.call_args
+ self.assertTrue(
+ isinstance(args[0], np.ndarray) and args[0].shape == image.shape
+ )
+ self.assertTrue(isinstance(args[1], np.ndarray) and args[1].shape == mtx.shape)
+ self.assertTrue(isinstance(args[2], np.ndarray) and args[2].shape == dist.shape)
+
+
+class TestFuze(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Create mock images with same height but different widths
+ cls.img1 = np.zeros((100, 200, 3), dtype=np.uint8)
+ cls.img2 = np.zeros((100, 150, 3), dtype=np.uint8)
+ cls.img3 = np.zeros((100, 100, 3), dtype=np.uint8)
+ cls.img4 = np.zeros((100, 250, 3), dtype=np.uint8)
+
+ # Image with different height
+ cls.img_diff_height = np.zeros((120, 200, 3), dtype=np.uint8)
+
+ def test_concatenate_same_height(self):
+ fuze = Fuze()
+ result = fuze.concatener_images_horizontalement(
+ [self.img1, self.img2, self.img3, self.img4]
+ )
+ self.assertEqual(result.shape[0], 100)
+ self.assertEqual(result.shape[1], 200 + 150 + 100 + 250) # Sum of widths
+
+ def test_concatenate_different_heights(self):
+ fuze = Fuze()
+ with self.assertRaises(AssertionError):
+ fuze.concatener_images_horizontalement(
+ [self.img1, self.img_diff_height, self.img3, self.img4]
+ )
+
+ def test_concatenate_less_images(self):
+ fuze = Fuze()
+ with self.assertRaises(IndexError):
+ fuze.concatener_images_horizontalement([self.img1, self.img2])
+
+ def test_concatenate_empty_list(self):
+ fuze = Fuze()
+ with self.assertRaises(IndexError):
+ fuze.concatener_images_horizontalement([])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/prototyping/test/test_network.py b/prototyping/test/test_network.py
new file mode 100644
index 0000000..6a02a72
--- /dev/null
+++ b/prototyping/test/test_network.py
@@ -0,0 +1,59 @@
+import unittest
+import threading
+import socket
+import time
+
+# Import your Receiver and Sender classes
+from prototyping.src.communication.network.receiver import Receiver
+from prototyping.src.communication.network.sender import Sender
+
+
+class TestNetworkOperations(unittest.TestCase):
+ def setUp(self):
+ # Setup for each test
+ self.host = "127.0.0.1"
+ self.port = 65432
+ self.receiver = Receiver(self.host, self.port)
+ self.sender = Sender(self.host, self.port)
+
+ def test_receiver_start_server(self):
+ # Test to check if the server starts correctly
+ server_thread = threading.Thread(target=self.receiver.start_server)
+ server_thread.start()
+
+ # Allow some time for the server to start
+ time.sleep(1)
+
+ # Check if the server is listening
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ result = s.connect_ex((self.host, self.port))
+ self.assertEqual(result, 0) # 0 indicates success
+
+ # Close the server
+ server_thread.join()
+
+ def test_sender_send_data(self):
+ # Test to check if the sender can send data to the receiver
+ server_thread = threading.Thread(target=self.receiver.start_server)
+ # Set as a daemon so it will close when the main thread exits
+ server_thread.daemon = True
+ server_thread.start()
+
+ # Allow some time for the server to start
+ time.sleep(1)
+
+ # Send data
+ test_data = "Hello, world!"
+ self.sender.send_data(test_data)
+
+ # Allow some time for the data to be sent and processed
+ time.sleep(1)
+
+ # Check if the receiver got the data
+ self.assertEqual(self.receiver.last_received_data, test_data)
+
+ # Note: The server thread will close when the test exits, as it's a daemon thread
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/prototyping/test/test_reconstruction_video.py b/prototyping/test/test_reconstruction_video.py
new file mode 100644
index 0000000..629412d
--- /dev/null
+++ b/prototyping/test/test_reconstruction_video.py
@@ -0,0 +1,53 @@
+import unittest
+from unittest.mock import patch, MagicMock
+import os
+import cv2
+from prototyping.src.reconstruction_video import VideoReconstruction
+import io
+from contextlib import redirect_stdout
+
+
+class TestVideoReconstruction(unittest.TestCase):
+ def setUp(self):
+ self.video_reconstruction = VideoReconstruction()
+
+ @patch("os.listdir")
+ @patch("cv2.VideoWriter")
+ @patch("cv2.imread")
+ def test_video_detection_with_valid_images(
+ self, mock_imread, mock_videowriter, mock_listdir
+ ):
+ # Mocking listdir to return a list of files
+ mock_listdir.return_value = ["image1.jpg", "image2.jpg"]
+
+ # Mocking imread to return a valid image
+ mock_image = MagicMock()
+ mock_image.shape = (968, 1226, 3)
+ mock_imread.return_value = mock_image
+
+ # Mocking VideoWriter
+ mock_videowriter.return_value = MagicMock()
+
+ # Call the method
+ self.video_reconstruction.video_detection()
+
+ # Asserts
+ self.assertEqual(mock_imread.call_count, 2)
+ mock_videowriter.return_value.write.assert_called_with(mock_image)
+
+ """@patch("os.listdir")
+ def test_video_detection_with_empty_directory(self, mock_listdir):
+ # Mocking listdir to return an empty list
+ mock_listdir.return_value = []
+
+ # Call the method
+ with self.assertLogs("your_module.VideoReconstruction", level="INFO") as log:
+ self.video_reconstruction.video_detection()
+
+ # Check for log message
+ self.assertIn("Error: Invalid image size or format", log.output[0])
+""" # TODO : Fix this test, the logger doesn't work. Don't know why. Must come back to it later.
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/prototyping/test/test_undistort.py b/prototyping/test/test_undistort.py
new file mode 100644
index 0000000..d6e221f
--- /dev/null
+++ b/prototyping/test/test_undistort.py
@@ -0,0 +1,32 @@
+import unittest
+import numpy as np
+import cv2
+from prototyping.src.surround_yolo.undistort import ImageUndistorter
+
+
+class TestImageUndistorter(unittest.TestCase):
+ def setUp(self):
+        # Dummy camera parameters for the tests
+ self.camera_matrix = np.array([[1000, 0, 320], [0, 1000, 240], [0, 0, 1]])
+ self.dist_coeffs = np.array([0.1, -0.25, 0.001, 0.001, 0.0])
+ self.undistorter = ImageUndistorter(self.camera_matrix, self.dist_coeffs)
+
+        # Create a test image (black image with a white square in the center)
+ self.test_image = np.zeros((480, 640, 3), dtype=np.uint8)
+ cv2.rectangle(self.test_image, (220, 140), (420, 340), (255, 255, 255), -1)
+
+ def test_undistort_not_none(self):
+        # Check that the undistort function returns a non-None image
+ undistorted_image = self.undistorter.undistort(self.test_image)
+ self.assertIsNotNone(undistorted_image)
+
+ def test_undistort_valid(self):
+        # Check that the corrected image is valid
+ undistorted_image = self.undistorter.undistort(self.test_image)
+ self.assertIsNotNone(undistorted_image)
+ self.assertGreater(undistorted_image.shape[0], 0)
+ self.assertGreater(undistorted_image.shape[1], 0)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/prototyping/test/test_yolo.py b/prototyping/test/test_yolo.py
new file mode 100644
index 0000000..d56902a
--- /dev/null
+++ b/prototyping/test/test_yolo.py
@@ -0,0 +1,44 @@
+import unittest
+from prototyping.src.yolo import yolo
+import cv2 # Assuming OpenCV is used for image handling
+from PIL import Image
+import numpy as np
+
+
+class TestYolo(unittest.TestCase):
+ def setUp(self):
+ """Set up the YOLO instance before each test"""
+ self.yolo_instance = yolo()
+
+ def test_compute_with_valid_image(self):
+ """Test the compute function with a valid image"""
+ # Load a test image (replace 'path_to_test_image.jpg' with a valid image path)
+ random_image = Image.fromarray(
+ np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
+ )
+
+ # Check if the image is not None
+ self.assertIsNotNone(random_image, "Failed to load test image.")
+
+ # Call the compute method
+ results = self.yolo_instance.compute(random_image)
+
+ # Assert that results are returned
+ self.assertIsNotNone(results, "No results returned from compute method.")
+
+ def test_compute_with_invalid_image(self):
+ """Test the compute function with an invalid image"""
+ invalid_image = None # Simulate a failed image load
+
+ # Call the compute method
+ results = self.yolo_instance.compute(invalid_image)
+
+ # Assert that no results are returned
+ self.assertIsNone(
+ results, "Results should not be returned for an invalid image."
+ )
+
+
+# This allows the test to be run from the command line
+if __name__ == "__main__":
+ unittest.main()
diff --git a/prototyping/test/test_yolov5_benchmark.py b/prototyping/test/test_yolov5_benchmark.py
new file mode 100644
index 0000000..960c54e
--- /dev/null
+++ b/prototyping/test/test_yolov5_benchmark.py
@@ -0,0 +1,67 @@
+import unittest
+import os
+import numpy as np
+import cv2
+from prototyping.src.yolov5_benchmark import YOLOv5Detector
+import shutil
+
+
+class TestYOLOv5Detector(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.model_name = "yolov5n" # Use a small model for testing
+ cls.detector = YOLOv5Detector(cls.model_name)
+ cls.test_dir = "test_data"
+ os.makedirs(cls.test_dir, exist_ok=True)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.test_dir)
+
+ def create_test_image(self, image_name="test_image.jpg", image_size=(100, 100, 3)):
+ image_path = os.path.join(self.test_dir, image_name)
+ test_image = np.random.randint(0, 256, image_size, dtype=np.uint8)
+ cv2.imwrite(image_path, test_image)
+ return image_path
+
+ def test_init(self):
+ self.assertIsNotNone(self.detector.model)
+
+ def test_process_image(self):
+ image_path = self.create_test_image("test_image.jpg")
+ result = self.detector.process_image(image_path)
+ self.assertIsNone(
+ result
+ ) # Test is far from perfect, but its meaning is that it should detect nothing in a random image
+ os.remove(image_path)
+
+ def test_process_images_from_folder_empty(self):
+ empty_dir = os.path.join(self.test_dir, "empty_folder")
+ os.makedirs(empty_dir)
+ stats = self.detector.process_images_from_folder(empty_dir)
+ self.assertEqual(stats, (np.nan, np.nan, [np.nan, np.nan]))
+ os.rmdir(empty_dir)
+
+ def test_process_images_from_folder_with_images(self):
+ self.create_test_image("image1.jpg")
+ self.create_test_image("image2.jpg")
+
+ stats = self.detector.process_images_from_folder(self.test_dir)
+ self.assertEqual(len(stats), 3)
+ self.assertTrue(all(isinstance(stat, (float, np.ndarray)) for stat in stats))
+
+ def test_timing_statistics(self):
+ self.create_test_image("image1.jpg")
+ self.create_test_image("image2.jpg")
+
+ mean, median, percentiles = self.detector.process_images_from_folder(
+ self.test_dir
+ )
+ self.assertIsInstance(mean, float)
+ self.assertIsInstance(median, float)
+ self.assertIsInstance(percentiles, np.ndarray)
+ self.assertEqual(len(percentiles), 2)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/prototyping/test/test_yolov8_benchmark.py b/prototyping/test/test_yolov8_benchmark.py
new file mode 100644
index 0000000..a92862e
--- /dev/null
+++ b/prototyping/test/test_yolov8_benchmark.py
@@ -0,0 +1,69 @@
+import unittest
+import os
+import numpy as np
+import cv2
+import time
+from PIL import Image
+from prototyping.src.yolov8_benchmark import YOLOv8Detector
+import shutil
+
+
+class TestYOLOv8Detector(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.model_name = "yolov8s" # Example model name
+ cls.detector = YOLOv8Detector(cls.model_name)
+ cls.test_dir = "test_data_yolov8"
+ os.makedirs(cls.test_dir, exist_ok=True)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.test_dir)
+
+ def create_test_image(self, image_name="test_image.jpg", image_size=(100, 100, 3)):
+ image_path = os.path.join(self.test_dir, image_name)
+ test_image = np.random.randint(0, 256, image_size, dtype=np.uint8)
+ cv2.imwrite(image_path, test_image)
+ return image_path
+
+ def test_init(self):
+ self.assertIsNotNone(self.detector.model)
+
+ def test_process_image(self):
+ image_path = self.create_test_image("test_image.jpg")
+ result = self.detector.process_image(image_path)
+ self.assertIsNone(
+ result
+ ) # Test is far from perfect, but its meaning is that it should detect nothing in a random image
+ os.remove(image_path)
+
+ def test_process_images_from_folder_empty(self):
+ empty_dir = os.path.join(self.test_dir, "empty_folder")
+ os.makedirs(empty_dir)
+ stats = self.detector.process_images_from_folder(empty_dir)
+ self.assertEqual(stats, (np.nan, np.nan, [np.nan, np.nan]))
+ os.rmdir(empty_dir)
+
+ def test_process_images_from_folder_with_images(self):
+ self.create_test_image("image1.jpg")
+ self.create_test_image("image2.jpg")
+
+ stats = self.detector.process_images_from_folder(self.test_dir)
+ self.assertEqual(len(stats), 3)
+ self.assertTrue(all(isinstance(stat, (float, np.ndarray)) for stat in stats))
+
+ def test_timing_statistics(self):
+ self.create_test_image("image1.jpg")
+ self.create_test_image("image2.jpg")
+
+ mean, median, percentiles = self.detector.process_images_from_folder(
+ self.test_dir
+ )
+ self.assertIsInstance(mean, float)
+ self.assertIsInstance(median, float)
+ self.assertIsInstance(percentiles, np.ndarray)
+ self.assertEqual(len(percentiles), 2)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/vision_ws/build/.built_by b/vision_ws/build/.built_by
new file mode 100644
index 0000000..06e74ac
--- /dev/null
+++ b/vision_ws/build/.built_by
@@ -0,0 +1 @@
+colcon
diff --git a/vision_ws/build/COLCON_IGNORE b/vision_ws/build/COLCON_IGNORE
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/build/py_pubsub/build/lib/py_pubsub/__init__.py b/vision_ws/build/py_pubsub/build/lib/py_pubsub/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/build/py_pubsub/build/lib/py_pubsub/camera_yolo.py b/vision_ws/build/py_pubsub/build/lib/py_pubsub/camera_yolo.py
new file mode 100644
index 0000000..df2d518
--- /dev/null
+++ b/vision_ws/build/py_pubsub/build/lib/py_pubsub/camera_yolo.py
@@ -0,0 +1,71 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from ultralytics import YOLO # YOLOv8 import
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class YOLODetector:
+ def __init__(self):
+ self.model = YOLO("yolov8s-pose.pt")
+
+ def compute(self, image):
+ if image is not None:
+ results = self.model.track(image, persist=True, tracker="bytetrack.yaml")
+ return results
+
+
+class MinimalPublisher(Node):
+ def __init__(self, camera_id):
+ super().__init__("minimal_publisher")
+ self.camera_id = camera_id
+ self.topic_name = f"annotated_images_{camera_id}"
+ self.publisher = self.create_publisher(Image, self.topic_name, 10)
+ self.subscription = self.create_subscription(
+ Image, f"Cam{camera_id}/image_raw", self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+ self.detector = YOLODetector()
+
+ def listener_callback(self, image):
+ self.get_logger().info(f"Image received from Camera {self.camera_id}")
+ cv_image = cv2.cvtColor(
+ self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough"),
+ cv2.COLOR_BGR2RGB,
+ )
+
+ results = self.detector.compute(cv_image)
+ if results:
+ annotated_image = results[0].plot()
+ msg = self._cv_bridge.cv2_to_imgmsg(annotated_image, "rgb8")
+ self.publisher.publish(msg)
+
+
+def main(args=None):
+ # Initialize ROS without passing args
+ rclpy.init()
+
+ # Create an argument parser for your script
+ parser = argparse.ArgumentParser(description="ROS 2 YOLO Object Detection Node")
+
+ # Add your custom argument
+ parser.add_argument("--cam", type=str, default="1", help="Camera identifier")
+
+    # Parse CLI arguments, stripping any ROS-specific arguments first
+    custom_args = parser.parse_args(remove_ros_args(args=sys.argv)[1:])
+
+ # Create and spin your node
+ minimal_publisher = MinimalPublisher(custom_args.cam)
+ rclpy.spin(minimal_publisher)
+
+ # Shutdown and cleanup
+ minimal_publisher.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/build/py_pubsub/build/lib/py_pubsub/display.py b/vision_ws/build/py_pubsub/build/lib/py_pubsub/display.py
new file mode 100644
index 0000000..3a380b4
--- /dev/null
+++ b/vision_ws/build/py_pubsub/build/lib/py_pubsub/display.py
@@ -0,0 +1,51 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class ImageDisplayNode(Node):
+    def __init__(self, topic_name):
+        super().__init__("image_display_node")
+        self.topic_name = topic_name
+ self.subscription = self.create_subscription(
+ Image, self.topic_name, self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+
+ def listener_callback(self, image):
+ cv_image = self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")
+ cv2.imshow(f"Annotated Image {self.topic_name}", cv_image)
+ cv2.waitKey(1)
+
+
+def main(args=None):
+ # Initialize ROS without passing args
+ rclpy.init()
+
+ # Create an argument parser for your script
+ parser = argparse.ArgumentParser(description="Image Display Node")
+
+ # Add your custom argument
+ parser.add_argument(
+ "--topic", type=str, default="1", required=True, help="Topic to subscribe to"
+ )
+
+    # Parse CLI arguments, stripping any ROS-specific arguments first
+    custom_args = parser.parse_args(remove_ros_args(args=sys.argv)[1:])
+
+ # Create and spin your node
+ image_display_node = ImageDisplayNode(topic_name=custom_args.topic)
+ rclpy.spin(image_display_node)
+
+ # Shutdown and cleanup
+ image_display_node.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/build/py_pubsub/colcon_build.rc b/vision_ws/build/py_pubsub/colcon_build.rc
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ b/vision_ws/build/py_pubsub/colcon_build.rc
@@ -0,0 +1 @@
+0
diff --git a/vision_ws/build/py_pubsub/colcon_command_prefix_setup_py.sh b/vision_ws/build/py_pubsub/colcon_command_prefix_setup_py.sh
new file mode 100644
index 0000000..f9867d5
--- /dev/null
+++ b/vision_ws/build/py_pubsub/colcon_command_prefix_setup_py.sh
@@ -0,0 +1 @@
+# generated from colcon_core/shell/template/command_prefix.sh.em
diff --git a/vision_ws/build/py_pubsub/colcon_command_prefix_setup_py.sh.env b/vision_ws/build/py_pubsub/colcon_command_prefix_setup_py.sh.env
new file mode 100644
index 0000000..f6f5766
--- /dev/null
+++ b/vision_ws/build/py_pubsub/colcon_command_prefix_setup_py.sh.env
@@ -0,0 +1,71 @@
+AMENT_PREFIX_PATH=/home/pge-2023/vis2-tiago/ros_ws/install/py_pubsub:/opt/ros/foxy
+COLCON=1
+COLCON_PREFIX_PATH=/home/pge-2023/vis2-tiago/ros_ws/install
+COLORTERM=truecolor
+DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1001/bus,guid=62f64cf3caca74a06d5d87cd6569a8d5
+DBUS_STARTER_ADDRESS=unix:path=/run/user/1001/bus,guid=62f64cf3caca74a06d5d87cd6569a8d5
+DBUS_STARTER_BUS_TYPE=session
+DEFAULTS_PATH=/usr/share/gconf/ubuntu.default.path
+DESKTOP_SESSION=ubuntu
+DISPLAY=:1
+GDMSESSION=ubuntu
+GENICAM_GENTL64_PATH=:/opt/pylon5/lib64/gentlproducer/gtl
+GNOME_DESKTOP_SESSION_ID=this-is-deprecated
+GNOME_SHELL_SESSION_MODE=ubuntu
+GNOME_TERMINAL_SCREEN=/org/gnome/Terminal/screen/af6db198_1651_4721_8304_f7e7cfdd415a
+GNOME_TERMINAL_SERVICE=:1.119
+GPG_AGENT_INFO=/run/user/1001/gnupg/S.gpg-agent:0:1
+GTK_MODULES=gail:atk-bridge
+HOME=/home/pge-2023
+IM_CONFIG_PHASE=1
+INVOCATION_ID=cce61bc763dc4dd2ad887f00e8baa05e
+JOURNAL_STREAM=8:48940
+LANG=en_US.UTF-8
+LC_ADDRESS=fr_FR.UTF-8
+LC_IDENTIFICATION=fr_FR.UTF-8
+LC_MEASUREMENT=fr_FR.UTF-8
+LC_MONETARY=fr_FR.UTF-8
+LC_NAME=fr_FR.UTF-8
+LC_NUMERIC=fr_FR.UTF-8
+LC_PAPER=fr_FR.UTF-8
+LC_TELEPHONE=fr_FR.UTF-8
+LC_TIME=fr_FR.UTF-8
+LD_LIBRARY_PATH=/opt/ros/foxy/opt/yaml_cpp_vendor/lib:/opt/ros/foxy/opt/rviz_ogre_vendor/lib:/opt/ros/foxy/lib/x86_64-linux-gnu:/opt/ros/foxy/lib
+LESSCLOSE=/usr/bin/lesspipe %s %s
+LESSOPEN=| /usr/bin/lesspipe %s
+LOGNAME=pge-2023
+LS_COLORS=rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:
+MANAGERPID=1688
+MANDATORY_PATH=/usr/share/gconf/ubuntu.mandatory.path
+OLDPWD=/home/pge-2023/vis2-tiago
+PATH=/opt/ros/foxy/bin:/home/pge-2023/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin
+PWD=/home/pge-2023/vis2-tiago/ros_ws/build/py_pubsub
+PYTHONPATH=/home/pge-2023/vis2-tiago/ros_ws/install/py_pubsub/lib/python3.8/site-packages:/opt/ros/foxy/lib/python3.8/site-packages
+QT_ACCESSIBILITY=1
+QT_IM_MODULE=ibus
+ROS_DISTRO=foxy
+ROS_LOCALHOST_ONLY=0
+ROS_PYTHON_VERSION=3
+ROS_VERSION=2
+SESSION_MANAGER=local/contiDemo:@/tmp/.ICE-unix/2015,unix/contiDemo:/tmp/.ICE-unix/2015
+SGX_AESM_ADDR=1
+SHELL=/bin/bash
+SHLVL=2
+SSH_AGENT_PID=1941
+SSH_AUTH_SOCK=/run/user/1001/keyring/ssh
+TERM=xterm-256color
+USER=pge-2023
+USERNAME=pge-2023
+VTE_VERSION=6003
+WINDOWPATH=2
+XAUTHORITY=/run/user/1001/gdm/Xauthority
+XDG_CONFIG_DIRS=/etc/xdg/xdg-ubuntu:/etc/xdg
+XDG_CURRENT_DESKTOP=ubuntu:GNOME
+XDG_DATA_DIRS=/usr/share/ubuntu:/usr/local/share/:/usr/share/:/var/lib/snapd/desktop
+XDG_MENU_PREFIX=gnome-
+XDG_RUNTIME_DIR=/run/user/1001
+XDG_SESSION_CLASS=user
+XDG_SESSION_DESKTOP=ubuntu
+XDG_SESSION_TYPE=x11
+XMODIFIERS=@im=ibus
+_=/usr/bin/colcon
diff --git a/vision_ws/build/py_pubsub/prefix_override/sitecustomize.py b/vision_ws/build/py_pubsub/prefix_override/sitecustomize.py
new file mode 100644
index 0000000..23ce516
--- /dev/null
+++ b/vision_ws/build/py_pubsub/prefix_override/sitecustomize.py
@@ -0,0 +1,3 @@
+import sys
+sys.real_prefix = sys.prefix
+sys.prefix = sys.exec_prefix = '/home/pge-2023/vis2-tiago/ros_ws/install/py_pubsub'
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/PKG-INFO b/vision_ws/build/py_pubsub/py_pubsub.egg-info/PKG-INFO
new file mode 100644
index 0000000..b77e92e
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.2
+Name: py-pubsub
+Version: 0.0.0
+Summary: Examples of minimal publisher/subscriber using rclpy
+Home-page: UNKNOWN
+Maintainer: pge-2023
+Maintainer-email: pge-2023@todo.todo
+License: TODO: License declaration
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/SOURCES.txt b/vision_ws/build/py_pubsub/py_pubsub.egg-info/SOURCES.txt
new file mode 100644
index 0000000..c403224
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/SOURCES.txt
@@ -0,0 +1,17 @@
+package.xml
+setup.cfg
+setup.py
+../../build/py_pubsub/py_pubsub.egg-info/PKG-INFO
+../../build/py_pubsub/py_pubsub.egg-info/SOURCES.txt
+../../build/py_pubsub/py_pubsub.egg-info/dependency_links.txt
+../../build/py_pubsub/py_pubsub.egg-info/entry_points.txt
+../../build/py_pubsub/py_pubsub.egg-info/requires.txt
+../../build/py_pubsub/py_pubsub.egg-info/top_level.txt
+../../build/py_pubsub/py_pubsub.egg-info/zip-safe
+py_pubsub/__init__.py
+py_pubsub/camera_yolo.py
+py_pubsub/display.py
+resource/py_pubsub
+test/test_copyright.py
+test/test_flake8.py
+test/test_pep257.py
\ No newline at end of file
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/dependency_links.txt b/vision_ws/build/py_pubsub/py_pubsub.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/entry_points.txt b/vision_ws/build/py_pubsub/py_pubsub.egg-info/entry_points.txt
new file mode 100644
index 0000000..ef1daca
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+camera_yolo = py_pubsub.camera_yolo:main
+display = py_pubsub.display:main
+
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/requires.txt b/vision_ws/build/py_pubsub/py_pubsub.egg-info/requires.txt
new file mode 100644
index 0000000..49fe098
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/requires.txt
@@ -0,0 +1 @@
+setuptools
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/top_level.txt b/vision_ws/build/py_pubsub/py_pubsub.egg-info/top_level.txt
new file mode 100644
index 0000000..2b491d3
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/top_level.txt
@@ -0,0 +1 @@
+py_pubsub
diff --git a/vision_ws/build/py_pubsub/py_pubsub.egg-info/zip-safe b/vision_ws/build/py_pubsub/py_pubsub.egg-info/zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/vision_ws/build/py_pubsub/py_pubsub.egg-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/vision_ws/install/.colcon_install_layout b/vision_ws/install/.colcon_install_layout
new file mode 100644
index 0000000..3aad533
--- /dev/null
+++ b/vision_ws/install/.colcon_install_layout
@@ -0,0 +1 @@
+isolated
diff --git a/vision_ws/install/COLCON_IGNORE b/vision_ws/install/COLCON_IGNORE
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/install/_local_setup_util_ps1.py b/vision_ws/install/_local_setup_util_ps1.py
new file mode 100644
index 0000000..83abe63
--- /dev/null
+++ b/vision_ws/install/_local_setup_util_ps1.py
@@ -0,0 +1,407 @@
+# Copyright 2016-2019 Dirk Thomas
+# Licensed under the Apache License, Version 2.0
+
+import argparse
+from collections import OrderedDict
+import os
+from pathlib import Path
+import sys
+
+
+FORMAT_STR_COMMENT_LINE = '# {comment}'
+FORMAT_STR_SET_ENV_VAR = 'Set-Item -Path "Env:{name}" -Value "{value}"'
+FORMAT_STR_USE_ENV_VAR = '$env:{name}'
+FORMAT_STR_INVOKE_SCRIPT = '_colcon_prefix_powershell_source_script "{script_path}"'
+FORMAT_STR_REMOVE_LEADING_SEPARATOR = ''
+FORMAT_STR_REMOVE_TRAILING_SEPARATOR = ''
+
+DSV_TYPE_APPEND_NON_DUPLICATE = 'append-non-duplicate'
+DSV_TYPE_PREPEND_NON_DUPLICATE = 'prepend-non-duplicate'
+DSV_TYPE_PREPEND_NON_DUPLICATE_IF_EXISTS = 'prepend-non-duplicate-if-exists'
+DSV_TYPE_SET = 'set'
+DSV_TYPE_SET_IF_UNSET = 'set-if-unset'
+DSV_TYPE_SOURCE = 'source'
+
+
+def main(argv=sys.argv[1:]): # noqa: D103
+ parser = argparse.ArgumentParser(
+ description='Output shell commands for the packages in topological '
+ 'order')
+ parser.add_argument(
+ 'primary_extension',
+ help='The file extension of the primary shell')
+ parser.add_argument(
+ 'additional_extension', nargs='?',
+ help='The additional file extension to be considered')
+ parser.add_argument(
+ '--merged-install', action='store_true',
+ help='All install prefixes are merged into a single location')
+ args = parser.parse_args(argv)
+
+ packages = get_packages(Path(__file__).parent, args.merged_install)
+
+ ordered_packages = order_packages(packages)
+ for pkg_name in ordered_packages:
+ if _include_comments():
+ print(
+ FORMAT_STR_COMMENT_LINE.format_map(
+ {'comment': 'Package: ' + pkg_name}))
+ prefix = os.path.abspath(os.path.dirname(__file__))
+ if not args.merged_install:
+ prefix = os.path.join(prefix, pkg_name)
+ for line in get_commands(
+ pkg_name, prefix, args.primary_extension,
+ args.additional_extension
+ ):
+ print(line)
+
+ for line in _remove_ending_separators():
+ print(line)
+
+
+def get_packages(prefix_path, merged_install):
+ """
+ Find packages based on colcon-specific files created during installation.
+
+ :param Path prefix_path: The install prefix path of all packages
+ :param bool merged_install: The flag if the packages are all installed
+ directly in the prefix or if each package is installed in a subdirectory
+ named after the package
+ :returns: A mapping from the package name to the set of runtime
+ dependencies
+ :rtype: dict
+ """
+ packages = {}
+ # since importing colcon_core isn't feasible here the following constant
+ # must match colcon_core.location.get_relative_package_index_path()
+ subdirectory = 'share/colcon-core/packages'
+ if merged_install:
+ # return if workspace is empty
+ if not (prefix_path / subdirectory).is_dir():
+ return packages
+ # find all files in the subdirectory
+ for p in (prefix_path / subdirectory).iterdir():
+ if not p.is_file():
+ continue
+ if p.name.startswith('.'):
+ continue
+ add_package_runtime_dependencies(p, packages)
+ else:
+ # for each subdirectory look for the package specific file
+ for p in prefix_path.iterdir():
+ if not p.is_dir():
+ continue
+ if p.name.startswith('.'):
+ continue
+ p = p / subdirectory / p.name
+ if p.is_file():
+ add_package_runtime_dependencies(p, packages)
+
+ # remove unknown dependencies
+ pkg_names = set(packages.keys())
+ for k in packages.keys():
+ packages[k] = {d for d in packages[k] if d in pkg_names}
+
+ return packages
+
+
+def add_package_runtime_dependencies(path, packages):
+ """
+ Check the path and if it exists extract the packages runtime dependencies.
+
+ :param Path path: The resource file containing the runtime dependencies
+ :param dict packages: A mapping from package names to the sets of runtime
+ dependencies to add to
+ """
+ content = path.read_text()
+ dependencies = set(content.split(os.pathsep) if content else [])
+ packages[path.name] = dependencies
+
+
+def order_packages(packages):
+ """
+ Order packages topologically.
+
+ :param dict packages: A mapping from package name to the set of runtime
+ dependencies
+ :returns: The package names
+ :rtype: list
+ """
+ # select packages with no dependencies in alphabetical order
+ to_be_ordered = list(packages.keys())
+ ordered = []
+ while to_be_ordered:
+ pkg_names_without_deps = [
+ name for name in to_be_ordered if not packages[name]]
+ if not pkg_names_without_deps:
+ reduce_cycle_set(packages)
+ raise RuntimeError(
+ 'Circular dependency between: ' + ', '.join(sorted(packages)))
+ pkg_names_without_deps.sort()
+ pkg_name = pkg_names_without_deps[0]
+ to_be_ordered.remove(pkg_name)
+ ordered.append(pkg_name)
+ # remove item from dependency lists
+ for k in list(packages.keys()):
+ if pkg_name in packages[k]:
+ packages[k].remove(pkg_name)
+ return ordered
+
+
+def reduce_cycle_set(packages):
+ """
+ Reduce the set of packages to the ones part of the circular dependency.
+
+ :param dict packages: A mapping from package name to the set of runtime
+ dependencies which is modified in place
+ """
+ last_depended = None
+ while len(packages) > 0:
+ # get all remaining dependencies
+ depended = set()
+ for pkg_name, dependencies in packages.items():
+ depended = depended.union(dependencies)
+ # remove all packages which are not dependent on
+ for name in list(packages.keys()):
+ if name not in depended:
+ del packages[name]
+ if last_depended:
+ # if remaining packages haven't changed return them
+ if last_depended == depended:
+ return packages.keys()
+ # otherwise reduce again
+ last_depended = depended
+
+
+def _include_comments():
+ # skipping comment lines when COLCON_TRACE is not set speeds up the
+ # processing especially on Windows
+ return bool(os.environ.get('COLCON_TRACE'))
+
+
+def get_commands(pkg_name, prefix, primary_extension, additional_extension):
+ commands = []
+ package_dsv_path = os.path.join(prefix, 'share', pkg_name, 'package.dsv')
+ if os.path.exists(package_dsv_path):
+ commands += process_dsv_file(
+ package_dsv_path, prefix, primary_extension, additional_extension)
+ return commands
+
+
+def process_dsv_file(
+ dsv_path, prefix, primary_extension=None, additional_extension=None
+):
+ commands = []
+ if _include_comments():
+ commands.append(FORMAT_STR_COMMENT_LINE.format_map({'comment': dsv_path}))
+ with open(dsv_path, 'r') as h:
+ content = h.read()
+ lines = content.splitlines()
+
+ basenames = OrderedDict()
+ for i, line in enumerate(lines):
+ # skip over empty or whitespace-only lines
+ if not line.strip():
+ continue
+ # skip over comments
+ if line.startswith('#'):
+ continue
+ try:
+ type_, remainder = line.split(';', 1)
+ except ValueError:
+ raise RuntimeError(
+ "Line %d in '%s' doesn't contain a semicolon separating the "
+ 'type from the arguments' % (i + 1, dsv_path))
+ if type_ != DSV_TYPE_SOURCE:
+ # handle non-source lines
+ try:
+ commands += handle_dsv_types_except_source(
+ type_, remainder, prefix)
+ except RuntimeError as e:
+ raise RuntimeError(
+ "Line %d in '%s' %s" % (i + 1, dsv_path, e)) from e
+ else:
+ # group remaining source lines by basename
+ path_without_ext, ext = os.path.splitext(remainder)
+ if path_without_ext not in basenames:
+ basenames[path_without_ext] = set()
+ assert ext.startswith('.')
+ ext = ext[1:]
+ if ext in (primary_extension, additional_extension):
+ basenames[path_without_ext].add(ext)
+
+ # add the dsv extension to each basename if the file exists
+ for basename, extensions in basenames.items():
+ if not os.path.isabs(basename):
+ basename = os.path.join(prefix, basename)
+ if os.path.exists(basename + '.dsv'):
+ extensions.add('dsv')
+
+ for basename, extensions in basenames.items():
+ if not os.path.isabs(basename):
+ basename = os.path.join(prefix, basename)
+ if 'dsv' in extensions:
+ # process dsv files recursively
+ commands += process_dsv_file(
+ basename + '.dsv', prefix, primary_extension=primary_extension,
+ additional_extension=additional_extension)
+ elif primary_extension in extensions and len(extensions) == 1:
+ # source primary-only files
+ commands += [
+ FORMAT_STR_INVOKE_SCRIPT.format_map({
+ 'prefix': prefix,
+ 'script_path': basename + '.' + primary_extension})]
+ elif additional_extension in extensions:
+ # source non-primary files
+ commands += [
+ FORMAT_STR_INVOKE_SCRIPT.format_map({
+ 'prefix': prefix,
+ 'script_path': basename + '.' + additional_extension})]
+
+ return commands
+
+
+def handle_dsv_types_except_source(type_, remainder, prefix):
+ commands = []
+ if type_ in (DSV_TYPE_SET, DSV_TYPE_SET_IF_UNSET):
+ try:
+ env_name, value = remainder.split(';', 1)
+ except ValueError:
+ raise RuntimeError(
+ "doesn't contain a semicolon separating the environment name "
+ 'from the value')
+ try_prefixed_value = os.path.join(prefix, value) if value else prefix
+ if os.path.exists(try_prefixed_value):
+ value = try_prefixed_value
+ if type_ == DSV_TYPE_SET:
+ commands += _set(env_name, value)
+ elif type_ == DSV_TYPE_SET_IF_UNSET:
+ commands += _set_if_unset(env_name, value)
+ else:
+ assert False
+ elif type_ in (
+ DSV_TYPE_APPEND_NON_DUPLICATE,
+ DSV_TYPE_PREPEND_NON_DUPLICATE,
+ DSV_TYPE_PREPEND_NON_DUPLICATE_IF_EXISTS
+ ):
+ try:
+ env_name_and_values = remainder.split(';')
+ except ValueError:
+ raise RuntimeError(
+ "doesn't contain a semicolon separating the environment name "
+ 'from the values')
+ env_name = env_name_and_values[0]
+ values = env_name_and_values[1:]
+ for value in values:
+ if not value:
+ value = prefix
+ elif not os.path.isabs(value):
+ value = os.path.join(prefix, value)
+ if (
+ type_ == DSV_TYPE_PREPEND_NON_DUPLICATE_IF_EXISTS and
+ not os.path.exists(value)
+ ):
+ comment = f'skip extending {env_name} with not existing ' \
+ f'path: {value}'
+ if _include_comments():
+ commands.append(
+ FORMAT_STR_COMMENT_LINE.format_map({'comment': comment}))
+ elif type_ == DSV_TYPE_APPEND_NON_DUPLICATE:
+ commands += _append_unique_value(env_name, value)
+ else:
+ commands += _prepend_unique_value(env_name, value)
+ else:
+ raise RuntimeError(
+ 'contains an unknown environment hook type: ' + type_)
+ return commands
+
+
+env_state = {}
+
+
+def _append_unique_value(name, value):
+ global env_state
+ if name not in env_state:
+ if os.environ.get(name):
+ env_state[name] = set(os.environ[name].split(os.pathsep))
+ else:
+ env_state[name] = set()
+ # append even if the variable has not been set yet, in case a shell script sets the
+ # same variable without the knowledge of this Python script.
+ # later _remove_ending_separators() will cleanup any unintentional leading separator
+ extend = FORMAT_STR_USE_ENV_VAR.format_map({'name': name}) + os.pathsep
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': extend + value})
+ if value not in env_state[name]:
+ env_state[name].add(value)
+ else:
+ if not _include_comments():
+ return []
+ line = FORMAT_STR_COMMENT_LINE.format_map({'comment': line})
+ return [line]
+
+
+def _prepend_unique_value(name, value):
+ global env_state
+ if name not in env_state:
+ if os.environ.get(name):
+ env_state[name] = set(os.environ[name].split(os.pathsep))
+ else:
+ env_state[name] = set()
+ # prepend even if the variable has not been set yet, in case a shell script sets the
+ # same variable without the knowledge of this Python script.
+ # later _remove_ending_separators() will cleanup any unintentional trailing separator
+ extend = os.pathsep + FORMAT_STR_USE_ENV_VAR.format_map({'name': name})
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': value + extend})
+ if value not in env_state[name]:
+ env_state[name].add(value)
+ else:
+ if not _include_comments():
+ return []
+ line = FORMAT_STR_COMMENT_LINE.format_map({'comment': line})
+ return [line]
+
+
+# generate commands for removing prepended underscores
+def _remove_ending_separators():
+ # do nothing if the shell extension does not implement the logic
+ if FORMAT_STR_REMOVE_TRAILING_SEPARATOR is None:
+ return []
+
+ global env_state
+ commands = []
+ for name in env_state:
+ # skip variables that already had values before this script started prepending
+ if name in os.environ:
+ continue
+ commands += [
+ FORMAT_STR_REMOVE_LEADING_SEPARATOR.format_map({'name': name}),
+ FORMAT_STR_REMOVE_TRAILING_SEPARATOR.format_map({'name': name})]
+ return commands
+
+
+def _set(name, value):
+ global env_state
+ env_state[name] = value
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': value})
+ return [line]
+
+
+def _set_if_unset(name, value):
+ global env_state
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': value})
+ if env_state.get(name, os.environ.get(name)):
+ line = FORMAT_STR_COMMENT_LINE.format_map({'comment': line})
+ return [line]
+
+
+if __name__ == '__main__': # pragma: no cover
+ try:
+ rc = main()
+ except RuntimeError as e:
+ print(str(e), file=sys.stderr)
+ rc = 1
+ sys.exit(rc)
diff --git a/vision_ws/install/_local_setup_util_sh.py b/vision_ws/install/_local_setup_util_sh.py
new file mode 100644
index 0000000..ff31198
--- /dev/null
+++ b/vision_ws/install/_local_setup_util_sh.py
@@ -0,0 +1,407 @@
+# Copyright 2016-2019 Dirk Thomas
+# Licensed under the Apache License, Version 2.0
+
+import argparse
+from collections import OrderedDict
+import os
+from pathlib import Path
+import sys
+
+
+FORMAT_STR_COMMENT_LINE = '# {comment}'
+FORMAT_STR_SET_ENV_VAR = 'export {name}="{value}"'
+FORMAT_STR_USE_ENV_VAR = '${name}'
+FORMAT_STR_INVOKE_SCRIPT = 'COLCON_CURRENT_PREFIX="{prefix}" _colcon_prefix_sh_source_script "{script_path}"'
+FORMAT_STR_REMOVE_LEADING_SEPARATOR = 'if [ "$(echo -n ${name} | head -c 1)" = ":" ]; then export {name}=${{{name}#?}} ; fi'
+FORMAT_STR_REMOVE_TRAILING_SEPARATOR = 'if [ "$(echo -n ${name} | tail -c 1)" = ":" ]; then export {name}=${{{name}%?}} ; fi'
+
+DSV_TYPE_APPEND_NON_DUPLICATE = 'append-non-duplicate'
+DSV_TYPE_PREPEND_NON_DUPLICATE = 'prepend-non-duplicate'
+DSV_TYPE_PREPEND_NON_DUPLICATE_IF_EXISTS = 'prepend-non-duplicate-if-exists'
+DSV_TYPE_SET = 'set'
+DSV_TYPE_SET_IF_UNSET = 'set-if-unset'
+DSV_TYPE_SOURCE = 'source'
+
+
+def main(argv=sys.argv[1:]): # noqa: D103
+ parser = argparse.ArgumentParser(
+ description='Output shell commands for the packages in topological '
+ 'order')
+ parser.add_argument(
+ 'primary_extension',
+ help='The file extension of the primary shell')
+ parser.add_argument(
+ 'additional_extension', nargs='?',
+ help='The additional file extension to be considered')
+ parser.add_argument(
+ '--merged-install', action='store_true',
+ help='All install prefixes are merged into a single location')
+ args = parser.parse_args(argv)
+
+ packages = get_packages(Path(__file__).parent, args.merged_install)
+
+ ordered_packages = order_packages(packages)
+ for pkg_name in ordered_packages:
+ if _include_comments():
+ print(
+ FORMAT_STR_COMMENT_LINE.format_map(
+ {'comment': 'Package: ' + pkg_name}))
+ prefix = os.path.abspath(os.path.dirname(__file__))
+ if not args.merged_install:
+ prefix = os.path.join(prefix, pkg_name)
+ for line in get_commands(
+ pkg_name, prefix, args.primary_extension,
+ args.additional_extension
+ ):
+ print(line)
+
+ for line in _remove_ending_separators():
+ print(line)
+
+
+def get_packages(prefix_path, merged_install):
+ """
+ Find packages based on colcon-specific files created during installation.
+
+ :param Path prefix_path: The install prefix path of all packages
+ :param bool merged_install: The flag if the packages are all installed
+ directly in the prefix or if each package is installed in a subdirectory
+ named after the package
+ :returns: A mapping from the package name to the set of runtime
+ dependencies
+ :rtype: dict
+ """
+ packages = {}
+ # since importing colcon_core isn't feasible here the following constant
+ # must match colcon_core.location.get_relative_package_index_path()
+ subdirectory = 'share/colcon-core/packages'
+ if merged_install:
+ # return if workspace is empty
+ if not (prefix_path / subdirectory).is_dir():
+ return packages
+ # find all files in the subdirectory
+ for p in (prefix_path / subdirectory).iterdir():
+ if not p.is_file():
+ continue
+ if p.name.startswith('.'):
+ continue
+ add_package_runtime_dependencies(p, packages)
+ else:
+ # for each subdirectory look for the package specific file
+ for p in prefix_path.iterdir():
+ if not p.is_dir():
+ continue
+ if p.name.startswith('.'):
+ continue
+ p = p / subdirectory / p.name
+ if p.is_file():
+ add_package_runtime_dependencies(p, packages)
+
+ # remove unknown dependencies
+ pkg_names = set(packages.keys())
+ for k in packages.keys():
+ packages[k] = {d for d in packages[k] if d in pkg_names}
+
+ return packages
+
+
+def add_package_runtime_dependencies(path, packages):
+ """
+ Check the path and if it exists extract the packages runtime dependencies.
+
+ :param Path path: The resource file containing the runtime dependencies
+ :param dict packages: A mapping from package names to the sets of runtime
+ dependencies to add to
+ """
+ content = path.read_text()
+ dependencies = set(content.split(os.pathsep) if content else [])
+ packages[path.name] = dependencies
+
+
+def order_packages(packages):
+ """
+ Order packages topologically.
+
+ :param dict packages: A mapping from package name to the set of runtime
+ dependencies
+ :returns: The package names
+ :rtype: list
+ """
+ # select packages with no dependencies in alphabetical order
+ to_be_ordered = list(packages.keys())
+ ordered = []
+ while to_be_ordered:
+ pkg_names_without_deps = [
+ name for name in to_be_ordered if not packages[name]]
+ if not pkg_names_without_deps:
+ reduce_cycle_set(packages)
+ raise RuntimeError(
+ 'Circular dependency between: ' + ', '.join(sorted(packages)))
+ pkg_names_without_deps.sort()
+ pkg_name = pkg_names_without_deps[0]
+ to_be_ordered.remove(pkg_name)
+ ordered.append(pkg_name)
+ # remove item from dependency lists
+ for k in list(packages.keys()):
+ if pkg_name in packages[k]:
+ packages[k].remove(pkg_name)
+ return ordered
+
+
+def reduce_cycle_set(packages):
+ """
+ Reduce the set of packages to the ones part of the circular dependency.
+
+ :param dict packages: A mapping from package name to the set of runtime
+ dependencies which is modified in place
+ """
+ last_depended = None
+ while len(packages) > 0:
+ # get all remaining dependencies
+ depended = set()
+ for pkg_name, dependencies in packages.items():
+ depended = depended.union(dependencies)
+ # remove all packages which are not dependent on
+ for name in list(packages.keys()):
+ if name not in depended:
+ del packages[name]
+ if last_depended:
+ # if remaining packages haven't changed return them
+ if last_depended == depended:
+ return packages.keys()
+ # otherwise reduce again
+ last_depended = depended
+
+
+def _include_comments():
+ # skipping comment lines when COLCON_TRACE is not set speeds up the
+ # processing especially on Windows
+ return bool(os.environ.get('COLCON_TRACE'))
+
+
+def get_commands(pkg_name, prefix, primary_extension, additional_extension):
+ commands = []
+ package_dsv_path = os.path.join(prefix, 'share', pkg_name, 'package.dsv')
+ if os.path.exists(package_dsv_path):
+ commands += process_dsv_file(
+ package_dsv_path, prefix, primary_extension, additional_extension)
+ return commands
+
+
+def process_dsv_file(
+ dsv_path, prefix, primary_extension=None, additional_extension=None
+):
+ commands = []
+ if _include_comments():
+ commands.append(FORMAT_STR_COMMENT_LINE.format_map({'comment': dsv_path}))
+ with open(dsv_path, 'r') as h:
+ content = h.read()
+ lines = content.splitlines()
+
+ basenames = OrderedDict()
+ for i, line in enumerate(lines):
+ # skip over empty or whitespace-only lines
+ if not line.strip():
+ continue
+ # skip over comments
+ if line.startswith('#'):
+ continue
+ try:
+ type_, remainder = line.split(';', 1)
+ except ValueError:
+ raise RuntimeError(
+ "Line %d in '%s' doesn't contain a semicolon separating the "
+ 'type from the arguments' % (i + 1, dsv_path))
+ if type_ != DSV_TYPE_SOURCE:
+ # handle non-source lines
+ try:
+ commands += handle_dsv_types_except_source(
+ type_, remainder, prefix)
+ except RuntimeError as e:
+ raise RuntimeError(
+ "Line %d in '%s' %s" % (i + 1, dsv_path, e)) from e
+ else:
+ # group remaining source lines by basename
+ path_without_ext, ext = os.path.splitext(remainder)
+ if path_without_ext not in basenames:
+ basenames[path_without_ext] = set()
+ assert ext.startswith('.')
+ ext = ext[1:]
+ if ext in (primary_extension, additional_extension):
+ basenames[path_without_ext].add(ext)
+
+ # add the dsv extension to each basename if the file exists
+ for basename, extensions in basenames.items():
+ if not os.path.isabs(basename):
+ basename = os.path.join(prefix, basename)
+ if os.path.exists(basename + '.dsv'):
+ extensions.add('dsv')
+
+ for basename, extensions in basenames.items():
+ if not os.path.isabs(basename):
+ basename = os.path.join(prefix, basename)
+ if 'dsv' in extensions:
+ # process dsv files recursively
+ commands += process_dsv_file(
+ basename + '.dsv', prefix, primary_extension=primary_extension,
+ additional_extension=additional_extension)
+ elif primary_extension in extensions and len(extensions) == 1:
+ # source primary-only files
+ commands += [
+ FORMAT_STR_INVOKE_SCRIPT.format_map({
+ 'prefix': prefix,
+ 'script_path': basename + '.' + primary_extension})]
+ elif additional_extension in extensions:
+ # source non-primary files
+ commands += [
+ FORMAT_STR_INVOKE_SCRIPT.format_map({
+ 'prefix': prefix,
+ 'script_path': basename + '.' + additional_extension})]
+
+ return commands
+
+
+def handle_dsv_types_except_source(type_, remainder, prefix):
+ commands = []
+ if type_ in (DSV_TYPE_SET, DSV_TYPE_SET_IF_UNSET):
+ try:
+ env_name, value = remainder.split(';', 1)
+ except ValueError:
+ raise RuntimeError(
+ "doesn't contain a semicolon separating the environment name "
+ 'from the value')
+ try_prefixed_value = os.path.join(prefix, value) if value else prefix
+ if os.path.exists(try_prefixed_value):
+ value = try_prefixed_value
+ if type_ == DSV_TYPE_SET:
+ commands += _set(env_name, value)
+ elif type_ == DSV_TYPE_SET_IF_UNSET:
+ commands += _set_if_unset(env_name, value)
+ else:
+ assert False
+ elif type_ in (
+ DSV_TYPE_APPEND_NON_DUPLICATE,
+ DSV_TYPE_PREPEND_NON_DUPLICATE,
+ DSV_TYPE_PREPEND_NON_DUPLICATE_IF_EXISTS
+ ):
+ try:
+ env_name_and_values = remainder.split(';')
+ except ValueError:
+ raise RuntimeError(
+ "doesn't contain a semicolon separating the environment name "
+ 'from the values')
+ env_name = env_name_and_values[0]
+ values = env_name_and_values[1:]
+ for value in values:
+ if not value:
+ value = prefix
+ elif not os.path.isabs(value):
+ value = os.path.join(prefix, value)
+ if (
+ type_ == DSV_TYPE_PREPEND_NON_DUPLICATE_IF_EXISTS and
+ not os.path.exists(value)
+ ):
+ comment = f'skip extending {env_name} with not existing ' \
+ f'path: {value}'
+ if _include_comments():
+ commands.append(
+ FORMAT_STR_COMMENT_LINE.format_map({'comment': comment}))
+ elif type_ == DSV_TYPE_APPEND_NON_DUPLICATE:
+ commands += _append_unique_value(env_name, value)
+ else:
+ commands += _prepend_unique_value(env_name, value)
+ else:
+ raise RuntimeError(
+ 'contains an unknown environment hook type: ' + type_)
+ return commands
+
+
+env_state = {}
+
+
+def _append_unique_value(name, value):
+ global env_state
+ if name not in env_state:
+ if os.environ.get(name):
+ env_state[name] = set(os.environ[name].split(os.pathsep))
+ else:
+ env_state[name] = set()
+ # append even if the variable has not been set yet, in case a shell script sets the
+ # same variable without the knowledge of this Python script.
+ # later _remove_ending_separators() will cleanup any unintentional leading separator
+ extend = FORMAT_STR_USE_ENV_VAR.format_map({'name': name}) + os.pathsep
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': extend + value})
+ if value not in env_state[name]:
+ env_state[name].add(value)
+ else:
+ if not _include_comments():
+ return []
+ line = FORMAT_STR_COMMENT_LINE.format_map({'comment': line})
+ return [line]
+
+
+def _prepend_unique_value(name, value):
+ global env_state
+ if name not in env_state:
+ if os.environ.get(name):
+ env_state[name] = set(os.environ[name].split(os.pathsep))
+ else:
+ env_state[name] = set()
+ # prepend even if the variable has not been set yet, in case a shell script sets the
+ # same variable without the knowledge of this Python script.
+ # later _remove_ending_separators() will cleanup any unintentional trailing separator
+ extend = os.pathsep + FORMAT_STR_USE_ENV_VAR.format_map({'name': name})
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': value + extend})
+ if value not in env_state[name]:
+ env_state[name].add(value)
+ else:
+ if not _include_comments():
+ return []
+ line = FORMAT_STR_COMMENT_LINE.format_map({'comment': line})
+ return [line]
+
+
+# generate commands for removing prepended underscores
+def _remove_ending_separators():
+ # do nothing if the shell extension does not implement the logic
+ if FORMAT_STR_REMOVE_TRAILING_SEPARATOR is None:
+ return []
+
+ global env_state
+ commands = []
+ for name in env_state:
+ # skip variables that already had values before this script started prepending
+ if name in os.environ:
+ continue
+ commands += [
+ FORMAT_STR_REMOVE_LEADING_SEPARATOR.format_map({'name': name}),
+ FORMAT_STR_REMOVE_TRAILING_SEPARATOR.format_map({'name': name})]
+ return commands
+
+
+def _set(name, value):
+ global env_state
+ env_state[name] = value
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': value})
+ return [line]
+
+
+def _set_if_unset(name, value):
+ global env_state
+ line = FORMAT_STR_SET_ENV_VAR.format_map(
+ {'name': name, 'value': value})
+ if env_state.get(name, os.environ.get(name)):
+ line = FORMAT_STR_COMMENT_LINE.format_map({'comment': line})
+ return [line]
+
+
+if __name__ == '__main__': # pragma: no cover
+ try:
+ rc = main()
+ except RuntimeError as e:
+ print(str(e), file=sys.stderr)
+ rc = 1
+ sys.exit(rc)
diff --git a/vision_ws/install/local_setup.bash b/vision_ws/install/local_setup.bash
new file mode 100644
index 0000000..03f0025
--- /dev/null
+++ b/vision_ws/install/local_setup.bash
@@ -0,0 +1,121 @@
+# generated from colcon_bash/shell/template/prefix.bash.em
+
+# This script extends the environment with all packages contained in this
+# prefix path.
+
+# a bash script is able to determine its own path if necessary
+if [ -z "$COLCON_CURRENT_PREFIX" ]; then
+ _colcon_prefix_bash_COLCON_CURRENT_PREFIX="$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" > /dev/null && pwd)"
+else
+ _colcon_prefix_bash_COLCON_CURRENT_PREFIX="$COLCON_CURRENT_PREFIX"
+fi
+
+# function to prepend a value to a variable
+# which uses colons as separators
+# duplicates as well as trailing separators are avoided
+# first argument: the name of the result variable
+# second argument: the value to be prepended
+_colcon_prefix_bash_prepend_unique_value() {
+ # arguments
+ _listname="$1"
+ _value="$2"
+
+ # get values from variable
+ eval _values=\"\$$_listname\"
+ # backup the field separator
+ _colcon_prefix_bash_prepend_unique_value_IFS="$IFS"
+ IFS=":"
+ # start with the new value
+ _all_values="$_value"
+ _contained_value=""
+ # iterate over existing values in the variable
+ for _item in $_values; do
+ # ignore empty strings
+ if [ -z "$_item" ]; then
+ continue
+ fi
+ # ignore duplicates of _value
+ if [ "$_item" = "$_value" ]; then
+ _contained_value=1
+ continue
+ fi
+ # keep non-duplicate values
+ _all_values="$_all_values:$_item"
+ done
+ unset _item
+ if [ -z "$_contained_value" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ if [ "$_all_values" = "$_value" ]; then
+ echo "export $_listname=$_value"
+ else
+ echo "export $_listname=$_value:\$$_listname"
+ fi
+ fi
+ fi
+ unset _contained_value
+ # restore the field separator
+ IFS="$_colcon_prefix_bash_prepend_unique_value_IFS"
+ unset _colcon_prefix_bash_prepend_unique_value_IFS
+ # export the updated variable
+ eval export $_listname=\"$_all_values\"
+ unset _all_values
+ unset _values
+
+ unset _value
+ unset _listname
+}
+
+# add this prefix to the COLCON_PREFIX_PATH
+_colcon_prefix_bash_prepend_unique_value COLCON_PREFIX_PATH "$_colcon_prefix_bash_COLCON_CURRENT_PREFIX"
+unset _colcon_prefix_bash_prepend_unique_value
+
+# check environment variable for custom Python executable
+if [ -n "$COLCON_PYTHON_EXECUTABLE" ]; then
+ if [ ! -f "$COLCON_PYTHON_EXECUTABLE" ]; then
+ echo "error: COLCON_PYTHON_EXECUTABLE '$COLCON_PYTHON_EXECUTABLE' doesn't exist"
+ return 1
+ fi
+ _colcon_python_executable="$COLCON_PYTHON_EXECUTABLE"
+else
+ # try the Python executable known at configure time
+ _colcon_python_executable="/usr/bin/python3"
+ # if it doesn't exist try a fall back
+ if [ ! -f "$_colcon_python_executable" ]; then
+ if ! /usr/bin/env python3 --version > /dev/null 2> /dev/null; then
+ echo "error: unable to find python3 executable"
+ return 1
+ fi
+ _colcon_python_executable=`/usr/bin/env python3 -c "import sys; print(sys.executable)"`
+ fi
+fi
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+_colcon_prefix_sh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$1"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# get all commands in topological order
+_colcon_ordered_commands="$($_colcon_python_executable "$_colcon_prefix_bash_COLCON_CURRENT_PREFIX/_local_setup_util_sh.py" sh bash)"
+unset _colcon_python_executable
+if [ -n "$COLCON_TRACE" ]; then
+ echo "$(declare -f _colcon_prefix_sh_source_script)"
+ echo "# Execute generated script:"
+ echo "# <<<"
+ echo "${_colcon_ordered_commands}"
+ echo "# >>>"
+ echo "unset _colcon_prefix_sh_source_script"
+fi
+eval "${_colcon_ordered_commands}"
+unset _colcon_ordered_commands
+
+unset _colcon_prefix_sh_source_script
+
+unset _colcon_prefix_bash_COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/local_setup.ps1 b/vision_ws/install/local_setup.ps1
new file mode 100644
index 0000000..6f68c8d
--- /dev/null
+++ b/vision_ws/install/local_setup.ps1
@@ -0,0 +1,55 @@
+# generated from colcon_powershell/shell/template/prefix.ps1.em
+
+# This script extends the environment with all packages contained in this
+# prefix path.
+
+# check environment variable for custom Python executable
+if ($env:COLCON_PYTHON_EXECUTABLE) {
+ if (!(Test-Path "$env:COLCON_PYTHON_EXECUTABLE" -PathType Leaf)) {
+ echo "error: COLCON_PYTHON_EXECUTABLE '$env:COLCON_PYTHON_EXECUTABLE' doesn't exist"
+ exit 1
+ }
+ $_colcon_python_executable="$env:COLCON_PYTHON_EXECUTABLE"
+} else {
+ # use the Python executable known at configure time
+ $_colcon_python_executable="/usr/bin/python3"
+ # if it doesn't exist try a fall back
+ if (!(Test-Path "$_colcon_python_executable" -PathType Leaf)) {
+ if (!(Get-Command "python3" -ErrorAction SilentlyContinue)) {
+ echo "error: unable to find python3 executable"
+ exit 1
+ }
+ $_colcon_python_executable="python3"
+ }
+}
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+function _colcon_prefix_powershell_source_script {
+ param (
+ $_colcon_prefix_powershell_source_script_param
+ )
+ # source script with conditional trace output
+ if (Test-Path $_colcon_prefix_powershell_source_script_param) {
+ if ($env:COLCON_TRACE) {
+ echo ". '$_colcon_prefix_powershell_source_script_param'"
+ }
+ . "$_colcon_prefix_powershell_source_script_param"
+ } else {
+ Write-Error "not found: '$_colcon_prefix_powershell_source_script_param'"
+ }
+}
+
+# get all commands in topological order
+$_colcon_ordered_commands = & "$_colcon_python_executable" "$(Split-Path $PSCommandPath -Parent)/_local_setup_util_ps1.py" ps1
+
+# execute all commands in topological order
+if ($env:COLCON_TRACE) {
+ echo "Execute generated script:"
+ echo "<<<"
+ $_colcon_ordered_commands.Split([Environment]::NewLine, [StringSplitOptions]::RemoveEmptyEntries) | Write-Output
+ echo ">>>"
+}
+if ($_colcon_ordered_commands) {
+ $_colcon_ordered_commands.Split([Environment]::NewLine, [StringSplitOptions]::RemoveEmptyEntries) | Invoke-Expression
+}
diff --git a/vision_ws/install/local_setup.sh b/vision_ws/install/local_setup.sh
new file mode 100644
index 0000000..e91af98
--- /dev/null
+++ b/vision_ws/install/local_setup.sh
@@ -0,0 +1,137 @@
+# generated from colcon_core/shell/template/prefix.sh.em
+
+# This script extends the environment with all packages contained in this
+# prefix path.
+
+# since a plain shell script can't determine its own path when being sourced
+# either use the provided COLCON_CURRENT_PREFIX
+# or fall back to the build time prefix (if it exists)
+_colcon_prefix_sh_COLCON_CURRENT_PREFIX="/home/pge-2023/vis2-tiago/ros_ws/install"
+if [ -z "$COLCON_CURRENT_PREFIX" ]; then
+ if [ ! -d "$_colcon_prefix_sh_COLCON_CURRENT_PREFIX" ]; then
+ echo "The build time path \"$_colcon_prefix_sh_COLCON_CURRENT_PREFIX\" doesn't exist. Either source a script for a different shell or set the environment variable \"COLCON_CURRENT_PREFIX\" explicitly." 1>&2
+ unset _colcon_prefix_sh_COLCON_CURRENT_PREFIX
+ return 1
+ fi
+else
+ _colcon_prefix_sh_COLCON_CURRENT_PREFIX="$COLCON_CURRENT_PREFIX"
+fi
+
+# function to prepend a value to a variable
+# which uses colons as separators
+# duplicates as well as trailing separators are avoided
+# first argument: the name of the result variable
+# second argument: the value to be prepended
+_colcon_prefix_sh_prepend_unique_value() {
+ # arguments
+ _listname="$1"
+ _value="$2"
+
+ # get values from variable
+ eval _values=\"\$$_listname\"
+ # backup the field separator
+ _colcon_prefix_sh_prepend_unique_value_IFS="$IFS"
+ IFS=":"
+ # start with the new value
+ _all_values="$_value"
+ _contained_value=""
+ # iterate over existing values in the variable
+ for _item in $_values; do
+ # ignore empty strings
+ if [ -z "$_item" ]; then
+ continue
+ fi
+ # ignore duplicates of _value
+ if [ "$_item" = "$_value" ]; then
+ _contained_value=1
+ continue
+ fi
+ # keep non-duplicate values
+ _all_values="$_all_values:$_item"
+ done
+ unset _item
+ if [ -z "$_contained_value" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ if [ "$_all_values" = "$_value" ]; then
+ echo "export $_listname=$_value"
+ else
+ echo "export $_listname=$_value:\$$_listname"
+ fi
+ fi
+ fi
+ unset _contained_value
+ # restore the field separator
+ IFS="$_colcon_prefix_sh_prepend_unique_value_IFS"
+ unset _colcon_prefix_sh_prepend_unique_value_IFS
+ # export the updated variable
+ eval export $_listname=\"$_all_values\"
+ unset _all_values
+ unset _values
+
+ unset _value
+ unset _listname
+}
+
+# add this prefix to the COLCON_PREFIX_PATH
+_colcon_prefix_sh_prepend_unique_value COLCON_PREFIX_PATH "$_colcon_prefix_sh_COLCON_CURRENT_PREFIX"
+unset _colcon_prefix_sh_prepend_unique_value
+
+# check environment variable for custom Python executable
+if [ -n "$COLCON_PYTHON_EXECUTABLE" ]; then
+ if [ ! -f "$COLCON_PYTHON_EXECUTABLE" ]; then
+ echo "error: COLCON_PYTHON_EXECUTABLE '$COLCON_PYTHON_EXECUTABLE' doesn't exist"
+ return 1
+ fi
+ _colcon_python_executable="$COLCON_PYTHON_EXECUTABLE"
+else
+ # try the Python executable known at configure time
+ _colcon_python_executable="/usr/bin/python3"
+ # if it doesn't exist try a fall back
+ if [ ! -f "$_colcon_python_executable" ]; then
+ if ! /usr/bin/env python3 --version > /dev/null 2> /dev/null; then
+ echo "error: unable to find python3 executable"
+ return 1
+ fi
+ _colcon_python_executable=`/usr/bin/env python3 -c "import sys; print(sys.executable)"`
+ fi
+fi
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+_colcon_prefix_sh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$1"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# get all commands in topological order
+_colcon_ordered_commands="$($_colcon_python_executable "$_colcon_prefix_sh_COLCON_CURRENT_PREFIX/_local_setup_util_sh.py" sh)"
+unset _colcon_python_executable
+if [ -n "$COLCON_TRACE" ]; then
+ echo "_colcon_prefix_sh_source_script() {
+ if [ -f \"\$1\" ]; then
+ if [ -n \"\$COLCON_TRACE\" ]; then
+ echo \"# . \\\"\$1\\\"\"
+ fi
+ . \"\$1\"
+ else
+ echo \"not found: \\\"\$1\\\"\" 1>&2
+ fi
+ }"
+ echo "# Execute generated script:"
+ echo "# <<<"
+ echo "${_colcon_ordered_commands}"
+ echo "# >>>"
+ echo "unset _colcon_prefix_sh_source_script"
+fi
+eval "${_colcon_ordered_commands}"
+unset _colcon_ordered_commands
+
+unset _colcon_prefix_sh_source_script
+
+unset _colcon_prefix_sh_COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/local_setup.zsh b/vision_ws/install/local_setup.zsh
new file mode 100644
index 0000000..b648710
--- /dev/null
+++ b/vision_ws/install/local_setup.zsh
@@ -0,0 +1,134 @@
+# generated from colcon_zsh/shell/template/prefix.zsh.em
+
+# This script extends the environment with all packages contained in this
+# prefix path.
+
+# a zsh script is able to determine its own path if necessary
+if [ -z "$COLCON_CURRENT_PREFIX" ]; then
+ _colcon_prefix_zsh_COLCON_CURRENT_PREFIX="$(builtin cd -q "`dirname "${(%):-%N}"`" > /dev/null && pwd)"
+else
+ _colcon_prefix_zsh_COLCON_CURRENT_PREFIX="$COLCON_CURRENT_PREFIX"
+fi
+
+# function to convert array-like strings into arrays
+# to workaround SH_WORD_SPLIT not being set
+_colcon_prefix_zsh_convert_to_array() {
+ local _listname=$1
+ local _dollar="$"
+ local _split="{="
+ local _to_array="(\"$_dollar$_split$_listname}\")"
+ eval $_listname=$_to_array
+}
+
+# function to prepend a value to a variable
+# which uses colons as separators
+# duplicates as well as trailing separators are avoided
+# first argument: the name of the result variable
+# second argument: the value to be prepended
+_colcon_prefix_zsh_prepend_unique_value() {
+ # arguments
+ _listname="$1"
+ _value="$2"
+
+ # get values from variable
+ eval _values=\"\$$_listname\"
+ # backup the field separator
+ _colcon_prefix_zsh_prepend_unique_value_IFS="$IFS"
+ IFS=":"
+ # start with the new value
+ _all_values="$_value"
+ _contained_value=""
+ # workaround SH_WORD_SPLIT not being set
+ _colcon_prefix_zsh_convert_to_array _values
+ # iterate over existing values in the variable
+ for _item in $_values; do
+ # ignore empty strings
+ if [ -z "$_item" ]; then
+ continue
+ fi
+ # ignore duplicates of _value
+ if [ "$_item" = "$_value" ]; then
+ _contained_value=1
+ continue
+ fi
+ # keep non-duplicate values
+ _all_values="$_all_values:$_item"
+ done
+ unset _item
+ if [ -z "$_contained_value" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ if [ "$_all_values" = "$_value" ]; then
+ echo "export $_listname=$_value"
+ else
+ echo "export $_listname=$_value:\$$_listname"
+ fi
+ fi
+ fi
+ unset _contained_value
+ # restore the field separator
+ IFS="$_colcon_prefix_zsh_prepend_unique_value_IFS"
+ unset _colcon_prefix_zsh_prepend_unique_value_IFS
+ # export the updated variable
+ eval export $_listname=\"$_all_values\"
+ unset _all_values
+ unset _values
+
+ unset _value
+ unset _listname
+}
+
+# add this prefix to the COLCON_PREFIX_PATH
+_colcon_prefix_zsh_prepend_unique_value COLCON_PREFIX_PATH "$_colcon_prefix_zsh_COLCON_CURRENT_PREFIX"
+unset _colcon_prefix_zsh_prepend_unique_value
+unset _colcon_prefix_zsh_convert_to_array
+
+# check environment variable for custom Python executable
+if [ -n "$COLCON_PYTHON_EXECUTABLE" ]; then
+ if [ ! -f "$COLCON_PYTHON_EXECUTABLE" ]; then
+ echo "error: COLCON_PYTHON_EXECUTABLE '$COLCON_PYTHON_EXECUTABLE' doesn't exist"
+ return 1
+ fi
+ _colcon_python_executable="$COLCON_PYTHON_EXECUTABLE"
+else
+ # try the Python executable known at configure time
+ _colcon_python_executable="/usr/bin/python3"
+ # if it doesn't exist try a fall back
+ if [ ! -f "$_colcon_python_executable" ]; then
+ if ! /usr/bin/env python3 --version > /dev/null 2> /dev/null; then
+ echo "error: unable to find python3 executable"
+ return 1
+ fi
+ _colcon_python_executable=`/usr/bin/env python3 -c "import sys; print(sys.executable)"`
+ fi
+fi
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+_colcon_prefix_sh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$1"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# get all commands in topological order
+_colcon_ordered_commands="$($_colcon_python_executable "$_colcon_prefix_zsh_COLCON_CURRENT_PREFIX/_local_setup_util_sh.py" sh zsh)"
+unset _colcon_python_executable
+if [ -n "$COLCON_TRACE" ]; then
+ echo "$(declare -f _colcon_prefix_sh_source_script)"
+ echo "# Execute generated script:"
+ echo "# <<<"
+ echo "${_colcon_ordered_commands}"
+ echo "# >>>"
+ echo "unset _colcon_prefix_sh_source_script"
+fi
+eval "${_colcon_ordered_commands}"
+unset _colcon_ordered_commands
+
+unset _colcon_prefix_sh_source_script
+
+unset _colcon_prefix_zsh_COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/py_pubsub/lib/py_pubsub/camera_yolo b/vision_ws/install/py_pubsub/lib/py_pubsub/camera_yolo
new file mode 100755
index 0000000..5c11b02
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/py_pubsub/camera_yolo
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# EASY-INSTALL-ENTRY-SCRIPT: 'py-pubsub==0.0.0','console_scripts','camera_yolo'
+__requires__ = 'py-pubsub==0.0.0'
+import re
+import sys
+from pkg_resources import load_entry_point
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('py-pubsub==0.0.0', 'console_scripts', 'camera_yolo')()
+ )
diff --git a/vision_ws/install/py_pubsub/lib/py_pubsub/display b/vision_ws/install/py_pubsub/lib/py_pubsub/display
new file mode 100755
index 0000000..bdbe3ce
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/py_pubsub/display
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# EASY-INSTALL-ENTRY-SCRIPT: 'py-pubsub==0.0.0','console_scripts','display'
+__requires__ = 'py-pubsub==0.0.0'
+import re
+import sys
+from pkg_resources import load_entry_point
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('py-pubsub==0.0.0', 'console_scripts', 'display')()
+ )
diff --git a/vision_ws/install/py_pubsub/lib/py_pubsub/listener b/vision_ws/install/py_pubsub/lib/py_pubsub/listener
new file mode 100755
index 0000000..8cd56d1
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/py_pubsub/listener
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# EASY-INSTALL-ENTRY-SCRIPT: 'py-pubsub==0.0.0','console_scripts','listener'
+__requires__ = 'py-pubsub==0.0.0'
+import re
+import sys
+from pkg_resources import load_entry_point
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('py-pubsub==0.0.0', 'console_scripts', 'listener')()
+ )
diff --git a/vision_ws/install/py_pubsub/lib/py_pubsub/talker b/vision_ws/install/py_pubsub/lib/py_pubsub/talker
new file mode 100755
index 0000000..9be7e60
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/py_pubsub/talker
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# EASY-INSTALL-ENTRY-SCRIPT: 'py-pubsub==0.0.0','console_scripts','talker'
+__requires__ = 'py-pubsub==0.0.0'
+import re
+import sys
+from pkg_resources import load_entry_point
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(
+ load_entry_point('py-pubsub==0.0.0', 'console_scripts', 'talker')()
+ )
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/PKG-INFO b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/PKG-INFO
new file mode 100644
index 0000000..b77e92e
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.2
+Name: py-pubsub
+Version: 0.0.0
+Summary: Examples of minimal publisher/subscriber using rclpy
+Home-page: UNKNOWN
+Maintainer: pge-2023
+Maintainer-email: pge-2023@todo.todo
+License: TODO: License declaration
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/SOURCES.txt b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/SOURCES.txt
new file mode 100644
index 0000000..c403224
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/SOURCES.txt
@@ -0,0 +1,17 @@
+package.xml
+setup.cfg
+setup.py
+../../build/py_pubsub/py_pubsub.egg-info/PKG-INFO
+../../build/py_pubsub/py_pubsub.egg-info/SOURCES.txt
+../../build/py_pubsub/py_pubsub.egg-info/dependency_links.txt
+../../build/py_pubsub/py_pubsub.egg-info/entry_points.txt
+../../build/py_pubsub/py_pubsub.egg-info/requires.txt
+../../build/py_pubsub/py_pubsub.egg-info/top_level.txt
+../../build/py_pubsub/py_pubsub.egg-info/zip-safe
+py_pubsub/__init__.py
+py_pubsub/camera_yolo.py
+py_pubsub/display.py
+resource/py_pubsub
+test/test_copyright.py
+test/test_flake8.py
+test/test_pep257.py
\ No newline at end of file
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/dependency_links.txt b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/entry_points.txt b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/entry_points.txt
new file mode 100644
index 0000000..ef1daca
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+camera_yolo = py_pubsub.camera_yolo:main
+display = py_pubsub.display:main
+
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/requires.txt b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/requires.txt
new file mode 100644
index 0000000..49fe098
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/requires.txt
@@ -0,0 +1 @@
+setuptools
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/top_level.txt b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/top_level.txt
new file mode 100644
index 0000000..2b491d3
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/top_level.txt
@@ -0,0 +1 @@
+py_pubsub
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/zip-safe b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub-0.0.0-py3.8.egg-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/__init__.py b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/camera_yolo.py b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/camera_yolo.py
new file mode 100644
index 0000000..df2d518
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/camera_yolo.py
@@ -0,0 +1,71 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from ultralytics import YOLO # YOLOv8 import
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class YOLODetector:
+ def __init__(self):
+ self.model = YOLO("yolov8s-pose.pt")
+
+ def compute(self, image):
+ if image is not None:
+ results = self.model.track(image, persist=True, tracker="bytetrack.yaml")
+ return results
+
+
+class MinimalPublisher(Node):
+ def __init__(self, camera_id):
+ super().__init__("minimal_publisher")
+ self.camera_id = camera_id
+ self.topic_name = f"annotated_images_{camera_id}"
+ self.publisher = self.create_publisher(Image, self.topic_name, 10)
+ self.subscription = self.create_subscription(
+ Image, f"Cam{camera_id}/image_raw", self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+ self.detector = YOLODetector()
+
+ def listener_callback(self, image):
+ self.get_logger().info(f"Image received from Camera {self.camera_id}")
+ cv_image = cv2.cvtColor(
+ self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough"),
+ cv2.COLOR_BGR2RGB,
+ )
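+ # Assumes the incoming frame is BGR-encoded; converted to RGB for YOLO and for the rgb8 message published below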
+
+ results = self.detector.compute(cv_image)
+ if results:
+ annotated_image = results[0].plot()
+ msg = self._cv_bridge.cv2_to_imgmsg(annotated_image, "rgb8")
+ self.publisher.publish(msg)
+
+
+def main(args=None):
+ # Initialize ROS without passing args
+ rclpy.init()
+
+ # Create an argument parser for your script
+ parser = argparse.ArgumentParser(description="ROS 2 YOLO Object Detection Node")
+
+ # Add your custom argument
+ parser.add_argument("--cam", type=str, default="1", help="Camera identifier")
+
+ # Parse the command line arguments
+ custom_args = parser.parse_args()
+
+ # Create and spin your node
+ minimal_publisher = MinimalPublisher(custom_args.cam)
+ rclpy.spin(minimal_publisher)
+
+ # Shutdown and cleanup
+ minimal_publisher.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/display.py b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/display.py
new file mode 100644
index 0000000..3a380b4
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/display.py
@@ -0,0 +1,51 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class ImageDisplayNode(Node):
+ def __init__(self, topic_name):
+ self.topic_name = topic_name
+ super().__init__("image_display_node")
+ self.subscription = self.create_subscription(
+ Image, self.topic_name, self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+
+ def listener_callback(self, image):
+ cv_image = self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")
+ cv2.imshow(f"Annotated Image {self.topic_name}", cv_image)
+ cv2.waitKey(1)
+
+
+def main(args=None):
+ # Initialize ROS without passing args
+ rclpy.init()
+
+ # Create an argument parser for your script
+ parser = argparse.ArgumentParser(description="Image Display Node")
+
+ # Add your custom argument
+ parser.add_argument(
+ "--topic", type=str, default="1", required=True, help="Topic to subscribe to"
+ )
+
+ # Parse the command line arguments
+ custom_args = parser.parse_args()
+
+ # Create and spin your node
+ image_display_node = ImageDisplayNode(topic_name=custom_args.topic)
+ rclpy.spin(image_display_node)
+
+ # Shutdown and cleanup
+ image_display_node.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/publisher_member_function.py b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/publisher_member_function.py
new file mode 100644
index 0000000..fff0ae1
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/publisher_member_function.py
@@ -0,0 +1,39 @@
+import rclpy
+from rclpy.node import Node
+
+from std_msgs.msg import String
+
+
+class MinimalPublisher(Node):
+
+ def __init__(self):
+ super().__init__('minimal_publisher')
+ self.publisher_ = self.create_publisher(String, 'topic', 10)
+ timer_period = 0.5 # seconds
+ self.timer = self.create_timer(timer_period, self.timer_callback)
+ self.i = 0
+
+ def timer_callback(self):
+ msg = String()
+ msg.data = 'Hello World: %d' % self.i
+ self.publisher_.publish(msg)
+ self.get_logger().info('Publishing: "%s"' % msg.data)
+ self.i += 1
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ minimal_publisher = MinimalPublisher()
+
+ rclpy.spin(minimal_publisher)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ minimal_publisher.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/subscriber_member_function.py b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/subscriber_member_function.py
new file mode 100644
index 0000000..81b648c
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/subscriber_member_function.py
@@ -0,0 +1,65 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from ultralytics import YOLO # YOLOv8 import
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class YOLODetector:
+ def __init__(self):
+ self.model = YOLO("yolov8s-pose.pt") # Initialize the YOLO model
+
+ def compute(self, image):
+ # Run YOLOv8 tracking on the image if it's valid
+ if image is not None:
+ results = self.model.track(image, persist=True, tracker="bytetrack.yaml")
+ return results
+
+
+class MinimalSubscriber(Node):
+ def __init__(self, camera_id="1"):
+ super().__init__("minimal_subscriber")
+ self.subscription = self.create_subscription(
+ Image, f"Cam{camera_id}/image_raw", self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+ self.detector = YOLODetector()
+
+ def listener_callback(self, image):
+ self.get_logger().info("Image received")
+ cv_image = cv2.cvtColor(
+ self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough"),
+ cv2.COLOR_BGR2RGB,
+ )
+
+ results = self.detector.compute(cv_image)
+ if results:
+ annotated_image = results[0].plot()
+ # Add code to display or process the annotated image
+
+
+def main(args=None):
+ # Separate ROS arguments from script arguments
+ ros_args = remove_ros_args(sys.argv)
+
+ rclpy.init(args=ros_args)
+
+ parser = argparse.ArgumentParser(description="ROS 2 YOLO Object Detection Node")
+ parser.add_argument("--cam", type=str, default="1", help="Camera identifier")
+
+ # Use parse_known_args to avoid error with unrecognized arguments
+ custom_args, unknown_args = parser.parse_known_args(args=remove_ros_args(sys.argv)[1:])
+
+ minimal_subscriber = MinimalSubscriber(custom_args.cam)
+ rclpy.spin(minimal_subscriber)
+
+ minimal_subscriber.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/yolo.py b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/yolo.py
new file mode 100644
index 0000000..63938ff
--- /dev/null
+++ b/vision_ws/install/py_pubsub/lib/python3.8/site-packages/py_pubsub/yolo.py
@@ -0,0 +1,36 @@
+import cv2
+from ultralytics import YOLO # YOLOv8 import
+from PIL import Image
+
+
+class yolo:
+ def __init__(self):
+ self.model = YOLO("yolov8s-pose.pt")
+
+ def compute(self, image):
+ # Check if the image was successfully loaded
+ if image is not None:
+ # Run YOLOv8 tracking on the image
+ results = self.model.track(image, persist=True, tracker="bytetrack.yaml")
+
+ return results
+
+
+if __name__ == "__main__":
+ instance = yolo()
+ image = cv2.imread("test.jpg")
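+ # Note: cv2.imread returns None if "test.jpg" is missing, which would make cvtColor below raise an error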
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ results = instance.compute(image)
+
+ print(results)
+
+ # Visualize the results on the image
+ annotated_image = results[0].plot()
+ # Display the annotated image
+ cv2.imshow("YOLOv8 Tracking", annotated_image)
+
+ # Wait for a key press before closing the window
+ cv2.waitKey(0)
+
+ # Close the display window
+ cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/vision_ws/install/py_pubsub/share/ament_index/resource_index/packages/py_pubsub b/vision_ws/install/py_pubsub/share/ament_index/resource_index/packages/py_pubsub
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/install/py_pubsub/share/colcon-core/packages/py_pubsub b/vision_ws/install/py_pubsub/share/colcon-core/packages/py_pubsub
new file mode 100644
index 0000000..de58d89
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/colcon-core/packages/py_pubsub
@@ -0,0 +1 @@
+rclpy:std_msgs
\ No newline at end of file
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.dsv b/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.dsv
new file mode 100644
index 0000000..79d4c95
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.dsv
@@ -0,0 +1 @@
+prepend-non-duplicate;AMENT_PREFIX_PATH;
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.ps1 b/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.ps1
new file mode 100644
index 0000000..26b9997
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.ps1
@@ -0,0 +1,3 @@
+# generated from colcon_powershell/shell/template/hook_prepend_value.ps1.em
+
+colcon_prepend_unique_value AMENT_PREFIX_PATH "$env:COLCON_CURRENT_PREFIX"
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.sh b/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.sh
new file mode 100644
index 0000000..f3041f6
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/hook/ament_prefix_path.sh
@@ -0,0 +1,3 @@
+# generated from colcon_core/shell/template/hook_prepend_value.sh.em
+
+_colcon_prepend_unique_value AMENT_PREFIX_PATH "$COLCON_CURRENT_PREFIX"
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.dsv b/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.dsv
new file mode 100644
index 0000000..84dbc4c
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.dsv
@@ -0,0 +1 @@
+prepend-non-duplicate;PYTHONPATH;lib/python3.8/site-packages
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.ps1 b/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.ps1
new file mode 100644
index 0000000..12877ef
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.ps1
@@ -0,0 +1,3 @@
+# generated from colcon_powershell/shell/template/hook_prepend_value.ps1.em
+
+colcon_prepend_unique_value PYTHONPATH "$env:COLCON_CURRENT_PREFIX\lib/python3.8/site-packages"
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.sh b/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.sh
new file mode 100644
index 0000000..ed8efd9
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/hook/pythonpath.sh
@@ -0,0 +1,3 @@
+# generated from colcon_core/shell/template/hook_prepend_value.sh.em
+
+_colcon_prepend_unique_value PYTHONPATH "$COLCON_CURRENT_PREFIX/lib/python3.8/site-packages"
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/package.bash b/vision_ws/install/py_pubsub/share/py_pubsub/package.bash
new file mode 100644
index 0000000..0772ef9
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/package.bash
@@ -0,0 +1,31 @@
+# generated from colcon_bash/shell/template/package.bash.em
+
+# This script extends the environment for this package.
+
+# a bash script is able to determine its own path if necessary
+if [ -z "$COLCON_CURRENT_PREFIX" ]; then
+ # the prefix is two levels up from the package specific share directory
+ _colcon_package_bash_COLCON_CURRENT_PREFIX="$(builtin cd "`dirname "${BASH_SOURCE[0]}"`/../.." > /dev/null && pwd)"
+else
+ _colcon_package_bash_COLCON_CURRENT_PREFIX="$COLCON_CURRENT_PREFIX"
+fi
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+# additional arguments: arguments to the script
+_colcon_package_bash_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$@"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# source sh script of this package
+_colcon_package_bash_source_script "$_colcon_package_bash_COLCON_CURRENT_PREFIX/share/py_pubsub/package.sh"
+
+unset _colcon_package_bash_source_script
+unset _colcon_package_bash_COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/package.dsv b/vision_ws/install/py_pubsub/share/py_pubsub/package.dsv
new file mode 100644
index 0000000..581ceaa
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/package.dsv
@@ -0,0 +1,6 @@
+source;share/py_pubsub/hook/pythonpath.ps1
+source;share/py_pubsub/hook/pythonpath.dsv
+source;share/py_pubsub/hook/pythonpath.sh
+source;share/py_pubsub/hook/ament_prefix_path.ps1
+source;share/py_pubsub/hook/ament_prefix_path.dsv
+source;share/py_pubsub/hook/ament_prefix_path.sh
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/package.ps1 b/vision_ws/install/py_pubsub/share/py_pubsub/package.ps1
new file mode 100644
index 0000000..b5ea45e
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/package.ps1
@@ -0,0 +1,116 @@
+# generated from colcon_powershell/shell/template/package.ps1.em
+
+# function to append a value to a variable
+# which uses colons as separators
+# duplicates as well as leading separators are avoided
+# first argument: the name of the result variable
+# second argument: the value to be prepended
+function colcon_append_unique_value {
+ param (
+ $_listname,
+ $_value
+ )
+
+ # get values from variable
+ if (Test-Path Env:$_listname) {
+ $_values=(Get-Item env:$_listname).Value
+ } else {
+ $_values=""
+ }
+ $_duplicate=""
+ # start with no values
+ $_all_values=""
+ # iterate over existing values in the variable
+ if ($_values) {
+ $_values.Split(";") | ForEach {
+ # not an empty string
+ if ($_) {
+ # not a duplicate of _value
+ if ($_ -eq $_value) {
+ $_duplicate="1"
+ }
+ if ($_all_values) {
+ $_all_values="${_all_values};$_"
+ } else {
+ $_all_values="$_"
+ }
+ }
+ }
+ }
+ # append only non-duplicates
+ if (!$_duplicate) {
+ # avoid leading separator
+ if ($_all_values) {
+ $_all_values="${_all_values};${_value}"
+ } else {
+ $_all_values="${_value}"
+ }
+ }
+
+ # export the updated variable
+ Set-Item env:\$_listname -Value "$_all_values"
+}
+
+# function to prepend a value to a variable
+# which uses colons as separators
+# duplicates as well as trailing separators are avoided
+# first argument: the name of the result variable
+# second argument: the value to be prepended
+function colcon_prepend_unique_value {
+ param (
+ $_listname,
+ $_value
+ )
+
+ # get values from variable
+ if (Test-Path Env:$_listname) {
+ $_values=(Get-Item env:$_listname).Value
+ } else {
+ $_values=""
+ }
+ # start with the new value
+ $_all_values="$_value"
+ # iterate over existing values in the variable
+ if ($_values) {
+ $_values.Split(";") | ForEach {
+ # not an empty string
+ if ($_) {
+ # not a duplicate of _value
+ if ($_ -ne $_value) {
+ # keep non-duplicate values
+ $_all_values="${_all_values};$_"
+ }
+ }
+ }
+ }
+ # export the updated variable
+ Set-Item env:\$_listname -Value "$_all_values"
+}
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+# additional arguments: arguments to the script
+function colcon_package_source_powershell_script {
+ param (
+ $_colcon_package_source_powershell_script
+ )
+ # source script with conditional trace output
+ if (Test-Path $_colcon_package_source_powershell_script) {
+ if ($env:COLCON_TRACE) {
+ echo ". '$_colcon_package_source_powershell_script'"
+ }
+ . "$_colcon_package_source_powershell_script"
+ } else {
+ Write-Error "not found: '$_colcon_package_source_powershell_script'"
+ }
+}
+
+
+# a powershell script is able to determine its own path
+# the prefix is two levels up from the package specific share directory
+$env:COLCON_CURRENT_PREFIX=(Get-Item $PSCommandPath).Directory.Parent.Parent.FullName
+
+colcon_package_source_powershell_script "$env:COLCON_CURRENT_PREFIX\share/py_pubsub/hook/pythonpath.ps1"
+colcon_package_source_powershell_script "$env:COLCON_CURRENT_PREFIX\share/py_pubsub/hook/ament_prefix_path.ps1"
+
+Remove-Item Env:\COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/package.sh b/vision_ws/install/py_pubsub/share/py_pubsub/package.sh
new file mode 100644
index 0000000..011c157
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/package.sh
@@ -0,0 +1,87 @@
+# generated from colcon_core/shell/template/package.sh.em
+
+# This script extends the environment for this package.
+
+# function to prepend a value to a variable
+# which uses colons as separators
+# duplicates as well as trailing separators are avoided
+# first argument: the name of the result variable
+# second argument: the value to be prepended
+_colcon_prepend_unique_value() {
+ # arguments
+ _listname="$1"
+ _value="$2"
+
+ # get values from variable
+ eval _values=\"\$$_listname\"
+ # backup the field separator
+ _colcon_prepend_unique_value_IFS=$IFS
+ IFS=":"
+ # start with the new value
+ _all_values="$_value"
+ # workaround SH_WORD_SPLIT not being set in zsh
+ if [ "$(command -v colcon_zsh_convert_to_array)" ]; then
+ colcon_zsh_convert_to_array _values
+ fi
+ # iterate over existing values in the variable
+ for _item in $_values; do
+ # ignore empty strings
+ if [ -z "$_item" ]; then
+ continue
+ fi
+ # ignore duplicates of _value
+ if [ "$_item" = "$_value" ]; then
+ continue
+ fi
+ # keep non-duplicate values
+ _all_values="$_all_values:$_item"
+ done
+ unset _item
+ # restore the field separator
+ IFS=$_colcon_prepend_unique_value_IFS
+ unset _colcon_prepend_unique_value_IFS
+ # export the updated variable
+ eval export $_listname=\"$_all_values\"
+ unset _all_values
+ unset _values
+
+ unset _value
+ unset _listname
+}
+
+# since a plain shell script can't determine its own path when being sourced
+# either use the provided COLCON_CURRENT_PREFIX
+# or fall back to the build time prefix (if it exists)
+_colcon_package_sh_COLCON_CURRENT_PREFIX="/home/pge-2023/vis2-tiago/ros_ws/install/py_pubsub"
+if [ -z "$COLCON_CURRENT_PREFIX" ]; then
+ if [ ! -d "$_colcon_package_sh_COLCON_CURRENT_PREFIX" ]; then
+ echo "The build time path \"$_colcon_package_sh_COLCON_CURRENT_PREFIX\" doesn't exist. Either source a script for a different shell or set the environment variable \"COLCON_CURRENT_PREFIX\" explicitly." 1>&2
+ unset _colcon_package_sh_COLCON_CURRENT_PREFIX
+ return 1
+ fi
+ COLCON_CURRENT_PREFIX="$_colcon_package_sh_COLCON_CURRENT_PREFIX"
+fi
+unset _colcon_package_sh_COLCON_CURRENT_PREFIX
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+# additional arguments: arguments to the script
+_colcon_package_sh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$@"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# source sh hooks
+_colcon_package_sh_source_script "$COLCON_CURRENT_PREFIX/share/py_pubsub/hook/pythonpath.sh"
+_colcon_package_sh_source_script "$COLCON_CURRENT_PREFIX/share/py_pubsub/hook/ament_prefix_path.sh"
+
+unset _colcon_package_sh_source_script
+unset COLCON_CURRENT_PREFIX
+
+# do not unset _colcon_prepend_unique_value since it might be used by non-primary shell hooks
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/package.xml b/vision_ws/install/py_pubsub/share/py_pubsub/package.xml
new file mode 100644
index 0000000..af2c191
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/package.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+ <name>py_pubsub</name>
+ <version>0.0.0</version>
+ <description>Examples of minimal publisher/subscriber using rclpy</description>
+ <maintainer email="pge-2023@todo.todo">pge-2023</maintainer>
+ <license>TODO: License declaration</license>
+
+ <exec_depend>rclpy</exec_depend>
+ <exec_depend>std_msgs</exec_depend>
+
+ <test_depend>ament_copyright</test_depend>
+ <test_depend>ament_flake8</test_depend>
+ <test_depend>ament_pep257</test_depend>
+ <test_depend>python3-pytest</test_depend>
+
+ <export>
+ <build_type>ament_python</build_type>
+ </export>
+</package>
diff --git a/vision_ws/install/py_pubsub/share/py_pubsub/package.zsh b/vision_ws/install/py_pubsub/share/py_pubsub/package.zsh
new file mode 100644
index 0000000..2b9ae49
--- /dev/null
+++ b/vision_ws/install/py_pubsub/share/py_pubsub/package.zsh
@@ -0,0 +1,42 @@
+# generated from colcon_zsh/shell/template/package.zsh.em
+
+# This script extends the environment for this package.
+
+# a zsh script is able to determine its own path if necessary
+if [ -z "$COLCON_CURRENT_PREFIX" ]; then
+ # the prefix is two levels up from the package specific share directory
+ _colcon_package_zsh_COLCON_CURRENT_PREFIX="$(builtin cd -q "`dirname "${(%):-%N}"`/../.." > /dev/null && pwd)"
+else
+ _colcon_package_zsh_COLCON_CURRENT_PREFIX="$COLCON_CURRENT_PREFIX"
+fi
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+# additional arguments: arguments to the script
+_colcon_package_zsh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$@"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# function to convert array-like strings into arrays
+# to workaround SH_WORD_SPLIT not being set
+colcon_zsh_convert_to_array() {
+ local _listname=$1
+ local _dollar="$"
+ local _split="{="
+ local _to_array="(\"$_dollar$_split$_listname}\")"
+ eval $_listname=$_to_array
+}
+
+# source sh script of this package
+_colcon_package_zsh_source_script "$_colcon_package_zsh_COLCON_CURRENT_PREFIX/share/py_pubsub/package.sh"
+unset convert_zsh_to_array
+
+unset _colcon_package_zsh_source_script
+unset _colcon_package_zsh_COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/setup.bash b/vision_ws/install/setup.bash
new file mode 100644
index 0000000..9cf2cb9
--- /dev/null
+++ b/vision_ws/install/setup.bash
@@ -0,0 +1,31 @@
+# generated from colcon_bash/shell/template/prefix_chain.bash.em
+
+# This script extends the environment with the environment of other prefix
+# paths which were sourced when this file was generated as well as all packages
+# contained in this prefix path.
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+_colcon_prefix_chain_bash_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$1"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# source chained prefixes
+# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
+COLCON_CURRENT_PREFIX="/opt/ros/foxy"
+_colcon_prefix_chain_bash_source_script "$COLCON_CURRENT_PREFIX/local_setup.bash"
+
+# source this prefix
+# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
+COLCON_CURRENT_PREFIX="$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" > /dev/null && pwd)"
+_colcon_prefix_chain_bash_source_script "$COLCON_CURRENT_PREFIX/local_setup.bash"
+
+unset COLCON_CURRENT_PREFIX
+unset _colcon_prefix_chain_bash_source_script
diff --git a/vision_ws/install/setup.ps1 b/vision_ws/install/setup.ps1
new file mode 100644
index 0000000..412726f
--- /dev/null
+++ b/vision_ws/install/setup.ps1
@@ -0,0 +1,29 @@
+# generated from colcon_powershell/shell/template/prefix_chain.ps1.em
+
+# This script extends the environment with the environment of other prefix
+# paths which were sourced when this file was generated as well as all packages
+# contained in this prefix path.
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+function _colcon_prefix_chain_powershell_source_script {
+ param (
+ $_colcon_prefix_chain_powershell_source_script_param
+ )
+ # source script with conditional trace output
+ if (Test-Path $_colcon_prefix_chain_powershell_source_script_param) {
+ if ($env:COLCON_TRACE) {
+ echo ". '$_colcon_prefix_chain_powershell_source_script_param'"
+ }
+ . "$_colcon_prefix_chain_powershell_source_script_param"
+ } else {
+ Write-Error "not found: '$_colcon_prefix_chain_powershell_source_script_param'"
+ }
+}
+
+# source chained prefixes
+_colcon_prefix_chain_powershell_source_script "/opt/ros/foxy\local_setup.ps1"
+
+# source this prefix
+$env:COLCON_CURRENT_PREFIX=(Split-Path $PSCommandPath -Parent)
+_colcon_prefix_chain_powershell_source_script "$env:COLCON_CURRENT_PREFIX\local_setup.ps1"
diff --git a/vision_ws/install/setup.sh b/vision_ws/install/setup.sh
new file mode 100644
index 0000000..087d9f3
--- /dev/null
+++ b/vision_ws/install/setup.sh
@@ -0,0 +1,45 @@
+# generated from colcon_core/shell/template/prefix_chain.sh.em
+
+# This script extends the environment with the environment of other prefix
+# paths which were sourced when this file was generated as well as all packages
+# contained in this prefix path.
+
+# since a plain shell script can't determine its own path when being sourced
+# either use the provided COLCON_CURRENT_PREFIX
+# or fall back to the build time prefix (if it exists)
+_colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX=/home/pge-2023/vis2-tiago/ros_ws/install
+if [ ! -z "$COLCON_CURRENT_PREFIX" ]; then
+ _colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX="$COLCON_CURRENT_PREFIX"
+elif [ ! -d "$_colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX" ]; then
+ echo "The build time path \"$_colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX\" doesn't exist. Either source a script for a different shell or set the environment variable \"COLCON_CURRENT_PREFIX\" explicitly." 1>&2
+ unset _colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX
+ return 1
+fi
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+_colcon_prefix_chain_sh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$1"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# source chained prefixes
+# setting COLCON_CURRENT_PREFIX avoids relying on the build time prefix of the sourced script
+COLCON_CURRENT_PREFIX="/opt/ros/foxy"
+_colcon_prefix_chain_sh_source_script "$COLCON_CURRENT_PREFIX/local_setup.sh"
+
+
+# source this prefix
+# setting COLCON_CURRENT_PREFIX avoids relying on the build time prefix of the sourced script
+COLCON_CURRENT_PREFIX="$_colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX"
+_colcon_prefix_chain_sh_source_script "$COLCON_CURRENT_PREFIX/local_setup.sh"
+
+unset _colcon_prefix_chain_sh_COLCON_CURRENT_PREFIX
+unset _colcon_prefix_chain_sh_source_script
+unset COLCON_CURRENT_PREFIX
diff --git a/vision_ws/install/setup.zsh b/vision_ws/install/setup.zsh
new file mode 100644
index 0000000..37985e0
--- /dev/null
+++ b/vision_ws/install/setup.zsh
@@ -0,0 +1,31 @@
+# generated from colcon_zsh/shell/template/prefix_chain.zsh.em
+
+# This script extends the environment with the environment of other prefix
+# paths which were sourced when this file was generated as well as all packages
+# contained in this prefix path.
+
+# function to source another script with conditional trace output
+# first argument: the path of the script
+_colcon_prefix_chain_zsh_source_script() {
+ if [ -f "$1" ]; then
+ if [ -n "$COLCON_TRACE" ]; then
+ echo "# . \"$1\""
+ fi
+ . "$1"
+ else
+ echo "not found: \"$1\"" 1>&2
+ fi
+}
+
+# source chained prefixes
+# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
+COLCON_CURRENT_PREFIX="/opt/ros/foxy"
+_colcon_prefix_chain_zsh_source_script "$COLCON_CURRENT_PREFIX/local_setup.zsh"
+
+# source this prefix
+# setting COLCON_CURRENT_PREFIX avoids determining the prefix in the sourced script
+COLCON_CURRENT_PREFIX="$(builtin cd -q "`dirname "${(%):-%N}"`" > /dev/null && pwd)"
+_colcon_prefix_chain_zsh_source_script "$COLCON_CURRENT_PREFIX/local_setup.zsh"
+
+unset COLCON_CURRENT_PREFIX
+unset _colcon_prefix_chain_zsh_source_script
diff --git a/vision_ws/run_camera_yolo.sh b/vision_ws/run_camera_yolo.sh
new file mode 100644
index 0000000..c2b0772
--- /dev/null
+++ b/vision_ws/run_camera_yolo.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
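+# Usage: bash run_camera_yolo.sh <camera_id>
+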
+# Source the ROS environment
+source install/setup.bash
+
+# Build the specified package
+colcon build --packages-select py_pubsub
+
+# Run the ROS node with the camera argument passed to the script
+cam_id=$1
+echo "$cam_id"
+# Run camera_yolo with the camera identifier
+ros2 run py_pubsub camera_yolo --cam "$cam_id"
\ No newline at end of file
diff --git a/vision_ws/run_display.sh b/vision_ws/run_display.sh
new file mode 100644
index 0000000..659e8db
--- /dev/null
+++ b/vision_ws/run_display.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
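+# Usage: bash run_display.sh [camera_id]  (defaults to camera 1)
+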
+# Source the ROS environment
+source install/setup.bash
+
+# Build the specified package
+colcon build --packages-select py_pubsub
+
+# Derive the topic name from the camera argument passed to the script
+cam_id=${1:-1}
+topic_name="annotated_images_$cam_id"
+
+# Run display on the annotated-image topic for this camera
+ros2 run py_pubsub display --topic "$topic_name"
\ No newline at end of file
diff --git a/vision_ws/src/py_pubsub/package.xml b/vision_ws/src/py_pubsub/package.xml
new file mode 100644
index 0000000..af2c191
--- /dev/null
+++ b/vision_ws/src/py_pubsub/package.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+ <name>py_pubsub</name>
+ <version>0.0.0</version>
+ <description>Examples of minimal publisher/subscriber using rclpy</description>
+ <maintainer email="pge-2023@todo.todo">pge-2023</maintainer>
+ <license>TODO: License declaration</license>
+
+ <exec_depend>rclpy</exec_depend>
+ <exec_depend>std_msgs</exec_depend>
+
+ <test_depend>ament_copyright</test_depend>
+ <test_depend>ament_flake8</test_depend>
+ <test_depend>ament_pep257</test_depend>
+ <test_depend>python3-pytest</test_depend>
+
+ <export>
+ <build_type>ament_python</build_type>
+ </export>
+</package>
diff --git a/vision_ws/src/py_pubsub/py_pubsub/__init__.py b/vision_ws/src/py_pubsub/py_pubsub/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/src/py_pubsub/py_pubsub/camera_yolo.py b/vision_ws/src/py_pubsub/py_pubsub/camera_yolo.py
new file mode 100644
index 0000000..efbb9c7
--- /dev/null
+++ b/vision_ws/src/py_pubsub/py_pubsub/camera_yolo.py
@@ -0,0 +1,112 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from ultralytics import YOLO # YOLOv8 import
+from ultralytics.utils import LOGGER # LOGGER import
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class YOLODetector:
+ def __init__(self):
+ self.model = YOLO("yolov8s-pose.pt")
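+ # Pose-estimation weights; ultralytics downloads "yolov8s-pose.pt" automatically if it is not found locally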
+
+ def compute(self, image):
+ if image is not None:
+ results = self.model.track(image, persist=True, tracker="bytetrack.yaml")
+ return results
+
+
+class MinimalPublisher(Node):
+ def __init__(self, camera_id):
+ super().__init__("minimal_publisher")
+ self.camera_id = camera_id
+ self.topic_name = f"annotated_images_{camera_id}"
+ self.publisher = self.create_publisher(Image, self.topic_name, 10)
+ self.subscription = self.create_subscription(
+ Image, f"Cam{camera_id}/image_raw", self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+ self.detector = YOLODetector()
+
+ def toData(self, result, keypoint=False):
+ """Convert a YOLO result into a list of JSON-serializable detection dicts."""
+ if result.probs is not None:
+ LOGGER.warning("Classify results are not supported by toData yet.")
+ return
+
+ # Create list of detection dictionaries
+ resultat = []
+ data = result.boxes.data.cpu().tolist()
+ h, w = (1, 1)
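+ # Divisors of 1 keep the box coordinates in raw pixel units; set them to the image height/width to normalize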
+ for i, row in enumerate(data): # xyxy, track_id if tracking, conf, class_id
+ box = {
+ "x1": row[0] / w,
+ "y1": row[1] / h,
+ "x2": row[2] / w,
+ "y2": row[3] / h,
+ }
+ class_id = int(row[-1])
+ name = result.names[class_id]
+ results = {"name": name, "box": box}
+ if result.boxes.is_track:
+ results["track_id"] = int(row[-3]) # track ID
+ if result.masks:
+ # numpy array
+ x, y = result.masks.xy[i][:, 0], result.masks.xy[i][:, 1]
+ results["segments"] = {"x": (x / w).tolist(), "y": (y / h).tolist()}
+ if keypoint:
+ x, y, visible = (
+ result.keypoints[i].data[0].cpu().unbind(dim=1)
+ ) # torch Tensor
+ results["keypoints"] = {
+ "x": (x / w).tolist(),
+ "y": (y / h).tolist(),
+ "visible": visible.tolist(),
+ }
+ resultat.append(results)
+
+ # Return the list of detection dictionaries
+ return resultat
+
+ def listener_callback(self, image):
+ self.get_logger().info(f"Image received from Camera {self.camera_id}")
+ cv_image = cv2.cvtColor(
+ self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough"),
+ cv2.COLOR_BGR2RGB,
+ )
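+ # Assumes the incoming frame is BGR-encoded; converted to RGB for YOLO and for the rgb8 message published below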
+
+ results = self.detector.compute(cv_image)
+ if results:
+ annotated_image = results[0].plot()
+ msg = self._cv_bridge.cv2_to_imgmsg(annotated_image, "rgb8")
+ self.publisher.publish(msg)
+
+
+def main(args=None):
+ # Initialize ROS without passing args
+ rclpy.init()
+
+ # Create an argument parser for your script
+ parser = argparse.ArgumentParser(description="ROS 2 YOLO Object Detection Node")
+
+ # Add your custom argument
+ parser.add_argument("--cam", type=str, default="1", help="Camera identifier")
+
+ # Parse the command line arguments
+ custom_args = parser.parse_args()
+
+ # Create and spin your node
+ minimal_publisher = MinimalPublisher(custom_args.cam)
+ rclpy.spin(minimal_publisher)
+
+ # Shutdown and cleanup
+ minimal_publisher.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/src/py_pubsub/py_pubsub/display.py b/vision_ws/src/py_pubsub/py_pubsub/display.py
new file mode 100644
index 0000000..3a380b4
--- /dev/null
+++ b/vision_ws/src/py_pubsub/py_pubsub/display.py
@@ -0,0 +1,51 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+import argparse
+from rclpy.utilities import remove_ros_args
+import sys
+
+
+class ImageDisplayNode(Node):
+ def __init__(self, topic_name):
+ self.topic_name = topic_name
+ super().__init__("image_display_node")
+ self.subscription = self.create_subscription(
+ Image, self.topic_name, self.listener_callback, 10
+ )
+ self._cv_bridge = CvBridge()
+
+ def listener_callback(self, image):
+ cv_image = self._cv_bridge.imgmsg_to_cv2(image, desired_encoding="rgb8")
+ cv2.imshow(f"Annotated Image {self.topic_name}", cv_image)
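+ # waitKey(1) lets the OpenCV HighGUI window process events and refresh the display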
+ cv2.waitKey(1)
+
+
+def main(args=None):
+ # Initialize ROS without passing args
+ rclpy.init()
+
+ # Create an argument parser for your script
+ parser = argparse.ArgumentParser(description="Image Display Node")
+
+ # Add your custom argument
+ parser.add_argument(
+ "--topic", type=str, default="1", required=True, help="Topic to subscribe to"
+ )
+
+ # Parse the command line arguments
+ custom_args = parser.parse_args()
+
+ # Create and spin your node
+ image_display_node = ImageDisplayNode(topic_name=custom_args.topic)
+ rclpy.spin(image_display_node)
+
+ # Shutdown and cleanup
+ image_display_node.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vision_ws/src/py_pubsub/resource/py_pubsub b/vision_ws/src/py_pubsub/resource/py_pubsub
new file mode 100644
index 0000000..e69de29
diff --git a/vision_ws/src/py_pubsub/setup.cfg b/vision_ws/src/py_pubsub/setup.cfg
new file mode 100644
index 0000000..54d2b16
--- /dev/null
+++ b/vision_ws/src/py_pubsub/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script-dir=$base/lib/py_pubsub
+[install]
+install-scripts=$base/lib/py_pubsub
diff --git a/vision_ws/src/py_pubsub/setup.py b/vision_ws/src/py_pubsub/setup.py
new file mode 100644
index 0000000..bd8a4f6
--- /dev/null
+++ b/vision_ws/src/py_pubsub/setup.py
@@ -0,0 +1,26 @@
+from setuptools import setup
+
+package_name = "py_pubsub"
+
+setup(
+ name=package_name,
+ version="0.0.0",
+ packages=[package_name],
+ data_files=[
+ ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
+ ("share/" + package_name, ["package.xml"]),
+ ],
+ install_requires=["setuptools"],
+ zip_safe=True,
+ maintainer="pge-2023",
+ maintainer_email="pge-2023@todo.todo",
+ description="Examples of minimal publisher/subscriber using rclpy",
+ license="TODO: License declaration",
+ tests_require=["pytest"],
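+ # Console scripts become ROS 2 executables, invoked as "ros2 run py_pubsub <name>"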
+ entry_points={
+ "console_scripts": [
+ "display = py_pubsub.display:main",
+ "camera_yolo = py_pubsub.camera_yolo:main",
+ ],
+ },
+)
diff --git a/vision_ws/src/py_pubsub/test/test_copyright.py b/vision_ws/src/py_pubsub/test/test_copyright.py
new file mode 100644
index 0000000..cc8ff03
--- /dev/null
+++ b/vision_ws/src/py_pubsub/test/test_copyright.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/vision_ws/src/py_pubsub/test/test_flake8.py b/vision_ws/src/py_pubsub/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/vision_ws/src/py_pubsub/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/vision_ws/src/py_pubsub/test/test_pep257.py b/vision_ws/src/py_pubsub/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/vision_ws/src/py_pubsub/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'