From 13a4631769c69debdb2568032024b72bdb4e817b Mon Sep 17 00:00:00 2001
From: Qian Qian
Date: Wed, 13 Mar 2024 20:10:33 +0100
Subject: [PATCH] Disable detection

---
 requirements.txt | 12 ++++----
 src/copilot.py   | 28 +++++++++---------
 src/main.py      | 77 ++++++++++++++++++++++++++----------------------
 3 files changed, 61 insertions(+), 56 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 7fe976f..21d1ae1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
-scipy==1.6.3
-filterpy==1.4.5
-pygame==2.0.1
-pytest==6.2.3
-PyYAML==5.4.1
-transitions==0.8.10
+scipy
+filterpy
+pygame
+pytest
+PyYAML
+transitions
diff --git a/src/copilot.py b/src/copilot.py
index 39c0ccd..cddd08d 100644
--- a/src/copilot.py
+++ b/src/copilot.py
@@ -3,10 +3,10 @@
 
 from PIL import Image
 
-from pycoral.adapters import common
-from pycoral.adapters import detect
-from pycoral.adapters import classify
-from pycoral.utils.dataset import read_label_file
+# from pycoral.adapters import common
+# from pycoral.adapters import detect
+# from pycoral.adapters import classify
+# from pycoral.utils.dataset import read_label_file
 
 # from .button import Button
 from .utils import (
@@ -56,7 +56,7 @@ def __init__(
         self._classfication_interpreter = traffic_light_classifier_interpreter
         self._classfication_interpreter.allocate_tensors()
 
-        self._traffic_light_size = common.input_size(self._classfication_interpreter)
+        # self._traffic_light_size = common.input_size(self._classfication_interpreter)
 
         self._speaker = speaker
 
@@ -66,12 +66,12 @@ def __init__(
         assert tile_size == input_shape[1]
         self._tile_config = TileConfig(tile_size, tile_w_overlap, tile_h_overlap)
 
-        self._ssd_labels = read_label_file(self._args.label) if self._args.label else {}
-        self._traffic_light_labels = (
-            read_label_file(self._args.traffic_light_label)
-            if self._args.traffic_light_label
-            else {}
-        )
+        # self._ssd_labels = read_label_file(self._args.label) if self._args.label else {}
+        # self._traffic_light_labels = (
+        #     read_label_file(self._args.traffic_light_label)
+        #     if self._args.traffic_light_label
+        #     else {}
+        # )
 
         self._h_crop_keep_percentage = 0.6
         self._led = led
@@ -97,7 +97,7 @@ def run(self):
             )
             prev_cycle_time = current_cycle_time
             logging.debug("recv image from: {}".format(image_time))
-            self.process(image)
+            # self.process(image)
             logging.debug(
                 "process time: %.2f ms"
                 % ((time.perf_counter() - current_cycle_time) * 1000)
@@ -142,7 +142,7 @@ def classify(self, traffic_light_thumbnail):
         traffic_light_resized = traffic_light_thumbnail.resize(
             self._traffic_light_size, Image.ANTIALIAS
         )
-        common.set_input(self._classfication_interpreter, traffic_light_resized)
+        # common.set_input(self._classfication_interpreter, traffic_light_resized)
         start = time.perf_counter()
         self._classfication_interpreter.invoke()
         classes = classify.get_classes(
@@ -179,7 +179,7 @@ def detect(self, img):
         ):
             # print(tile_location)
             tile = img.crop(tile_location)
-            common.set_input(self._ssd_interpreter, tile)
+            # common.set_input(self._ssd_interpreter, tile)
             start = time.perf_counter()
             self._ssd_interpreter.invoke()
             inference_time += time.perf_counter() - start
diff --git a/src/main.py b/src/main.py
index 974f2bd..e676a85 100644
--- a/src/main.py
+++ b/src/main.py
@@ -4,10 +4,10 @@
 import time
 import threading
 
-import picamera
+from picamera2 import Picamera2
 
 from src.os_utils import generate_recording_postfix
-from .led import Led
+from .abc import ILed
 from .camera_capturer import CameraCapturer
 from .camera_recorder import CameraRecorder
 from .pubsub import PubSub
@@ -109,43 +109,48 @@ def main():
 
     blackbox = BlackBox(image_saver)
 
-    with picamera.PiCamera() as camera:
-        # fps for recording
-        camera.framerate = 20
-        camera.resolution = camera_info.resolution
-        camera.exposure_mode = "sports"
-
-        led_pin = 10
-        led = Led(led_pin)
-        camera_recorder = CameraRecorder(camera, led, args.blackbox_path)
-        camera_capturer = CameraCapturer(
-            camera, 5, camera_recorder.is_recording, pubsub, inference_config
-        )
+    camera = Picamera2()
+
+
+    main_stream = {"size": camera_info.resolution}
+    lores_stream = {"size": inference_config.inference_resolution}
+    video_config = camera.create_video_configuration(main_stream, lores_stream, encode="main")
+    camera.configure(video_config)
+
+    #camera.framerate = 20
+    #camera.exposure_mode = "sports"
 
-        if args.cpu:
-            from tflite_runtime.interpreter import Interpreter as make_interpreter
-        else:
-            from pycoral.utils.edgetpu import make_interpreter
-
-        try:
-            copilot = CoPilot(
-                args,
-                pubsub,
-                blackbox,
-                camera_info,
-                inference_config,
-                led,
-                Speaker(args.lang),
-                make_interpreter(args.ssd_model),
-                make_interpreter(args.traffic_light_classification_model),
-            )
-        except ValueError as e:
-            print(str(e) + "Use --cpu if you do not have a coral tpu")
-            return
-        copilot.run()
+    led_pin = 10
+    led = ILed()
+    camera_recorder = CameraRecorder(camera, led, args.blackbox_path)
+    camera_capturer = CameraCapturer(
+        camera, 5, camera_recorder.is_recording, pubsub, inference_config
+    )
+
+    if args.cpu:
+        from tflite_runtime.interpreter import Interpreter as make_interpreter
+    else:
+        from pycoral.utils.edgetpu import make_interpreter
+
+    try:
+        copilot = CoPilot(
+            args,
+            pubsub,
+            blackbox,
+            camera_info,
+            inference_config,
+            led,
+            Speaker(args.lang),
+            make_interpreter(args.ssd_model),
+            make_interpreter(args.traffic_light_classification_model),
+        )
+    except ValueError as e:
+        print(str(e) + "Use --cpu if you do not have a coral tpu")
+        return
+    copilot.run()
 
     except Exception as e:
-        Speaker(args.lang).play_sound("dead", is_blocking=True)
+        # Speaker(args.lang).play_sound("dead", is_blocking=True)
         logging.critical(str(e))
         print(str(e))
         exit(1)
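
Note: the src/main.py hunk above moves from the legacy picamera API to Picamera2's configuration-object model. The sketch below shows the general dual-stream pattern the patch relies on (a full-resolution "main" stream for the H.264 recorder plus a small "lores" stream for inference). It is a minimal illustration only: the stream sizes, encoder bitrate, and output filename are placeholders, not values taken from this repository.

# Minimal Picamera2 dual-stream sketch; sizes, bitrate, and filename are illustrative.
from picamera2 import Picamera2
from picamera2.encoders import H264Encoder

camera = Picamera2()

# "main" feeds the encoder for recording; "lores" is a smaller stream for on-device
# inference. The lores stream is delivered in YUV420 layout.
video_config = camera.create_video_configuration(
    main={"size": (1920, 1080)},
    lores={"size": (320, 320), "format": "YUV420"},
    encode="main",
)
camera.configure(video_config)

# start_recording() starts the camera and the encoder on the "main" stream;
# lores frames can be captured concurrently for inference.
camera.start_recording(H264Encoder(bitrate=10_000_000), "blackbox.h264")
frame = camera.capture_array("lores")  # numpy array, shape (h * 3 // 2, w) for YUV420
camera.stop_recording()

Keeping inference on the lores stream avoids resizing full frames in Python while the encoder consumes the main stream untouched, which is why the patch passes both stream dictionaries to create_video_configuration().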