diff --git a/EasyPySpin/__init__.py b/EasyPySpin/__init__.py index 440b6a6..00244ba 100644 --- a/EasyPySpin/__init__.py +++ b/EasyPySpin/__init__.py @@ -1,3 +1,5 @@ from .videocapture import VideoCapture -from .synchronizedvideocapture import SynchronizedVideoCapture from .videocaptureex import VideoCaptureEX +from .multiplevideocapture import MultipleVideoCapture +from .synchronizedvideocapture import SynchronizedVideoCapture +from .utils import EasyPySpinWarning diff --git a/EasyPySpin/command_line.py b/EasyPySpin/command_line.py index 3c24cc7..ae0420c 100644 --- a/EasyPySpin/command_line.py +++ b/EasyPySpin/command_line.py @@ -5,23 +5,25 @@ import cv2 import argparse + def print_xy(event, x, y, flags, param): """ Export xy coordinates in csv format by clicking """ - if event==cv2.EVENT_LBUTTONDOWN: + if event == cv2.EVENT_LBUTTONDOWN: scale = param print(f"{int(x/scale)}, {int(y/scale)}") + def main(): parser = argparse.ArgumentParser() - parser.add_argument("-i", "--index", type=int, default=0, help="Camera index (Default: 0)") - parser.add_argument("-e", "--exposure",type=float, default=-1, help="Exposure time [us] (Default: Auto)") - parser.add_argument("-g", "--gain", type=float, default=-1, help="Gain [dB] (Default: Auto)") - parser.add_argument("-G", "--gamma", type=float, help="Gamma value") - parser.add_argument("-b", "--brightness", type=float, help="Brightness [EV]") - parser.add_argument("-f", "--fps", type=float, help="FrameRate [fps]") - parser.add_argument("-s", "--scale", type=float, default=0.25, help="Image scale to show (>0) (Default: 0.25)") + parser.add_argument("-i", "--index", type=int, default=0, help="Camera index (Default: 0)") + parser.add_argument("-e", "--exposure", type=float, default=-1, help="Exposure time [us] (Default: Auto)") + parser.add_argument("-g", "--gain", type=float, default=-1, help="Gain [dB] (Default: Auto)") + parser.add_argument("-G", "--gamma", type=float, help="Gamma value") + parser.add_argument("-b", "--brightness", type=float, help="Brightness [EV]") + parser.add_argument("-f", "--fps", type=float, help="FrameRate [fps]") + parser.add_argument("-s", "--scale", type=float, default=.25, help="Image scale to show (>0) (Default: 0.25)") args = parser.parse_args() # Instance creation @@ -31,14 +33,17 @@ def main(): if not cap.isOpened(): print("Camera can't open\nexit") return -1 - + # Set the camera parameters - cap.set(cv2.CAP_PROP_EXPOSURE, args.exposure) #-1 sets exposure_time to auto - cap.set(cv2.CAP_PROP_GAIN, args.gain) #-1 sets gain to auto - if args.gamma is not None: cap.set(cv2.CAP_PROP_GAMMA, args.gamma) - if args.fps is not None: cap.set(cv2.CAP_PROP_FPS, args.fps) - if args.brightness is not None: cap.set(cv2.CAP_PROP_BRIGHTNESS, args.brightness) - + cap.set(cv2.CAP_PROP_EXPOSURE, args.exposure) # -1 sets exposure_time to auto + cap.set(cv2.CAP_PROP_GAIN, args.gain) # -1 sets gain to auto + if args.gamma is not None: + cap.set(cv2.CAP_PROP_GAMMA, args.gamma) + if args.fps is not None: + cap.set(cv2.CAP_PROP_FPS, args.fps) + if args.brightness is not None: + cap.set(cv2.CAP_PROP_BRIGHTNESS, args.brightness) + # Window setting winname = "press q to quit" cv2.namedWindow(winname) @@ -48,16 +53,17 @@ def main(): # Start capturing while True: ret, frame = cap.read() - #frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR) #for RGB camera demosaicing + # frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR) #for RGB camera demosaicing img_show = cv2.resize(frame, None, fx=args.scale, fy=args.scale) cv2.imshow(winname, img_show) key = 
cv2.waitKey(30) - if key==ord("q"): + if key == ord("q"): break - + cv2.destroyAllWindows() cap.release() -if __name__=="__main__": + +if __name__ == "__main__": main() diff --git a/EasyPySpin/multiplevideocapture.py b/EasyPySpin/multiplevideocapture.py new file mode 100644 index 0000000..72d96b7 --- /dev/null +++ b/EasyPySpin/multiplevideocapture.py @@ -0,0 +1,91 @@ +from typing import List, Tuple, Union, Any +from concurrent.futures import ThreadPoolExecutor + +import numpy as np + +from .videocapture import VideoCapture as EasyPySpinVideoCapture + + +class MultipleVideoCapture: + """VideoCapture for multiple cameras. + + Examples + -------- + >>> cap = MultipleVideoCapture(0, 1) + >>> cap.isOpened() + [True, True] + >>> cap.set(cv2.CAP_PROP_EXPOSURE, 1000) + [True, True] + >>> cap.get(cv2.CAP_PROP_EXPOSURE) + [1000.0, 1000.0] + >>> cap[0].set(cv2.CAP_PROP_EXPOSURE, 2000) + True + >>> cap.get(cv2.CAP_PROP_EXPOSURE) + [2000.0, 1000.0] + >>> (ret0, frame0), (ret1, frame1) = cap.read() + >>> cap.release() + + Add camera after initialization + + >>> cap = MultipleVideoCapture(0, 1) # open two cameras + >>> cap.isOpened() + [True, True] + >>> cap.open(2) # add a camera + >>> cap.isOpened() + [True, True, True] + + Open camera as arbitrary VideoCapture + + >>> cap = MultipleVideoCapture() + >>> cap.open(0, 1, VideoCapture=EasyPySpin.VideoCaptureEX) + >>> cap.isOpened() + [True, True] + >>> cap.average_num = 5 # Set attribute of VideoCaptureEX + >>> cap.open(0, VideoCapture=cv2.VideoCapture) + >>> cap.isOpened() + [True, True, True] + """ + + __caps = list() + __executor = ThreadPoolExecutor() + + def __init__(self, *indexes: Tuple[Union[int, str], ...]): + self.open(*indexes) + + def __len__(self): + return self.__caps.__len__() + + def __getitem__(self, item): + return self.__caps.__getitem__(item) + + def __iter__(self): + return self.__caps.__iter__() + + def __next__(self): + return self.__caps.__next__() + + def __setattr__(self, key, value): + for cap in self: + if hasattr(cap, key): + setattr(cap, key, value) + + return object.__setattr__(self, key, value) + + def __getattr__(self, name): + def method(*args, **kwargs) -> List[Any]: + futures = [ + self.__executor.submit(getattr(cap, name), *args, **kwargs) + for cap in self + ] + return [future.result() for future in futures] + + return method + + def open( + self, *indexs: Tuple[Union[int, str], ...], VideoCapture=EasyPySpinVideoCapture + ) -> List[bool]: + for index in indexs: + cap = VideoCapture(index) + self.__caps.append(cap) + + return self.isOpened() diff --git a/EasyPySpin/synchronizedvideocapture.py b/EasyPySpin/synchronizedvideocapture.py index ff3fdd0..62bd4af 100644 --- a/EasyPySpin/synchronizedvideocapture.py +++ b/EasyPySpin/synchronizedvideocapture.py @@ -1,118 +1,68 @@ -import PySpin +from typing import Tuple, Union -class SynchronizedVideoCapture: - """ - Hardware synchronized video capturing. - It can be handled in the same way as the "VideoCapture" class and the return value is stored in the list. - - You can find instructions on how to connect the camera in FLIR official page. - [https://www.flir.com/support-center/iis/machine-vision/application-note/configuring-synchronized-capture-with-multiple-cameras] - - NOTE : Currently, only two cameras (primary and secondary) are supported, but I would like to support multiple secondary cameras in the future. - NOTE : I only have the "BFS" camera, so I haven't tested it with any other camera ("BFLY", "CM3", etc...). So, if you have a problem, please send me an issue or PR. 
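# A minimal usage sketch for the MultipleVideoCapture class added above: every
# method call (read, set, isOpened, ...) fans out to all opened cameras through
# the internal thread pool and returns a list of per-camera results. The camera
# indices and window names below are illustrative placeholders.
import cv2
import EasyPySpin

cap = EasyPySpin.MultipleVideoCapture(0, 1)   # open two cameras
cap.set(cv2.CAP_PROP_EXPOSURE, 10000)         # applied to every camera -> [True, True]

while all(cap.isOpened()):
    results = cap.read()                      # concurrent reads, one (ret, frame) per camera
    if not all(ret for ret, _ in results):
        break
    for i, (ret, frame) in enumerate(results):
        cv2.imshow(f"camera {i}", cv2.resize(frame, None, fx=0.25, fy=0.25))
    if cv2.waitKey(30) == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()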
- """ - def __init__(self, cap_primary, cap_secondary): - self.cap_primary = cap_primary - self.cap_secondary = cap_secondary - - self.cap_primary = self._configure_as_primary(self.cap_primary) - self.cap_secondary = self._configure_as_secondary(self.cap_secondary) - - self.cap_primary.auto_software_trigger_execute = True - - def __del__(self): - self.cap_primary.release() - self.cap_secondary.release() - - def release(self): - self.__del__() - - def isOpened(self): - return [self.cap_primary.isOpened(), self.cap_secondary.isOpened()] - - def read(self): - if not self.cap_primary.cam.IsStreaming(): - self.cap_primary.cam.BeginAcquisition() - - if not self.cap_secondary.cam.IsStreaming(): - self.cap_secondary.cam.BeginAcquisition() - - if (self.cap_primary.cam.TriggerMode.GetValue()==PySpin.TriggerMode_On and - self.cap_primary.cam.TriggerSource.GetValue()==PySpin.TriggerSource_Software and - self.cap_primary.auto_software_trigger_execute==True): - self.cap_primary.cam.TriggerSoftware.Execute() +from .multiplevideocapture import MultipleVideoCapture - ret_p, frame_p = self.cap_primary.read() - ret_s, frame_s = self.cap_secondary.read() - ret = [ret_p, ret_s] - frame = [frame_p, frame_s] - return ret, frame - def set(self, propId, value): - ret_p = self.cap_primary.set(propId, value) - ret_s = self.cap_secondary.set(propId, value) - return [ret_p, ret_s] +class SynchronizedVideoCapture(MultipleVideoCapture): + """VideoCapture for hardware synchronized cameras. - def get(self, propId): - value_p = self.cap_primary.get(propId) - value_s = self.cap_secondary.get(propId) - return [value_p, value_s] + I only have the "BFS" camera, so I haven't tested it with any other camera ("BFLY", "CM3", etc...). So, if you have a problem, please send me an issue or PR. - def _configure_as_primary(self, cap): - series_name = self._which_camera_series(cap) - - # Set the output line - if series_name in ["CM3", "FL3", "GS3", "FFY-DL", "ORX"]: - # For CM3, FL3, GS3, FFY-DL, and ORX cameras, - # select Line2 from the Line Selection dropdown and set Line Mode to Output. - cap.cam.LineSelector.SetValue(PySpin.LineSelector_Line2) - cap.cam.LineMode.SetValue(PySpin.LineMode_Output) - elif series_name in ["BFS"]: - # For BFS cameras, select Line1 from the Line Selection dropdown - # and set Line Mode to Output. - cap.cam.LineSelector.SetValue(PySpin.LineSelector_Line1) - cap.cam.LineMode.SetValue(PySpin.LineMode_Output) - - # For BFS and BFLY cameras enable the 3.3V line - if series_name in ["BFS"]: - # For BFS cameras from the line selection drop-down select Line2 - # and check the checkbox for 3.3V Enable. - cap.cam.LineSelector.SetValue(PySpin.LineSelector_Line2) - cap.cam.V3_3Enable.SetValue(True) - elif series_name in ["BFLY"]: - # For BFLY cameras, set 3.3V Enable to true - cap.cam.V3_3Enable.SetValue(True) - - return cap - - def _configure_as_secondary(self, cap): - series_name = self._which_camera_series(cap) - - cap.cam.TriggerMode.SetValue(PySpin.TriggerMode_Off) - cap.cam.TriggerSelector.SetValue(PySpin.TriggerSelector_FrameStart) - - # Set the trigger source - if series_name in ["BFS", "CM3", "FL3", "FFY-DL", "GS3"]: - # For BFS, CM3, FL3, FFY-DL, and GS3 cameras, - # from the Trigger Source drop-down, select Line 3. - cap.cam.TriggerSource.SetValue(PySpin.TriggerSource_Line3) - elif series_name in ["ORX"]: - # For ORX cameras, from the Trigger Source drop-down, select Line 5. - cap.cam.TriggerSource.SetValue(PySpin.TriggerSource_Line5) - - # From the Trigger Overlap drop-down, select Read Out. 
- cap.cam.TriggerOverlap.SetValue(PySpin.TriggerOverlap_ReadOut) - - # From the Trigger Mode drop-down, select On. - cap.cam.TriggerMode.SetValue(PySpin.TriggerMode_On) - - return cap - - def _which_camera_series(self, cap): - model_name = cap.cam.DeviceModelName.GetValue() + Notes + ----- + You can find instructions on how to connect the camera in FLIR official page. + https://www.flir.com/support-center/iis/machine-vision/application-note/configuring-synchronized-capture-with-multiple-cameras + + Examples + -------- + Case1: The pair of primary and secondary cameras. + + >>> serial_number_1 = "20541712" # primary camera + >>> serial_number_2 = "19412150" # secondary camera + >>> cap = EasyPySpin.SynchronizedVideoCapture(serial_number_1, serial_number_2) + >>> cap.isOpened() + [True, True] + >>> cap.set(cv2.CAP_PROP_EXPOSURE, 1000) + [True, True] + >>> cap.get(cv2.CAP_PROP_EXPOSURE) + [1000.0, 1000.0] + >>> cap[0].set(cv2.CAP_PROP_EXPOSURE, 2000) + True + >>> cap.get(cv2.CAP_PROP_EXPOSURE) + [2000.0, 1000.0] + >>> (ret0, frame0), (ret1, frame1) = cap.read() + + Case2: The secondary camera and external trigger. + + >>> serial_number = "19412150" # secondary camera + >>> cap = EasyPySpin.SynchronizedVideoCapture(None, serial_number_2) + + Case3: The two (or more) secondary cameras and external trigger. + + >>> serial_number_1 = "20541712" # secondary camera 1 + >>> serial_number_2 = "19412150" # secondary camera 2 + >>> cap = EasyPySpin.SynchronizedVideoCapture(None, serial_number_1, serial_number_2) + """ - series_names = ["BFS", "BFLY", "CM3", "FL3", "GS3", "ORX", "FFY-DL"] - for name in series_names: - if name in model_name: - return name - return None + def __init__( + self, + index_primary: Union[int, str], + *indexes_secondary: Tuple[Union[int, str], ...] + ): + if index_primary is not None: + self.open_as_primary(index_primary) + + for index_secondary in indexes_secondary: + self.open_as_secondary(index_secondary) + + def open_as_primary(self, index: Union[int, str]) -> bool: + self.open(index) + cap = self[-1] + cap._configure_as_primary() + return cap.isOpened() + + def open_as_secondary(self, index: Union[int, str]) -> bool: + self.open(index) + cap = self[-1] + cap._configure_as_secondary() + return cap.isOpened() diff --git a/EasyPySpin/utils.py b/EasyPySpin/utils.py new file mode 100644 index 0000000..b934fe6 --- /dev/null +++ b/EasyPySpin/utils.py @@ -0,0 +1,12 @@ +import warnings + + +class EasyPySpinWarning(Warning): + pass + + +def warn( + message: str, category: Warning = EasyPySpinWarning, stacklevel: int = 2 +) -> None: + """Default EasyPySpin warn""" + warnings.warn(message, category, stacklevel + 1) diff --git a/EasyPySpin/videocapture.py b/EasyPySpin/videocapture.py index 58ff947..e95561a 100644 --- a/EasyPySpin/videocapture.py +++ b/EasyPySpin/videocapture.py @@ -1,353 +1,735 @@ +import warnings +from typing import Union, Tuple + +import numpy as np import cv2 import PySpin -from sys import stderr + +from .utils import EasyPySpinWarning, warn + class VideoCapture: - """ - Open a FLIR camera for video capturing. + """Open a FLIR camera for video capturing. Attributes ---------- cam : PySpin.CameraPtr - camera - nodemap : PySpin.INodeMap - nodemap represents the elements of a camera description file. - grabTimeout : uint64_t + PySpin camera pointer. + grabTimeout : int, default=PySpin.EVENT_TIMEOUT_INFINITE a 64bit value that represents a timeout in milliseconds - streamID : uint64_t + streamID : int, default=0 The stream to grab the image. 
- auto_software_trigger_execute : bool - Whether or not to execute a software trigger when executing "read()". - When the "TriggerMode" is "On" and the "TriggerSource" is set to "Software". (Default: False) + auto_software_trigger_execute : bool, default=False + Whether or not to execute a software trigger when executing ``grab()``. + When the SoftwareTrigger is available. Methods ------- + get(propId) + Gets a property. + grab() + Grabs the next frame from capturing device. + isOpened() + Whether a camera is open or not. + open() + Open a capturing device for video capturing. read() - returns the next frame. + Returns the next frame. release() Closes capturing device. - isOpened() - Whether a camera is open or not. + retrieve() + Decodes and returns the grabbed video frame. set(propId, value) Sets a property. - get(propId) - Gets a property. + setExceptionMode(enable) + Switches exceptions mode. + + Notes + ----- + Supported ``cv2.VideoCaptureProperties`` for ``set()`` or ``get()`` methods. + `cv2.CAP_PROP_FPS` + `cv2.CAP_PROP_FRAME_WIDTH` + `cv2.CAP_PROP_FRAME_HEIGHT` + `cv2.CAP_PROP_BRIGHTNESS` + `cv2.CAP_PROP_GAIN` + `cv2.CAP_PROP_EXPOSURE` + `cv2.CAP_PROP_GAMMA` + `cv2.CAP_PROP_TEMPERATURE` (get only) + `cv2.CAP_PROP_TRIGGER` + `cv2.CAP_PROP_TRIGGER_DELAY` + `cv2.CAP_PROP_BACKLIGHT` + `cv2.CAP_PROP_AUTO_WB` """ - def __init__(self, index): + + # a 64bit value that represents a timeout in milliseconds + grabTimeout: int = PySpin.EVENT_TIMEOUT_INFINITE + + # The stream to grab the image. + streamID: int = 0 + + # Whether or not to execute a software trigger when executing ``grab()``. + auto_software_trigger_execute: bool = False + + def __init__(self, index: Union[int, str] = None): """ Parameters ---------- - index : int - id of the video capturing device to open. + index : int or str, default=None + For ``int`` type, the index at which to retrieve the camera object. + For ``str`` type, the serial number of the camera object to retrieve. """ - # Check for 'index' type - if isinstance(index, (int, str))==False: - raise TypeError("Argument 'index' is required to be an integer or a string") + if index is not None: + self.open(index) + + @property + def cam(self) -> Union[PySpin.CameraPtr, None]: + """Provide ``PySpin.CameraPtr``.""" + if hasattr(self, "_cam"): + return self._cam + else: + return None + + def open(self, index: Union[int, str]) -> bool: + """Open a capturing device for video capturing. + + Parameters + ---------- + index : int or str + ``int`` type, the index at which to retrieve the camera object. + ``str`` type, the serial number of the camera object to retrieve. + + Returns + ------- + retval : bool + ``True`` if the file has been successfully opened. + """ + # Close the already opened camera + self.release() - # Cerate system instance and get camera list + # Cerate system instance and get camera list self._system = PySpin.System.GetInstance() self._cam_list = self._system.GetCameras() num_cam = self._cam_list.GetSize() # Check for available cameras - if num_cam==0: - print("EasyPySpin: no camera is available", file=stderr) - self._cam_list.Clear() - self._system.ReleaseInstance() - return None - - # Try to connect camera - try: - # Index case - if type(index) is int: - # Check for 'index' bound - if index<0 or num_cam-1 None: + """Closes capturing device. The method call VideoCapture destructor.""" self.__del__() - def isOpened(self): - """ - Returns true if video capturing has been initialized already. 
- """ - try: return self.cam.IsValid() - except: return False + def isOpened(self) -> bool: + """Returns ``True`` if video capturing has been initialized already. - def read(self): + Returns + ------- + retval : bool """ - returns the next frame. + if self.cam is not None: + try: + return self.cam.IsValid() + except AttributeError: + return False + else: + return False + + def grab(self) -> bool: + """Grabs the next frame from capturing device. Returns ------- retval : bool - false if no frames has been grabbed. - image : array_like - grabbed image is returned here. If no image has been grabbed the image will be None. + ``True`` the case of success. """ + if not self.isOpened(): + return False + if not self.cam.IsStreaming(): self.cam.BeginAcquisition() - - # Execute a software trigger if necessary - if (self.cam.TriggerMode.GetValue() ==PySpin.TriggerMode_On and - self.cam.TriggerSource.GetValue()==PySpin.TriggerSource_Software and - self.auto_software_trigger_execute==True): + + # Execute a software trigger if required + if ( + PySpin.IsAvailable(self.cam.TriggerSoftware) + and self.auto_software_trigger_execute + ): + # Software-Trigger is executed under TWO conditions. + # First, the TriggerMode is set to ``On`` + # and the TriggerSource is set to ``Software``, + # so that SoftwareTrigger is available. + # Second, the member variable ``auto_software_trigger_execute`` is set to ``True``. self.cam.TriggerSoftware.Execute() - image = self.cam.GetNextImage(self.grabTimeout, self.streamID) - if image.IsIncomplete(): + # Grab image + self._pyspin_image = self.cam.GetNextImage(self.grabTimeout, self.streamID) + + is_complete = not self._pyspin_image.IsIncomplete() + return is_complete + + def retrieve(self) -> Tuple[bool, Union[np.ndarray, None]]: + """Decodes and returns the grabbed video frame. + + Returns + ------- + retval : bool + ``False`` if no frames has been grabbed. + image : np.ndarray + grabbed image is returned here. If no image has been grabbed the image will be None. + """ + if hasattr(self, "_pyspin_image"): + image_array = self._pyspin_image.GetNDArray() + return True, image_array + else: return False, None - - img_NDArray = image.GetNDArray() - image.Release() - return True, img_NDArray - - def set(self, propId, value): + + def read(self) -> Tuple[bool, Union[np.ndarray, None]]: + """Grabs, decodes and returns the next video frame. + + The method combines ``grab()`` and ``retrieve()`` in one call. + This is the most convenient method for capturing data from decode and returns the just grabbed frame. + If no frames has been grabbed, the method returns ``False`` and the function returns ``None``. + + Returns + ------- + retval : bool + ``False`` if no frames has been grabbed. + image : np.ndarray + grabbed image is returned here. If no image has been grabbed the image will be ``None``. """ - Sets a property in the VideoCapture. + retval = self.grab() + if retval: + return self.retrieve() + else: + return False, None + + def set(self, propId: "cv2.VideoCaptureProperties", value: any) -> bool: + """Sets a property in the VideoCapture. Parameters ---------- propId_id : cv2.VideoCaptureProperties - Property identifier from cv2.VideoCaptureProperties + Property identifier from cv2.VideoCaptureProperties. value : int or float or bool Value of the property. - + Returns ------- retval : bool True if property setting success. 
""" - #Exposure setting - if propId==cv2.CAP_PROP_EXPOSURE: - #Auto - if value<0: return self._set_ExposureAuto(PySpin.ExposureAuto_Continuous) - - #Manual - ret = self._set_ExposureAuto(PySpin.ExposureAuto_Off) - if ret==False: return False - return self._set_ExposureTime(value) - - #Gain setting - if propId==cv2.CAP_PROP_GAIN: - #Auto - if value<0: return self._set_GainAuto(PySpin.GainAuto_Continuous) - - #Manual - ret = self._set_GainAuto(PySpin.GainAuto_Off) - if ret==False: return False - return self._set_Gain(value) - - #Brightness(EV) setting - if propId==cv2.CAP_PROP_BRIGHTNESS: - return self._set_Brightness(value) - - #Gamma setting - if propId==cv2.CAP_PROP_GAMMA: - return self._set_Gamma(value) - - #FrameRate setting - if propId==cv2.CAP_PROP_FPS: - return self._set_FrameRate(value) - - #BackLigth setting - if propId==cv2.CAP_PROP_BACKLIGHT: - return self._set_BackLight(value) - - #Trigger Mode setting (ON/OFF) - if propId==cv2.CAP_PROP_TRIGGER: - return self._set_Trigger(value) - - #TriggerDelay setting - if propId==cv2.CAP_PROP_TRIGGER_DELAY: - return self._set_TriggerDelay(value) + # Width setting + if propId == cv2.CAP_PROP_FRAME_WIDTH: + return self.set_pyspin_value("Width", value) + + # Height setting + if propId == cv2.CAP_PROP_FRAME_HEIGHT: + return self.set_pyspin_value("Height", value) + + # FrameRate setting + if propId == cv2.CAP_PROP_FPS: + is_success1 = self.set_pyspin_value("AcquisitionFrameRateEnable", True) + is_success2 = self.set_pyspin_value("AcquisitionFrameRate", value) + return is_success1 and is_success2 + + # Brightness (EV) setting + if propId == cv2.CAP_PROP_BRIGHTNESS: + return self.set_pyspin_value("AutoExposureEVCompensation", value) + + # Gain setting + if propId == cv2.CAP_PROP_GAIN: + if value != -1: + # Manual + is_success1 = self.set_pyspin_value("GainAuto", "Off") + is_success2 = self.set_pyspin_value("Gain", value) + return is_success1 and is_success2 + else: + # Auto + return self.set_pyspin_value("GainAuto", "Continuous") + + # Exposure setting + if propId == cv2.CAP_PROP_EXPOSURE: + if value != -1: + # Manual + is_success1 = self.set_pyspin_value("ExposureAuto", "Off") + is_success2 = self.set_pyspin_value("ExposureTime", value) + return is_success1 and is_success2 + else: + # Auto + return self.set_pyspin_value("ExposureAuto", "Continuous") + + # Gamma setting + if propId == cv2.CAP_PROP_GAMMA: + is_success1 = self.set_pyspin_value("GammaEnable", True) + is_success2 = self.set_pyspin_value("Gamma", value) + return is_success1 and is_success2 + + # Trigger Mode setting + if propId == cv2.CAP_PROP_TRIGGER: + if type(value) is not bool: + warn(f"'value' must be 'bool', not '{type(value).__name__}'") + return False + + trigger_mode = "On" if value else "Off" + return self.set_pyspin_value("TriggerMode", trigger_mode) + + # TriggerDelay setting + if propId == cv2.CAP_PROP_TRIGGER_DELAY: + return self.set_pyspin_value("TriggerDelay", value) + + # BackLigth setting + if propId == cv2.CAP_PROP_BACKLIGHT: + if type(value) is not bool: + warn(f"'value' must be 'bool', not '{type(value).__name__}'") + return False + + device_indicato_mode = "Active" if value else "Inactive" + return self.set_pyspin_value("DeviceIndicatorMode", device_indicato_mode) + + # Auto White Balance setting + if propId == cv2.CAP_PROP_AUTO_WB: + if type(value) is not bool: + warn(f"'value' must be 'bool', not '{type(value).__name__}'") + return False + + balance_white_auto_mode = "Continuous" if value else "Off" + return self.set_pyspin_value("BalanceWhiteAuto", 
balance_white_auto_mode) + + # If none of the above conditions apply + warn(f"propID={propId} is not supported") return False - - def get(self, propId): + + def get(self, propId: "cv2.VideoCaptureProperties") -> any: """ Returns the specified VideoCapture property. - + Parameters ---------- propId_id : cv2.VideoCaptureProperties Property identifier from cv2.VideoCaptureProperties - + Returns ------- - value : int or float or bool + value : any Value for the specified property. Value Flase is returned when querying a property that is not supported. """ - if propId==cv2.CAP_PROP_EXPOSURE: - return self._get_ExposureTime() + # Width + if propId == cv2.CAP_PROP_FRAME_WIDTH: + return self.get_pyspin_value("Width") + + # Height + if propId == cv2.CAP_PROP_FRAME_HEIGHT: + return self.get_pyspin_value("Height") + + # Frame Rate + if propId == cv2.CAP_PROP_FPS: + # If this does not equal the AcquisitionFrameRate + # it is because the ExposureTime is greater than the frame time. + return self.get_pyspin_value("ResultingFrameRate") + + # Brightness + if propId == cv2.CAP_PROP_BRIGHTNESS: + return self.get_pyspin_value("AutoExposureEVCompensation") + + # Gain + if propId == cv2.CAP_PROP_GAIN: + return self.get_pyspin_value("Gain") + + # Exposure Time + if propId == cv2.CAP_PROP_EXPOSURE: + return self.get_pyspin_value("ExposureTime") + + # Gamma + if propId == cv2.CAP_PROP_GAMMA: + return self.get_pyspin_value("Gamma") + + # Temperature + if propId == cv2.CAP_PROP_TEMPERATURE: + return self.get_pyspin_value("DeviceTemperature") + + # Trigger Mode + if propId == cv2.CAP_PROP_TRIGGER: + trigger_mode = self.get_pyspin_value("TriggerMode") + if trigger_mode == PySpin.TriggerMode_Off: + return False + elif trigger_mode == PySpin.TriggerMode_On: + return True + else: + return trigger_mode + + # Trigger Delay + if propId == cv2.CAP_PROP_TRIGGER_DELAY: + return self.get_pyspin_value("TriggerDelay") + + # Back Light + if propId == cv2.CAP_PROP_BACKLIGHT: + device_indicator_mode = self.get_pyspin_value("DeviceIndicatorMode") + if device_indicator_mode == PySpin.DeviceIndicatorMode_Inactive: + return False + elif device_indicator_mode == PySpin.DeviceIndicatorMode_Active: + return True + else: + return device_indicator_mode + + # Auto White Balance setting + if propId == cv2.CAP_PROP_AUTO_WB: + balance_white_auto = self.get_pyspin_value("BalanceWhiteAuto") + + if balance_white_auto == PySpin.BalanceWhiteAuto_Off: + return False + elif balance_white_auto == PySpin.BalanceWhiteAuto_Continuous: + return True + else: + return balance_white_auto + + # If none of the above conditions apply + warn(f"propID={propId} is not supported") - if propId==cv2.CAP_PROP_GAIN: - return self._get_Gain() + return False - if propId==cv2.CAP_PROP_BRIGHTNESS: - return self._get_Brightness() + def setExceptionMode(self, enable: bool) -> None: + """Switches exceptions mode. - if propId==cv2.CAP_PROP_GAMMA: - return self._get_Gamma() + Methods raise exceptions if not successful instead of returning an error code. - if propId==cv2.CAP_PROP_FRAME_WIDTH: - return self._get_Width() + Parameters + ---------- + enable : bool + """ + if enable: + warnings.simplefilter("error", EasyPySpinWarning) + else: + warnings.simplefilter("ignore", EasyPySpinWarning) - if propId==cv2.CAP_PROP_FRAME_HEIGHT: - return self._get_Height() + def set_pyspin_value(self, node_name: str, value: any) -> bool: + """Setting PySpin value with some useful checks. 
+ + This function adds functions that PySpin's ``SetValue`` does not support, + such as **writable check**, **argument type check**, **value range check and auto-clipping**. + If it fails, a warning will be raised. ``EasyPySpinWarning`` can control this warning. + + Parameters + ---------- + node_name : str + Name of the node to set. + value : any + Value to set. The type is assumed to be ``int``, ``float``, ``bool``, ``str`` or ``PySpin Enumerate``. + + Returns + ------- + is_success : bool + Whether success or not: True for success, False for failure. + + Examples + -------- + Success case. + + >>> set_pyspin_value("ExposureTime", 1000.0) + True + >>> set_pyspin_value("Width", 256) + True + >>> set_pyspin_value("GammaEnable", False) + True + >>> set_pyspin_value("ExposureAuto", PySpin.ExposureAuto_Off) + True + >>> set_pyspin_value("ExposureAuto", "Off") + True + + Success case, and the value is clipped. + + >>> set_pyspin_value("ExposureTime", 0.1) + EasyPySpinWarning: 'ExposureTime' value must be in the range of [20.0, 30000002.0], so 0.1 become 20.0 + True + + Failure case. + + >>> set_pyspin_value("Width", 256.0123) + EasyPySpinWarning: 'value' must be 'int', not 'float' + False + >>> set_pyspin_value("hoge", 1) + EasyPySpinWarning: 'CameraPtr' object has no attribute 'hoge' + False + >>> set_pyspin_value("ExposureAuto", "hoge") + EasyPySpinWarning: 'PySpin' object has no attribute 'ExposureAuto_hoge' + False + """ + if not self.isOpened(): + warn("Camera is not open") + return False - if propId==cv2.CAP_PROP_FPS: - return self._get_FrameRate() + # Check 'CameraPtr' object has attribute 'node_name' + if not hasattr(self.cam, node_name): + warn(f"'{type(self.cam).__name__}' object has no attribute '{node_name}'") + return False - if propId==cv2.CAP_PROP_TEMPERATURE: - return self._get_Temperature() + # Get attribution + node = getattr(self.cam, node_name) - if propId==cv2.CAP_PROP_BACKLIGHT: - return self._get_BackLight() + # Check 'node' object has attribute 'SetValue' + if not hasattr(node, "SetValue"): + warn(f"'{type(node).__name__}' object has no attribute 'SetValue'") + return False - if propId==cv2.CAP_PROP_TRIGGER: - return self._get_Trigger() + # Check node is writable + if not PySpin.IsWritable(node): + warn(f"'{node_name}' is not writable") + return False - if propId==cv2.CAP_PROP_TRIGGER_DELAY: - return self._get_TriggerDelay() + # Get type + node_type = type(node) + value_type = type(value) + + # Convert numpy array with one element + # into a standard Python scalar object + if value_type is np.ndarray: + if value.size == 1: + value = value.item() + value_type = type(value) + + # Check value type of Integer node case + if node_type is PySpin.IInteger: + if value_type is not int: + warn(f"'value' must be 'int', not '{value_type.__name__}'") + return False + + # Check value type of Float node case + elif node_type is PySpin.IFloat: + if value_type not in (int, float): + warn(f"'value' must be 'int' or 'float', not '{value_type.__name__}'") + return False + + # Check value type of Boolean node case + elif node_type is PySpin.IBoolean: + if value_type is not bool: + warn(f"'value' must be 'bool', not '{value_type.__name__}'") + return False + + # Check value type of Enumeration node case + elif isinstance(node, PySpin.IEnumeration): + if value_type is str: + # If the type is ``str``, + # replace the corresponding PySpin's Enumeration if it exists. 
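+                # For example, node_name "ExposureAuto" with value "Off" resolves to PySpin.ExposureAuto_Off.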
+ enumeration_name = f"{node_name}_{value}" + if hasattr(PySpin, enumeration_name): + value = getattr(PySpin, enumeration_name) + value_type = type(value) + else: + warn(f"'PySpin' object has no attribute '{enumeration_name}'") + return False + elif value_type is not int: + warn( + f"'value' must be PySpin's Enumeration, not '{value_type.__name__}'" + ) + return False + + # Clip the value when node type is Integer of Float + if node_type in (PySpin.IInteger, PySpin.IFloat): + v_min = node.GetMin() + v_max = node.GetMax() + value_clipped = min(max(value, v_min), v_max) + if value_clipped != value: + warn( + f"'{node_name}' value must be in the range of [{v_min}, {v_max}], so {value} become {value_clipped}" + ) + value = value_clipped + + # Finally, SetValue + try: + node.SetValue(value) + except PySpin.SpinnakerException as e: + msg_pyspin = str(e) + warn(msg_pyspin) + return False - return False - - def __clip(self, a, a_min, a_max): - return min(max(a, a_min), a_max) - - def _set_ExposureTime(self, value): - if not type(value) in (int, float): return False - exposureTime_to_set = self.__clip(value, self.cam.ExposureTime.GetMin(), self.cam.ExposureTime.GetMax()) - self.cam.ExposureTime.SetValue(exposureTime_to_set) return True - def _set_ExposureAuto(self, value): - self.cam.ExposureAuto.SetValue(value) - return True + def get_pyspin_value(self, node_name: str) -> any: + """Getting PySpin value with some useful checks. - def _set_Gain(self, value): - if not type(value) in (int, float): return False - gain_to_set = self.__clip(value, self.cam.Gain.GetMin(), self.cam.Gain.GetMax()) - self.cam.Gain.SetValue(gain_to_set) - return True + Parameters + ---------- + node_name : str + Name of the node to get. - def _set_GainAuto(self, value): - self.cam.GainAuto.SetValue(value) - return True - - def _set_Brightness(self, value): - if not type(value) in (int, float): return False - brightness_to_set = self.__clip(value, self.cam.AutoExposureEVCompensation.GetMin(), self.cam.AutoExposureEVCompensation.GetMax()) - self.cam.AutoExposureEVCompensation.SetValue(brightness_to_set) - return True + Returns + ------- + value : any + value - def _set_Gamma(self, value): - if not type(value) in (int, float): return False - gamma_to_set = self.__clip(value, self.cam.Gamma.GetMin(), self.cam.Gamma.GetMax()) - self.cam.Gamma.SetValue(gamma_to_set) - return True + Examples + -------- + Success case. - def _set_FrameRate(self, value): - if not type(value) in (int, float): return False - self.cam.AcquisitionFrameRateEnable.SetValue(True) - fps_to_set = self.__clip(value, self.cam.AcquisitionFrameRate.GetMin(), self.cam.AcquisitionFrameRate.GetMax()) - self.cam.AcquisitionFrameRate.SetValue(fps_to_set) - return True + >>> get_pyspin_value("ExposureTime") + 103.0 + >>> get_pyspin_value("GammaEnable") + True + >>> get_pyspin_value("ExposureAuto") + 0 - def _set_BackLight(self, value): - if value==True:backlight_to_set = PySpin.DeviceIndicatorMode_Active - elif value==False: backlight_to_set = PySpin.DeviceIndicatorMode_Inactive - else: return False - self.cam.DeviceIndicatorMode.SetValue(backlight_to_set) - return True + Failure case. 
- def _set_Trigger(self, value): - if value==True: - trigger_mode_to_set = PySpin.TriggerMode_On - elif value==False: - trigger_mode_to_set = PySpin.TriggerMode_Off - else: + >>> get_pyspin_value("hoge") + EasyPySpinWarning: 'CameraPtr' object has no attribute 'hoge' + None + """ + if not self.isOpened(): + warn("Camera is not open") return False - self.cam.TriggerMode.SetValue(trigger_mode_to_set) - return True + # Check 'CameraPtr' object has attribute 'node_name' + if not hasattr(self.cam, node_name): + warn(f"'{type(self.cam).__name__}' object has no attribute '{node_name}'") + return None - def _set_TriggerDelay(self, value): - if not type(value) in (int, float): return False - delay_to_set = self.__clip(value, self.cam.TriggerDelay.GetMin(), self.cam.TriggerDelay.GetMax()) - self.cam.TriggerDelay.SetValue(delay_to_set) - return True + # Get attribution + node = getattr(self.cam, node_name) - def _get_ExposureTime(self): - return self.cam.ExposureTime.GetValue() + # Check 'node_name' object has attribute 'GetValue' + if not hasattr(node, "GetValue"): + warn(f"'{type(node).__name__}' object has no attribute 'GetValue'") + return None + + # Check node is readable + if not PySpin.IsReadable(node): + warn(f"'{node_name}' is not readable") + return None + + # Finally, GetValue + value = node.GetValue() - def _get_Gain(self): - return self.cam.Gain.GetValue() + return value - def _get_Brightness(self): - return self.cam.AutoExposureEVCompensation.GetValue() + def _get_camera_series_name(self) -> str: + """Get camera series name""" + model_name = self.get_pyspin_value("DeviceModelName") - def _get_Gamma(self): - return self.cam.Gamma.GetValue() + series_names = ["BFS", "BFLY", "CM3", "FL3", "GS3", "ORX", "FFY-DL"] + for name in series_names: + if name in model_name: + return name - def _get_Width(self): - return self.cam.Width.GetValue() + def _configure_as_primary(self): + """Configure as primary camera for synchronized capture - def _get_Height(self): - return self.cam.Height.GetValue() + Notes + ----- + https://www.flir.com/support-center/iis/machine-vision/application-note/configuring-synchronized-capture-with-multiple-cameras/ + + 4. Set the output line + 1. For CM3, FL3, GS3, FFY-DL, and ORX cameras, select Line2 from the Line Selection dropdown and set Line Mode to Output. + 2. For BFS cameras, select Line1 from the Line Selection dropdown and set Line Mode to Output. + 5. For BFS and BFLY cameras enable the 3.3V line + 1. For BFS cameras from the line selection drop-down select Line2 and check the checkbox for 3.3V Enable. + 2. For BFLY cameras, set 3.3V Enable to true + """ + series_name = self._get_camera_series_name() + + # Set the output line + if series_name in ["CM3", "FL3", "GS3", "FFY-DL", "ORX"]: + # For CM3, FL3, GS3, FFY-DL, and ORX cameras, + # select Line2 from the Line Selection dropdown and set Line Mode to Output. + self.set_pyspin_value("LineSelector", "Line2") + self.set_pyspin_value("LineMode", "Output") + elif series_name in ["BFS"]: + # For BFS cameras, select Line1 from the Line Selection dropdown + # and set Line Mode to Output. + self.set_pyspin_value("LineSelector", "Line1") + self.set_pyspin_value("LineMode", "Output") + + # For BFS and BFLY cameras enable the 3.3V line + if series_name in ["BFS"]: + # For BFS cameras from the line selection drop-down select Line2 + # and check the checkbox for 3.3V Enable. 
+ self.set_pyspin_value("LineSelector", "Line2") + self.set_pyspin_value("V3_3Enable", True) + elif series_name in ["BFLY"]: + # For BFLY cameras, set 3.3V Enable to true + self.set_pyspin_value("V3_3Enable", True) + + def _configure_as_secondary(self): + """Configure as secondary camera for synchronized capture + + Notes + ----- + https://www.flir.com/support-center/iis/machine-vision/application-note/configuring-synchronized-capture-with-multiple-cameras/ + + 2. Select the GPIO tab. + 1. Set the trigger source + 2. For BFS, CM3, FL3, FFY-DL, and GS3 cameras, from the Trigger Source drop-down, select Line 3. + 3. For ORX cameras, from the Trigger Source drop-down, select Line 5. + 4. For BFLY cameras, from the Trigger Source drop-down, select Line 0 + 3. From the Trigger Overlap drop-down, select Read Out. + 4. From the Trigger Mode drop-down, select On. + """ + series_name = self._get_camera_series_name() - def _get_FrameRate(self): - return self.cam.AcquisitionFrameRate.GetValue() + self.set_pyspin_value("TriggerMode", "Off") + self.set_pyspin_value("TriggerSelector", "FrameStart") - def _get_Temperature(self): - return self.cam.DeviceTemperature.GetValue() + # Set the trigger source + if series_name in ["BFS", "CM3", "FL3", "FFY-DL", "GS3"]: + # For BFS, CM3, FL3, FFY-DL, and GS3 cameras, + # from the Trigger Source drop-down, select Line 3. + self.set_pyspin_value("TriggerSource", "Line3") + elif series_name in ["ORX"]: + # For ORX cameras, from the Trigger Source drop-down, select Line 5. + self.set_pyspin_value("TriggerSource", "Line5") - def _get_BackLight(self): - status = self.cam.DeviceIndicatorMode.GetValue() - return (True if status == PySpin.DeviceIndicatorMode_Active else - False if status == PySpin.DeviceIndicatorMode_Inactive else - status) - - def _get_Trigger(self): - status = self.cam.TriggerMode.GetValue() - return (True if status == PySpin.TriggerMode_On else - False if status == PySpin.TriggerMode_Off else - status) + # From the Trigger Overlap drop-down, select Read Out. + self.set_pyspin_value("TriggerOverlap", "ReadOut") - def _get_TriggerDelay(self): - return self.cam.TriggerDelay.GetValue() + # From the Trigger Mode drop-down, select On. + self.set_pyspin_value("TriggerMode", "On") diff --git a/EasyPySpin/videocaptureex.py b/EasyPySpin/videocaptureex.py index fb56f45..b7c005e 100644 --- a/EasyPySpin/videocaptureex.py +++ b/EasyPySpin/videocaptureex.py @@ -1,145 +1,230 @@ +from typing import Union, Tuple, List + +import numpy as np import cv2 import PySpin -import numpy as np + from .videocapture import VideoCapture +from .utils import warn + class VideoCaptureEX(VideoCapture): - """ + """Open a FLIR camera for video capturing. + VideoCaptureEX class is subclass of VideoCapture class. - It provides extensions that are not supported by OpenCV's VideoCapture. - + It provides EXTENSIONS that OpenCV's VideoCapture does not support. + For example, Averaged image, Exposure bracketing, and HDR image. + Attributes ---------- cam : PySpin.CameraPtr - camera - nodemap : PySpin.INodeMap - nodemap represents the elements of a camera description file. - grabTimeout : uint64_t + PySpin camera pointer. + grabTimeout : int, default=PySpin.EVENT_TIMEOUT_INFINITE a 64bit value that represents a timeout in milliseconds - streamID : uint64_t + streamID : int, default=0 The stream to grab the image. - auto_software_trigger_execute : bool - Whether or not to execute a software trigger when executing "read()". 
- When the "TriggerMode" is "On" and the "TriggerSource" is set to "Software". (Default: False) + auto_software_trigger_execute : bool, default=False + Whether or not to execute a software trigger when executing ``grab()``. + When the SoftwareTrigger is available. average_num : int - average number + Number of images to average. It must be natural number. Methods ------- + get(propId) + Gets a property. + grab() + Grabs the next frame from capturing device. + isOpened() + Whether a camera is open or not. + open() + Open a capturing device for video capturing. read() - returns the next frame. + Returns the next frame. release() Closes capturing device. - isOpened() - Whether a camera is open or not. + retrieve() + Decodes and returns the grabbed video frame. set(propId, value) Sets a property. - get(propId) - Gets a property. + setExceptionMode(enable) + Switches exceptions mode. + readHDR(t_min, t_max, num, t_ref) + Capture multiple images with different exposure and merge into an HDR image. + + Notes + ----- + Supported ``cv2.VideoCaptureProperties`` for ``set()`` or ``get()`` methods. + `cv2.CAP_PROP_FPS` + `cv2.CAP_PROP_FRAME_WIDTH` + `cv2.CAP_PROP_FRAME_HEIGHT` + `cv2.CAP_PROP_BRIGHTNESS` + `cv2.CAP_PROP_GAIN` + `cv2.CAP_PROP_EXPOSURE` + `cv2.CAP_PROP_GAMMA` + `cv2.CAP_PROP_TEMPERATURE` (get only) + `cv2.CAP_PROP_TRIGGER` + `cv2.CAP_PROP_TRIGGER_DELAY` + `cv2.CAP_PROP_BACKLIGHT` + `cv2.CAP_PROP_AUTO_WB` """ - def __init__(self, index): - """ - Parameters - ---------- - index : int - id of the video capturing device to open. - """ - super(VideoCaptureEX, self).__init__(index) - self.average_num = 1 - def read(self): - """ - returns the next frame. - The returned frame is the average of multiple images taken. + # Number of images to average + __average_num: int = 1 + + @property + def average_num(self) -> int: + return self.__average_num + + @average_num.setter + def average_num(self, value: int): + if (type(value) is int) and (value >= 1): + self.__average_num = value + else: + warn(f"'average_num' must be natural number, {value} is invalid") + + def grab(self): + raise Exception("VideoCaptureEX does not support `grab` module") + + def retrieve(self): + raise Exception("VideoCaptureEX does not support `retrieve` module") + + def read(self) -> Tuple[bool, np.ndarray]: + """Returns the next frame. + + The returned frame is the **average of multiple images**. Returns ------- retval : bool false if no frames has been grabbed. - image : array_like + image : np.ndarray grabbed image is returned here. If no image has been grabbed the image will be None. 
+ + Examples + -------- + Noemal case + >>> cap.average_num = 1 + >>> ret, frame = cap.read() + + Average of multiple images case + + >>> cap.average_num = 10 + >>> ret, frame = cap.read() """ - if self.average_num==1: - return super(VideoCaptureEX, self).read() + average_num = self.average_num + + if average_num == 1: + ret = super().grab() + + if not ret: + return False, None + + return super().retrieve() else: - imlist = [ super(VideoCaptureEX, self).read()[1] for i in range(self.average_num) ] - frame = (cv2.merge(imlist).mean(axis=2)).astype(imlist[0].dtype) - return True, frame - + for i in range(average_num): + ret = super().grab() + if not ret: + return False, None - def readHDR(self, t_min, t_max, num=None, t_ref=10000): - """ - Capture multiple images with different exposure and merge into an HDR image + ret, image = super().retrieve() + + if i == 0: + rets = np.empty((average_num), dtype=np.bool) + images = np.empty((*image.shape, average_num), dtype=image.dtype) + + rets[i] = ret + images[..., i] = image + + if np.all(rets): + image_averaged = np.mean(images, axis=-1).astype(image.dtype) + return True, image_averaged + else: + return False, None - NOTE: A software trigger is used to capture images. In order to acquire an image reliably at the set exposure time. + def readHDR( + self, t_min: float, t_max: float, t_ref: float = 10000, ratio: float = 2.0 + ) -> Tuple[bool, np.ndarray]: + """Capture multiple images with different exposure and merge into an HDR image. Parameters ---------- t_min : float - minimum exposure time [us] + Minimum exposure time [us] t_max : float - maximum exposure time [us] - num : int - number of shots. - If 'num' is None, 'num' is automatically determined from 't_min' and 't_max'. It is set so that the ratio of neighboring exposure times is approximately 2x. - t_ref : float, optional - Reference time [us]. Determines the brightness of the merged image based on this time. + Maximum exposure time [us] + t_ref : float, default=10000 + Reference time [us]. + Determines the brightness of the merged image based on this time. + ratio : int, default=2.0 + Ratio of exposure time. + Number of shots is automatically determined from `t_min` and `t_max`. + It is set so that the `ratio` of neighboring exposure times. Returns ------- retval : bool false if no frames has been grabbed. - image_hdr : array_like + image_hdr : np.ndarray merged HDR image is returned here. If no image has been grabbed the image will be None. + + Notes + ----- + A software trigger is used to capture images. In order to acquire an image reliably at the set exposure time. """ # Set between the maximum and minimum values of the camera - t_min = np.clip(t_min, self.cam.ExposureTime.GetMin(), self.cam.ExposureTime.GetMax()) - t_max = np.clip(t_max, self.cam.ExposureTime.GetMin(), self.cam.ExposureTime.GetMax()) - + t_min = np.clip( + t_min, self.cam.ExposureTime.GetMin(), self.cam.ExposureTime.GetMax() + ) + t_max = np.clip( + t_max, self.cam.ExposureTime.GetMin(), self.cam.ExposureTime.GetMax() + ) + + # Determine nnumber of shots + num = 2 + if ratio > 1.0: + while t_max > t_min * (ratio ** num): + num += 1 + + # Exposure time to be taken + # The equality sequence from minimum (t_min) to maximum (t_max) exposure time + times = np.geomspace(t_min, t_max, num=num) + # Original settings for gamma gamma_origin = self.get(cv2.CAP_PROP_GAMMA) # To capture a linear image, the gamma value is set to 1.0 self.set(cv2.CAP_PROP_GAMMA, 1.0) - - # If 'num' is None, determine num. 
- if num is None: - r = 2 # Ratio of exposure time - num = 2 - while t_max>t_min*(r**num): num += 1 - - # Exposure time to be taken - # The equality sequence from minimum (t_min) to maximum (t_max) exposure time - times = np.geomspace(t_min, t_max, num=num) - + # Exposure bracketing ret, imlist = self.readExposureBracketing(times) - if ret==False: - return False, None - + # Restore the changed gamma self.set(cv2.CAP_PROP_GAMMA, gamma_origin) - + + if not ret: + return False, None + # Normalize to a value between 0 and 1 # By dividing by the maximum value dtype = imlist[0].dtype - if dtype==np.uint8: - max_value = 2**8-1 - elif dtype==np.uint16: - max_value = 2**16-1 + if dtype == np.uint8: + max_value = 2.0 ** 8 - 1 + elif dtype == np.uint16: + max_value = 2.0 ** 16 - 1 else: - max_value = 1 - imlist_norm = [ image/max_value for image in imlist] - + max_value = 1.0 + imlist_norm = [image / max_value for image in imlist] + # Merge HDR img_hdr = self.mergeHDR(imlist_norm, times, t_ref) - return True, img_hdr - + return True, img_hdr.astype(np.float32) - def readExposureBracketing(self, exposures): - """ - Execute exposure bracketing. + def readExposureBracketing( + self, exposures: np.ndarray + ) -> Tuple[bool, List[np.ndarray]]: + """Execute exposure bracketing. Parameters ---------- @@ -154,97 +239,99 @@ def readExposureBracketing(self, exposures): Captured image list """ # Original settings for triggers, exposure, gain - TriggerSelector_origin = self.cam.TriggerSelector.GetValue() - TriggerMode_origin = self.cam.TriggerMode.GetValue() - TriggerSource_origin = self.cam.TriggerSource.GetValue() + node_names_to_change = [ + "TriggerSelector", + "TriggerMode", + "TriggerSource", + "ExposureTime", + "ExposureAuto", + "GainAuto", + ] + values_origin = [ + self.get_pyspin_value(node_name) for node_name in node_names_to_change + ] auto_software_trigger_execute_origin = self.auto_software_trigger_execute - ExposureAuto_origin = self.cam.ExposureAuto.GetValue() - ExposureTime_origin = self.cam.ExposureTime.GetValue() - GainAuto_origin = self.cam.GainAuto.GetValue() - Gain_origin = self.cam.Gain.GetValue() - + # Change the trigger setting - self.cam.TriggerSelector.SetValue(PySpin.TriggerSelector_FrameStart) - self.cam.TriggerMode.SetValue(PySpin.TriggerMode_On) - self.cam.TriggerSource.SetValue(PySpin.TriggerSource_Software) + self.set_pyspin_value("TriggerSelector", "FrameStart") + self.set_pyspin_value("TriggerMode", "On") + self.set_pyspin_value("TriggerSource", "Software") self.auto_software_trigger_execute = True # Auto gain off and fixing gain - self.cam.GainAuto.SetValue(PySpin.GainAuto_Off) - self.cam.Gain.SetValue(Gain_origin) - + gain = self.get_pyspin_value("Gain") + self.set_pyspin_value("GainAuto", "Off") + self.set_pyspin_value("Gain", gain) + # Capture start - imlist = [None]*exposures.shape[0] - for i , t in enumerate(exposures): + imlist = [] + for i, t in enumerate(exposures): self.set(cv2.CAP_PROP_EXPOSURE, float(t)) + # Dummy image + if i == 0: + for _ in range(3): + self.read() + ret, frame = self.read() - if ret==False: + if not ret: return False, None - imlist[i] = frame - - # Restore the changed settings + imlist.append(frame) + self.cam.EndAcquisition() - self.cam.TriggerSelector.SetValue(TriggerSelector_origin) - self.cam.TriggerMode.SetValue(TriggerMode_origin) - self.cam.TriggerSource.SetValue(TriggerSource_origin) + + # Restore the changed settings + for node_name, value in zip(node_names_to_change, values_origin): + self.set_pyspin_value(node_name, value) 
self.auto_software_trigger_execute = auto_software_trigger_execute_origin - self.cam.ExposureTime.SetValue(ExposureTime_origin) - self.cam.ExposureAuto.SetValue(ExposureAuto_origin) - self.cam.GainAuto.SetValue(GainAuto_origin) return True, imlist - - def mergeHDR(self, imlist, times, time_ref=10000, weighting='gaussian'): + @staticmethod + def mergeHDR( + imlist: List[np.ndarray], times: np.ndarray, time_ref: float = 10000 + ) -> np.ndarray: """ Merge an HDR image from LDR images. Parameters ---------- - imlist : List[array_like] + imlist : List[np.ndarray] Multiple images with different exposure. The images are a range of 0.0 to 1.0. - times : array_like + times : np.ndarray Exposure times time_ref : float, optional Reference time. Determines the brightness of the merged image based on this time. - weighting : str, {'uniform', 'tent', 'gaussian', 'photon'}, optional - Weighting scheme - + Returns ------- - img_hdr : array_like + img_hdr : np.ndarray merged HDR image is returned here. """ Zmin = 0.01 Zmax = 0.99 epsilon = 1e-32 - z = np.array(imlist) # (num, height, width) - t = (np.array(times) / time_ref)[:, np.newaxis, np.newaxis] # (num,1,1) - - # Calculate weight - mask = np.bitwise_and(Zmin<=z, z<=Zmax) - if weighting=='uniform': - w = 1.0 * mask - elif weighting=='tent': - w = (0.5-np.abs(z-0.5)) * mask - elif weighting=='gaussian': - w = np.exp(-4*((z-0.5)/0.5)**2) * mask - elif weighting=='photon': - w = t*np.ones_like(z) * mask - else: - raise ValueError(f"Unknown weighting scheme '{weighting}'.") - + + z = np.array(imlist) # (num, height, width) or (num, height, width, ch) + + t = np.array(times) / time_ref # (num, ) + t = np.expand_dims( + t, axis=tuple(range(1, z.ndim)) + ) # (num, 1, 1) or (num, 1, 1, 1) + + # Calculate gaussian weight + mask = np.bitwise_and(Zmin <= z, z <= Zmax) + w = np.exp(-4 * ((z - 0.5) / 0.5) ** 2) * mask + # Merge HDR - img_hdr = np.sum(w*z/t, axis=0) / (np.sum(w, axis=0) + epsilon) - #img_hdr = np.exp(np.sum(w*(np.log(z+epsilon)-np.log(t)), axis=0)/(np.sum(w, axis=0)+1e-32)) + img_hdr = np.average(z / t, axis=0, weights=w + epsilon) # Dealing with under-exposure and over-exposure - under_exposed = np.all(Zmin>z, axis=0) - over_exposed = np.all(z>Zmax, axis=0) - img_hdr[under_exposed] = Zmin/np.max(t) - img_hdr[over_exposed] = Zmax/np.min(t) + under_exposed = np.all(Zmin > z, axis=0) + over_exposed = np.all(z > Zmax, axis=0) + img_hdr[under_exposed] = Zmin / np.max(t) + img_hdr[over_exposed] = Zmax / np.min(t) return img_hdr diff --git a/README.md b/README.md index 564c40b..9e49c6c 100644 --- a/README.md +++ b/README.md @@ -11,14 +11,7 @@ EasyPySpin is an unofficial wrapper for FLIR [Spinnaker SDK](https://www.flir.co ```sh pip install git+https://github.com/elerac/EasyPySpin ``` -After installation, connect the camera and try `examples/video.py`. - -## Command Line Tool -Connect the camera and execute the following commands, as shown below, then you can check the captured images. -```sh -EasyPySpin -``` -To change the camera settings, add an option to this command. Check with the `-h` option. +After installation, connect the camera and try [examples/video.py](examples/video.py). ## Usage ### Capture image from camera @@ -35,43 +28,60 @@ cv2.imwrite("frame.png", frame) cap.release() ``` + ### Basic property settings You can access properties using `cap.set(propId, value)` or `cap.get(propId)`. See also [supported propId](#Supported-VideoCaptureProperties). 
diff --git a/README.md b/README.md
index 564c40b..9e49c6c 100644
--- a/README.md
+++ b/README.md
@@ -11,14 +11,7 @@ EasyPySpin is an unofficial wrapper for FLIR [Spinnaker SDK](https://www.flir.co
 ```sh
 pip install git+https://github.com/elerac/EasyPySpin
 ```
-After installation, connect the camera and try `examples/video.py`.
-
-## Command Line Tool
-Connect the camera and execute the following commands, as shown below, then you can check the captured images.
-```sh
-EasyPySpin
-```
-To change the camera settings, add an option to this command. Check with the `-h` option.
+After installation, connect the camera and try [examples/video.py](examples/video.py).

 ## Usage
 ### Capture image from camera
@@ -35,43 +28,60 @@ cv2.imwrite("frame.png", frame)
 cap.release()
 ```

+
 ### Basic property settings
 You can access properties using `cap.set(propId, value)` or `cap.get(propId)`. See also [supported propId](#Supported-VideoCaptureProperties).
 ```python
-cap.set(cv2.CAP_PROP_EXPOSURE, 100000) #us
-cap.set(cv2.CAP_PROP_GAIN, 10) #dB
+cap.set(cv2.CAP_PROP_EXPOSURE, 100000)  # us
+cap.set(cv2.CAP_PROP_GAIN, 10)  # dB

-print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
 ```

 ### Advanced property settings
-`cap.set()` and `cap.get()` can only access basic properties. To access advanced properties, you should use QuickSpinAPI or GenAPI.
+`cap.set()` and `cap.get()` can only access basic properties. To access advanced properties, you can use the QuickSpin API, which PySpin supports.
 ```python
-#QuickSpinAPI example
 cap.cam.AdcBitDepth.SetValue(PySpin.AdcBitDepth_Bit12)
 cap.cam.PixelFormat.SetValue(PySpin.PixelFormat_Mono16)
+```
+Alternatively, you can use `cap.set_pyspin_value()` or `cap.get_pyspin_value()`, which EasyPySpin provides. These methods also check whether the target node is readable or writable and verify the type of the value.
+```python
+cap.set_pyspin_value("AdcBitDepth", "Bit12")
+cap.set_pyspin_value("PixelFormat", "Mono16")

-#GenAPI example
-node_exposureAuto = PySpin.CEnumerationPtr(cap.nodemap.GetNode("ExposureAuto"))
-exposureAuto = PySpin.CEnumEntryPtr(node_exposureAuto.GetEntryByName("Once")).GetValue()
-node_exposureAuto.SetIntValue(exposureAuto)
+cap.get_pyspin_value("GammaEnable")
+cap.get_pyspin_value("DeviceModelName")
 ```

 ## Supported VideoCaptureProperties
-* `cv2.CAP_PROP_EXPOSURE`
-* `cv2.CAP_PROP_GAIN`
-* `cv2.CAP_PROP_GAMMA`
-* `cv2.CAP_PROP_FPS`
-* `cv2.CAP_PROP_BRIGHTNESS`
-* `cv2.CAP_PROP_FRAME_WIDTH` (get only)
-* `cv2.CAP_PROP_FRAME_HEIGHT` (get only)
-* `cv2.CAP_PROP_TEMPERATURE` (get only)
-* `cv2.CAP_PROP_BACKLIGHT`
-* `cv2.CAP_PROP_TRIGGER`
-* `cv2.CAP_PROP_TRIGGER_DELAY`
+Here is the list of supported VideoCaptureProperties.
+In `set(propId, value)` and `get(propId)`, PySpin is used to set and get the camera settings. Each `propId` is mapped to the PySpin setting that is closest to it in meaning. The table below shows this mapping in pseudo-code form.
+
+| propId | type | set(propId, value) | value = get(propId) |
+| ---- | ---- | ---- | ---- |
+| cv2.CAP_PROP_FRAME_WIDTH | int | `Width` = value | value = `Width` |
+| cv2.CAP_PROP_FRAME_HEIGHT | int | `Height` = value | value = `Height` |
+| cv2.CAP_PROP_FPS | float | `AcquisitionFrameRateEnable` = `True` <br> `AcquisitionFrameRate` = value | value = `ResultingFrameRate`|
+| cv2.CAP_PROP_BRIGHTNESS | float | `AutoExposureEVCompensation` = value | value = `AutoExposureEVCompensation` |
+| cv2.CAP_PROP_GAIN | float | if value != -1 <br>  `GainAuto` = `Off` <br>  `Gain` = value <br> else <br>  `GainAuto` = `Continuous` | value = `Gain` |
+| cv2.CAP_PROP_EXPOSURE | float | if value != -1 <br>  `ExposureAuto` = `Off` <br>  `ExposureTime` = value <br> else <br>  `ExposureAuto` = `Continuous` | value = `ExposureTime` |
+| cv2.CAP_PROP_GAMMA | float | `GammaEnable` = `True` <br> `Gamma` = value | value = `Gamma` |
+| cv2.CAP_PROP_TEMPERATURE | float | | value = `DeviceTemperature` |
+| cv2.CAP_PROP_TRIGGER | bool | if value == `True` <br>  `TriggerMode` = `On` <br> else <br>  `TriggerMode` = `Off` | if trigger_mode == `On` <br>  value = `True` <br> elif trigger_mode == `Off` <br>  value = `False` |
+| cv2.CAP_PROP_TRIGGER_DELAY | float | `TriggerDelay` = value | value = `TriggerDelay` |
+| cv2.CAP_PROP_BACKLIGHT | bool | if value == `True` <br>  `DeviceIndicatorMode` = `Active` <br> else <br>  `DeviceIndicatorMode` = `Inactive` | if device_indicator_mode == `Active` <br>  value = `True` <br> elif device_indicator_mode == `Inactive` <br>  value = `False` |
+| cv2.CAP_PROP_AUTO_WB | bool | if value == `True` <br>  `BalanceWhiteAuto` = `Continuous` <br> else <br>  `BalanceWhiteAuto` = `Off` | if balance_white_auto == `Continuous` <br>  value = `True` <br> elif balance_white_auto == `Off` <br>  value = `False` |
+
+## Command-Line Tool
+EasyPySpin also provides a command-line tool. Connect the camera and run the following command to view the captured images.
+```sh
+EasyPySpin [-h] [-i INDEX] [-e EXPOSURE] [-g GAIN] [-G GAMMA]
+           [-b BRIGHTNESS] [-f FPS] [-s SCALE]
+```

 ## External Links
+Here are some external links that are useful when working with the Spinnaker SDK.
 * [Spinnaker® SDK Programmer's Guide and API Reference (C++)](http://softwareservices.ptgrey.com/Spinnaker/latest/index.html)
 * [Getting Started with Spinnaker SDK on MacOS Applicable products](https://www.flir.com/support-center/iis/machine-vision/application-note/getting-started-with-spinnaker-sdk-on-macos/)
 * [Spinnaker Nodes](https://www.flir.com/support-center/iis/machine-vision/application-note/spinnaker-nodes/)
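To make the propId mapping documented in the README table above concrete, here is a short usage sketch; the camera index and the property values are arbitrary examples:

```python
# Illustrative use of the propId mapping from the README table above.
import cv2
import EasyPySpin

cap = EasyPySpin.VideoCapture(0)

cap.set(cv2.CAP_PROP_EXPOSURE, 10000)  # ExposureAuto = Off, ExposureTime = 10000 us
cap.set(cv2.CAP_PROP_GAIN, -1)         # -1 switches GainAuto back to Continuous
cap.set(cv2.CAP_PROP_TRIGGER, False)   # TriggerMode = Off

print(cap.get(cv2.CAP_PROP_TEMPERATURE))  # reads DeviceTemperature (get only)

cap.release()
```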
f"{args.output}.exr" + print(f"Write {filename_exr}") + cv2.imwrite(filename_exr, img_hdr.astype(np.float32)) + + for ev in [-2, -1, 0, 1, 2]: + sign = "+" if ev >= 0 else "-" + filename_png = f"{args.output}_{sign}{abs(ev)}EV.png" + ratio = 2.0 ** ev + img_hdr_u8 = np.clip(img_hdr * ratio * 255, 0, 255).astype(np.uint8) + + print(f"Write {filename_png}") + cv2.imwrite(filename_png, img_hdr_u8) + + cap.release() + -if __name__=="__main__": +if __name__ == "__main__": main() diff --git a/examples/multiple.py b/examples/multiple.py index 085b9f8..0ea7a4c 100644 --- a/examples/multiple.py +++ b/examples/multiple.py @@ -1,26 +1,35 @@ +"""Example of capture with multiple camera. +""" import EasyPySpin import cv2 -NUM_IMAGES = 10 def main(): - cap0 = EasyPySpin.VideoCapture(0) - cap1 = EasyPySpin.VideoCapture(1) - - for n in range(NUM_IMAGES): - ret0, frame0 = cap0.read() - ret1, frame1 = cap1.read() - - filename0 = "multiple-{0}-{1}.png".format(n, 0) - filename1 = "multiple-{0}-{1}.png".format(n, 1) - cv2.imwrite(filename0, frame0) - cv2.imwrite(filename1, frame1) - print("Image saved at {}".format(filename0)) - print("Image saved at {}".format(filename1)) - print() - - cap0.release() - cap1.release() - -if __name__=="__main__": + # cap = EasyPySpin.MultipleVideoCapture(0) + cap = EasyPySpin.MultipleVideoCapture(0, 1) + # cap = EasyPySpin.MultipleVideoCapture(0, 1, 2) + + if not all(cap.isOpened()): + print("All cameras can't open\nexit") + return -1 + + while True: + read_values = cap.read() + + for i, (ret, frame) in enumerate(read_values): + if not ret: + continue + + frame = cv2.resize(frame, None, fx=0.25, fy=0.25) + cv2.imshow(f"frame-{i}", frame) + + key = cv2.waitKey(30) + if key == ord("q"): + break + + cv2.destroyAllWindows() + cap.release() + + +if __name__ == "__main__": main() diff --git a/examples/synchronized.py b/examples/synchronized.py index 36bb445..4c7f894 100644 --- a/examples/synchronized.py +++ b/examples/synchronized.py @@ -1,43 +1,38 @@ +"""Example of synchronized capture with multiple cameras. 
diff --git a/examples/video.py b/examples/video.py
index ec2ab27..4da6c87 100644
--- a/examples/video.py
+++ b/examples/video.py
@@ -1,35 +1,32 @@
-"""
-A simple example of capturing and displaying an image
+"""A simple example of capturing and displaying an image
 """
 import EasyPySpin
 import cv2

+
 def main():
-    # Instance creation
     cap = EasyPySpin.VideoCapture(0)

-    # Checking if it's connected to the camera
     if not cap.isOpened():
         print("Camera can't open\nexit")
         return -1
-
-    # Set the camera parameters
-    cap.set(cv2.CAP_PROP_EXPOSURE, -1) #-1 sets exposure_time to auto
-    cap.set(cv2.CAP_PROP_GAIN, -1) #-1 sets gain to auto

-    # Start capturing
+    cap.set(cv2.CAP_PROP_EXPOSURE, -1)  # -1 sets exposure_time to auto
+    cap.set(cv2.CAP_PROP_GAIN, -1)  # -1 sets gain to auto
+
     while True:
         ret, frame = cap.read()
-        #frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR) #for RGB camera demosaicing
+        # frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR)  # for RGB camera demosaicing

         img_show = cv2.resize(frame, None, fx=0.25, fy=0.25)
         cv2.imshow("press q to quit", img_show)
         key = cv2.waitKey(30)
-        if key==ord("q"):
+        if key == ord("q"):
             break
-
-    cv2.destroyAllWindows()
+
     cap.release()
+    cv2.destroyAllWindows()
+

-if __name__=="__main__":
+if __name__ == "__main__":
     main()
open("README.md", "r") as fh: + long_description = fh.read() + setup( - name='EasyPySpin', - version='1.2.1', - description='cv2.VideoCapture like wrapper for FLIR Spinnaker SDK', - url='https://github.com/elerac/EasyPySpin', - author='Ryota Maeda', - author_email='maeda.ryota.elerac@gmail.com', - license='MIT', - entry_points={'console_scripts': ['EasyPySpin= EasyPySpin.command_line:main']}, - packages=find_packages() + name="EasyPySpin", + version="2.0.0", + description="cv2.VideoCapture like wrapper for FLIR Spinnaker SDK", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/elerac/EasyPySpin", + author="Ryota Maeda", + author_email="maeda.ryota.elerac@gmail.com", + license="MIT", + entry_points={"console_scripts": ["EasyPySpin= EasyPySpin.command_line:main"]}, + packages=find_packages(), )