diff --git a/README.md b/README.md index 2f6626ec..9145fad2 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ --- -:tada: The **ZED SDK 4.1** is released! We support the [**ZED X**](https://www.stereolabs.com/zed-x/) and [**ZED X Mini**](https://www.stereolabs.com/zed-x/) cameras, added the **Fusion API** for multi-camera Body Tracking, and more! Please check the [Release Notes](https://www.stereolabs.com/developers/release/) of the latest version for more details. +:tada: The **ZED SDK 4.2** is released! We support the [**ZED X**](https://www.stereolabs.com/zed-x/) and [**ZED X Mini**](https://www.stereolabs.com/zed-x/) cameras, added the **Fusion API** for multi-camera Body Tracking, and more! Please check the [Release Notes](https://www.stereolabs.com/developers/release/) of the latest version for more details. ## Overview diff --git a/body tracking/body tracking/cpp/include/GLViewer.hpp b/body tracking/body tracking/cpp/include/GLViewer.hpp index d8ab06ab..cdeeeb38 100644 --- a/body tracking/body tracking/cpp/include/GLViewer.hpp +++ b/body tracking/body tracking/cpp/include/GLViewer.hpp @@ -127,10 +127,19 @@ const std::vector> BODY_BONES_FAST_RENDE class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/body tracking/body tracking/cpp/src/GLViewer.cpp b/body tracking/body tracking/cpp/src/GLViewer.cpp index c470ddea..19fbc300 100644 --- a/body tracking/body tracking/cpp/src/GLViewer.cpp +++ b/body tracking/body tracking/cpp/src/GLViewer.cpp @@ -149,10 
+149,10 @@ void GLViewer::init(int argc, char **argv) { glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); // Compile and create the shader for 3D objects - shaderSK.it = Shader(SK_VERTEX_SHADER, SK_FRAGMENT_SHADER); + shaderSK.it.set(SK_VERTEX_SHADER, SK_FRAGMENT_SHADER); shaderSK.MVP_Mat = glGetUniformLocation(shaderSK.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -384,8 +384,7 @@ void GLViewer::idle() { glutPostRedisplay(); } -Simple3DObject::Simple3DObject() { -} +Simple3DObject::Simple3DObject() : vaoID_(0) {} Simple3DObject::~Simple3DObject() { if (vaoID_ != 0) { @@ -732,6 +731,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -767,12 +770,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/body tracking/body tracking/csharp/CMakeLists.txt b/body tracking/body tracking/csharp/CMakeLists.txt index 733d29aa..79219082 100644 --- a/body tracking/body tracking/csharp/CMakeLists.txt +++ b/body tracking/body tracking/csharp/CMakeLists.txt @@ -7,6 +7,7 @@ add_executable(${PROJECT_NAME} MainWindow.cs GLViewer.cs TrackingViewer.cs + Utils.cs App.config packages.config Properties/AssemblyInfo.cs diff --git 
a/body tracking/body tracking/csharp/MainWindow.cs b/body tracking/body tracking/csharp/MainWindow.cs index 06ad24ea..3c8af4d2 100644 --- a/body tracking/body tracking/csharp/MainWindow.cs +++ b/body tracking/body tracking/csharp/MainWindow.cs @@ -94,7 +94,6 @@ public MainWindow(string[] args) isTrackingON = bt_params.enableObjectTracking; bt_params.enableSegmentation = false; bt_params.enableBodyFitting = true; // smooth skeletons moves - bt_params.imageSync = true; // the body tracking is synchronized to the image bt_params.detectionModel = sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE; bt_params.bodyFormat = sl.BODY_FORMAT.BODY_38; @@ -125,7 +124,7 @@ public MainWindow(string[] args) bt_runtime_parameters.detectionConfidenceThreshold = 20; window_name = "ZED| 2D View"; - Cv2.NamedWindow(window_name, WindowMode.Normal);// Create Window + Cv2.NamedWindow(window_name);// Create Window // Create OpenGL window CreateWindow(); @@ -234,7 +233,7 @@ private void NativeWindow_Render(object sender, NativeWindowEventArgs e) // Retrieve Objects zedCamera.RetrieveBodies(ref bodies, ref bt_runtime_parameters); - TrackingViewer.render_2D(ref imageLeftOcv, imgScale, ref bodies, isTrackingON, bt_params.bodyFormat); + TrackingViewer.render_2D(ref imageLeftOcv, imgScale, ref bodies, isTrackingON, bt_params.bodyFormat, bt_params.enableSegmentation); //Update GL View viewer.update(pointCloud, bodies, camPose); diff --git a/body tracking/body tracking/csharp/TrackingViewer.cs b/body tracking/body tracking/csharp/TrackingViewer.cs index 9fe77212..e8fb2718 100644 --- a/body tracking/body tracking/csharp/TrackingViewer.cs +++ b/body tracking/body tracking/csharp/TrackingViewer.cs @@ -6,6 +6,7 @@ using sl; using OpenCvSharp; +using System.Windows.Media.Media3D; public class TrackingViewer @@ -58,7 +59,7 @@ static sl.float2 getImagePosition(Vector2[] bounding_box_image, sl.float2 img_sc return position; } - public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale, 
ref sl.Bodies bodies, bool showOnlyOK, sl.BODY_FORMAT body_format) + public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale, ref sl.Bodies bodies, bool showOnlyOK, sl.BODY_FORMAT body_format, bool render_mask) { OpenCvSharp.Mat overlay = left_display.Clone(); OpenCvSharp.Rect roi_render = new OpenCvSharp.Rect(1, 1, left_display.Size().Width, left_display.Size().Height); @@ -93,6 +94,29 @@ public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_sca } } } + + if (render_mask) + { + Point top_left_corner = Utils.cvt(bod.boundingBox2D[0], img_scale); + Point top_right_corner = Utils.cvt(bod.boundingBox2D[1], img_scale); + Point bottom_left_corner = Utils.cvt(bod.boundingBox2D[3], img_scale); + var width = (int)Math.Abs(top_right_corner.X - top_left_corner.X); + var height = (int)Math.Abs(bottom_left_corner.Y - top_left_corner.Y); + // Scaled ROI + OpenCvSharp.Rect roi = new OpenCvSharp.Rect(top_left_corner.X, top_left_corner.Y, width, height); + sl.Mat mask = new sl.Mat(bod.mask); + if (mask.IsInit()) + { + sl.Mat m = new sl.Mat(); + mask.CopyTo(m); + OpenCvSharp.Mat tmp_mask = new OpenCvSharp.Mat(mask.GetHeight(), mask.GetWidth(), OpenCvSharp.MatType.CV_8UC1, m.GetPtr()); + if (!tmp_mask.Empty()) + { + var mask_resized = tmp_mask.Resize(roi.Size); + overlay.SubMat(roi).SetTo(base_color, mask_resized); + } + } + } } } diff --git a/body tracking/body tracking/csharp/Utils.cs b/body tracking/body tracking/csharp/Utils.cs new file mode 100644 index 00000000..6250a92f --- /dev/null +++ b/body tracking/body tracking/csharp/Utils.cs @@ -0,0 +1,163 @@ +//======= Copyright (c) Stereolabs Corporation, All rights reserved. =============== +using System; +using System.Numerics; +using System.Runtime.InteropServices; +using System.Threading.Tasks; +using OpenCvSharp; + +namespace sl +{ + public class Utils + { + /// + /// Creates an OpenCV version of a ZED Mat. + /// + /// Source ZED Mat. 
+ /// Type of ZED Mat - data type and channel number. + /// + public static OpenCvSharp.Mat SLMat2CVMat(ref sl.Mat zedmat, MAT_TYPE zedmattype) + { + int cvmattype = SLMatType2CVMatType(zedmattype); + OpenCvSharp.Mat cvmat = new OpenCvSharp.Mat(zedmat.GetHeight(), zedmat.GetWidth(), cvmattype, zedmat.GetPtr()); + + return cvmat; + } + + /// + /// Returns the OpenCV type that corresponds to a given ZED Mat type. + /// + private static int SLMatType2CVMatType(MAT_TYPE zedmattype) + { + switch (zedmattype) + { + case sl.MAT_TYPE.MAT_32F_C1: + return OpenCvSharp.MatType.CV_32FC1; + case sl.MAT_TYPE.MAT_32F_C2: + return OpenCvSharp.MatType.CV_32FC2; + case sl.MAT_TYPE.MAT_32F_C3: + return OpenCvSharp.MatType.CV_32FC3; + case sl.MAT_TYPE.MAT_32F_C4: + return OpenCvSharp.MatType.CV_32FC4; + case sl.MAT_TYPE.MAT_8U_C1: + return OpenCvSharp.MatType.CV_8UC1; + case sl.MAT_TYPE.MAT_8U_C2: + return OpenCvSharp.MatType.CV_8UC2; + case sl.MAT_TYPE.MAT_8U_C3: + return OpenCvSharp.MatType.CV_8UC3; + case sl.MAT_TYPE.MAT_8U_C4: + return OpenCvSharp.MatType.CV_8UC4; + default: + return -1; + } + } + + public static ulong getMilliseconds(ulong ts_ns) + { + return ts_ns / 1000000; + } + + public static void drawVerticalLine(ref OpenCvSharp.Mat left_display, Point start_pt, Point end_pt, Scalar clr, int thickness) + { + int n_steps = 7; + Point pt1, pt4; + pt1.X = ((n_steps - 1) * start_pt.X + end_pt.X) / n_steps; + pt1.Y = ((n_steps - 1) * start_pt.Y + end_pt.Y) / n_steps; + + pt4.X = (start_pt.X + (n_steps - 1) * end_pt.X) / n_steps; + pt4.Y = (start_pt.Y + (n_steps - 1) * end_pt.Y) / n_steps; + + Cv2.Line(left_display, start_pt, pt1, clr, thickness); + Cv2.Line(left_display, pt4, end_pt, clr, thickness); + } + + public static Point cvt(Vector2 pt, sl.float2 scale) + { + return new Point(pt.X * scale.x, pt.Y * scale.y); + } + + public static sl.float4 generateColorID(int idx) + { + int offset = Math.Max(0, idx % 5); + sl.float4 color = new float4(); + color.x = id_colors[offset, 0]; 
+ color.y = id_colors[offset, 1]; + color.z = id_colors[offset, 2]; + color.w = 1.0f; + return color; + } + + public static OpenCvSharp.Scalar generateColorID_u(int idx) + { + int offset = Math.Max(0, idx % 5); + OpenCvSharp.Scalar color = new OpenCvSharp.Scalar(); + color[0] = id_colors[offset, 2] * 255; + color[1] = id_colors[offset, 1] * 255; + color[2] = id_colors[offset, 0] * 255; + color[3] = 1.0f * 255; + return color; + } + + public static float[,] id_colors = new float[5, 3]{ + + {.231f, .909f, .69f}, + {.098f, .686f, .816f}, + {.412f, .4f, .804f}, + {1, .725f, .0f}, + {.989f, .388f, .419f} + }; + + public static float[,] class_colors = new float[6, 3]{ + { 44.0f, 117.0f, 255.0f}, // PEOPLE + { 255.0f, 0.0f, 255.0f}, // VEHICLE + { 0.0f, 0.0f, 255.0f}, + { 0.0f, 255.0f, 255.0f}, + { 0.0f, 255.0f, 0.0f}, + { 255.0f, 255.0f, 255.0f} + }; + + public static float4 generateColorClass(int idx) + { + idx = Math.Min(5, idx); + sl.float4 color = new float4(); + color.x = class_colors[idx, 0]; + color.y = class_colors[idx, 1]; + color.z = class_colors[idx, 2]; + color.w = 255.0f; + return color; + } + + public static OpenCvSharp.Scalar generateColorClass_u(int idx) + { + idx = Math.Min(5, idx); + OpenCvSharp.Scalar color = new OpenCvSharp.Scalar(); + color[0] = class_colors[idx, 0]; + color[1] = class_colors[idx, 1]; + color[2] = class_colors[idx, 2]; + color[3] = 255.0f; + return color; + } + + public static bool renderObject(ObjectData i, bool showOnlyOK) + { + if (showOnlyOK) + return (i.objectTrackingState == sl.OBJECT_TRACKING_STATE.OK); + else + return (i.objectTrackingState == sl.OBJECT_TRACKING_STATE.OK || i.objectTrackingState == sl.OBJECT_TRACKING_STATE.OFF); + } + + + public static byte _applyFading(double val, float current_alpha, double current_clr) + { + return (byte)(current_alpha * current_clr + (1.0 - current_alpha) * val); + } + + public static Vec4b applyFading(Scalar val, float current_alpha, Scalar current_clr){ + Vec4b out_ = new Vec4b(); + 
out_[0] = _applyFading(val[0], current_alpha, current_clr[0]); + out_[1] = _applyFading(val[1], current_alpha, current_clr[1]); + out_[2] = _applyFading(val[2], current_alpha, current_clr[2]); + out_[3] = 255; + return out_; + } + } +} \ No newline at end of file diff --git a/body tracking/export/JSON export/cpp/include/GLViewer.hpp b/body tracking/export/JSON export/cpp/include/GLViewer.hpp index 1d6413e5..4506150f 100644 --- a/body tracking/export/JSON export/cpp/include/GLViewer.hpp +++ b/body tracking/export/JSON export/cpp/include/GLViewer.hpp @@ -127,10 +127,19 @@ const std::vector> BODY_BONES_FAST_RENDE class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/body tracking/export/JSON export/cpp/src/GLViewer.cpp b/body tracking/export/JSON export/cpp/src/GLViewer.cpp index 74807224..0a00a48c 100644 --- a/body tracking/export/JSON export/cpp/src/GLViewer.cpp +++ b/body tracking/export/JSON export/cpp/src/GLViewer.cpp @@ -145,10 +145,10 @@ void GLViewer::init(int argc, char **argv) { glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); // Compile and create the shader for 3D objects - shaderSK.it = Shader(SK_VERTEX_SHADER, SK_FRAGMENT_SHADER); + shaderSK.it.set(SK_VERTEX_SHADER, SK_FRAGMENT_SHADER); shaderSK.MVP_Mat = glGetUniformLocation(shaderSK.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); 
// Create the camera @@ -373,8 +373,7 @@ void GLViewer::idle() { glutPostRedisplay(); } -Simple3DObject::Simple3DObject() { -} +Simple3DObject::Simple3DObject() : vaoID_(0) {} Simple3DObject::~Simple3DObject() { if (vaoID_ != 0) { @@ -721,6 +720,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -756,12 +759,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/body tracking/multi-camera/cpp/CMakeLists.txt b/body tracking/multi-camera/cpp/CMakeLists.txt index d247918a..529f685d 100644 --- a/body tracking/multi-camera/cpp/CMakeLists.txt +++ b/body tracking/multi-camera/cpp/CMakeLists.txt @@ -17,18 +17,21 @@ find_package(GLUT REQUIRED) find_package(GLEW REQUIRED) SET(OpenGL_GL_PREFERENCE GLVND) find_package(OpenGL REQUIRED) +find_package(OpenCV) include_directories(${CUDA_INCLUDE_DIRS}) include_directories(${ZED_INCLUDE_DIRS}) include_directories(${GLEW_INCLUDE_DIRS}) include_directories(${GLUT_INCLUDE_DIR}) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) +include_directories(${OpenCV_INCLUDE_DIRS}) link_directories(${ZED_LIBRARY_DIR}) link_directories(${CUDA_LIBRARY_DIRS}) link_directories(${GLEW_LIBRARY_DIRS}) link_directories(${GLUT_LIBRARY_DIRS}) link_directories(${OpenGL_LIBRARY_DIRS}) +link_directories(${OpenCV_LIBRARY_DIRS}) IF(NOT WIN32) SET(SPECIAL_OS_LIBS "pthread") @@ -49,7 +52,7 @@ 
else() SET(ZED_LIBS ${ZED_STATIC_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_LIBRARY}) endif() -TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${UTILS_LIB} ${SPECIAL_OS_LIBS} ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES} ${GLEW_LIBRARIES}) +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${UTILS_LIB} ${SPECIAL_OS_LIBS} ${OpenCV_LIBRARIES} ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES} ${GLEW_LIBRARIES}) if(INSTALL_SAMPLES) LIST(APPEND SAMPLE_LIST ${PROJECT_NAME}) diff --git a/body tracking/multi-camera/cpp/include/ClientPublisher.hpp b/body tracking/multi-camera/cpp/include/ClientPublisher.hpp index 45f587c8..84c11554 100644 --- a/body tracking/multi-camera/cpp/include/ClientPublisher.hpp +++ b/body tracking/multi-camera/cpp/include/ClientPublisher.hpp @@ -5,6 +5,33 @@ #include #include +#include + +struct Trigger{ + + void notifyZED(){ + + cv.notify_all(); + + if(running){ + bool wait_for_zed = true; + const int nb_zed = states.size(); + while(wait_for_zed){ + int count_r = 0; + for(auto &it:states) + count_r += it.second; + wait_for_zed = count_r != nb_zed; + sl::sleep_ms(1); + } + for(auto &it:states) + it.second = false; + } + } + + std::condition_variable cv; + bool running = true; + std::map states; +}; class ClientPublisher{ @@ -12,21 +39,18 @@ class ClientPublisher{ ClientPublisher(); ~ClientPublisher(); - bool open(sl::InputType); + bool open(sl::InputType, Trigger* ref); void start(); void stop(); void setStartSVOPosition(unsigned pos); - bool isRunning() { - return running; - } - private: sl::Camera zed; void work(); std::thread runner; - bool running; int serial; + std::mutex mtx; + Trigger *p_trigger; }; #endif // ! 
__SENDER_RUNNER_HDR__ diff --git a/body tracking/multi-camera/cpp/include/GLViewer.hpp b/body tracking/multi-camera/cpp/include/GLViewer.hpp index 7713f654..d8873cc2 100644 --- a/body tracking/multi-camera/cpp/include/GLViewer.hpp +++ b/body tracking/multi-camera/cpp/include/GLViewer.hpp @@ -28,10 +28,19 @@ class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; @@ -229,18 +238,21 @@ class GLViewer { GLViewer(); ~GLViewer(); bool isAvailable(); + bool isPlaying() const { return play; } + void init(int argc, char **argv); void updateCamera(int, sl::Mat &, sl::Mat &); + void updateCamera(sl::Mat &); - void updateBodies(sl::Bodies &objs,std::map& singldata, sl::FusionMetrics& metrics); + void updateBodies(sl::Bodies &objs,std::map& singledata, sl::FusionMetrics& metrics); void setCameraPose(int, sl::Transform); - unsigned char getKey() { - auto ret_v = lastPressedKey; - lastPressedKey = ' '; - return ret_v; + int getKey() { + const int key = last_key; + last_key = -1; + return key; } void exit(); @@ -262,9 +274,10 @@ class GLViewer { static void keyReleasedCallback(unsigned char c, int x, int y); static void idle(); - void addSKeleton(sl::BodyData &, Simple3DObject &, sl::float3 clr_id, bool raw, sl::BODY_FORMAT format); void addSKeleton(sl::BodyData &, Simple3DObject &, sl::float3 clr_id, bool raw); + sl::float3 getColor(int, bool); + bool available; bool drawBbox = false; @@ -292,6 +305,7 @@ class GLViewer { KEY_STATE keyStates_[256]; std::mutex mtx; + std::mutex mtx_clr; ShaderData 
shader; @@ -316,9 +330,11 @@ class GLViewer { bool show_raw = false; bool draw_flat_color = false; - std::uniform_int_distribution uint_dist360; + std::uniform_int_distribution uint_dist360; std::mt19937 rng; + bool play = true; + int last_key = -1; }; #endif /* __VIEWER_INCLUDE__ */ diff --git a/body tracking/multi-camera/cpp/src/ClientPublisher.cpp b/body tracking/multi-camera/cpp/src/ClientPublisher.cpp index 8e1d837e..b371e67f 100644 --- a/body tracking/multi-camera/cpp/src/ClientPublisher.cpp +++ b/body tracking/multi-camera/cpp/src/ClientPublisher.cpp @@ -1,24 +1,21 @@ #include "ClientPublisher.hpp" -ClientPublisher::ClientPublisher() : running(false) -{ -} +ClientPublisher::ClientPublisher() { } ClientPublisher::~ClientPublisher() { zed.close(); } -bool ClientPublisher::open(sl::InputType input) { - // already running - if (runner.joinable()) - return false; +bool ClientPublisher::open(sl::InputType input, Trigger* ref) { + + p_trigger = ref; sl::InitParameters init_parameters; init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA; init_parameters.input = input; init_parameters.coordinate_units = sl::UNIT::METER; - init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; + init_parameters.depth_stabilization = 5; auto state = zed.open(init_parameters); if (state != sl::ERROR_CODE::SUCCESS) { @@ -26,10 +23,14 @@ bool ClientPublisher::open(sl::InputType input) { return false; } + serial = zed.getCameraInformation().serial_number; + p_trigger->states[serial] = false; + // in most cases in body tracking setup, the cameras are static sl::PositionalTrackingParameters positional_tracking_parameters; // in most cases for body detection application the camera is static: positional_tracking_parameters.set_as_static = true; + state = zed.enablePositionalTracking(positional_tracking_parameters); if (state != sl::ERROR_CODE::SUCCESS) { @@ -56,7 +57,6 @@ bool ClientPublisher::open(sl::InputType input) { void ClientPublisher::start() { if (zed.isOpened()) 
{ - running = true; // the camera should stream its data so the fusion can subscibe to it to gather the detected body and others metadata needed for the process. zed.startPublishing(); // the thread can start to process the camera grab in background @@ -66,37 +66,36 @@ void ClientPublisher::start() void ClientPublisher::stop() { - running = false; if (runner.joinable()) runner.join(); zed.close(); } void ClientPublisher::work() -{ +{ sl::Bodies bodies; sl::BodyTrackingRuntimeParameters body_runtime_parameters; body_runtime_parameters.detection_confidence_threshold = 40; + sl::RuntimeParameters rt; + rt.confidence_threshold = 50; // in this sample we use a dummy thread to process the ZED data. // you can replace it by your own application and use the ZED like you use to, retrieve its images, depth, sensors data and so on. // as long as you call the grab function and the retrieveBodies (which runs the detection) the camera will be able to seamlessly transmit the data to the fusion module. 
- while (running) { - if (zed.grab() == sl::ERROR_CODE::SUCCESS) { - /* - Your App - - */ - + while (p_trigger->running) { + std::unique_lock lk(mtx); + p_trigger->cv.wait(lk); + if(p_trigger->running){ + if(zed.grab(rt) == sl::ERROR_CODE::SUCCESS){ // just be sure to run the bodies detection zed.retrieveBodies(bodies, body_runtime_parameters); + } } - sl::sleep_ms(2); + p_trigger->states[serial] = true; } } void ClientPublisher::setStartSVOPosition(unsigned pos) { zed.setSVOPosition(pos); } - diff --git a/body tracking/multi-camera/cpp/src/GLViewer.cpp b/body tracking/multi-camera/cpp/src/GLViewer.cpp index 74e0455d..9c207d93 100644 --- a/body tracking/multi-camera/cpp/src/GLViewer.cpp +++ b/body tracking/multi-camera/cpp/src/GLViewer.cpp @@ -28,11 +28,11 @@ const GLchar* POINTCLOUD_VERTEX_SHADER = "uniform vec3 u_color;\n" "uniform bool u_drawFlat;\n" "void main() {\n" - // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) - " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n" " if(u_drawFlat)\n" " b_color = vec4(u_color.bgr, .85f);\n" "else{" + // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) + " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n" " vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16);\n" " b_color = vec4(clr_int.b / 255.0f, clr_int.g / 255.0f, clr_int.r / 255.0f, .85f);\n" " }" @@ -82,6 +82,10 @@ GLViewer::~GLViewer() { void GLViewer::exit() { if (currentInstance_) { available = false; + for (auto& pc : point_clouds) + { + pc.second.close(); + } } } @@ -134,7 +138,7 @@ void GLViewer::init(int argc, char **argv) { #endif // Compile and create the shader for 3D objects - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); // 
Create the camera @@ -160,7 +164,8 @@ void GLViewer::init(int argc, char **argv) { std::random_device dev; rng = std::mt19937(dev()); - uint_dist360 = std::uniform_int_distribution(0, 360); + rng.seed(42); + uint_dist360 = std::uniform_int_distribution(0, 360); // Map glut function on this class methods glutDisplayFunc(GLViewer::drawCallback); @@ -175,15 +180,15 @@ void GLViewer::init(int argc, char **argv) { } sl::float3 newColor(float hh) { - float s = 1.; - float v = 1.; + float s = 0.75f; + float v = 0.9f; sl::float3 clr; int i = (int)hh; float ff = hh - i; - float p = v * (1.0 - s); - float q = v * (1.0 - (s * ff)); - float t = v * (1.0 - (s * (1.0 - ff))); + float p = v * (1.f - s); + float q = v * (1.f - (s * ff)); + float t = v * (1.f - (s * (1.f - ff))); switch (i) { case 0: clr.r = v; @@ -221,22 +226,46 @@ sl::float3 newColor(float hh) { return clr; } -void GLViewer::updateCamera(int id, sl::Mat &view, sl::Mat &pc){ - mtx.lock(); - if (colors.find(id) == colors.end()) { - float hh = uint_dist360(rng) / 60.f; - colors[id] = newColor(hh); +sl::float3 GLViewer::getColor(int id, bool for_skeleton){ + const std::lock_guard lock(mtx_clr); + if(for_skeleton){ + if (colors_sk.find(id) == colors_sk.end()) { + float hh = uint_dist360(rng) / 60.f; + colors_sk[id] = newColor(hh); + } + return colors_sk[id]; + }else{ + if (colors.find(id) == colors.end()) { + int h_ = uint_dist360(rng); + float hh = h_ / 60.f; + colors[id] = newColor(hh); + } + return colors[id]; } +} +void GLViewer::updateCamera(int id, sl::Mat &view, sl::Mat &pc){ + const std::lock_guard lock(mtx); + auto clr = getColor(id, false); if(view.isInit() && viewers.find(id) == viewers.end()) - viewers[id].initialize(view, colors[id]); + viewers[id].initialize(view, clr); if(pc.isInit() && point_clouds.find(id) == point_clouds.end()) - point_clouds[id].initialize(pc, colors[id]); + point_clouds[id].initialize(pc, clr); - mtx.unlock(); } +void GLViewer::updateCamera(sl::Mat &pc){ + const std::lock_guard 
lock(mtx); + int id = 0; + auto clr = getColor(id, false); + + // we need to release old pc and initialize new one because fused point cloud don't have the same number of points for each process + // I used close but it crashed in draw. Not yet investigated + point_clouds[id].initialize(pc, clr); +} + + void GLViewer::setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar) { // Just slightly up the ZED camera FOV to make a small black border float fov_y = (params.v_fov + 0.5f) * M_PI / 180.f; @@ -283,13 +312,9 @@ void GLViewer::render() { } void GLViewer::setCameraPose(int id, sl::Transform pose) { - mtx.lock(); + const std::lock_guard lock(mtx); + getColor(id, false); poses[id] = pose; - if (colors.find(id) == colors.end()) { - float hh = uint_dist360(rng) / 60.f; - colors[id] = newColor(hh); - } - mtx.unlock(); } inline bool renderBody(const sl::BodyData& i, const bool isTrackingON) { @@ -311,49 +336,29 @@ void createSKPrimitive(sl::BodyData& body, const std::vector>& m } } -void GLViewer::addSKeleton(sl::BodyData &obj, Simple3DObject &simpleObj, sl::float3 clr_id, bool raw, sl::BODY_FORMAT format) { - switch (format) - { - case sl::BODY_FORMAT::BODY_18: - createSKPrimitive(obj, sl::BODY_18_BONES, simpleObj, clr_id, raw); - break; - case sl::BODY_FORMAT::BODY_34: - createSKPrimitive(obj, sl::BODY_34_BONES, simpleObj, clr_id, raw); - break; - case sl::BODY_FORMAT::BODY_38: - createSKPrimitive(obj, sl::BODY_38_BONES, simpleObj, clr_id, raw); - break; - } -} - void GLViewer::addSKeleton(sl::BodyData& obj, Simple3DObject& simpleObj, sl::float3 clr_id, bool raw) { switch (obj.keypoint.size()) { case 18: - addSKeleton(obj, simpleObj, clr_id, raw, sl::BODY_FORMAT::BODY_18); + createSKPrimitive(obj, sl::BODY_18_BONES, simpleObj, clr_id, raw); break; case 34: - addSKeleton(obj, simpleObj, clr_id, raw, sl::BODY_FORMAT::BODY_34); + createSKPrimitive(obj, sl::BODY_34_BONES, simpleObj, clr_id, raw); break; case 38: - addSKeleton(obj, simpleObj, clr_id, 
raw, sl::BODY_FORMAT::BODY_38); + createSKPrimitive(obj, sl::BODY_38_BONES, simpleObj, clr_id, raw); break; } } -void GLViewer::updateBodies(sl::Bodies &bodies, std::map& singldata, sl::FusionMetrics& metrics) { - mtx.lock(); +void GLViewer::updateBodies(sl::Bodies &bodies, std::map& singledata, sl::FusionMetrics& metrics) { + const std::lock_guard lock(mtx); if (bodies.is_new) { skeletons.clear(); for(auto &it:bodies.body_list) { - - if (colors_sk.find(it.id) == colors_sk.end()) { - float hh = uint_dist360(rng) / 60.f; - colors_sk[it.id] = newColor(hh); - } - + auto clr = getColor(it.id, true); if (renderBody(it, bodies.is_tracked)) - addSKeleton(it, skeletons, colors_sk[it.id], false, bodies.body_format); + addSKeleton(it, skeletons, clr, false); } } @@ -367,8 +372,8 @@ void GLViewer::updateBodies(sl::Bodies &bodies, std::mapdraw_flat_color = !currentInstance_->draw_flat_color; - if (keyStates_['p'] == KEY_STATE::UP) + if (keyStates_['s'] == KEY_STATE::UP) currentInstance_->show_pc = !currentInstance_->show_pc; - + + if (keyStates_['p'] == KEY_STATE::UP || keyStates_['P'] == KEY_STATE::UP || keyStates_[32] == KEY_STATE::UP) + play = !play; + // Rotate camera with mouse if (mouseButton_[MOUSE_BUTTON::LEFT]) { camera_.rotate(sl::Rotation((float) mouseMotion_[1] * MOUSE_R_SENSITIVITY, camera_.getRight())); @@ -432,19 +439,18 @@ void GLViewer::update() { } camera_.update(); - mtx.lock(); + const std::lock_guard lock(mtx); // Update point cloud buffers skeletons.pushToGPU(); for(auto &it: skeletons_raw) it.second.pushToGPU(); + // Update point cloud buffers for(auto &it: point_clouds) it.second.pushNewPC(); for(auto &it: viewers) it.second.pushNewImage(); - - mtx.unlock(); clearInputs(); } @@ -470,7 +476,7 @@ void GLViewer::draw() { for (auto& it : viewers) { sl::Transform pose_ = vpMatrix * poses[it.first]; glUniformMatrix4fv(shader.MVP_Mat, 1, GL_FALSE, sl::Transform::transpose(pose_).m); - viewers[it.first].frustum.draw(); + it.second.frustum.draw(); } 
glUseProgram(0); @@ -523,9 +529,12 @@ void GLViewer::printText() { void GLViewer::clearInputs() { mouseMotion_[0] = mouseMotion_[1] = 0; mouseWheelPosition_ = 0; - for (unsigned int i = 0; i < 256; ++i) + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; if (keyStates_[i] != KEY_STATE::DOWN) keyStates_[i] = KEY_STATE::FREE; + } } void GLViewer::drawCallback() { @@ -652,6 +661,10 @@ void Simple3DObject::draw() { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -687,12 +700,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -749,7 +762,7 @@ const GLchar* IMAGE_VERTEX_SHADER = "}\n"; -PointCloud::PointCloud() { +PointCloud::PointCloud(): numBytes_(0), xyzrgbaMappedBuf_(nullptr) { } @@ -760,44 +773,55 @@ PointCloud::~PointCloud() { void PointCloud::close() { if (refMat.isInit()) { auto err = cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); - if (err != cudaSuccess) - std::cerr << "Error: CUDA UnmapResources (" << err << ")" << std::endl; + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; glDeleteBuffers(1, &bufferGLID_); refMat.free(); } } -void PointCloud::initialize(sl::Mat &ref, sl::float3 clr_) { - +void PointCloud::initialize(sl::Mat& ref, sl::float3 clr_) +{ 
refMat = ref; clr = clr_; +} - glGenBuffers(1, &bufferGLID_); - glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); - glBufferData(GL_ARRAY_BUFFER, refMat.getResolution().area() * 4 * sizeof (float), 0, GL_STATIC_DRAW); - glBindBuffer(GL_ARRAY_BUFFER, 0); +void PointCloud::pushNewPC() { + if (refMat.isInit()) { - cudaError_t err = cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsNone); - if (err != cudaSuccess) - std::cerr << "Error: CUDA - OpenGL Interop failed (" << err << ")" << std::endl; + int sizebytes = refMat.getResolution().area() * sizeof(sl::float4); + if (numBytes_ != sizebytes) { - err = cudaGraphicsMapResources(1, &bufferCudaID_, 0); - if (err != cudaSuccess) - std::cerr << "Error: CUDA MapResources (" << err << ")" << std::endl; + if (numBytes_ == 0) { + glGenBuffers(1, &bufferGLID_); - err = cudaGraphicsResourceGetMappedPointer((void**) &xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_); - if (err != cudaSuccess) - std::cerr << "Error: CUDA GetMappedPointer (" << err << ")" << std::endl; + shader_.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); + shColor = glGetUniformLocation(shader_.getProgramId(), "u_color"); + shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat"); + } + else { + cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + cudaGraphicsUnregisterResource(bufferCudaID_); + } - shader_ = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); - shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); - shColor = glGetUniformLocation(shader_.getProgramId(), "u_color"); - shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat"); -} + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glBufferData(GL_ARRAY_BUFFER, sizebytes, 0, GL_STATIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + cudaError_t err = cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, 
cudaGraphicsRegisterFlagsNone); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsMapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsResourceGetMappedPointer((void**)&xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + } -void PointCloud::pushNewPC() { - if (refMat.isInit()) cudaMemcpy(xyzrgbaMappedBuf_, refMat.getPtr(sl::MEM::CPU), numBytes_, cudaMemcpyHostToDevice); + } } void PointCloud::draw(const sl::Transform& vp, bool draw_flat) { @@ -839,7 +863,9 @@ void CameraViewer::close() { if (ref.isInit()) { auto err = cudaGraphicsUnmapResources(1, &cuda_gl_ressource, 0); - if (err) std::cout << "err 3 " << err << " " << cudaGetErrorString(err) << "\n"; + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(cuda_gl_ressource); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; glDeleteTextures(1, &texture); glDeleteBuffers(3, vboID_); @@ -909,7 +935,7 @@ bool CameraViewer::initialize(sl::Mat &im, sl::float3 clr) { faces.push_back(sl::uint3(0,2,3)); ref = im; - shader = Shader(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); + shader.set(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); shMVPMatrixLocTex_ = glGetUniformLocation(shader.getProgramId(), "u_mvpMatrix"); glGenVertexArrays(1, &vaoID_); diff --git a/body tracking/multi-camera/cpp/src/main.cpp b/body tracking/multi-camera/cpp/src/main.cpp index dec99d81..2b7ea9d3 100644 --- a/body tracking/multi-camera/cpp/src/main.cpp +++ b/body tracking/multi-camera/cpp/src/main.cpp @@ -50,6 +50,8 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } + Trigger trigger; + // Check if the ZED camera should run within the same process or if they are running on the edge. 
std::vector clients(configurations.size()); int id_ = 0; @@ -58,7 +60,7 @@ int main(int argc, char **argv) { // if the ZED camera should run locally, then start a thread to handle it if(conf.communication_parameters.getType() == sl::CommunicationParameters::COMM_TYPE::INTRA_PROCESS){ std::cout << "Try to open ZED " < cameras; for (auto& it : configurations) { sl::CameraIdentifier uuid(it.serial_number); - // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. - auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose); + // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. + auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose, it.override_gravity); if (state != sl::FUSION_ERROR_CODE::SUCCESS) std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . 
" << state << std::endl; else @@ -129,18 +130,20 @@ int main(int argc, char **argv) { sl::BodyTrackingFusionRuntimeParameters body_tracking_runtime_parameters; // be sure that the detection skeleton is complete enough body_tracking_runtime_parameters.skeleton_minimum_allowed_keypoints = 7; - // we can also want to retrieve skeleton seen by multiple camera, in this case at least half of them body_tracking_runtime_parameters.skeleton_minimum_allowed_camera = cameras.size() / 2.; + // creation of a 3D viewer GLViewer viewer; viewer.init(argc, argv); std::cout << "Viewer Shortcuts\n" << - "\t- 'r': swicth on/off for raw skeleton display\n" << - "\t- 'p': swicth on/off for live point cloud display\n" << - "\t- 'c': swicth on/off point cloud display with flat color\n" << std::endl; + "\t- 'q': quit the application\n" << + "\t- 'p': play/pause the GLViewer\n" << + "\t- 'r': switch on/off for raw skeleton display\n" << + "\t- 's': switch on/off for live point cloud display\n" << + "\t- 'c': switch on/off point cloud display with raw color\n" << std::endl; // fusion outputs sl::Bodies fused_bodies; @@ -149,15 +152,19 @@ int main(int argc, char **argv) { std::map views; std::map pointClouds; sl::Resolution low_res(512,360); + sl::CameraIdentifier fused_camera(0); // run the fusion as long as the viewer is available. 
while (viewer.isAvailable()) { + trigger.notifyZED(); + // run the fusion process (which gather data from all camera, sync them and process them) if (fusion.process() == sl::FUSION_ERROR_CODE::SUCCESS) { // Retrieve fused body fusion.retrieveBodies(fused_bodies, body_tracking_runtime_parameters); - // for debug, you can retrieve the data send by each camera - for (auto& id : cameras) { + + // for debug, you can retrieve the data sent by each camera + for (auto& id : cameras) { fusion.retrieveBodies(camera_raw_data[id], body_tracking_runtime_parameters, id); sl::Pose pose; if(fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id, sl::POSITION_TYPE::RAW) == sl::POSITIONAL_TRACKING_STATE::OK) @@ -165,8 +172,9 @@ int main(int argc, char **argv) { auto state_view = fusion.retrieveImage(views[id], id, low_res); auto state_pc = fusion.retrieveMeasure(pointClouds[id], id, sl::MEASURE::XYZBGRA, low_res); - if(state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS) - viewer.updateCamera(id.sn, views[id], pointClouds[id]); + + if (state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS) + viewer.updateCamera(id.sn, views[id], pointClouds[id]); } // get metrics about the fusion process for monitoring purposes @@ -174,11 +182,16 @@ int main(int argc, char **argv) { } // update the 3D view viewer.updateBodies(fused_bodies, camera_raw_data, metrics); - - sl::sleep_ms(10); + + while (!viewer.isPlaying() && viewer.isAvailable()) { + sl::sleep_ms(10); + } } viewer.exit(); + + trigger.running = false; + trigger.notifyZED(); for (auto &it: clients) it.stop(); diff --git a/body tracking/multi-camera/csharp/App.config b/body tracking/multi-camera/csharp/App.config new file mode 100644 index 00000000..bae5d6d8 --- /dev/null +++ b/body tracking/multi-camera/csharp/App.config @@ -0,0 +1,6 @@ + + + + + + diff --git a/body tracking/multi-camera/csharp/CMakeLists.txt b/body tracking/multi-camera/csharp/CMakeLists.txt new 
file mode 100644 index 00000000..b786193f --- /dev/null +++ b/body tracking/multi-camera/csharp/CMakeLists.txt @@ -0,0 +1,44 @@ +cmake_minimum_required( VERSION 3.8.0 ) + +project(ZED_Body_Fusion CSharp) + +add_executable(${PROJECT_NAME} + Program.cs + MainWindow.cs + GLViewer.cs + ClientPublisher.cs + App.config + packages.config + Properties/AssemblyInfo.cs +) + +# Set the target platform to x64, since ZED SDK does not support 32-bits arch +target_compile_options(${PROJECT_NAME} PRIVATE "/platform:x64" ) + +# Set the .NET Framework version for the target. +set_property(TARGET ${PROJECT_NAME} PROPERTY VS_DOTNET_TARGET_FRAMEWORK_VERSION "v4.6.1") + +# Set the C# language version, otherwise default 3.0 is taken +set(CMAKE_CSharp_FLAGS "/langversion:7") + +set_property(TARGET ${PROJECT_NAME} PROPERTY VS_DOTNET_REFERENCES + "Microsoft.CSharp" + "PresentationCore" + "PresentationFramework" + "System" + "System.Xaml" + "System.Data" + "System.Linq" + "System.Windows" + "System.Windows.Forms" + "System.Numerics" + "WindowsBase" + +) + +set(CMAKE_SUPPRESS_REGENERATION true) +set_property(TARGET ${PROJECT_NAME} PROPERTY VS_PACKAGE_REFERENCES + "Stereolabs.zed_4.*" + "OpenGL.Net_0.8.4" + "OpenGL.Net.CoreUI_0.8.4" +) \ No newline at end of file diff --git a/body tracking/multi-camera/csharp/ClientPublisher.cs b/body tracking/multi-camera/csharp/ClientPublisher.cs new file mode 100644 index 00000000..7849422b --- /dev/null +++ b/body tracking/multi-camera/csharp/ClientPublisher.cs @@ -0,0 +1,185 @@ +using sl; +using System; +using System.Threading; +using static Khronos.Platform; + +class ClientPublisher +{ + sl.Camera zedCamera; + Thread thread; + bool running = false; + int id = 0; + + /// + /// + /// + public ClientPublisher(int id_) + { + id = id_; + running = false; + zedCamera = new Camera(id_); + } + + /// + /// + /// + /// + /// + public bool Open(sl.InputType inputType) + { + if (thread != null && thread.IsAlive) return false; + + sl.InitParameters initParameters = 
new sl.InitParameters(); + initParameters.resolution = sl.RESOLUTION.AUTO; + initParameters.cameraFPS = 30; + initParameters.depthMode = sl.DEPTH_MODE.ULTRA; + initParameters.coordinateUnits = sl.UNIT.METER; + initParameters.coordinateSystem = COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP; + initParameters.sdkVerbose = 1; + + switch(inputType.GetType()) + { + case sl.INPUT_TYPE.USB: + initParameters.inputType = sl.INPUT_TYPE.USB; + initParameters.cameraDeviceID = id; + break; + case sl.INPUT_TYPE.SVO: + initParameters.inputType = sl.INPUT_TYPE.SVO; + initParameters.pathSVO = new string(inputType.svoInputFilename); + break; + case sl.INPUT_TYPE.STREAM: + initParameters.inputType = sl.INPUT_TYPE.STREAM; + initParameters.ipStream = new string(inputType.streamInputIp); + initParameters.portStream = inputType.streamInputPort; + break; + case sl.INPUT_TYPE.GMSL: + initParameters.inputType = sl.INPUT_TYPE.GMSL; + break; + default: + Console.WriteLine("ERROR: Invalid input type"); + return false; + } + + ERROR_CODE err = zedCamera.Open(ref initParameters); + + if (err != ERROR_CODE.SUCCESS) + { + Console.WriteLine("ERROR while opening the camera. Exiting..."); + Environment.Exit(-1); + } + + + if (zedCamera.CameraModel == MODEL.ZED) + { + Console.WriteLine(" ERROR : not compatible camera model"); + Environment.Exit(-1); + } + + // Enable tracking (mandatory for body trackin) + PositionalTrackingParameters positionalTrackingParameters = new PositionalTrackingParameters(); + err = zedCamera.EnablePositionalTracking(ref positionalTrackingParameters); + + if (err != ERROR_CODE.SUCCESS) + { + Console.WriteLine("ERROR while enable the positional tracking module. 
Exiting..."); + Environment.Exit(-1); + } + + // Enable the Objects detection module + sl.BodyTrackingParameters bodyTrackingParameters = new BodyTrackingParameters(); + bodyTrackingParameters.enableObjectTracking = false; // the body tracking will track bodies across multiple images, instead of an image-by-image basis + bodyTrackingParameters.enableSegmentation = false; + bodyTrackingParameters.enableBodyFitting = false; // smooth skeletons moves + bodyTrackingParameters.detectionModel = sl.BODY_TRACKING_MODEL.HUMAN_BODY_MEDIUM; + bodyTrackingParameters.bodyFormat = sl.BODY_FORMAT.BODY_38; + bodyTrackingParameters.allowReducedPrecisionInference = true; + err = zedCamera.EnableBodyTracking(ref bodyTrackingParameters); + + if (err != ERROR_CODE.SUCCESS) + { + Console.WriteLine("ERROR while enable the body tracking module. Exiting..."); + Environment.Exit(-1); + } + + return true; + } + + /// + /// + /// + public void Start() + { + if (zedCamera.IsOpened()) + { + running = true; + sl.CommunicationParameters communicationParameters = new sl.CommunicationParameters(); + communicationParameters.communicationType = sl.COMM_TYPE.INTRA_PROCESS; + ERROR_CODE err =zedCamera.StartPublishing(ref communicationParameters); + if (err != ERROR_CODE.SUCCESS) + { + Console.WriteLine("ERROR while startPublishing" + err + " . 
Exiting..."); + Environment.Exit(-1); + } + thread = new Thread(new ThreadStart(Work)); + thread.Priority = System.Threading.ThreadPriority.Highest; + thread.Start(); + } + } + + /// + /// + /// + public void Stop() + { + running = false; + if (thread != null && thread.IsAlive) + { + thread.Join(); + } + zedCamera.Close(); + } + + /// + /// + /// + /// + public void SetStartSVOPosition(int pos) + { + zedCamera.SetSVOPosition(pos); + } + + /// + /// + /// + /// + public bool IsRunning() + { + return running; + } + + /// + /// + /// + private void Work() + { + sl.Bodies bodies = new sl.Bodies(); + sl.BodyTrackingRuntimeParameters bodyTrackingRuntimeParameters = new sl.BodyTrackingRuntimeParameters(); + bodyTrackingRuntimeParameters.detectionConfidenceThreshold = 40; + + sl.RuntimeParameters runtimeParameters = new sl.RuntimeParameters(); + sl.ERROR_CODE err = ERROR_CODE.FAILURE; + while (IsRunning()) + { + err = zedCamera.Grab(ref runtimeParameters); + if (err == sl.ERROR_CODE.SUCCESS) + { + err = zedCamera.RetrieveBodies(ref bodies, ref bodyTrackingRuntimeParameters); + } + else + { + Console.WriteLine("Error while grabbing: " + err); + } + Thread.Sleep(2); + } + } +} \ No newline at end of file diff --git a/body tracking/multi-camera/csharp/GLViewer.cs b/body tracking/multi-camera/csharp/GLViewer.cs new file mode 100644 index 00000000..33991cb3 --- /dev/null +++ b/body tracking/multi-camera/csharp/GLViewer.cs @@ -0,0 +1,1600 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Numerics; +using OpenGL; +using OpenGL.CoreUI; +using System.Threading; + +namespace sl +{ + public struct ObjectClassName + { + public Vector3 position; + public string name; + public float4 color; + }; + + class GLViewer + { + public GLViewer() + { + available = false; + currentInstance = this; + + floorGrid = new Simple3DObject(); + skeleton = new Simple3DObject(); + skeleton_raw = new Simple3DObject(); + + pointClouds = new 
Dictionary(); + poses = new Dictionary(); + mtx = new Mutex(); + } + + public bool IsAvailable() + { + return available; + } + + + public void Init(bool showOnlyOk) + { + Gl.Enable(EnableCap.FramebufferSrgb); + + shaderSK = new ShaderData(); + shaderSK.it = new Shader(Shader.SK_VERTEX_SHADER, Shader.SK_FRAGMENT_SHADER); + shaderSK.MVP_Mat = Gl.GetUniformLocation(shaderSK.it.getProgramId(), "u_mvpMatrix"); + + shaderLine = new ShaderData(); + shaderLine.it = new Shader(Shader.VERTEX_SHADER, Shader.FRAGMENT_SHADER); + shaderLine.MVP_Mat = Gl.GetUniformLocation(shaderSK.it.getProgramId(), "u_mvpMatrix"); + + //Create Camera + camera_ = new CameraGL(new Vector3(0, 0, 0), new Vector3(0, 0, -1f), Vector3.UnitY); + + showOnlyOK_ = showOnlyOk; + + sphere = new Simple3DObject(); + sphere.init(); + sphere.setDrawingType(PrimitiveType.Quads); + sphere.createSphere(); + + skeleton.init(); + skeleton.setDrawingType(PrimitiveType.Quads); + + skeleton_raw.init(); + skeleton_raw.setDrawingType(PrimitiveType.Quads); + + floorGrid.init(); + floorGrid.setDrawingType(PrimitiveType.Lines); + + float limit = 20; + float4 clr_grid = new float4(); + clr_grid.x = 0.35f; + clr_grid.y = 0.35f; + clr_grid.z = 0.35f; + clr_grid.w = 1f; + + float height = -3; + for (int i = -(int)limit; i <= (int)limit; i++) + { + addVert(ref floorGrid, i, limit, height, clr_grid); + } + + floorGrid.pushToGPU(); + + available = true; + } + + public void InitPointCloud(ulong id, Resolution res) + { + mtx.WaitOne(); + if (!pointClouds.ContainsKey(id)) + { + PointCloud pc = new PointCloud(); + pc.initialize(res); + pointClouds.Add(id, pc); + } + mtx.ReleaseMutex(); + } + + void CreateSK(ref Simple3DObject obj, Tuple[] Bones, Vector3[] keypoints, float4 clr_id, bool raw = false) + { + foreach (var limb in Bones) + { + Vector3 kp_1 = keypoints[getIdx(limb.Item1)]; + Vector3 kp_2 = keypoints[getIdx(limb.Item2)]; + + float norm_1 = kp_1.Length(); + float norm_2 = kp_2.Length(); + + if (!float.IsNaN(norm_1) && norm_1 > 
0 && !float.IsNaN(norm_2) && norm_2 > 0) + { + obj.addCylinder(new float3(kp_1.X, kp_1.Y, kp_1.Z), new float3(kp_2.X, kp_2.Y, kp_2.Z), clr_id, raw ? 0.005f : 0.01f); + } + } + } + + public void SetCameraPose(ulong id, sl.Pose pose) + { + mtx.WaitOne(); + + Matrix4x4 cam_pose = Matrix4x4.Identity; + cam_pose = Matrix4x4.Transform(cam_pose, pose.rotation); + cam_pose.Translation = pose.translation; + cam_pose = Matrix4x4.Transpose(cam_pose); + + poses[id] = cam_pose; + mtx.ReleaseMutex(); + } + + public void UpdatePointCloud(ulong id, sl.Mat pc) + { + mtx.WaitOne(); + foreach (var pointCloud in pointClouds) + { + if (pointClouds.ContainsKey(id)) pointClouds[id].pushNewPC(pc); + } + mtx.ReleaseMutex(); + } + + public void UpdateBodies(Bodies bodies, Dictionary singleData) + { + skeleton.clear(); + + // For each object + for (int idx = 0; idx < bodies.nbBodies; idx++) + { + sl.BodyData bod = bodies.bodiesList[idx]; + Vector3[] keypoints = bod.keypoints; + float4 clr_id = generateColorID(bod.id); + // Only show tracked objects + if (renderObject(bod, showOnlyOK_)) + { + if (bodies.bodyFormat == BODY_FORMAT.BODY_38) + { + CreateSK(ref skeleton, SKELETON_BONES_BODY_38, keypoints, clr_id); + + for (int i = 0; i < (int)BODY_38_PARTS.LAST; i++) + { + Vector3 kp = bod.keypoints[i]; + float norm = kp.Length(); + if (!float.IsNaN(norm) && norm > 0) + { + skeleton.addSphere(sphere, new float3(kp.X, kp.Y, kp.Z), clr_id); + } + } + } + else if (bodies.bodyFormat == BODY_FORMAT.BODY_34) + { + CreateSK(ref skeleton, SKELETON_BONES_BODY_34, keypoints, clr_id); + + for (int i = 0; i < (int)BODY_34_PARTS.LAST; i++) + { + Vector3 kp = bod.keypoints[i]; + float norm = kp.Length(); + if (!float.IsNaN(norm) && norm > 0) + { + skeleton.addSphere(sphere, new float3(kp.X, kp.Y, kp.Z), clr_id); + } + } + } + else if (bodies.bodyFormat == BODY_FORMAT.BODY_18) + { + CreateSK(ref skeleton, SKELETON_BONES_BODY_18, keypoints, clr_id); + + for (int i = 0; i < (int)BODY_18_PARTS.LAST; i++) + { + 
Vector3 kp = bod.keypoints[i]; + float norm = kp.Length(); + if (!float.IsNaN(norm) && norm > 0) + { + skeleton.addSphere(sphere, new float3(kp.X, kp.Y, kp.Z), clr_id); + } + } + } + } + } + + skeleton_raw.clear(); + + + // raw data + foreach (var single in singleData) + { + sl.Bodies body = single.Value; + for (int idx = 0; idx < body.nbBodies; idx++) + { + sl.BodyData bod = body.bodiesList[idx]; + Vector3[] keypoints = bod.keypoints; + float4 clr_id = generateColorID(bod.id); + // Only show tracked objects + if (renderObject(bod, false)) + { + if (body.bodyFormat == BODY_FORMAT.BODY_38) + { + CreateSK(ref skeleton_raw, SKELETON_BONES_BODY_38, keypoints, clr_id, true); + } + else if (body.bodyFormat == BODY_FORMAT.BODY_34) + { + CreateSK(ref skeleton_raw, SKELETON_BONES_BODY_34, keypoints, clr_id, true); + } + else if (body.bodyFormat == BODY_FORMAT.BODY_18) + { + CreateSK(ref skeleton_raw, SKELETON_BONES_BODY_18, keypoints, clr_id, true); + } + } + } + } + } + + void addVert(ref Simple3DObject obj, float i_f, float limit, float height, float4 clr) + { + float3 p1 = new float3(i_f, height, -limit); + float3 p2 = new float3(i_f, height, limit); + float3 p3 = new float3(-limit, height, i_f); + float3 p4 = new float3(limit, height, i_f); + + obj.addLine(p1, p2, clr); + obj.addLine(p3, p4, clr); + } + + public void Render() + { + camera_.update(); + skeleton.pushToGPU(); + skeleton_raw.pushToGPU(); + draw(); + } + + public void keyEventFunction(NativeWindowKeyEventArgs e) + { + if (e.Key == KeyCode.S) + { + camera_.setPosition(new Vector3(0, 0, 1.5f)); + camera_.setDirection(new Vector3(0, 0, -1), Vector3.UnitY); + } + if (e.Key == KeyCode.T) + { + camera_.setPosition(new Vector3(0, 0.0f, 1.5f)); + camera_.setOffsetFromPosition(new Vector3(0, 0, 6)); + camera_.translate(new Vector3(0, 1.5f, -4)); + camera_.setDirection(new Vector3(0, -1, 0), Vector3.UnitY); + } + if (e.Key == KeyCode.P) + { + showPC = !showPC; + } + if (e.Key == KeyCode.R) + { + showRawSkeletons = 
!showRawSkeletons; + } + } + + public void mouseEventFunction(NativeWindowMouseEventArgs e) + { + // Rotate camera with mouse + if (e.Buttons == OpenGL.CoreUI.MouseButton.Left) + { + camera_.rotate(Quaternion.CreateFromAxisAngle(camera_.getRight(), (float)mouseMotion_[1] * MOUSE_R_SENSITIVITY)); + camera_.rotate(Quaternion.CreateFromAxisAngle(camera_.getVertical() * -1f, (float)mouseMotion_[0] * MOUSE_R_SENSITIVITY)); + } + if (e.Buttons == OpenGL.CoreUI.MouseButton.Right) + { + camera_.translate(camera_.getUp() * (float)mouseMotion_[1] * MOUSE_T_SENSITIVITY * -1); + camera_.translate(camera_.getRight() * (float)mouseMotion_[0] * MOUSE_T_SENSITIVITY * -1); + } + if (e.Buttons == OpenGL.CoreUI.MouseButton.Middle) + { + camera_.translate(camera_.getForward() * (float)mouseMotion_[1] * MOUSE_T_SENSITIVITY * -1); + camera_.translate(camera_.getForward() * (float)mouseMotion_[0] * MOUSE_T_SENSITIVITY * -1); + } + } + + public void resizeCallback(int width, int height) + { + Gl.Viewport(0, 0, width, height); + float hfov = (180.0f / (float)Math.PI) * (float)(2.0 * Math.Atan(width / (2.0f * 500))); + float vfov = (180.0f / (float)Math.PI) * (float)(2.0f * Math.Atan(height / (2.0f * 500))); + + camera_.setProjection(hfov, vfov, camera_.getZNear(), camera_.getZFar()); + } + + public void computeMouseMotion(int x, int y) + { + currentInstance.mouseMotion_[0] = x - currentInstance.previousMouseMotion_[0]; + currentInstance.mouseMotion_[1] = y - currentInstance.previousMouseMotion_[1]; + currentInstance.previousMouseMotion_[0] = x; + currentInstance.previousMouseMotion_[1] = y; + } + + public void draw() + { + Matrix4x4 vpMatrix = camera_.getViewProjectionMatrix(); + + Gl.UseProgram(shaderLine.it.getProgramId()); + Gl.UniformMatrix4f(shaderSK.MVP_Mat, 1, true, vpMatrix); + Gl.LineWidth(1.0f); + floorGrid.draw(); + Gl.UseProgram(0); + + Gl.PointSize(1.0f); + if (showPC) + { + foreach (var pose in poses) + { + Matrix4x4 vpMatrixCam = vpMatrix * pose.Value; + if 
(pointClouds.ContainsKey(pose.Key)) + { + pointClouds[pose.Key].draw(vpMatrixCam); + } + } + } + + Gl.Enable(EnableCap.DepthTest); + Gl.UseProgram(shaderSK.it.getProgramId()); + Gl.PolygonMode(MaterialFace.FrontAndBack, PolygonMode.Fill); + Gl.UniformMatrix4f(shaderSK.MVP_Mat, 1, true, vpMatrix); + skeleton.draw(); + if (showRawSkeletons) skeleton_raw.draw(); + Gl.UseProgram(0); + Gl.Disable(EnableCap.DepthTest); + } + + sl.float4 generateColorID(int idx) + { + sl.float4 default_color = new float4(); + default_color.x = 236.0f / 255; + default_color.y = 184.0f / 255; + default_color.z = 36.0f / 255; + default_color.z = 255.0f / 255; + + if (idx < 0) return default_color; + + int offset = Math.Max(0, idx % 8); + sl.float4 color = new float4(); + color.x = id_colors[offset, 2] / 255; + color.y = id_colors[offset, 1] / 255; + color.z = id_colors[offset, 0] / 255; + color.w = 1.0f; + return color; + } + + float[,] id_colors = new float[8, 3]{ + { 232.0f, 176.0f ,59.0f }, + { 165.0f, 218.0f ,25.0f }, + { 102.0f, 205.0f ,105.0f}, + { 185.0f, 0.0f ,255.0f}, + { 99.0f, 107.0f ,252.0f}, + {252.0f, 225.0f, 8.0f}, + {167.0f, 130.0f, 141.0f}, + {194.0f, 72.0f, 113.0f} + }; + + float[,] class_colors = new float[6, 3]{ + { 44.0f, 117.0f, 255.0f}, // PEOPLE + { 255.0f, 0.0f, 255.0f}, // VEHICLE + { 0.0f, 0.0f, 255.0f}, + { 0.0f, 255.0f, 255.0f}, + { 0.0f, 255.0f, 0.0f}, + { 255.0f, 255.0f, 255.0f} + }; + + float4 getColorClass(int idx) + { + idx = Math.Min(5, idx); + sl.float4 color = new float4(); + color.x = class_colors[idx, 0]; + color.y = class_colors[idx, 1]; + color.z = class_colors[idx, 2]; + color.w = 1.0f; + return color; + } + + bool renderObject(BodyData i, bool showOnlyOK) + { + if (showOnlyOK) + return (i.trackingState == sl.OBJECT_TRACKING_STATE.OK); + else + return (i.trackingState == sl.OBJECT_TRACKING_STATE.OK || i.trackingState == sl.OBJECT_TRACKING_STATE.OFF); + } + + private void setRenderCameraProjection(CameraParameters camParams, float znear, float zfar) + 
{ + float PI = 3.141592653f; + // Just slightly up the ZED camera FOV to make a small black border + float fov_y = (camParams.vFOV + 0.5f) * PI / 180; + float fov_x = (camParams.hFOV + 0.5f) * PI / 180; + + projection_.M11 = 1.0f / (float)Math.Tan(fov_x * 0.5f); + projection_.M22 = 1.0f / (float)Math.Tan(fov_y * 0.5f); + projection_.M33 = -(zfar + znear) / (zfar - znear); + projection_.M43 = -1; + projection_.M34 = -(2.0f * zfar * znear) / (zfar - znear); + projection_.M44 = 0; + + projection_.M12 = 0; + projection_.M13 = 2.0f * (((int)camParams.resolution.width - 1.0f * camParams.cx) / (int)camParams.resolution.width) - 1.0f; //Horizontal offset. + projection_.M14 = 0; + + projection_.M21 = 0; + projection_.M22 = 1.0f / (float)Math.Tan(fov_y * 0.5f); //Vertical FoV. + projection_.M23 = -(2.0f * (((int)camParams.resolution.height - 1.0f * camParams.cy) / (int)camParams.resolution.height) - 1.0f); //Vertical offset. + projection_.M24 = 0; + + projection_.M31 = 0; + projection_.M32 = 0; + + projection_.M41 = 0; + projection_.M42 = 0; + } + + int getIdx(T part) + { + return (int)(object)part; + } + + public static readonly Tuple[] SKELETON_BONES_BODY_38 = + { + Tuple.Create(BODY_38_PARTS.PELVIS, BODY_38_PARTS.SPINE_1), + Tuple.Create(BODY_38_PARTS.SPINE_1, BODY_38_PARTS.SPINE_2), + Tuple.Create(BODY_38_PARTS.SPINE_2, BODY_38_PARTS.SPINE_3), + Tuple.Create(BODY_38_PARTS.SPINE_3, BODY_38_PARTS.NECK), + Tuple.Create(BODY_38_PARTS.PELVIS, BODY_38_PARTS.LEFT_HIP), + Tuple.Create(BODY_38_PARTS.PELVIS, BODY_38_PARTS.RIGHT_HIP), + Tuple.Create(BODY_38_PARTS.NECK, BODY_38_PARTS.NOSE), + Tuple.Create(BODY_38_PARTS.NECK, BODY_38_PARTS.LEFT_CLAVICLE), + Tuple.Create(BODY_38_PARTS.LEFT_CLAVICLE, BODY_38_PARTS.LEFT_SHOULDER), + Tuple.Create(BODY_38_PARTS.NECK, BODY_38_PARTS.RIGHT_CLAVICLE), + Tuple.Create(BODY_38_PARTS.RIGHT_CLAVICLE, BODY_38_PARTS.RIGHT_SHOULDER), + Tuple.Create(BODY_38_PARTS.NOSE, BODY_38_PARTS.LEFT_EYE), + Tuple.Create(BODY_38_PARTS.LEFT_EYE, 
BODY_38_PARTS.LEFT_EAR), + Tuple.Create(BODY_38_PARTS.NOSE, BODY_38_PARTS.RIGHT_EYE), + Tuple.Create(BODY_38_PARTS.RIGHT_EYE, BODY_38_PARTS.RIGHT_EAR), + Tuple.Create(BODY_38_PARTS.LEFT_SHOULDER, BODY_38_PARTS.LEFT_ELBOW), + Tuple.Create(BODY_38_PARTS.LEFT_ELBOW, BODY_38_PARTS.LEFT_WRIST), + Tuple.Create(BODY_38_PARTS.LEFT_WRIST, BODY_38_PARTS.LEFT_HAND_THUMB_4), + Tuple.Create(BODY_38_PARTS.LEFT_WRIST, BODY_38_PARTS.LEFT_HAND_INDEX_1), + Tuple.Create(BODY_38_PARTS.LEFT_WRIST, BODY_38_PARTS.LEFT_HAND_MIDDLE_4), + Tuple.Create(BODY_38_PARTS.LEFT_WRIST, BODY_38_PARTS.LEFT_HAND_PINKY_1), + Tuple.Create(BODY_38_PARTS.RIGHT_SHOULDER, BODY_38_PARTS.RIGHT_ELBOW), + Tuple.Create(BODY_38_PARTS.RIGHT_ELBOW, BODY_38_PARTS.RIGHT_WRIST), + Tuple.Create(BODY_38_PARTS.RIGHT_WRIST, BODY_38_PARTS.RIGHT_HAND_THUMB_4), + Tuple.Create(BODY_38_PARTS.RIGHT_WRIST, BODY_38_PARTS.RIGHT_HAND_INDEX_1), + Tuple.Create(BODY_38_PARTS.RIGHT_WRIST, BODY_38_PARTS.RIGHT_HAND_MIDDLE_4), + Tuple.Create(BODY_38_PARTS.RIGHT_WRIST, BODY_38_PARTS.RIGHT_HAND_PINKY_1), + Tuple.Create(BODY_38_PARTS.LEFT_HIP, BODY_38_PARTS.LEFT_KNEE), + Tuple.Create(BODY_38_PARTS.LEFT_KNEE, BODY_38_PARTS.LEFT_ANKLE), + Tuple.Create(BODY_38_PARTS.LEFT_ANKLE, BODY_38_PARTS.LEFT_HEEL), + Tuple.Create(BODY_38_PARTS.LEFT_ANKLE, BODY_38_PARTS.LEFT_BIG_TOE), + Tuple.Create(BODY_38_PARTS.LEFT_ANKLE, BODY_38_PARTS.LEFT_SMALL_TOE), + Tuple.Create(BODY_38_PARTS.RIGHT_HIP, BODY_38_PARTS.RIGHT_KNEE), + Tuple.Create(BODY_38_PARTS.RIGHT_KNEE, BODY_38_PARTS.RIGHT_ANKLE), + Tuple.Create(BODY_38_PARTS.RIGHT_ANKLE, BODY_38_PARTS.RIGHT_HEEL), + Tuple.Create(BODY_38_PARTS.RIGHT_ANKLE, BODY_38_PARTS.RIGHT_BIG_TOE), + Tuple.Create(BODY_38_PARTS.RIGHT_ANKLE, BODY_38_PARTS.RIGHT_SMALL_TOE) + }; + + private static readonly Tuple[] SKELETON_BONES_BODY_18 = +{ + Tuple.Create(BODY_18_PARTS.NOSE, BODY_18_PARTS.NECK), + Tuple.Create(BODY_18_PARTS.NECK, BODY_18_PARTS.RIGHT_SHOULDER), + Tuple.Create(BODY_18_PARTS.RIGHT_SHOULDER, BODY_18_PARTS.RIGHT_ELBOW), 
+ Tuple.Create(BODY_18_PARTS.RIGHT_ELBOW, BODY_18_PARTS.RIGHT_WRIST), + Tuple.Create(BODY_18_PARTS.NECK, BODY_18_PARTS.LEFT_SHOULDER), + Tuple.Create(BODY_18_PARTS.LEFT_SHOULDER, BODY_18_PARTS.LEFT_ELBOW), + Tuple.Create(BODY_18_PARTS.LEFT_ELBOW, BODY_18_PARTS.LEFT_WRIST), + Tuple.Create(BODY_18_PARTS.RIGHT_HIP, BODY_18_PARTS.RIGHT_KNEE), + Tuple.Create(BODY_18_PARTS.RIGHT_KNEE, BODY_18_PARTS.RIGHT_ANKLE), + Tuple.Create(BODY_18_PARTS.LEFT_HIP, BODY_18_PARTS.LEFT_KNEE), + Tuple.Create(BODY_18_PARTS.LEFT_KNEE, BODY_18_PARTS.LEFT_ANKLE), + Tuple.Create(BODY_18_PARTS.RIGHT_SHOULDER, BODY_18_PARTS.LEFT_SHOULDER), + Tuple.Create(BODY_18_PARTS.RIGHT_HIP, BODY_18_PARTS.LEFT_HIP), + Tuple.Create(BODY_18_PARTS.NOSE, BODY_18_PARTS.RIGHT_EYE), + Tuple.Create(BODY_18_PARTS.RIGHT_EYE, BODY_18_PARTS.RIGHT_EAR), + Tuple.Create(BODY_18_PARTS.NOSE, BODY_18_PARTS.LEFT_EYE), + Tuple.Create(BODY_18_PARTS.LEFT_EYE, BODY_18_PARTS.LEFT_EAR) + }; + + private static readonly Tuple[] SKELETON_BONES_BODY_34 = + { + Tuple.Create(BODY_34_PARTS.PELVIS, BODY_34_PARTS.NAVAL_SPINE), + Tuple.Create(BODY_34_PARTS.NAVAL_SPINE, BODY_34_PARTS.CHEST_SPINE), + Tuple.Create(BODY_34_PARTS.CHEST_SPINE, BODY_34_PARTS.LEFT_CLAVICLE), + Tuple.Create(BODY_34_PARTS.LEFT_CLAVICLE, BODY_34_PARTS.LEFT_SHOULDER), + Tuple.Create(BODY_34_PARTS.LEFT_SHOULDER, BODY_34_PARTS.LEFT_ELBOW), + Tuple.Create(BODY_34_PARTS.LEFT_ELBOW, BODY_34_PARTS.LEFT_WRIST), + Tuple.Create(BODY_34_PARTS.LEFT_WRIST, BODY_34_PARTS.LEFT_HAND), + Tuple.Create(BODY_34_PARTS.LEFT_HAND, BODY_34_PARTS.LEFT_HANDTIP), + Tuple.Create(BODY_34_PARTS.LEFT_WRIST, BODY_34_PARTS.LEFT_THUMB), + Tuple.Create(BODY_34_PARTS.CHEST_SPINE, BODY_34_PARTS.RIGHT_CLAVICLE), + Tuple.Create(BODY_34_PARTS.RIGHT_CLAVICLE, BODY_34_PARTS.RIGHT_SHOULDER), + Tuple.Create(BODY_34_PARTS.RIGHT_SHOULDER, BODY_34_PARTS.RIGHT_ELBOW), + Tuple.Create(BODY_34_PARTS.RIGHT_ELBOW, BODY_34_PARTS.RIGHT_WRIST), + Tuple.Create(BODY_34_PARTS.RIGHT_WRIST, BODY_34_PARTS.RIGHT_HAND), + 
Tuple.Create(BODY_34_PARTS.RIGHT_HAND, BODY_34_PARTS.RIGHT_HANDTIP), + Tuple.Create(BODY_34_PARTS.RIGHT_WRIST, BODY_34_PARTS.RIGHT_THUMB), + Tuple.Create(BODY_34_PARTS.PELVIS, BODY_34_PARTS.LEFT_HIP), + Tuple.Create(BODY_34_PARTS.LEFT_HIP, BODY_34_PARTS.LEFT_KNEE), + Tuple.Create(BODY_34_PARTS.LEFT_KNEE, BODY_34_PARTS.LEFT_ANKLE), + Tuple.Create(BODY_34_PARTS.LEFT_ANKLE, BODY_34_PARTS.LEFT_FOOT), + Tuple.Create(BODY_34_PARTS.PELVIS, BODY_34_PARTS.RIGHT_HIP), + Tuple.Create(BODY_34_PARTS.RIGHT_HIP, BODY_34_PARTS.RIGHT_KNEE), + Tuple.Create(BODY_34_PARTS.RIGHT_KNEE, BODY_34_PARTS.RIGHT_ANKLE), + Tuple.Create(BODY_34_PARTS.RIGHT_ANKLE, BODY_34_PARTS.RIGHT_FOOT), + Tuple.Create(BODY_34_PARTS.CHEST_SPINE, BODY_34_PARTS.NECK), + Tuple.Create(BODY_34_PARTS.NECK, BODY_34_PARTS.HEAD), + Tuple.Create(BODY_34_PARTS.HEAD, BODY_34_PARTS.NOSE), + Tuple.Create(BODY_34_PARTS.NOSE, BODY_34_PARTS.LEFT_EYE), + Tuple.Create(BODY_34_PARTS.LEFT_EYE, BODY_34_PARTS.LEFT_EAR), + Tuple.Create(BODY_34_PARTS.NOSE, BODY_34_PARTS.RIGHT_EYE), + Tuple.Create(BODY_34_PARTS.RIGHT_EYE, BODY_34_PARTS.RIGHT_EAR), + Tuple.Create(BODY_34_PARTS.LEFT_ANKLE, BODY_34_PARTS.LEFT_HEEL), + Tuple.Create(BODY_34_PARTS.RIGHT_ANKLE, BODY_34_PARTS.RIGHT_HEEL), + Tuple.Create(BODY_34_PARTS.LEFT_HEEL, BODY_34_PARTS.LEFT_FOOT), + Tuple.Create(BODY_34_PARTS.RIGHT_HEEL, BODY_34_PARTS.RIGHT_FOOT) + }; + + public void Exit() + { + if (currentInstance != null) + { + available = false; + skeleton.clear(); + skeleton_raw.clear(); + } + } + + int[] mouseCurrentPosition_ = new int[2]; + int[] mouseMotion_ = new int[2]; + int[] previousMouseMotion_ = new int[2]; + + const float MOUSE_R_SENSITIVITY = 0.004f; + const float MOUSE_T_SENSITIVITY = 0.05f; + const float MOUSE_UZ_SENSITIVITY = 0.75f; + const float MOUSE_DZ_SENSITIVITY = 1.25f; + + Mutex mtx; + + bool available; + bool showOnlyOK_ = false; + Matrix4x4 projection_; + Dictionary poses; + Dictionary pointClouds; + + ShaderData shaderSK; + ShaderData shaderLine; + + 
Simple3DObject sphere; + Simple3DObject skeleton; + Simple3DObject skeleton_raw; + Simple3DObject floorGrid; + + GLViewer currentInstance; + CameraGL camera_; + + bool showPC = true; + bool showRawSkeletons = false; + } + + class ImageHandler + { + public ImageHandler(Resolution res) + { + resolution = res; + } + + // Initialize Opengl buffers + public void initialize() + { + shaderImage.it = new Shader(Shader.IMAGE_VERTEX_SHADER, Shader.IMAGE_FRAGMENT_SHADER); + texID = Gl.GetUniformLocation(shaderImage.it.getProgramId(), "texImage"); + + float[] g_quad_vertex_buffer_data = new float[18]{ + -1.0f, -1.0f, 0.0f, + 1.0f, -1.0f, 0.0f, + -1.0f, 1.0f, 0.0f, + -1.0f, 1.0f, 0.0f, + 1.0f, -1.0f, 0.0f, + 1.0f, 1, 0.0f}; + + quad_vb = Gl.GenBuffer(); + Gl.BindBuffer(BufferTarget.ArrayBuffer, quad_vb); + Gl.BufferData(BufferTarget.ArrayBuffer, (uint)(sizeof(float) * g_quad_vertex_buffer_data.Length), g_quad_vertex_buffer_data, BufferUsage.StaticDraw); + Gl.BindBuffer(BufferTarget.ArrayBuffer, 0); + + Gl.Enable(EnableCap.Texture2d); + imageTex = Gl.GenTexture(); + Gl.BindTexture(TextureTarget.Texture2d, imageTex); + Gl.TexParameter(TextureTarget.Texture2d, TextureParameterName.TextureWrapS, Gl.CLAMP_TO_BORDER); + Gl.TexParameter(TextureTarget.Texture2d, TextureParameterName.TextureWrapT, Gl.CLAMP_TO_BORDER); + Gl.TexParameter(TextureTarget.Texture2d, TextureParameterName.TextureWrapR, Gl.CLAMP_TO_BORDER); + Gl.TexParameter(TextureTarget.Texture2d, TextureParameterName.TextureMinFilter, Gl.LINEAR); + Gl.TexParameter(TextureTarget.Texture2d, TextureParameterName.TextureMagFilter, Gl.LINEAR); + Gl.TexImage2D(TextureTarget.Texture2d, 0, InternalFormat.Rgba, (int)resolution.width, (int)resolution.height, 0, OpenGL.PixelFormat.Rgba, PixelType.UnsignedByte, null); + Gl.BindTexture(TextureTarget.Texture2d, 0); + } + + public void pushNewImage(Mat zedImage) + { + // Update Texture with current zedImage + Gl.TexSubImage2D(TextureTarget.Texture2d, 0, 0, 0, zedImage.GetWidth(), 
zedImage.GetHeight(), OpenGL.PixelFormat.Rgba, PixelType.UnsignedByte, zedImage.GetPtr()); + } + + // Draw the Image + public void draw() + { + Gl.UseProgram(shaderImage.it.getProgramId()); + Gl.ActiveTexture(TextureUnit.Texture0); + Gl.BindTexture(TextureTarget.Texture2d, imageTex); + Gl.Uniform1(texID, 0); + + Gl.EnableVertexAttribArray(0); + Gl.BindBuffer(BufferTarget.ArrayBuffer, quad_vb); + Gl.VertexAttribPointer(0, 3, VertexAttribType.Float, false, 0, IntPtr.Zero); + Gl.DrawArrays(PrimitiveType.Triangles, 0, 6); + Gl.DisableVertexAttribArray(0); + Gl.UseProgram(0); + } + + public void close() + { + Gl.DeleteTextures(imageTex); + } + + private int texID; + private uint imageTex; + + private ShaderData shaderImage; + private Resolution resolution; + private uint quad_vb; + + }; + + class PointCloud + { + public PointCloud() + { + shader = new ShaderData(); + mat_ = new Mat(); + } + + void close() + { + if (mat_.IsInit()) + { + mat_.Free(); + Gl.DeleteBuffers(1, bufferGLID_); + } + } + + public void initialize(Resolution res) + { + bufferGLID_ = Gl.GenBuffer(); + Gl.BindBuffer(BufferTarget.ArrayBuffer, bufferGLID_); + Gl.BufferData(BufferTarget.ArrayBuffer, (uint)res.height * (uint)res.width * sizeof(float) * 4, IntPtr.Zero, BufferUsage.StaticDraw); + Gl.BindBuffer(BufferTarget.ArrayBuffer, 0); + + shader.it = new Shader(Shader.POINTCLOUD_VERTEX_SHADER, Shader.POINTCLOUD_FRAGMENT_SHADER); + shader.MVP_Mat = Gl.GetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); + + mat_.Create(res, sl.MAT_TYPE.MAT_32F_C4, MEM.CPU); + //mat_.Create(new Resolution(Math.Min((uint)res.width, 720), Math.Min((uint)res.height, 404)), sl.MAT_TYPE.MAT_32F_C4, MEM.CPU); + } + + public void pushNewPC(Mat matXYZRGBA) + { + if (mat_.IsInit()) + { + mat_.SetFrom(matXYZRGBA, COPY_TYPE.CPU_CPU); + } + } + + public void draw(Matrix4x4 vp) + { + if (mat_.IsInit()) + { + Gl.UseProgram(shader.it.getProgramId()); + Gl.UniformMatrix4f(shader.MVP_Mat, 1, true, vp); + + 
Gl.EnableVertexAttribArray(Shader.ATTRIB_VERTICES_POS); + Gl.BindBuffer(BufferTarget.ArrayBuffer, bufferGLID_); + Gl.BufferData(BufferTarget.ArrayBuffer, (uint)mat_.GetResolution().height * (uint)mat_.GetResolution().width * sizeof(float) * 4, mat_.GetPtr(), BufferUsage.StaticDraw); + Gl.VertexAttribPointer(Shader.ATTRIB_VERTICES_POS, 4, VertexAttribType.Float, false, 0, IntPtr.Zero); + Gl.DrawArrays(PrimitiveType.Points, 0, (int)mat_.GetResolution().height * (int)mat_.GetResolution().width); + Gl.BindBuffer(BufferTarget.ArrayBuffer, 0); + Gl.UseProgram(0); + + } + } + uint bufferGLID_; + + Mat mat_; + ShaderData shader; + }; + + class CameraGL + { + public CameraGL() { } + + public CameraGL(Vector3 position, Vector3 direction, Vector3 vertical) + { + position_ = position; + setDirection(direction, vertical); + offset_ = new Vector3(0, 0, 0); + view_ = Matrix4x4.Identity; + updateView(); + setProjection(70, 70, 0.2f, 50); + updateVPMatrix(); + } + + public void update() + { + if (Vector3.Dot(vertical_, up_) < 0) + { + vertical_ = vertical_ * -1f; + } + updateView(); + updateVPMatrix(); + } + + public void setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar) + { + horizontalFieldOfView_ = horizontalFOV; + verticalFieldOfView_ = verticalFOV; + znear_ = znear; + zfar_ = zfar; + + float fov_y = verticalFOV * (float)Math.PI / 180.0f; + float fov_x = horizontalFOV * (float)Math.PI / 180.0f; + + projection_ = Matrix4x4.Identity; + + projection_.M11 = 1.0f / (float)Math.Tan(fov_x * 0.5f); + projection_.M22 = 1.0f / (float)Math.Tan(fov_y * 0.5f); + projection_.M33 = -(zfar + znear) / (zfar - znear); + projection_.M43 = -1; + projection_.M34 = -(2.0f * zfar * znear) / (zfar - znear); + projection_.M44 = 0; + + } + + public Matrix4x4 getViewProjectionMatrix() { return vpMatrix_; } + + public float getHorizontalFOV() { return horizontalFieldOfView_; } + + public float getVerticalFOV() { return verticalFieldOfView_; } + + public void 
setOffsetFromPosition(Vector3 o) { offset_ = o; } + + public Vector3 getOffsetFromPosition() { return offset_; } + + public void setDirection(Vector3 direction, Vector3 vertical) + { + Vector3 dirNormalized = Vector3.Normalize(direction); + + // Create rotation + Vector3 tr1 = Vector3.UnitZ; + Vector3 tr2 = dirNormalized * -1f; + float cos_theta = Vector3.Dot(Vector3.Normalize(tr1), Vector3.Normalize(tr2)); + float angle = 0.5f * (float)Math.Acos(cos_theta); + Vector3 w = Vector3.Cross(tr1, tr2); + if (Vector3.Zero != w) + { + + w = Vector3.Normalize(w); + } + + float half_sin = (float)Math.Sin(angle); + rotation_.W = (float)Math.Cos(angle); + rotation_.X = half_sin * w.X; + rotation_.Y = half_sin * w.Y; + rotation_.Z = half_sin * w.Z; + + /////////////////////// + updateVectors(); + vertical_ = vertical; + if (Vector3.Dot(vertical, up_) < 0) rotate(Quaternion.CreateFromAxisAngle(Vector3.UnitZ, (float)Math.PI)); + } + + public void translate(Vector3 t) { position_ = position_ + t; } + + public void setPosition(Vector3 p) { position_ = p; } + + public void rotate(Quaternion rot) + { + rotation_ = rot * rotation_; + updateVectors(); + } + + public void rotate(Matrix4x4 m) + { + rotate(Quaternion.CreateFromRotationMatrix(m)); + } + + public void setRotation(Quaternion rot) + { + rotation_ = rot; + updateVectors(); + } + + public void setRotation(Matrix4x4 m) + { + setRotation(Quaternion.CreateFromRotationMatrix(m)); + } + + public Vector3 getPosition() { return position_; } + + public Vector3 getForward() { return forward_; } + + public Vector3 getRight() { return right_; } + + public Vector3 getUp() { return up_; } + + public Vector3 getVertical() { return vertical_; } + + public float getZNear() { return znear_; } + + public float getZFar() { return zfar_; } + + void updateVectors() + { + forward_ = Vector3.Transform(Vector3.UnitZ, rotation_); + up_ = Vector3.Transform(Vector3.UnitY, rotation_); + right_ = Vector3.Transform(Vector3.UnitX, rotation_); + } + + void 
updateView() + { + Matrix4x4 transformation = Matrix4x4.Identity; + + transformation = Matrix4x4.Transform(transformation, rotation_); + transformation.Translation = Vector3.Transform(offset_, rotation_) + position_; + transformation = Matrix4x4.Transpose(transformation); + + Matrix4x4.Invert(transformation, out view_); + } + + void updateVPMatrix() + { + vpMatrix_ = projection_ * view_; + } + + public Matrix4x4 projection_; + + Vector3 offset_; + Vector3 position_; + Vector3 forward_; + Vector3 up_; + Vector3 right_; + Vector3 vertical_; + + Quaternion rotation_; + + Matrix4x4 view_; + Matrix4x4 vpMatrix_; + + float horizontalFieldOfView_; + float verticalFieldOfView_; + float znear_; + float zfar_; + }; + + class Shader + { + + public static readonly string[] IMAGE_VERTEX_SHADER = new string[] { + "#version 330\n", + "layout(location = 0) in vec3 vert;\n", + "out vec2 UV;", + "void main() {\n", + " UV = (vert.xy+vec2(1,1))/2;\n", + " gl_Position = vec4(vert, 1);\n", + "}\n" + }; + + public static readonly string[] IMAGE_FRAGMENT_SHADER = new string[] { + "#version 330 core\n", + "in vec2 UV;\n", + "out vec4 color;\n", + "uniform sampler2D texImage;\n", + "void main() {\n", + " vec2 scaler = vec2(UV.x,1.f - UV.y);\n", + " vec3 rgbcolor = vec3(texture(texImage, scaler).zyx);\n", + " float gamma = 1.0/1.65;\n", + " vec3 color_rgb = pow(rgbcolor, vec3(1.0/gamma));\n", + " color = vec4(color_rgb,1);\n", + "}" + }; + + public static readonly string[] SK_VERTEX_SHADER = new string[] { + "#version 330 core\n", + "layout(location = 0) in vec3 in_Vertex;\n", + "layout(location = 1) in vec4 in_Color;\n", + "layout(location = 2) in vec3 in_Normal;\n", + "out vec4 b_color;\n", + "out vec3 b_position;\n", + "out vec3 b_normal;\n", + "uniform mat4 u_mvpMatrix;\n", + "uniform vec4 u_color;\n", + "void main() {\n", + " b_color = in_Color;\n", + " b_position = in_Vertex;\n", + " b_normal = in_Normal;\n", + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n", + "}" + }; + + public 
static readonly string[] SK_FRAGMENT_SHADER = new string[] { + "#version 330 core\n", + "in vec4 b_color;\n", + "in vec3 b_position;\n", + "in vec3 b_normal;\n", + "out vec4 out_Color;\n", + "void main() {\n", + " vec3 lightPosition = vec3(0, 10, 0);\n", + " vec3 lightColor = vec3(1,1,1);\n", + " float ambientStrength = 0.4;\n", + " vec3 ambient = ambientStrength * lightColor;\n", + " vec3 norm = normalize(b_normal); \n", + " vec3 lightDir = normalize(lightPosition - b_position);\n", + " float diffuse = (1 - ambientStrength) * max(dot(b_normal, lightDir), 0.0);\n", + " out_Color = vec4(b_color.rgb * (diffuse + ambient), 1);\n", + "}" + }; + + public static readonly string[] POINTCLOUD_VERTEX_SHADER = new string[] { + "#version 330 core\n", + "layout(location = 0) in vec4 in_VertexRGBA;\n", + "uniform mat4 u_mvpMatrix;\n", + "out vec4 b_color;\n", + "void main() {\n", + // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) + " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n", + " vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16);\n", + " b_color = vec4(clr_int.r / 255.0f, clr_int.g / 255.0f, clr_int.b / 255.0f, 1.f);", + " gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1);\n", + "}" + }; + + public static readonly string[] POINTCLOUD_FRAGMENT_SHADER = new string[] { + "#version 330 core\n", + "in vec4 b_color;\n", + "layout(location = 0) out vec4 out_Color;\n", + "void main() {\n", + " out_Color = b_color;\n", + "}" + }; + + public static readonly string[] VERTEX_SHADER = new string[] { + "#version 330 core\n", + "layout(location = 0) in vec3 in_Vertex;\n", + "layout(location = 1) in vec4 in_Color;\n", + "uniform mat4 u_mvpMatrix;\n", + "out vec4 b_color;\n", + "void main() {\n", + " b_color = in_Color;\n", + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n", + "}" + }; + + public static readonly string[] 
FRAGMENT_SHADER = new string[] { + "#version 330 core\n", + "in vec4 b_color;\n", + "layout(location = 0) out vec4 out_Color;\n", + "void main() {\n", + " out_Color = b_color;\n", + "}" + }; + + public Shader(string[] vs, string[] fs) + { + if (!compile(ref verterxId_, ShaderType.VertexShader, vs)) + { + Console.WriteLine("ERROR: while compiling vertex shader"); + } + if (!compile(ref fragmentId_, ShaderType.FragmentShader, fs)) + { + Console.WriteLine("ERROR: while compiling fragment shader"); + } + + programId_ = Gl.CreateProgram(); + + Gl.AttachShader(programId_, verterxId_); + Gl.AttachShader(programId_, fragmentId_); + + Gl.BindAttribLocation(programId_, ATTRIB_VERTICES_POS, "in_vertex"); + Gl.BindAttribLocation(programId_, ATTRIB_COLOR_POS, "in_texCoord"); + + Gl.LinkProgram(programId_); + + int errorlk = 0; + Gl.GetProgram(programId_, ProgramProperty.LinkStatus, out errorlk); + if (errorlk != Gl.TRUE) + { + Console.WriteLine("ERROR: while linking Shader :"); + int errorSize = 0; + Gl.GetProgram(programId_, ProgramProperty.InfoLogLength, out errorSize); + StringBuilder error = new StringBuilder(1024); + Gl.GetShaderInfoLog(programId_, errorSize, out errorSize, error); + Console.WriteLine(error.ToString()); + error.Clear(); + Gl.DeleteProgram(programId_); + } + } + + public uint getProgramId() + { + return programId_; + } + + public static uint ATTRIB_VERTICES_POS = 0; + public static uint ATTRIB_COLOR_POS = 1; + public static uint ATTRIB_NORMAL = 2; + + private bool compile(ref uint shaderId, ShaderType type, string[] src) + { + int errorcp = 0; + + shaderId = Gl.CreateShader(type); + if (shaderId == 0) + { + Console.WriteLine("ERROR: shader type (" + type + ") does not exist"); + } + + Gl.ShaderSource(shaderId, src); + Gl.CompileShader(shaderId); + Gl.GetShader(shaderId, ShaderParameterName.CompileStatus, out errorcp); + + if (errorcp != Gl.TRUE) + { + Console.WriteLine("ERROR: while compiling Shader :"); + int errorSize; + Gl.GetShader(shaderId, 
ShaderParameterName.InfoLogLength, out errorSize); + + StringBuilder error = new StringBuilder(1024); + Gl.GetShaderInfoLog(shaderId, errorSize, out errorSize, error); + + Console.WriteLine(error.ToString()); + error.Clear(); + + Gl.DeleteShader(shaderId); + return false; + } + return true; + } + + uint verterxId_; + uint fragmentId_; + uint programId_; + }; + + struct ShaderData + { + public Shader it; + public int MVP_Mat; + }; + + class Simple3DObject + { + public Simple3DObject() + { + is_init = false; + } + + public void init() + { + vaoID_ = 0; + isStatic_ = false; + + shader.it = new Shader(Shader.SK_VERTEX_SHADER, Shader.SK_FRAGMENT_SHADER); + shader.MVP_Mat = Gl.GetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); + + vertices_ = new List(); + colors_ = new List(); + indices_ = new List(); + normals_ = new List(); + + is_init = true; + } + + public bool isInit() + { + return is_init; + } + + public void addPt(float3 pt) + { + vertices_.Add(pt.x); + vertices_.Add(pt.y); + vertices_.Add(pt.z); + } + + public void addClr(float4 clr) + { + colors_.Add(clr.x); + colors_.Add(clr.y); + colors_.Add(clr.z); + colors_.Add(clr.w); + } + + public void addNormal(float3 normal) + { + normals_.Add(normal.x); + normals_.Add(normal.y); + normals_.Add(normal.z); + } + + void addBBox(List pts, float4 clr) + { + int start_id = vertices_.Count / 3; + + float transparency_top = 0.05f, transparency_bottom = 0.75f; + for (int i = 0; i < pts.Count; i++) + { + addPt(pts[i]); + clr.w = (i < 4 ? 
transparency_top : transparency_bottom); + addClr(clr); + } + + uint[] boxLinks = new uint[] { 4, 5, 5, 6, 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7 }; + + for (int i = 0; i < boxLinks.Length; i += 2) + { + indices_.Add((uint)start_id + boxLinks[i]); + indices_.Add((uint)start_id + boxLinks[i + 1]); + } + } + + public void addPoint(float3 pt, float4 clr) + { + addPt(pt); + addClr(clr); + indices_.Add((uint)indices_.Count()); + } + + public void addLine(float3 p1, float3 p2, float4 clr) + { + addPoint(p1, clr); + addPoint(p2, clr); + } + + public void addTriangle(float3 p1, float3 p2, float3 p3, float4 clr) + { + addPoint(p1, clr); + addPoint(p2, clr); + addPoint(p3, clr); + } + + public void addFullEdges(List pts, float4 clr) + { + int start_id = vertices_.Count / 3; + + for (int i = 0; i < pts.Count; i++) + { + addPt(new float3(pts[i].X, pts[i].Y, pts[i].Z)); + addClr(clr); + } + + uint[] boxLinksTop = new uint[] { 0, 1, 1, 2, 2, 3, 3, 0 }; + for (int i = 0; i < boxLinksTop.Length; i += 2) + { + indices_.Add((uint)start_id + boxLinksTop[i]); + indices_.Add((uint)start_id + boxLinksTop[i + 1]); + } + + uint[] boxLinksBottom = new uint[] { 4, 5, 5, 6, 6, 7, 7, 4 }; + for (int i = 0; i < boxLinksBottom.Length; i += 2) + { + indices_.Add((uint)start_id + boxLinksBottom[i]); + indices_.Add((uint)start_id + boxLinksBottom[i + 1]); + } + } + + public void addSingleVerticalLine(Vector3 top_pt, Vector3 bot_pt, float4 clr) + { + List current_pts = new List() + { + top_pt, + ((grid_size - 1.0f) * top_pt + bot_pt) / grid_size, + ((grid_size - 2.0f) * top_pt + bot_pt* 2.0f) / grid_size, + (2.0f * top_pt + bot_pt* (grid_size - 2.0f)) / grid_size, + (top_pt + bot_pt* (grid_size - 1.0f)) / grid_size, + bot_pt + }; + + int start_id = vertices_.Count / 3; + for (int i = 0; i < current_pts.Count; i++) + { + addPt(new float3(current_pts[i].X, current_pts[i].Y, current_pts[i].Z)); + clr.w = (i == 2 || i == 3) ? 
0.0f : 0.75f; + addClr(clr); + } + + uint[] boxLinks = new uint[] { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5 }; + for (int i = 0; i < boxLinks.Length; i += 2) + { + indices_.Add((uint)start_id + boxLinks[i]); + indices_.Add((uint)start_id + boxLinks[i + 1]); + } + } + + public void addVerticalEdges(List pts, float4 clr) + { + + addSingleVerticalLine(pts[0], pts[4], clr); + addSingleVerticalLine(pts[1], pts[5], clr); + addSingleVerticalLine(pts[2], pts[6], clr); + addSingleVerticalLine(pts[3], pts[7], clr); + } + + public void addTopFace(List pts, float4 clr) + { + clr.w = 0.3f; + foreach (Vector3 it in pts) + addPoint(new float3(it.X, it.Y, it.Z), clr); + } + + void addQuad(List quad_pts, float alpha1, float alpha2, float4 clr) + { // To use only with 4 points + for (int i = 0; i < quad_pts.Count; ++i) + { + addPt(new float3(quad_pts[i].X, quad_pts[i].Y, quad_pts[i].Z)); + clr.w = (i < 2 ? alpha1 : alpha2); + addClr(clr); + } + + indices_.Add((uint)indices_.Count); + indices_.Add((uint)indices_.Count); + indices_.Add((uint)indices_.Count); + indices_.Add((uint)indices_.Count); + } + + public void addVerticalFaces(List pts, float4 clr) + { + // For each face, we need to add 4 quads (the first 2 indexes are always the top points of the quad) + int[][] quads = new int[4][] + { + new int[4] + { + 0, 3, 7, 4 + }, // front face + new int[4] + { + 3, 2, 6, 7 + }, // right face + new int[4] + { + 2, 1, 5, 6 + }, // back face + new int[4] + { + 1, 0, 4, 5 + } // left face + }; + float alpha = 0.5f; + + foreach (int[] quad in quads) + { + + // Top quads + List quad_pts_1 = new List { + pts[quad[0]], + pts[quad[1]], + ((grid_size - 0.5f) * pts[quad[1]] + 0.5f * pts[quad[2]]) / grid_size, + ((grid_size - 0.5f) * pts[quad[0]] + 0.5f * pts[quad[3]]) / grid_size }; + addQuad(quad_pts_1, alpha, alpha, clr); + + List quad_pts_2 = new List { + ((grid_size - 0.5f) * pts[quad[0]] + 0.5f * pts[quad[3]]) / grid_size, + ((grid_size - 0.5f) * pts[quad[1]] + 0.5f * pts[quad[2]]) / grid_size, + 
((grid_size - 1.0f) * pts[quad[1]] + pts[quad[2]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[0]] + pts[quad[3]]) / grid_size}; + addQuad(quad_pts_2, alpha, 2 * alpha / 3, clr); + + List quad_pts_3 = new List { + ((grid_size - 1.0f) * pts[quad[0]] + pts[quad[3]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[1]] + pts[quad[2]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[1]] + 1.5f * pts[quad[2]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[0]] + 1.5f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_3, 2 * alpha / 3, alpha / 3, clr); + + List quad_pts_4 = new List { + ((grid_size - 1.5f) * pts[quad[0]] + 1.5f * pts[quad[3]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[1]] + 1.5f * pts[quad[2]]) / grid_size, + ((grid_size - 2.0f) * pts[quad[1]] + 2.0f * pts[quad[2]]) / grid_size, + ((grid_size - 2.0f) * pts[quad[0]] + 2.0f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_4, alpha / 3, 0.0f, clr); + + // Bottom quads + List quad_pts_5 = new List { + (pts[quad[1]] * 2.0f + (grid_size - 2.0f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] * 2.0f + (grid_size - 2.0f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] * 1.5f + (grid_size - 1.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 1.5f + (grid_size - 1.5f) * pts[quad[2]]) / grid_size }; + addQuad(quad_pts_5, 0.0f, alpha / 3, clr); + + List quad_pts_6 = new List { + (pts[quad[1]] * 1.5f + (grid_size - 1.5f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] * 1.5f + (grid_size - 1.5f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] + (grid_size - 1.0f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] + (grid_size - 1.0f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_6, alpha / 3, 2 * alpha / 3, clr); + + List quad_pts_7 = new List { + (pts[quad[1]] + (grid_size - 1.0f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] + (grid_size - 1.0f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] * 0.5f + (grid_size - 0.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 0.5f + (grid_size - 0.5f) * pts[quad[2]]) / grid_size}; + 
addQuad(quad_pts_7, 2 * alpha / 3, alpha, clr); + + List quad_pts_8 = new List { + (pts[quad[0]] * 0.5f + (grid_size - 0.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 0.5f + (grid_size - 0.5f) * pts[quad[2]]) / grid_size, + pts[quad[2]], + pts[quad[3]]}; + addQuad(quad_pts_8, alpha, alpha, clr); + } + } + + public void addCylinder(float3 startPosition, float3 endPosition, float4 clr, float radius = 0.010f) + { + + ///////////////////////////// + /// Compute Rotation Matrix + ///////////////////////////// + + const float PI = 3.1415926f; + + float m_radius = radius; + + float3 dir = endPosition.sub(startPosition); + + float m_height = dir.norm(); + + dir.divide(m_height); + + float3 yAxis = new float3(0, 1, 0); + + float3 v = dir.cross(yAxis); ; + + Matrix4x4 rotation; + + float sinTheta = v.norm(); + if (sinTheta < 0.00001f) + { + rotation = Matrix4x4.Identity; + } + else + { + float cosTheta = dir.dot(yAxis); + float scale = (1.0f - cosTheta) / (1.0f - (cosTheta * cosTheta)); + + Matrix4x4 vx = new Matrix4x4(0, v.z, -v.y, 0, + -v.z, 0, v.x, 0, + v.y, -v.x, 0, 0, + 0, 0, 0, 1.0f); + + Matrix4x4 vx2 = vx * vx; + Matrix4x4 vx2Scaled = vx2 * scale; + + rotation = Matrix4x4.Identity; + rotation = rotation + vx; + rotation = rotation + vx2Scaled; + } + + ///////////////////////////// + /// Create Cylinder + ///////////////////////////// + + Matrix3x3 rotationMatrix = new Matrix3x3(); + float[] data = new float[9] { rotation.M11, rotation.M12, rotation.M13, rotation.M21, rotation.M22, rotation.M23, rotation.M31, rotation.M32, rotation.M33 }; + rotationMatrix.m = data; + + float3 v1; + float3 v2; + float3 v3; + float3 v4; + float3 normal; + int NB_SEG = 32; + float scale_seg = (float)1 / (float)NB_SEG; + + for (int j = 0; j < NB_SEG; j++) + { + float i = (float)2 * PI * ((float)j * scale_seg); + float i1 = (float)2 * PI * ((float)(j + 1) * scale_seg); + v1 = rotationMatrix.multiply(new float3(m_radius * (float)Math.Cos(i), 0, m_radius * 
(float)Math.Sin(i))).add(startPosition); + v2 = rotationMatrix.multiply(new float3(m_radius * (float)Math.Cos(i), m_height, m_radius * (float)Math.Sin(i))).add(startPosition); + v4 = rotationMatrix.multiply(new float3(m_radius * (float)Math.Cos(i1), m_height, m_radius * (float)Math.Sin(i1))).add(startPosition); + v3 = rotationMatrix.multiply(new float3(m_radius * (float)Math.Cos(i1), 0, m_radius * (float)Math.Sin(i1))).add(startPosition); + + float3 a = v2.sub(v1); + float3 b = v3.sub(v1); + normal = a.cross(b); + normal.divide(normal.norm()); + + addPoint(v1, clr); + addPoint(v2, clr); + addPoint(v4, clr); + addPoint(v3, clr); + + addNormal(normal); + addNormal(normal); + addNormal(normal); + addNormal(normal); + } + } + + public void createSphere() + { + const float PI = 3.1415926f; + + float m_radius = 0.02f; + float radiusInv = 1.0f / m_radius; + + int m_stackCount = 16; + int m_sectorCount = 16; + + float3 v1; + float3 v2; + float3 v3; + float3 v4; + float3 normal; + + for (int i = 0; i <= m_stackCount; i++) + { + double lat0 = PI * (-0.5 + (double)(i - 1) / m_stackCount); + float z0 = (float)Math.Sin(lat0); + float zr0 = (float)Math.Cos(lat0); + + double lat1 = PI * (-0.5 + (double)i / m_stackCount); + float z1 = (float)Math.Sin(lat1); + float zr1 = (float)Math.Cos(lat1); + + for (int j = 0; j <= m_sectorCount - 1; j++) + { + double lng = 2 * PI * (double)(j - 1) / m_sectorCount; + float x = (float)Math.Cos(lng); + float y = (float)Math.Sin(lng); + + v1 = new float3(m_radius * x * zr0, m_radius * y * zr0, m_radius * z0); + normal = new float3(x * zr0, y * zr0, z0); + addPt(v1); + indices_.Add((uint)indices_.Count()); + addNormal(normal); + + v2 = new float3(m_radius * x * zr1, m_radius * y * zr1, m_radius * z1); + normal = new float3(x * zr1, y * zr1, z1); + addPt(v2); + indices_.Add((uint)indices_.Count()); + addNormal(normal); + + lng = 2 * PI * (double)j / m_sectorCount; + x = (float)Math.Cos(lng); + y = (float)Math.Sin(lng); + + v4 = new float3(m_radius * 
x * zr1, m_radius * y * zr1, m_radius * z1); + normal = new float3(x * zr1, y * zr1, z1); + addPt(v4); + indices_.Add((uint)indices_.Count()); + addNormal(normal); + + v3 = new float3(m_radius * x * zr0, m_radius * y * zr0, m_radius * z0); + normal = new float3(x * zr0, y * zr0, z0); + addPt(v3); + indices_.Add((uint)indices_.Count()); + addNormal(normal); + } + } + } + + public void addSphere(Simple3DObject sphere, float3 position, float4 clr) + { + for (int i = 0; i < sphere.vertices_.Count; i += 3) + { + float3 point = new float3(sphere.vertices_[i], sphere.vertices_[i + 1], sphere.vertices_[i + 2]).add(position); + addPoint(point, clr); + float3 normal = new float3(sphere.normals_[i], sphere.normals_[i + 1], sphere.normals_[i + 2]); + addNormal(normal); + } + } + + public void pushToGPU() + { + if (!isStatic_ || vaoID_ == 0) + { + if (vaoID_ == 0) + { + vaoID_ = Gl.GenVertexArray(); + Gl.GenBuffers(vboID_); + } + + Gl.ShadeModel(ShadingModel.Smooth); + if (vertices_.Count() > 0) + { + Gl.BindVertexArray(vaoID_); + Gl.BindBuffer(BufferTarget.ArrayBuffer, vboID_[0]); + Gl.BufferData(BufferTarget.ArrayBuffer, (uint)vertices_.Count() * sizeof(float), vertices_.ToArray(), isStatic_ ? BufferUsage.StaticDraw : BufferUsage.DynamicDraw); + Gl.VertexAttribPointer(Shader.ATTRIB_VERTICES_POS, 3, VertexAttribType.Float, false, 0, IntPtr.Zero); + Gl.EnableVertexAttribArray(Shader.ATTRIB_VERTICES_POS); + } + if (colors_.Count() > 0) + { + + Gl.BindBuffer(BufferTarget.ArrayBuffer, vboID_[1]); + Gl.BufferData(BufferTarget.ArrayBuffer, (uint)colors_.Count() * sizeof(float), colors_.ToArray(), isStatic_ ? 
BufferUsage.StaticDraw : BufferUsage.DynamicDraw); + Gl.VertexAttribPointer(Shader.ATTRIB_COLOR_POS, 4, VertexAttribType.Float, false, 0, IntPtr.Zero); + Gl.EnableVertexAttribArray(Shader.ATTRIB_COLOR_POS); + } + if (indices_.Count() > 0) + { + + Gl.BindBuffer(BufferTarget.ElementArrayBuffer, vboID_[2]); + Gl.BufferData(BufferTarget.ElementArrayBuffer, (uint)indices_.Count() * sizeof(float), indices_.ToArray(), isStatic_ ? BufferUsage.StaticDraw : BufferUsage.DynamicDraw); + } + if (normals_.Count() > 0) + { + Gl.BindBuffer(BufferTarget.ArrayBuffer, vboID_[3]); + Gl.BufferData(BufferTarget.ArrayBuffer, (uint)normals_.Count() * sizeof(float), normals_.ToArray(), isStatic_ ? BufferUsage.StaticDraw : BufferUsage.DynamicDraw); + Gl.VertexAttribPointer(Shader.ATTRIB_NORMAL, 3, VertexAttribType.Float, false, 0, IntPtr.Zero); + Gl.EnableVertexAttribArray(Shader.ATTRIB_NORMAL); + } + + Gl.BindVertexArray(0); + Gl.BindBuffer(BufferTarget.ElementArrayBuffer, 0); + Gl.BindBuffer(BufferTarget.ArrayBuffer, 0); + } + } + + public void clear() + { + vertices_.Clear(); + colors_.Clear(); + indices_.Clear(); + normals_.Clear(); + } + + public void setDrawingType(PrimitiveType type) + { + drawingType_ = type; + } + + public void draw() + { + if (indices_.Count() > 0 && vaoID_ != 0) + { + Gl.BindVertexArray(vaoID_); + Gl.DrawElements(drawingType_, indices_.Count(), DrawElementsType.UnsignedInt, IntPtr.Zero); + Gl.BindVertexArray(0); + } + } + + public float grid_size = 9.0f; + + private List vertices_; + private List colors_; + private List indices_; + private List normals_; + + private bool isStatic_; + private bool is_init; + + private uint vaoID_; + + /* + Vertex buffer IDs: + - [0]: Vertices coordinates; + - [1]: Colors; + - [2]: Indices; + - [3]: Normals + */ + private uint[] vboID_ = new uint[4]; + + private ShaderData shader; + + + PrimitiveType drawingType_; + }; + +} diff --git a/body tracking/multi-camera/csharp/MainWindow.cs b/body 
tracking/multi-camera/csharp/MainWindow.cs new file mode 100644 index 00000000..b198d72c --- /dev/null +++ b/body tracking/multi-camera/csharp/MainWindow.cs @@ -0,0 +1,303 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////// + +/***************************************************************************************** + ** This sample demonstrates how to detect human bodies and retrieves their 3D position ** + ** with the ZED SDK and display the result in an OpenGL window. 
** + *****************************************************************************************/ + +using System; +using System.Collections.Generic; +using System.Net; +using System.Numerics; +using System.Text; +using System.Windows.Forms; +using OpenGL; +using OpenGL.CoreUI; + +using sl; + +class MainWindow +{ + GLViewer viewer; + Fusion fusion; + BodyTrackingFusionParameters bodyTrackingFusionParameters; + BodyTrackingFusionRuntimeParameters bodyTrackingFusionRuntimeParameters; + Bodies bodies; + List cameras; + sl.Resolution pointCloudResolution; + Dictionary pointClouds; + Dictionary camRawData; + public MainWindow(string[] args) + { + if (args.Length < 1) + { + Console.WriteLine("Requires a configuration file as argument"); + return; + } + + const sl.COORDINATE_SYSTEM COORDINATE_SYSTEM = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP; + const sl.UNIT UNIT = sl.UNIT.METER; + + // Read json file containing the configuration of your multicamera setup. + List fusionConfigurations = sl.Fusion.ReadConfigurationFile(new StringBuilder(args[0]), COORDINATE_SYSTEM, UNIT); + + if (fusionConfigurations.Count == 0 ) + { + Console.WriteLine("Empty configuration file"); + return; + } + + List clients = new List(); + int id = 0; + // Check if the ZED camera should run within the same process or if they are running on the edge. + foreach (FusionConfiguration config in fusionConfigurations) + { + if (config.commParam.communicationType == sl.COMM_TYPE.INTRA_PROCESS) + { + Console.WriteLine("Try to open ZED " + config.serialNumber); + var client = new ClientPublisher(id); + bool state = client.Open(config.inputType); + clients.Add(client); + + if (!state) + { + Console.WriteLine("Error while opening ZED " + config.serialNumber); + continue; + } + + Console.WriteLine("ready ! 
"); + id++; + } + } + + // Starts Camera threads + foreach(var client in clients) client.Start(); + + sl.InitFusionParameters initFusionParameters = new sl.InitFusionParameters(); + initFusionParameters.coordinateUnits = UNIT; + initFusionParameters.coordinateSystem = COORDINATE_SYSTEM; + + fusion = new sl.Fusion(); + FUSION_ERROR_CODE err = fusion.Init(ref initFusionParameters); + + if (err != sl.FUSION_ERROR_CODE.SUCCESS) + { + Console.WriteLine("Error while initializing the fusion. Exiting..."); + Environment.Exit(-1); + } + + cameras = new List(); + + // subscribe to every cameras of the setup to internally gather their data + foreach (var config in fusionConfigurations) + { + sl.CameraIdentifier uuid = new sl.CameraIdentifier(config.serialNumber); + // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. + Vector3 translation = config.position; + Quaternion rotation = config.rotation; + err = fusion.Subscribe(ref uuid, config.commParam, ref translation, ref rotation); + if (err != sl.FUSION_ERROR_CODE.SUCCESS) + { + Console.WriteLine("Error while subscribing to camera " + config.serialNumber + " : " + err + ". Exiting..."); + Environment.Exit(-1); + } + else + { + cameras.Add(uuid); + } + } + + if (cameras.Count == 0) + { + Console.WriteLine("No camera subscribed. 
Exiting..."); + Environment.Exit(-1); + } + + pointCloudResolution = new sl.Resolution(512, 360); + pointClouds = new Dictionary(); + + foreach (var camera in cameras) + { + sl.Mat pointCloud = new sl.Mat(); + pointCloud.Create(pointCloudResolution, MAT_TYPE.MAT_32F_C4, MEM.CPU); + pointClouds[camera.sn] = pointCloud; + } + + // as this sample shows how to fuse body detection from the multi camera setup + // we enable the Body Tracking module with its options + bodyTrackingFusionParameters.enableTracking = true; + bodyTrackingFusionParameters.enableBodyFitting = true; + fusion.EnableBodyTracking(ref bodyTrackingFusionParameters); + + bodyTrackingFusionRuntimeParameters.skeletonMinimumAllowedKeypoints = 7; + bodyTrackingFusionRuntimeParameters.skeletonMinimumAllowedCameras = cameras.Count / 2; + + camRawData = new Dictionary(); + + // Create OpenGL Viewer + viewer = new GLViewer(); + + Console.Write("Viewer shortcuts : \n" + + "'r': switch on/off for raw skeleton display\n" + + "'p': switch on/off for live point cloud display"); + + // Create OpenGL window + CreateWindow(); + } + + // Create Window + public void CreateWindow() + { + using (OpenGL.CoreUI.NativeWindow nativeWindow = OpenGL.CoreUI.NativeWindow.Create()) + { + nativeWindow.ContextCreated += NativeWindow_ContextCreated; + nativeWindow.Render += NativeWindow_Render; + nativeWindow.MouseMove += NativeWindow_MouseEvent; + nativeWindow.Resize += NativeWindow_Resize; + nativeWindow.KeyDown += (object obj, NativeWindowKeyEventArgs e) => + { + switch (e.Key) + { + case KeyCode.Escape: + close(); + nativeWindow.Stop(); + break; + + case KeyCode.F: + nativeWindow.Fullscreen = !nativeWindow.Fullscreen; + break; + } + + viewer.keyEventFunction(e); + }; + nativeWindow.CursorVisible = false; + nativeWindow.MultisampleBits = 4; + + int wnd_h = Screen.PrimaryScreen.Bounds.Height; + int wnd_w = Screen.PrimaryScreen.Bounds.Width; + + uint height = (uint)(wnd_h * 0.9f); + uint width = (uint)(wnd_w * 0.9f); + + 
nativeWindow.Create((int)(wnd_h * 0.05f), (int)(wnd_w * 0.05f), width, height, NativeWindowStyle.Resizeable); + nativeWindow.Show(); + try + { + nativeWindow.Run(); + } + catch(Exception e) + { + Console.WriteLine("Mouse wheel is broken in the current OPENGL .NET VERSION. Please do not use it. " + e); + } + } + } + + private void NativeWindow_Resize(object sender, EventArgs e) + { + OpenGL.CoreUI.NativeWindow nativeWindow = (OpenGL.CoreUI.NativeWindow)sender; + + viewer.resizeCallback((int)nativeWindow.Width, (int)nativeWindow.Height); + } + + private void NativeWindow_MouseEvent(object sender, NativeWindowMouseEventArgs e) + { + viewer.mouseEventFunction(e); + viewer.computeMouseMotion(e.Location.X, e.Location.Y); + } + + // Init Window + private void NativeWindow_ContextCreated(object sender, NativeWindowEventArgs e) + { + OpenGL.CoreUI.NativeWindow nativeWindow = (OpenGL.CoreUI.NativeWindow)sender; + + Gl.ReadBuffer(ReadBufferMode.Back); + Gl.ClearColor(0.2f, 0.19f, 0.2f, 1.0f); + + Gl.Enable(EnableCap.Blend); + Gl.BlendFunc(BlendingFactor.SrcAlpha, BlendingFactor.OneMinusSrcAlpha); + + Gl.Enable(EnableCap.LineSmooth); + Gl.Hint(HintTarget.LineSmoothHint, HintMode.Nicest); + + viewer.Init(bodyTrackingFusionParameters.enableTracking); + + foreach (var camera in cameras) + { + viewer.InitPointCloud(camera.sn, pointCloudResolution); + } + } + + // Render loop + private void NativeWindow_Render(object sender, NativeWindowEventArgs e) + { + OpenGL.CoreUI.NativeWindow nativeWindow = (OpenGL.CoreUI.NativeWindow)sender; + Gl.Viewport(0, 0, (int)nativeWindow.Width, (int)nativeWindow.Height); + Gl.Clear(ClearBufferMask.ColorBufferBit); + + FUSION_ERROR_CODE err = FUSION_ERROR_CODE.FAILURE; + // run the fusion as long as the viewer is available. 
+ if (viewer.IsAvailable()) + { + // run the fusion process (which gather data from all camera, sync them and process them) + err = fusion.Process(); + if (err == sl.FUSION_ERROR_CODE.SUCCESS) + { + // Retrieve fused body + fusion.RetrieveBodies(ref bodies, ref bodyTrackingFusionRuntimeParameters, new CameraIdentifier(0)); + + for (int i = 0; i < cameras.Count; i++) + { + CameraIdentifier uuid = cameras[i]; + + sl.Bodies rawBodies = new sl.Bodies(); + // Retrieve raw skeleton data + err = fusion.RetrieveBodies(ref rawBodies, ref bodyTrackingFusionRuntimeParameters, uuid); + camRawData[uuid.sn] = rawBodies; + + // Retrieve camera pose + sl.Pose pose = new sl.Pose(); + sl.POSITIONAL_TRACKING_STATE state = fusion.GetPosition(ref pose, REFERENCE_FRAME.WORLD, ref uuid, POSITION_TYPE.RAW); + if (state == sl.POSITIONAL_TRACKING_STATE.OK) + { + viewer.SetCameraPose(uuid.sn, pose); + } + + // Retrieve point cloud + err = fusion.RetrieveMeasure(pointClouds[uuid.sn], ref uuid, MEASURE.XYZBGRA, pointCloudResolution); + + if (err == sl.FUSION_ERROR_CODE.SUCCESS) + { + viewer.UpdatePointCloud(uuid.sn, pointClouds[uuid.sn]); + } + } + } + //Update GL View + viewer.UpdateBodies(bodies, camRawData); + viewer.Render(); + } + } + + private void close() + { + fusion.Close(); + viewer.Exit(); + } +} diff --git a/body tracking/multi-camera/csharp/Program.cs b/body tracking/multi-camera/csharp/Program.cs new file mode 100644 index 00000000..0ba470c7 --- /dev/null +++ b/body tracking/multi-camera/csharp/Program.cs @@ -0,0 +1,16 @@ +//======= Copyright (c) Stereolabs Corporation, All rights reserved. 
=============== +using System; +using System.Runtime.InteropServices; +using System.Threading.Tasks; + +namespace sl +{ + class Program + { + [STAThread] + static void Main(string[] args) + { + MainWindow window = new MainWindow(args); + } + } +} \ No newline at end of file diff --git a/body tracking/multi-camera/csharp/Properties/AssemblyInfo.cs b/body tracking/multi-camera/csharp/Properties/AssemblyInfo.cs new file mode 100644 index 00000000..19d0c1a1 --- /dev/null +++ b/body tracking/multi-camera/csharp/Properties/AssemblyInfo.cs @@ -0,0 +1,36 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("Tutorials")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("Tutorials")] +[assembly: AssemblyCopyright("Copyright © 2024")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. 
+[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("db8455c8-b2a9-4e62-9597-7b26c432a999")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/body tracking/multi-camera/csharp/Properties/Resources.Designer.cs b/body tracking/multi-camera/csharp/Properties/Resources.Designer.cs new file mode 100644 index 00000000..54694871 --- /dev/null +++ b/body tracking/multi-camera/csharp/Properties/Resources.Designer.cs @@ -0,0 +1,63 @@ +//------------------------------------------------------------------------------ +// +// Ce code a été généré par un outil. +// Version du runtime :4.0.30319.42000 +// +// Les modifications apportées à ce fichier peuvent provoquer un comportement incorrect et seront perdues si +// le code est régénéré. +// +//------------------------------------------------------------------------------ + +namespace Image_capture.Properties { + using System; + + + /// + /// Une classe de ressource fortement typée destinée, entre autres, à la consultation des chaînes localisées. + /// + // Cette classe a été générée automatiquement par la classe StronglyTypedResourceBuilder + // à l'aide d'un outil, tel que ResGen ou Visual Studio. + // Pour ajouter ou supprimer un membre, modifiez votre fichier .ResX, puis réexécutez ResGen + // avec l'option /str ou régénérez votre projet VS. 
+ [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "15.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + internal class Resources { + + private static global::System.Resources.ResourceManager resourceMan; + + private static global::System.Globalization.CultureInfo resourceCulture; + + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + internal Resources() { + } + + /// + /// Retourne l'instance ResourceManager mise en cache utilisée par cette classe. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { + get { + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Image_capture.Properties.Resources", typeof(Resources).Assembly); + resourceMan = temp; + } + return resourceMan; + } + } + + /// + /// Remplace la propriété CurrentUICulture du thread actuel pour toutes + /// les recherches de ressources à l'aide de cette classe de ressource fortement typée. 
+ /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { + get { + return resourceCulture; + } + set { + resourceCulture = value; + } + } + } +} diff --git a/body tracking/multi-camera/csharp/Properties/Resources.resx b/body tracking/multi-camera/csharp/Properties/Resources.resx new file mode 100644 index 00000000..af7dbebb --- /dev/null +++ b/body tracking/multi-camera/csharp/Properties/Resources.resx @@ -0,0 +1,117 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + text/microsoft-resx + + + 2.0 + + + System.Resources.ResXResourceReader, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + + + System.Resources.ResXResourceWriter, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + + \ No newline at end of file diff --git a/body tracking/multi-camera/csharp/Properties/Settings.Designer.cs b/body tracking/multi-camera/csharp/Properties/Settings.Designer.cs new file mode 100644 index 00000000..84a7c738 --- /dev/null +++ b/body tracking/multi-camera/csharp/Properties/Settings.Designer.cs @@ -0,0 +1,26 @@ +//------------------------------------------------------------------------------ +// +// Ce code a été généré par un outil. +// Version du runtime :4.0.30319.42000 +// +// Les modifications apportées à ce fichier peuvent provoquer un comportement incorrect et seront perdues si +// le code est régénéré. 
+// +//------------------------------------------------------------------------------ + +namespace Image_capture.Properties { + + + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "15.9.0.0")] + internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase { + + private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings()))); + + public static Settings Default { + get { + return defaultInstance; + } + } + } +} diff --git a/body tracking/multi-camera/csharp/Properties/Settings.settings b/body tracking/multi-camera/csharp/Properties/Settings.settings new file mode 100644 index 00000000..033d7a5e --- /dev/null +++ b/body tracking/multi-camera/csharp/Properties/Settings.settings @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/body tracking/multi-camera/csharp/README.md b/body tracking/multi-camera/csharp/README.md new file mode 100644 index 00000000..c834b754 --- /dev/null +++ b/body tracking/multi-camera/csharp/README.md @@ -0,0 +1,23 @@ +# ZED SDK - Body Tracking + +This sample shows how to detect and track human bodies in space. + +## Getting Started +- Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/), NuGet packages are automatically downloaded + - Check the [Documentation](https://www.stereolabs.com/docs/) + +## Build the program + - Build for [Windows](https://www.stereolabs.com/docs/app-development/cpp/windows/) + +## Run the program +*NOTE: The ZED v1 is not compatible with this module* +- Navigate to the build directory and launch the executable +- Or open a terminal in the build directory and run the sample : + + ./ZED_Body_Tracking + +## Features + - Display bodies bounding boxes by pressing the `b` key. 
+ +## Support +If you need assistance go to our Community site at https://community.stereolabs.com/ \ No newline at end of file diff --git a/body tracking/multi-camera/csharp/packages.config b/body tracking/multi-camera/csharp/packages.config new file mode 100644 index 00000000..00464fb1 --- /dev/null +++ b/body tracking/multi-camera/csharp/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/camera control/cpp/src/main.cpp b/camera control/cpp/src/main.cpp index 4ad6ce56..e6769df7 100644 --- a/camera control/cpp/src/main.cpp +++ b/camera control/cpp/src/main.cpp @@ -142,7 +142,7 @@ int main(int argc, char **argv) { std::cout << "returned_state " << returned_state << std::endl; int current_value=10; zed.getCameraSettings(VIDEO_SETTINGS::EXPOSURE, current_value); - if (1/*returned_state == ERROR_CODE::SUCCESS)*/) { + if (returned_state == ERROR_CODE::SUCCESS) { // Retrieve left image zed.retrieveImage(zed_image, VIEW::SIDE_BY_SIDE); diff --git a/camera streaming/receiver/cpp/src/main.cpp b/camera streaming/receiver/cpp/src/main.cpp index f59c90eb..418a927c 100644 --- a/camera streaming/receiver/cpp/src/main.cpp +++ b/camera streaming/receiver/cpp/src/main.cpp @@ -113,21 +113,6 @@ void setStreamParameter(InitParameters& init_p, string& argument) { } int main(int argc, char **argv) { - -#if 0 - auto streaming_devices = Camera::getStreamingDeviceList(); - int nb_streaming_zed = streaming_devices.size(); - - print("Detect: " + to_string(nb_streaming_zed) + " ZED in streaming"); - if (nb_streaming_zed == 0) { - print("No streaming ZED detected, have you take a look to the sample 'ZED Streaming Sender' ?"); - return 0; - } - - for (auto& it : streaming_devices) - cout << "* ZED: " << it.serial_number << ", IP: " << it.ip << ", port : " << it.port << ", bitrate : " << it.current_bitrate << "\n"; -#endif - Camera zed; // Set configuration parameters for the ZED InitParameters init_parameters; diff --git a/depth sensing/depth 
sensing/cpp/include/GLViewer.hpp b/depth sensing/depth sensing/cpp/include/GLViewer.hpp index e2099b91..3ec322a4 100644 --- a/depth sensing/depth sensing/cpp/include/GLViewer.hpp +++ b/depth sensing/depth sensing/cpp/include/GLViewer.hpp @@ -98,9 +98,19 @@ class CameraGL { class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; @@ -115,7 +125,7 @@ class Shader { class Simple3DObject { public: - Simple3DObject() {} + Simple3DObject(); Simple3DObject(sl::Translation position, bool isStatic); ~Simple3DObject(); diff --git a/depth sensing/depth sensing/cpp/src/GLViewer.cpp b/depth sensing/depth sensing/cpp/src/GLViewer.cpp index ce46d16a..a4de6ca7 100644 --- a/depth sensing/depth sensing/cpp/src/GLViewer.cpp +++ b/depth sensing/depth sensing/cpp/src/GLViewer.cpp @@ -129,7 +129,7 @@ GLenum GLViewer::init(int argc, char **argv, sl::CameraParameters param, CUstrea pointCloud_.initialize(image_size, strm_); // Compile and create the shader - shader_ = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader_.set(VERTEX_SHADER, FRAGMENT_SHADER); shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -292,6 +292,8 @@ void GLViewer::idle() { glutPostRedisplay(); } +Simple3DObject::Simple3DObject() : vaoID_(0) {} + Simple3DObject::Simple3DObject(sl::Translation position, bool isStatic): isStatic_(isStatic) { vaoID_ = 0; drawingType_ = GL_TRIANGLES; @@ -429,6 +431,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, 
const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { print("ERROR: while compiling vertex shader"); } @@ -464,12 +470,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -553,7 +559,7 @@ void PointCloud::initialize(sl::Resolution res, CUstream strm_) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsNone)); - shader_ = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader_.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); diff --git a/depth sensing/depth sensing/cpp/src/main.cpp b/depth sensing/depth sensing/cpp/src/main.cpp index 723514ea..660723b0 100644 --- a/depth sensing/depth sensing/cpp/src/main.cpp +++ b/depth sensing/depth sensing/cpp/src/main.cpp @@ -79,8 +79,8 @@ int main(int argc, char **argv) { RuntimeParameters runParameters; // Setting the depth confidence parameters - runParameters.confidence_threshold = 100; - runParameters.texture_confidence_threshold = 100; + //runParameters.confidence_threshold = 98; + //runParameters.texture_confidence_threshold = 100; // Allocation of 4 channels of float on GPU Mat point_cloud(res, MAT_TYPE::F32_C4, sl::MEM::GPU); @@ -92,7 +92,7 @@ int main(int argc, char **argv) { // retrieve the current 3D coloread point cloud in GPU zed.retrieveMeasure(point_cloud, MEASURE::XYZRGBA, MEM::GPU, res); 
viewer.updatePointCloud(point_cloud); - + std::cout << "FPS: " << zed.getCurrentFPS() << std::endl; if(viewer.shouldSaveData()){ sl::Mat point_cloud_to_save; zed.retrieveMeasure(point_cloud_to_save, MEASURE::XYZRGBA); diff --git a/depth sensing/fusion/cpp/CMakeLists.txt b/depth sensing/fusion/cpp/CMakeLists.txt new file mode 100644 index 00000000..de5970c3 --- /dev/null +++ b/depth sensing/fusion/cpp/CMakeLists.txt @@ -0,0 +1,60 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +PROJECT(ZED_DepthFusion) + +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +SET(CMAKE_BUILD_TYPE "Release") + +option(LINK_SHARED_ZED "Link with the ZED SDK shared executable" ON) + +if (NOT LINK_SHARED_ZED AND MSVC) + message(FATAL_ERROR "LINK_SHARED_ZED OFF : ZED SDK static libraries not available on Windows") +endif() + +find_package(ZED 4 REQUIRED) +find_package(CUDA REQUIRED) +find_package(GLUT REQUIRED) +find_package(GLEW REQUIRED) +SET(OpenGL_GL_PREFERENCE GLVND) +find_package(OpenGL REQUIRED) +find_package(OpenCV) + +include_directories(${CUDA_INCLUDE_DIRS}) +include_directories(${ZED_INCLUDE_DIRS}) +include_directories(${GLEW_INCLUDE_DIRS}) +include_directories(${GLUT_INCLUDE_DIR}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) +include_directories(${OpenCV_INCLUDE_DIRS}) + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) +link_directories(${GLEW_LIBRARY_DIRS}) +link_directories(${GLUT_LIBRARY_DIRS}) +link_directories(${OpenGL_LIBRARY_DIRS}) +link_directories(${OpenCV_LIBRARY_DIRS}) + +IF(NOT WIN32) + SET(SPECIAL_OS_LIBS "pthread") + + IF (CMAKE_SYSTEM_PROCESSOR MATCHES aarch64) + add_definitions(-DJETSON_STYLE) + ENDIF() +ENDIF() + +FILE(GLOB_RECURSE SRC_FILES src/*.c*) +FILE(GLOB_RECURSE HDR_FILES include/*.h*) + +add_executable(${PROJECT_NAME} ${HDR_FILES} ${SRC_FILES}) + +if (LINK_SHARED_ZED) + SET(ZED_LIBS ${ZED_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_CUDART_LIBRARY}) +else() + SET(ZED_LIBS ${ZED_STATIC_LIBRARIES} 
${CUDA_CUDA_LIBRARY} ${CUDA_LIBRARY}) +endif() + +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${UTILS_LIB} ${SPECIAL_OS_LIBS} ${OpenCV_LIBRARIES} ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES} ${GLEW_LIBRARIES}) + +if(INSTALL_SAMPLES) + LIST(APPEND SAMPLE_LIST ${PROJECT_NAME}) + SET(SAMPLE_LIST "${SAMPLE_LIST}" PARENT_SCOPE) +endif() diff --git a/depth sensing/fusion/cpp/include/ClientPublisher.hpp b/depth sensing/fusion/cpp/include/ClientPublisher.hpp new file mode 100644 index 00000000..84c11554 --- /dev/null +++ b/depth sensing/fusion/cpp/include/ClientPublisher.hpp @@ -0,0 +1,56 @@ +#ifndef __SENDER_RUNNER_HDR__ +#define __SENDER_RUNNER_HDR__ + +#include +#include + +#include +#include + +struct Trigger{ + + void notifyZED(){ + + cv.notify_all(); + + if(running){ + bool wait_for_zed = true; + const int nb_zed = states.size(); + while(wait_for_zed){ + int count_r = 0; + for(auto &it:states) + count_r += it.second; + wait_for_zed = count_r != nb_zed; + sl::sleep_ms(1); + } + for(auto &it:states) + it.second = false; + } + } + + std::condition_variable cv; + bool running = true; + std::map states; +}; + +class ClientPublisher{ + +public: + ClientPublisher(); + ~ClientPublisher(); + + bool open(sl::InputType, Trigger* ref); + void start(); + void stop(); + void setStartSVOPosition(unsigned pos); + +private: + sl::Camera zed; + void work(); + std::thread runner; + int serial; + std::mutex mtx; + Trigger *p_trigger; +}; + +#endif // ! 
__SENDER_RUNNER_HDR__ diff --git a/depth sensing/fusion/cpp/include/GLViewer.hpp b/depth sensing/fusion/cpp/include/GLViewer.hpp new file mode 100644 index 00000000..c1a89d82 --- /dev/null +++ b/depth sensing/fusion/cpp/include/GLViewer.hpp @@ -0,0 +1,340 @@ +#ifndef __VIEWER_INCLUDE__ +#define __VIEWER_INCLUDE__ + +#include + +#include +#include + +#include +#include + +#include +#include + +#ifndef M_PI +#define M_PI 3.141592653f +#endif + +#define MOUSE_R_SENSITIVITY 0.03f +#define MOUSE_UZ_SENSITIVITY 0.75f +#define MOUSE_DZ_SENSITIVITY 1.25f +#define MOUSE_T_SENSITIVITY 0.5f +#define KEY_T_SENSITIVITY 0.1f + + +/////////////////////////////////////////////////////////////////////////////////////////////// + +class Shader { +public: + + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} + Shader(const GLchar* vs, const GLchar* fs); + ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); + GLuint getProgramId(); + + static const GLint ATTRIB_VERTICES_POS = 0; + static const GLint ATTRIB_COLOR_POS = 1; + static const GLint ATTRIB_NORMAL = 2; +private: + bool compile(GLuint &shaderId, GLenum type, const GLchar* src); + GLuint verterxId_; + GLuint fragmentId_; + GLuint programId_; +}; + +struct ShaderData { + Shader it; + GLuint MVP_Mat; +}; + +class Simple3DObject { +public: + + Simple3DObject(); + + ~Simple3DObject(); + + void addPoint(sl::float3 pt, sl::float3 clr); + void addLine(sl::float3 pt1, sl::float3 pt2, sl::float3 clr); + void addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float3 clr); + void pushToGPU(); + void clear(); + + void setStatic(bool _static) { + isStatic_ = _static; + } + + void setDrawingType(GLenum type); + + void draw(); + +private: + 
std::vector vertices_; + std::vector colors_; + std::vector indices_; + + bool isStatic_; + bool need_update; + GLenum drawingType_; + GLuint vaoID_; + GLuint vboID_[3]; +}; + +class CameraGL { +public: + + CameraGL() { + } + + enum DIRECTION { + UP, DOWN, LEFT, RIGHT, FORWARD, BACK + }; + CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical = sl::Translation(0, 1, 0)); // vertical = Eigen::Vector3f(0, 1, 0) + ~CameraGL(); + + void update(); + void setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar); + const sl::Transform& getViewProjectionMatrix() const; + + float getHorizontalFOV() const; + float getVerticalFOV() const; + + // Set an offset between the eye of the camera and its position + // Note: Useful to use the camera as a trackball camera with z>0 and x = 0, y = 0 + // Note: coordinates are in local space + void setOffsetFromPosition(const sl::Translation& offset); + const sl::Translation& getOffsetFromPosition() const; + + void setDirection(const sl::Translation& direction, const sl::Translation &vertical); + void translate(const sl::Translation& t); + void setPosition(const sl::Translation& p); + void rotate(const sl::Orientation& rot); + void rotate(const sl::Rotation& m); + void setRotation(const sl::Orientation& rot); + void setRotation(const sl::Rotation& m); + + const sl::Translation& getPosition() const; + const sl::Translation& getForward() const; + const sl::Translation& getRight() const; + const sl::Translation& getUp() const; + const sl::Translation& getVertical() const; + float getZNear() const; + float getZFar() const; + + static const sl::Translation ORIGINAL_FORWARD; + static const sl::Translation ORIGINAL_UP; + static const sl::Translation ORIGINAL_RIGHT; + + sl::Transform projection_; + bool usePerspective_; +private: + void updateVectors(); + void updateView(); + void updateVPMatrix(); + + sl::Translation offset_; + sl::Translation position_; + sl::Translation forward_; + 
sl::Translation up_; + sl::Translation right_; + sl::Translation vertical_; + + sl::Orientation rotation_; + + sl::Transform view_; + sl::Transform vpMatrix_; + float horizontalFieldOfView_; + float verticalFieldOfView_; + float znear_; + float zfar_; +}; + + +class PointCloud { +public: + PointCloud(); + ~PointCloud(); + + // Initialize Opengl and Cuda buffers + // Warning: must be called in the Opengl thread + void initialize(sl::Mat&, sl::float3 clr); + // Push a new point cloud + // Warning: can be called from any thread but the mutex "mutexData" must be locked + void pushNewPC(); + // Draw the point cloud + // Warning: must be called in the Opengl thread + void draw(const sl::Transform& vp, bool draw_clr); + // Close (disable update) + void close(); + +private: + sl::Mat refMat; + sl::float3 clr; + + Shader shader_; + GLuint shMVPMatrixLoc_; + GLuint shDrawColor; + GLuint shColor; + + size_t numBytes_; + float* xyzrgbaMappedBuf_; + GLuint bufferGLID_; + cudaGraphicsResource* bufferCudaID_; +}; + +class CameraViewer { +public: + CameraViewer(); + ~CameraViewer(); + + // Initialize Opengl and Cuda buffers + bool initialize(sl::Mat& image, sl::float3 clr); + // Push a new Image + Z buffer and transform into a point cloud + void pushNewImage(); + // Draw the Image + void draw(sl::Transform vpMatrix); + // Close (disable update) + void close(); + + Simple3DObject frustum; +private: + sl::Mat ref; + cudaArray_t ArrIm; + cudaGraphicsResource* cuda_gl_ressource;//cuda GL resource + Shader shader; + GLuint shMVPMatrixLocTex_; + + GLuint texture; + GLuint vaoID_; + GLuint vboID_[3]; + + std::vector faces; + std::vector vert; + std::vector uv; +}; + +struct ObjectClassName { + sl::float3 position; + std::string name_lineA; + std::string name_lineB; + sl::float3 color; +}; + +// This class manages input events, window and Opengl rendering pipeline + +class GLViewer { +public: + GLViewer(); + ~GLViewer(); + bool isAvailable(); + bool isPlaying() const { return play; } + + 
void init(int argc, char **argv); + + void updateCamera(int, sl::Mat &, sl::Mat &); + void updateCamera(sl::Mat &); + + void updateBodies(sl::Bodies &objs,std::map& singledata, sl::FusionMetrics& metrics); + + void setCameraPose(int, sl::Transform); + + int getKey() { + const int key = last_key; + last_key = -1; + return key; + } + + void exit(); +private: + void render(); + void update(); + void draw(); + void clearInputs(); + void setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar); + + void printText(); + + // Glut functions callbacks + static void drawCallback(); + static void mouseButtonCallback(int button, int state, int x, int y); + static void mouseMotionCallback(int x, int y); + static void reshapeCallback(int width, int height); + static void keyPressedCallback(unsigned char c, int x, int y); + static void keyReleasedCallback(unsigned char c, int x, int y); + static void idle(); + + void addSKeleton(sl::BodyData &, Simple3DObject &, sl::float3 clr_id, bool raw); + + sl::float3 getColor(int, bool); + + bool available; + bool drawBbox = false; + + enum MOUSE_BUTTON { + LEFT = 0, + MIDDLE = 1, + RIGHT = 2, + WHEEL_UP = 3, + WHEEL_DOWN = 4 + }; + + enum KEY_STATE { + UP = 'u', + DOWN = 'd', + FREE = 'f' + }; + + unsigned char lastPressedKey; + + bool mouseButton_[3]; + int mouseWheelPosition_; + int mouseCurrentPosition_[2]; + int mouseMotion_[2]; + int previousMouseMotion_[2]; + KEY_STATE keyStates_[256]; + + std::mutex mtx; + std::mutex mtx_clr; + + ShaderData shader; + + sl::Transform projection_; + sl::float3 bckgrnd_clr; + + std::map point_clouds; + std::map viewers; + std::map poses; + + std::map skeletons_raw; + std::map colors; + std::map colors_sk; + + std::vector fusionStats; + + CameraGL camera_; + Simple3DObject skeletons; + Simple3DObject floor_grid; + + bool show_pc = true; + bool show_raw = false; + bool draw_flat_color = false; + + std::uniform_int_distribution uint_dist360; + std::mt19937 rng; + + bool play = true; 
+ int last_key = -1; +}; + +#endif /* __VIEWER_INCLUDE__ */ diff --git a/depth sensing/fusion/cpp/include/utils.hpp b/depth sensing/fusion/cpp/include/utils.hpp new file mode 100644 index 00000000..bb3e00c4 --- /dev/null +++ b/depth sensing/fusion/cpp/include/utils.hpp @@ -0,0 +1,57 @@ +#pragma once + +#include + +/** +* @brief Compute the start frame of each SVO for playback to be synced +* +* @param svo_files Map camera index to SVO file path +* @return Map camera index to starting SVO frame for synced playback +*/ +std::map syncDATA(std::map svo_files) { + std::map output; // map of camera index and frame index of the starting point for each + + // Open all SVO + std::map> p_zeds; + + for (auto &it : svo_files) { + auto p_zed = std::make_shared(); + + sl::InitParameters init_param; + init_param.depth_mode = sl::DEPTH_MODE::NONE; + init_param.camera_disable_self_calib = true; + init_param.input.setFromSVOFile(it.second.c_str()); + + auto error = p_zed->open(init_param); + if (error == sl::ERROR_CODE::SUCCESS) + p_zeds.insert(std::make_pair(it.first, p_zed)); + else { + std::cerr << "Could not open file " << it.second.c_str() << ": " << sl::toString(error) << ". 
Skipping" << std::endl; + } + } + + //p_zeds.begin()->second->setSVOPosition(14000); + // Compute the starting point, we have to take the latest one + sl::Timestamp start_ts = 0; + for (auto &it : p_zeds) { + it.second->grab(); + auto ts = it.second->getTimestamp(sl::TIME_REFERENCE::IMAGE); + + if (ts > start_ts) + start_ts = ts; + } + + std::cout << "Found SVOs common starting time: " << start_ts << std::endl; + + // The starting point is now known, let's find the frame idx for all corresponding + for (auto &it : p_zeds) { + auto frame_position_at_ts = it.second->getSVOPositionAtTimestamp(start_ts); + + if (frame_position_at_ts != -1) + output.insert(std::make_pair(it.first, frame_position_at_ts)); + } + + for (auto &it : p_zeds) it.second->close(); + + return output; +} diff --git a/depth sensing/fusion/cpp/src/ClientPublisher.cpp b/depth sensing/fusion/cpp/src/ClientPublisher.cpp new file mode 100644 index 00000000..a526eac3 --- /dev/null +++ b/depth sensing/fusion/cpp/src/ClientPublisher.cpp @@ -0,0 +1,96 @@ +#include "ClientPublisher.hpp" + +ClientPublisher::ClientPublisher() { } + +ClientPublisher::~ClientPublisher() +{ + zed.close(); +} + +bool ClientPublisher::open(sl::InputType input, Trigger* ref) { + + p_trigger = ref; + + sl::InitParameters init_parameters; + init_parameters.depth_mode = sl::DEPTH_MODE::NEURAL; + init_parameters.input = input; + init_parameters.coordinate_units = sl::UNIT::METER; + init_parameters.depth_stabilization = 10; + auto state = zed.open(init_parameters); + if (state != sl::ERROR_CODE::SUCCESS) + { + std::cout << "Error: " << state << std::endl; + return false; + } + + serial = zed.getCameraInformation().serial_number; + p_trigger->states[serial] = false; + + // in most cases in body tracking setup, the cameras are static + sl::PositionalTrackingParameters positional_tracking_parameters; + + + std::string config_file_path(input.getConfiguration().c_str()); + size_t found = config_file_path.find_last_of("/\\"); + std::string 
config_dir = config_file_path.substr(0, found); + + auto roi_path = config_dir + "/Mask_"+std::to_string(serial)+"_Left.png"; + sl::Mat roi; + roi.read(roi_path.c_str()); + if(roi.isInit()) + zed.setRegionOfInterest(roi); + + state = zed.enablePositionalTracking(positional_tracking_parameters); + if (state != sl::ERROR_CODE::SUCCESS) + { + std::cout << "Error: " << state << std::endl; + return false; + } + + return true; +} + +void ClientPublisher::start() +{ + if (zed.isOpened()) { + // the camera should stream its data so the fusion can subscibe to it to gather the detected body and others metadata needed for the process. + zed.startPublishing(); + // the thread can start to process the camera grab in background + runner = std::thread(&ClientPublisher::work, this); + } +} + +void ClientPublisher::stop() +{ + if (runner.joinable()) + runner.join(); + zed.close(); +} + +void ClientPublisher::work() +{ + sl::Bodies bodies; + sl::BodyTrackingRuntimeParameters body_runtime_parameters; + body_runtime_parameters.detection_confidence_threshold = 40; + + sl::RuntimeParameters rt; + rt.confidence_threshold = 50; + + // in this sample we use a dummy thread to process the ZED data. + // you can replace it by your own application and use the ZED like you use to, retrieve its images, depth, sensors data and so on. + // as long as you call the grab function and the retrieveBodies (which runs the detection) the camera will be able to seamlessly transmit the data to the fusion module. 
+ while (p_trigger->running) { + std::unique_lock lk(mtx); + p_trigger->cv.wait(lk); + if(p_trigger->running){ + if(zed.grab(rt) == sl::ERROR_CODE::SUCCESS){ + // just be sure to run the bodies detection + } + } + p_trigger->states[serial] = true; + } +} + +void ClientPublisher::setStartSVOPosition(unsigned pos) { + zed.setSVOPosition(pos); +} diff --git a/depth sensing/fusion/cpp/src/GLViewer.cpp b/depth sensing/fusion/cpp/src/GLViewer.cpp new file mode 100644 index 00000000..9c207d93 --- /dev/null +++ b/depth sensing/fusion/cpp/src/GLViewer.cpp @@ -0,0 +1,1144 @@ +#include "GLViewer.hpp" + +const GLchar* VERTEX_SHADER = + "#version 330 core\n" + "layout(location = 0) in vec3 in_Vertex;\n" + "layout(location = 1) in vec3 in_Color;\n" + "uniform mat4 u_mvpMatrix;\n" + "out vec3 b_color;\n" + "void main() {\n" + " b_color = in_Color.bgr;\n" + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" + "}"; + +const GLchar* FRAGMENT_SHADER = + "#version 330 core\n" + "in vec3 b_color;\n" + "layout(location = 0) out vec4 color;\n" + "void main() {\n" + " color = vec4(b_color, 0.95);\n" + "}"; + + +const GLchar* POINTCLOUD_VERTEX_SHADER = + "#version 330 core\n" + "layout(location = 0) in vec4 in_VertexRGBA;\n" + "out vec4 b_color;\n" + "uniform mat4 u_mvpMatrix;\n" + "uniform vec3 u_color;\n" + "uniform bool u_drawFlat;\n" + "void main() {\n" + " if(u_drawFlat)\n" + " b_color = vec4(u_color.bgr, .85f);\n" + "else{" + // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) + " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n" + " vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16);\n" + " b_color = vec4(clr_int.b / 255.0f, clr_int.g / 255.0f, clr_int.r / 255.0f, .85f);\n" + " }" + " gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1);\n" + "}"; + +const GLchar* POINTCLOUD_FRAGMENT_SHADER = + "#version 330 core\n" + "in vec4 b_color;\n" + 
"layout(location = 0) out vec4 out_Color;\n" + "void main() {\n" + " out_Color = b_color;\n" + "}"; + +const GLchar* VERTEX_SHADER_TEXTURE = + "#version 330 core\n" + "layout(location = 0) in vec3 in_Vertex;\n" + "layout(location = 1) in vec2 in_UVs;\n" + "uniform mat4 u_mvpMatrix;\n" + "out vec2 UV;\n" + "void main() {\n" + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" + " UV = in_UVs;\n" + "}\n"; + +const GLchar* FRAGMENT_SHADER_TEXTURE = + "#version 330 core\n" + "in vec2 UV;\n" + "uniform sampler2D texture_sampler;\n" + "void main() {\n" + " gl_FragColor = vec4(texture(texture_sampler, UV).bgr, 1.0);\n" + "}\n"; + + +GLViewer* currentInstance_ = nullptr; + +GLViewer::GLViewer() : available(false) { + currentInstance_ = this; + mouseButton_[0] = mouseButton_[1] = mouseButton_[2] = false; + clearInputs(); + previousMouseMotion_[0] = previousMouseMotion_[1] = 0; +} + +GLViewer::~GLViewer() { +} + +void GLViewer::exit() { + if (currentInstance_) { + available = false; + for (auto& pc : point_clouds) + { + pc.second.close(); + } + } +} + +bool GLViewer::isAvailable() { + if (currentInstance_ && available) { + glutMainLoopEvent(); + } + return available; +} + +void CloseFunc(void) { + if (currentInstance_) currentInstance_->exit(); +} + +void addVert(Simple3DObject &obj, float i_f, float limit, float height, sl::float4 &clr) { + auto p1 = sl::float3(i_f, height, -limit); + auto p2 = sl::float3(i_f, height, limit); + auto p3 = sl::float3(-limit, height, i_f); + auto p4 = sl::float3(limit, height, i_f); + + obj.addLine(p1, p2, clr); + obj.addLine(p3, p4, clr); +} + +void GLViewer::init(int argc, char **argv) { + + glutInit(&argc, argv); + int wnd_w = glutGet(GLUT_SCREEN_WIDTH); + int wnd_h = glutGet(GLUT_SCREEN_HEIGHT); + + glutInitWindowSize(1200, 700); + glutInitWindowPosition(wnd_w * 0.05, wnd_h * 0.05); + glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH); + + glutCreateWindow("ZED| 3D View"); + + GLenum err = glewInit(); + if (GLEW_OK != err) + 
std::cout << "ERROR: glewInit failed: " << glewGetErrorString(err) << "\n"; + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); + + glEnable(GL_DEPTH_TEST); + glEnable(GL_TEXTURE_2D); + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + +#ifndef JETSON_STYLE + glEnable(GL_POINT_SMOOTH); +#endif + + // Compile and create the shader for 3D objects + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); + shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); + + // Create the camera + camera_ = CameraGL(sl::Translation(0, 2, 10), sl::Translation(0, 0, -1)); + + // Create the skeletons objects + skeletons.setDrawingType(GL_LINES); + floor_grid.setDrawingType(GL_LINES); + + // Set background color (black) + bckgrnd_clr = sl::float4(0.2f, 0.19f, 0.2f, 1.0f); + + + float limit = 20.0f; + sl::float4 clr_grid(80, 80, 80, 255); + clr_grid /= 255.f; + + float grid_height = -0; + for (int i = (int) (-limit); i <= (int) (limit); i++) + addVert(floor_grid, i, limit, grid_height, clr_grid); + + floor_grid.pushToGPU(); + + std::random_device dev; + rng = std::mt19937(dev()); + rng.seed(42); + uint_dist360 = std::uniform_int_distribution(0, 360); + + // Map glut function on this class methods + glutDisplayFunc(GLViewer::drawCallback); + glutMouseFunc(GLViewer::mouseButtonCallback); + glutMotionFunc(GLViewer::mouseMotionCallback); + glutReshapeFunc(GLViewer::reshapeCallback); + glutKeyboardFunc(GLViewer::keyPressedCallback); + glutKeyboardUpFunc(GLViewer::keyReleasedCallback); + glutCloseFunc(CloseFunc); + + available = true; +} + +sl::float3 newColor(float hh) { + float s = 0.75f; + float v = 0.9f; + + sl::float3 clr; + int i = (int)hh; + float ff = hh - i; + float p = v * (1.f - s); + float q = v * (1.f - (s * ff)); + float t = v * (1.f - (s * (1.f - ff))); + switch (i) { + case 0: + clr.r = v; + clr.g = t; + clr.b = p; + break; + case 1: + clr.r = q; + clr.g = v; + clr.b = p; + break; + case 2: + clr.r = p; + 
clr.g = v; + clr.b = t; + break; + + case 3: + clr.r = p; + clr.g = q; + clr.b = v; + break; + case 4: + clr.r = t; + clr.g = p; + clr.b = v; + break; + case 5: + default: + clr.r = v; + clr.g = p; + clr.b = q; + break; + } + return clr; +} + +sl::float3 GLViewer::getColor(int id, bool for_skeleton){ + const std::lock_guard lock(mtx_clr); + if(for_skeleton){ + if (colors_sk.find(id) == colors_sk.end()) { + float hh = uint_dist360(rng) / 60.f; + colors_sk[id] = newColor(hh); + } + return colors_sk[id]; + }else{ + if (colors.find(id) == colors.end()) { + int h_ = uint_dist360(rng); + float hh = h_ / 60.f; + colors[id] = newColor(hh); + } + return colors[id]; + } +} + +void GLViewer::updateCamera(int id, sl::Mat &view, sl::Mat &pc){ + const std::lock_guard lock(mtx); + auto clr = getColor(id, false); + if(view.isInit() && viewers.find(id) == viewers.end()) + viewers[id].initialize(view, clr); + + if(pc.isInit() && point_clouds.find(id) == point_clouds.end()) + point_clouds[id].initialize(pc, clr); + +} + +void GLViewer::updateCamera(sl::Mat &pc){ + const std::lock_guard lock(mtx); + int id = 0; + auto clr = getColor(id, false); + + // we need to release old pc and initialize new one because fused point cloud don't have the same number of points for each process + // I used close but it crashed in draw. 
Not yet investigated + point_clouds[id].initialize(pc, clr); +} + + +void GLViewer::setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar) { + // Just slightly up the ZED camera FOV to make a small black border + float fov_y = (params.v_fov + 0.5f) * M_PI / 180.f; + float fov_x = (params.h_fov + 0.5f) * M_PI / 180.f; + + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); + projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); + projection_(2, 2) = -(zfar + znear) / (zfar - znear); + projection_(3, 2) = -1; + projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); + projection_(3, 3) = 0; + + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); //Horizontal FoV. + projection_(0, 1) = 0; + projection_(0, 2) = 2.0f * ((params.image_size.width - 1.0f * params.cx) / params.image_size.width) - 1.0f; //Horizontal offset. + projection_(0, 3) = 0; + + projection_(1, 0) = 0; + projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); //Vertical FoV. + projection_(1, 2) = -(2.0f * ((params.image_size.height - 1.0f * params.cy) / params.image_size.height) - 1.0f); //Vertical offset. + projection_(1, 3) = 0; + + projection_(2, 0) = 0; + projection_(2, 1) = 0; + projection_(2, 2) = -(zfar + znear) / (zfar - znear); //Near and far planes. + projection_(2, 3) = -(2.0f * zfar * znear) / (zfar - znear); //Near and far planes. 
+ + projection_(3, 0) = 0; + projection_(3, 1) = 0; + projection_(3, 2) = -1; + projection_(3, 3) = 0.0f; +} + +void GLViewer::render() { + if (available) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + glClearColor(bckgrnd_clr.r, bckgrnd_clr.g, bckgrnd_clr.b, 1.f); + update(); + draw(); + printText(); + glutSwapBuffers(); + glutPostRedisplay(); + } +} + +void GLViewer::setCameraPose(int id, sl::Transform pose) { + const std::lock_guard lock(mtx); + getColor(id, false); + poses[id] = pose; +} + +inline bool renderBody(const sl::BodyData& i, const bool isTrackingON) { + if (isTrackingON) + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK); + else + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK || i.tracking_state == sl::OBJECT_TRACKING_STATE::OFF); +} + +template +void createSKPrimitive(sl::BodyData& body, const std::vector>& map, Simple3DObject& skp, sl::float3 clr_id, bool raw) { + const float cylinder_thickness = raw ? 0.01f : 0.025f; + + for (auto& limb : map) { + sl::float3 kp_1 = body.keypoint[getIdx(limb.first)]; + sl::float3 kp_2 = body.keypoint[getIdx(limb.second)]; + if (std::isfinite(kp_1.norm()) && std::isfinite(kp_2.norm())) + skp.addLine(kp_1, kp_2, clr_id); + } +} + +void GLViewer::addSKeleton(sl::BodyData& obj, Simple3DObject& simpleObj, sl::float3 clr_id, bool raw) { + switch (obj.keypoint.size()) { + case 18: + createSKPrimitive(obj, sl::BODY_18_BONES, simpleObj, clr_id, raw); + break; + case 34: + createSKPrimitive(obj, sl::BODY_34_BONES, simpleObj, clr_id, raw); + break; + case 38: + createSKPrimitive(obj, sl::BODY_38_BONES, simpleObj, clr_id, raw); + break; + } +} + +void GLViewer::updateBodies(sl::Bodies &bodies, std::map& singledata, sl::FusionMetrics& metrics) { + const std::lock_guard lock(mtx); + + if (bodies.is_new) { + skeletons.clear(); + for(auto &it:bodies.body_list) { + auto clr = getColor(it.id, true); + if (renderBody(it, bodies.is_tracked)) + addSKeleton(it, skeletons, clr, false); + } + } + + 
fusionStats.clear(); + int id = 0; + + ObjectClassName obj_str; + obj_str.name_lineA = "Publishers :" + std::to_string(metrics.mean_camera_fused); + obj_str.name_lineB = "Sync :" + std::to_string(metrics.mean_stdev_between_camera * 1000.f); + obj_str.color = sl::float4(0.9,0.9,0.9,1); + obj_str.position = sl::float3(10, (id * 30), 0); + fusionStats.push_back(obj_str); + + for (auto &it : singledata) { + auto clr = getColor(it.first.sn, false); + id++; + if (it.second.is_new) + { + auto& sk_r = skeletons_raw[it.first.sn]; + sk_r.clear(); + sk_r.setDrawingType(GL_LINES); + + for (auto& sk : it.second.body_list) { + if(renderBody(sk, it.second.is_tracked)) + addSKeleton(sk, sk_r, clr, true); + } + } + + ObjectClassName obj_str; + obj_str.name_lineA = "CAM: " + std::to_string(it.first.sn) + " FPS: " + std::to_string(metrics.camera_individual_stats[it.first].received_fps); + obj_str.name_lineB = "Ratio Detection :" + std::to_string(metrics.camera_individual_stats[it.first].ratio_detection) + " Delta " + std::to_string(metrics.camera_individual_stats[it.first].delta_ts * 1000.f); + obj_str.color = clr; + obj_str.position = sl::float3(10, (id * 30), 0); + fusionStats.push_back(obj_str); + } +} + +void GLViewer::update() { + + if (keyStates_['q'] == KEY_STATE::UP || keyStates_['Q'] == KEY_STATE::UP || keyStates_[27] == KEY_STATE::UP) { + currentInstance_->exit(); + return; + } + + if (keyStates_['r'] == KEY_STATE::UP) + currentInstance_->show_raw = !currentInstance_->show_raw; + + if (keyStates_['c'] == KEY_STATE::UP) + currentInstance_->draw_flat_color = !currentInstance_->draw_flat_color; + + if (keyStates_['s'] == KEY_STATE::UP) + currentInstance_->show_pc = !currentInstance_->show_pc; + + if (keyStates_['p'] == KEY_STATE::UP || keyStates_['P'] == KEY_STATE::UP || keyStates_[32] == KEY_STATE::UP) + play = !play; + + // Rotate camera with mouse + if (mouseButton_[MOUSE_BUTTON::LEFT]) { + camera_.rotate(sl::Rotation((float) mouseMotion_[1] * MOUSE_R_SENSITIVITY, 
camera_.getRight())); + camera_.rotate(sl::Rotation((float) mouseMotion_[0] * MOUSE_R_SENSITIVITY, camera_.getVertical() * -1.f)); + } + + // Translate camera with mouse + if (mouseButton_[MOUSE_BUTTON::RIGHT]) { + camera_.translate(camera_.getUp() * (float) mouseMotion_[1] * MOUSE_T_SENSITIVITY); + camera_.translate(camera_.getRight() * (float) mouseMotion_[0] * MOUSE_T_SENSITIVITY); + } + + // Zoom in with mouse wheel + if (mouseWheelPosition_ != 0) { + //float distance = sl::Translation(camera_.getOffsetFromPosition()).norm(); + if (mouseWheelPosition_ > 0 /* && distance > camera_.getZNear()*/) { // zoom + camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f * -1); + } else if (/*distance < camera_.getZFar()*/ mouseWheelPosition_ < 0) {// unzoom + //camera_.setOffsetFromPosition(camera_.getOffsetFromPosition() * MOUSE_DZ_SENSITIVITY); + camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f); + } + } + + camera_.update(); + const std::lock_guard lock(mtx); + // Update point cloud buffers + skeletons.pushToGPU(); + for(auto &it: skeletons_raw) + it.second.pushToGPU(); + + // Update point cloud buffers + for(auto &it: point_clouds) + it.second.pushNewPC(); + + for(auto &it: viewers) + it.second.pushNewImage(); + clearInputs(); +} + + +void GLViewer::draw() { + + glPolygonMode(GL_FRONT, GL_LINE); + glPolygonMode(GL_BACK, GL_LINE); + glLineWidth(2.f); + glPointSize(1.f); + + sl::Transform vpMatrix = camera_.getViewProjectionMatrix(); + glUseProgram(shader.it.getProgramId()); + glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, vpMatrix.m); + + floor_grid.draw(); + skeletons.draw(); + + if (show_raw) + for (auto& it : skeletons_raw) + it.second.draw(); + + for (auto& it : viewers) { + sl::Transform pose_ = vpMatrix * poses[it.first]; + glUniformMatrix4fv(shader.MVP_Mat, 1, GL_FALSE, sl::Transform::transpose(pose_).m); + it.second.frustum.draw(); + } + + glUseProgram(0); + + for (auto& it : poses) { + sl::Transform vpMatrix_world = vpMatrix * 
it.second; + + if(show_pc) + if(point_clouds.find(it.first) != point_clouds.end()) + point_clouds[it.first].draw(vpMatrix_world, draw_flat_color); + + if (viewers.find(it.first) != viewers.end()) + viewers[it.first].draw(vpMatrix_world); + } +} + +sl::float2 compute3Dprojection(sl::float3 &pt, const sl::Transform &cam, sl::Resolution wnd_size) { + sl::float4 pt4d(pt.x, pt.y, pt.z, 1.); + auto proj3D_cam = pt4d * cam; + sl::float2 proj2D; + proj2D.x = ((proj3D_cam.x / pt4d.w) * wnd_size.width) / (2.f * proj3D_cam.w) + wnd_size.width / 2.f; + proj2D.y = ((proj3D_cam.y / pt4d.w) * wnd_size.height) / (2.f * proj3D_cam.w) + wnd_size.height / 2.f; + return proj2D; +} + +void GLViewer::printText() { + + sl::Resolution wnd_size(glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT)); + for (auto &it : fusionStats) { +#if 0 + auto pt2d = compute3Dprojection(it.position, projection_, wnd_size); +#else + sl::float2 pt2d(it.position.x, it.position.y); +#endif + glColor4f(it.color.b, it.color.g, it.color.r, .85f); + const auto *string = it.name_lineA.c_str(); + glWindowPos2f(pt2d.x, pt2d.y + 15); + int len = (int) strlen(string); + for (int i = 0; i < len; i++) + glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); + + string = it.name_lineB.c_str(); + glWindowPos2f(pt2d.x, pt2d.y); + len = (int) strlen(string); + for (int i = 0; i < len; i++) + glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); + } +} + +void GLViewer::clearInputs() { + mouseMotion_[0] = mouseMotion_[1] = 0; + mouseWheelPosition_ = 0; + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; + if (keyStates_[i] != KEY_STATE::DOWN) + keyStates_[i] = KEY_STATE::FREE; + } +} + +void GLViewer::drawCallback() { + currentInstance_->render(); +} + +void GLViewer::mouseButtonCallback(int button, int state, int x, int y) { + if (button < 5) { + if (button < 3) { + currentInstance_->mouseButton_[button] = state == GLUT_DOWN; + } else { + 
currentInstance_->mouseWheelPosition_ += button == MOUSE_BUTTON::WHEEL_UP ? 1 : -1; + } + currentInstance_->mouseCurrentPosition_[0] = x; + currentInstance_->mouseCurrentPosition_[1] = y; + currentInstance_->previousMouseMotion_[0] = x; + currentInstance_->previousMouseMotion_[1] = y; + } +} + +void GLViewer::mouseMotionCallback(int x, int y) { + currentInstance_->mouseMotion_[0] = x - currentInstance_->previousMouseMotion_[0]; + currentInstance_->mouseMotion_[1] = y - currentInstance_->previousMouseMotion_[1]; + currentInstance_->previousMouseMotion_[0] = x; + currentInstance_->previousMouseMotion_[1] = y; +} + +void GLViewer::reshapeCallback(int width, int height) { + glViewport(0, 0, width, height); + float hfov = (180.0f / M_PI) * (2.0f * atan(width / (2.0f * 500))); + float vfov = (180.0f / M_PI) * (2.0f * atan(height / (2.0f * 500))); + currentInstance_->camera_.setProjection(hfov, vfov, currentInstance_->camera_.getZNear(), currentInstance_->camera_.getZFar()); +} + +void GLViewer::keyPressedCallback(unsigned char c, int x, int y) { + currentInstance_->keyStates_[c] = KEY_STATE::DOWN; + currentInstance_->lastPressedKey = c; + //glutPostRedisplay(); +} + +void GLViewer::keyReleasedCallback(unsigned char c, int x, int y) { + currentInstance_->keyStates_[c] = KEY_STATE::UP; +} + +void GLViewer::idle() { + glutPostRedisplay(); +} + +Simple3DObject::Simple3DObject() { + vaoID_ = 0; + drawingType_ = GL_TRIANGLES; + isStatic_ = need_update = false; +} + +Simple3DObject::~Simple3DObject() { + clear(); + if (vaoID_ != 0) { + glDeleteBuffers(3, vboID_); + glDeleteVertexArrays(1, &vaoID_); + } +} + +void Simple3DObject::addPoint(sl::float3 pt, sl::float3 clr){ + vertices_.push_back(pt); + colors_.push_back(clr); + indices_.push_back((int) indices_.size()); + need_update = true; +} + +void Simple3DObject::addLine(sl::float3 pt1, sl::float3 pt2, sl::float3 clr){ + addPoint(pt1, clr); + addPoint(pt2, clr); +} + +void Simple3DObject::addFace(sl::float3 p1, sl::float3 p2, 
sl::float3 p3, sl::float3 clr){ + addPoint(p1, clr); + addPoint(p2, clr); + addPoint(p3, clr); +} + +void Simple3DObject::pushToGPU() { + if(!need_update) return; + + if (!isStatic_ || vaoID_ == 0) { + if (vaoID_ == 0) { + glGenVertexArrays(1, &vaoID_); + glGenBuffers(3, vboID_); + } + glBindVertexArray(vaoID_); + glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); + glBufferData(GL_ARRAY_BUFFER, vertices_.size() * sizeof(sl::float3), &vertices_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); + glBufferData(GL_ARRAY_BUFFER, colors_.size() * sizeof(sl::float3), &colors_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_.size() * sizeof (unsigned int), &indices_[0], isStatic_ ? 
GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + + glBindVertexArray(0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + glBindBuffer(GL_ARRAY_BUFFER, 0); + need_update = false; + } +} + +void Simple3DObject::clear() { + vertices_.clear(); + colors_.clear(); + indices_.clear(); +} + +void Simple3DObject::setDrawingType(GLenum type) { + drawingType_ = type; +} + +void Simple3DObject::draw() { + glBindVertexArray(vaoID_); + glDrawElements(drawingType_, (GLsizei) indices_.size(), GL_UNSIGNED_INT, 0); + glBindVertexArray(0); +} + +Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { + if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { + std::cout << "ERROR: while compiling vertex shader" << std::endl; + } + if (!compile(fragmentId_, GL_FRAGMENT_SHADER, fs)) { + std::cout << "ERROR: while compiling fragment shader" << std::endl; + } + + programId_ = glCreateProgram(); + + glAttachShader(programId_, verterxId_); + glAttachShader(programId_, fragmentId_); + + glBindAttribLocation(programId_, ATTRIB_VERTICES_POS, "in_vertex"); + glBindAttribLocation(programId_, ATTRIB_COLOR_POS, "in_texCoord"); + + glLinkProgram(programId_); + + GLint errorlk(0); + glGetProgramiv(programId_, GL_LINK_STATUS, &errorlk); + if (errorlk != GL_TRUE) { + std::cout << "ERROR: while linking Shader :" << std::endl; + GLint errorSize(0); + glGetProgramiv(programId_, GL_INFO_LOG_LENGTH, &errorSize); + + char *error = new char[errorSize + 1]; + glGetShaderInfoLog(programId_, errorSize, &errorSize, error); + error[errorSize] = '\0'; + std::cout << error << std::endl; + + delete[] error; + glDeleteProgram(programId_); + } +} + +Shader::~Shader() { + if (verterxId_ != 0 && glIsShader(verterxId_)) + glDeleteShader(verterxId_); + if (fragmentId_ != 0 && glIsShader(fragmentId_)) + glDeleteShader(fragmentId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); +} + +GLuint Shader::getProgramId() { + return programId_; 
+} + +bool Shader::compile(GLuint &shaderId, GLenum type, const GLchar* src) { + shaderId = glCreateShader(type); + if (shaderId == 0) { + std::cout << "ERROR: shader type (" << type << ") does not exist" << std::endl; + return false; + } + glShaderSource(shaderId, 1, (const char**) &src, 0); + glCompileShader(shaderId); + + GLint errorCp(0); + glGetShaderiv(shaderId, GL_COMPILE_STATUS, &errorCp); + if (errorCp != GL_TRUE) { + std::cout << "ERROR: while compiling Shader :" << std::endl; + GLint errorSize(0); + glGetShaderiv(shaderId, GL_INFO_LOG_LENGTH, &errorSize); + + char *error = new char[errorSize + 1]; + glGetShaderInfoLog(shaderId, errorSize, &errorSize, error); + error[errorSize] = '\0'; + std::cout << error << std::endl; + + delete[] error; + glDeleteShader(shaderId); + return false; + } + return true; +} + +const GLchar* IMAGE_FRAGMENT_SHADER = + "#version 330 core\n" + " in vec2 UV;\n" + " out vec4 color;\n" + " uniform sampler2D texImage;\n" + " void main() {\n" + " vec2 scaler =vec2(UV.x,1.f - UV.y);\n" + " vec3 rgbcolor = vec3(texture(texImage, scaler).zyx);\n" + " vec3 color_rgb = pow(rgbcolor, vec3(1.65f));\n" + " color = vec4(color_rgb,1);\n" + "}"; + +const GLchar* IMAGE_VERTEX_SHADER = + "#version 330\n" + "layout(location = 0) in vec3 vert;\n" + "out vec2 UV;" + "void main() {\n" + " UV = (vert.xy+vec2(1,1))* .5f;\n" + " gl_Position = vec4(vert, 1);\n" + "}\n"; + + +PointCloud::PointCloud(): numBytes_(0), xyzrgbaMappedBuf_(nullptr) { + +} + +PointCloud::~PointCloud() { + close(); +} + +void PointCloud::close() { + if (refMat.isInit()) { + auto err = cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + glDeleteBuffers(1, &bufferGLID_); + refMat.free(); + } +} + +void PointCloud::initialize(sl::Mat& ref, sl::float3 clr_) +{ + refMat = ref; 
+ clr = clr_; +} + +void PointCloud::pushNewPC() { + if (refMat.isInit()) { + + int sizebytes = refMat.getResolution().area() * sizeof(sl::float4); + if (numBytes_ != sizebytes) { + + if (numBytes_ == 0) { + glGenBuffers(1, &bufferGLID_); + + shader_.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); + shColor = glGetUniformLocation(shader_.getProgramId(), "u_color"); + shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat"); + } + else { + cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + cudaGraphicsUnregisterResource(bufferCudaID_); + } + + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glBufferData(GL_ARRAY_BUFFER, sizebytes, 0, GL_STATIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + cudaError_t err = cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsNone); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsMapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsResourceGetMappedPointer((void**)&xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + } + + cudaMemcpy(xyzrgbaMappedBuf_, refMat.getPtr(sl::MEM::CPU), numBytes_, cudaMemcpyHostToDevice); + } +} + +void PointCloud::draw(const sl::Transform& vp, bool draw_flat) { + if (refMat.isInit()) { +#ifndef JETSON_STYLE + glDisable(GL_BLEND); +#endif + + glUseProgram(shader_.getProgramId()); + glUniformMatrix4fv(shMVPMatrixLoc_, 1, GL_TRUE, vp.m); + + glUniform3fv(shColor, 1, clr.v); + glUniform1i(shDrawColor, draw_flat); + + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 4, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glDrawArrays(GL_POINTS, 0, refMat.getResolution().area()); + 
glBindBuffer(GL_ARRAY_BUFFER, 0); + glUseProgram(0); + +#ifndef JETSON_STYLE + glEnable(GL_BLEND); +#endif + } +} + + +CameraViewer::CameraViewer() { + +} + +CameraViewer::~CameraViewer() { + close(); +} + +void CameraViewer::close() { + if (ref.isInit()) { + + auto err = cudaGraphicsUnmapResources(1, &cuda_gl_ressource, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(cuda_gl_ressource); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + glDeleteTextures(1, &texture); + glDeleteBuffers(3, vboID_); + glDeleteVertexArrays(1, &vaoID_); + ref.free(); + } +} + +bool CameraViewer::initialize(sl::Mat &im, sl::float3 clr) { + + // Create 3D axis + float fx,fy,cx,cy; + fx = fy = 1400; + float width, height; + width = 2208; + height = 1242; + cx = width /2; + cy = height /2; + + float Z_ = .5f; + sl::float3 toOGL(1,-1,-1); + sl::float3 cam_0(0, 0, 0); + sl::float3 cam_1, cam_2, cam_3, cam_4; + + float fx_ = 1.f / fx; + float fy_ = 1.f / fy; + + cam_1.z = Z_; + cam_1.x = (0 - cx) * Z_ *fx_; + cam_1.y = (0 - cy) * Z_ *fy_ ; + cam_1 *= toOGL; + + cam_2.z = Z_; + cam_2.x = (width - cx) * Z_ *fx_; + cam_2.y = (0 - cy) * Z_ *fy_; + cam_2 *= toOGL; + + cam_3.z = Z_; + cam_3.x = (width - cx) * Z_ *fx_; + cam_3.y = (height - cy) * Z_ *fy_; + cam_3 *= toOGL; + + cam_4.z = Z_; + cam_4.x = (0 - cx) * Z_ *fx_; + cam_4.y = (height - cy) * Z_ *fy_; + cam_4 *= toOGL; + + + frustum.addFace(cam_0, cam_1, cam_2, clr); + frustum.addFace(cam_0, cam_2, cam_3, clr); + frustum.addFace(cam_0, cam_3, cam_4, clr); + frustum.addFace(cam_0, cam_4, cam_1, clr); + frustum.setDrawingType(GL_TRIANGLES); + frustum.pushToGPU(); + + vert.push_back(cam_1); + vert.push_back(cam_2); + vert.push_back(cam_3); + vert.push_back(cam_4); + + uv.push_back(sl::float2(0,0)); + uv.push_back(sl::float2(1,0)); + uv.push_back(sl::float2(1,1)); + uv.push_back(sl::float2(0,1)); + + faces.push_back(sl::uint3(0,1,2)); + 
faces.push_back(sl::uint3(0,2,3)); + + ref = im; + shader.set(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); + shMVPMatrixLocTex_ = glGetUniformLocation(shader.getProgramId(), "u_mvpMatrix"); + + glGenVertexArrays(1, &vaoID_); + glGenBuffers(3, vboID_); + + glBindVertexArray(vaoID_); + glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); + glBufferData(GL_ARRAY_BUFFER, vert.size() * sizeof(sl::float3), &vert[0], GL_STATIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); + glBufferData(GL_ARRAY_BUFFER, uv.size() * sizeof(sl::float2), &uv[0], GL_STATIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, faces.size() * sizeof(sl::uint3), &faces[0], GL_STATIC_DRAW); + + glBindVertexArray(0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + auto res = ref.getResolution(); + glGenTextures(1, &texture); + glBindTexture(GL_TEXTURE_2D, texture); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, res.width, res.height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL); + glBindTexture(GL_TEXTURE_2D, 0); + cudaError_t err = cudaGraphicsGLRegisterImage(&cuda_gl_ressource, texture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard); + if (err) std::cout << "err alloc " << err << " " << cudaGetErrorString(err) << "\n"; + glDisable(GL_TEXTURE_2D); + + err = cudaGraphicsMapResources(1, &cuda_gl_ressource, 0); + if (err) std::cout << "err 0 " << err << " " << cudaGetErrorString(err) << "\n"; + err = cudaGraphicsSubResourceGetMappedArray(&ArrIm, cuda_gl_ressource, 0, 0); + if (err) std::cout << "err 1 " << err << " " 
<< cudaGetErrorString(err) << "\n"; + + return (err == cudaSuccess); +} + +void CameraViewer::pushNewImage() { + if (!ref.isInit()) return; + auto err = cudaMemcpy2DToArray(ArrIm, 0, 0, ref.getPtr(sl::MEM::CPU), ref.getStepBytes(sl::MEM::CPU), ref.getPixelBytes() * ref.getWidth(), ref.getHeight(), cudaMemcpyHostToDevice); + if (err) std::cout << "err 2 " << err << " " << cudaGetErrorString(err) << "\n"; +} + +void CameraViewer::draw(sl::Transform vpMatrix) { + if (!ref.isInit()) return; + + glUseProgram(shader.getProgramId()); + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); + + glUniformMatrix4fv(shMVPMatrixLocTex_, 1, GL_FALSE, sl::Transform::transpose(vpMatrix).m); + glBindTexture(GL_TEXTURE_2D, texture); + + glBindVertexArray(vaoID_); + glDrawElements(GL_TRIANGLES, (GLsizei)faces.size()*3, GL_UNSIGNED_INT, 0); + glBindVertexArray(0); + + glUseProgram(0); +} + + +const sl::Translation CameraGL::ORIGINAL_FORWARD = sl::Translation(0, 0, 1); +const sl::Translation CameraGL::ORIGINAL_UP = sl::Translation(0, 1, 0); +const sl::Translation CameraGL::ORIGINAL_RIGHT = sl::Translation(1, 0, 0); + +CameraGL::CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical) { + this->position_ = position; + setDirection(direction, vertical); + + offset_ = sl::Translation(0, 0, 0); + view_.setIdentity(); + updateView(); + setProjection(70, 70, 0.200f, 50.f); + updateVPMatrix(); +} + +CameraGL::~CameraGL() { +} + +void CameraGL::update() { + if (sl::Translation::dot(vertical_, up_) < 0) + vertical_ = vertical_ * -1.f; + updateView(); + updateVPMatrix(); +} + +void CameraGL::setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar) { + horizontalFieldOfView_ = horizontalFOV; + verticalFieldOfView_ = verticalFOV; + znear_ = znear; + zfar_ = zfar; + + float fov_y = verticalFOV * M_PI / 180.f; + float fov_x = horizontalFOV * M_PI / 180.f; + + projection_.setIdentity(); + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); + projection_(1, 1) = 
1.0f / tanf(fov_y * 0.5f); + projection_(2, 2) = -(zfar + znear) / (zfar - znear); + projection_(3, 2) = -1; + projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); + projection_(3, 3) = 0; +} + +const sl::Transform& CameraGL::getViewProjectionMatrix() const { + return vpMatrix_; +} + +float CameraGL::getHorizontalFOV() const { + return horizontalFieldOfView_; +} + +float CameraGL::getVerticalFOV() const { + return verticalFieldOfView_; +} + +void CameraGL::setOffsetFromPosition(const sl::Translation& o) { + offset_ = o; +} + +const sl::Translation& CameraGL::getOffsetFromPosition() const { + return offset_; +} + +void CameraGL::setDirection(const sl::Translation& direction, const sl::Translation& vertical) { + sl::Translation dirNormalized = direction; + dirNormalized.normalize(); + this->rotation_ = sl::Orientation(ORIGINAL_FORWARD, dirNormalized * -1.f); + updateVectors(); + this->vertical_ = vertical; + if (sl::Translation::dot(vertical_, up_) < 0) + rotate(sl::Rotation(M_PI, ORIGINAL_FORWARD)); +} + +void CameraGL::translate(const sl::Translation& t) { + position_ = position_ + t; +} + +void CameraGL::setPosition(const sl::Translation& p) { + position_ = p; +} + +void CameraGL::rotate(const sl::Orientation& rot) { + rotation_ = rot * rotation_; + updateVectors(); +} + +void CameraGL::rotate(const sl::Rotation& m) { + this->rotate(sl::Orientation(m)); +} + +void CameraGL::setRotation(const sl::Orientation& rot) { + rotation_ = rot; + updateVectors(); +} + +void CameraGL::setRotation(const sl::Rotation& m) { + this->setRotation(sl::Orientation(m)); +} + +const sl::Translation& CameraGL::getPosition() const { + return position_; +} + +const sl::Translation& CameraGL::getForward() const { + return forward_; +} + +const sl::Translation& CameraGL::getRight() const { + return right_; +} + +const sl::Translation& CameraGL::getUp() const { + return up_; +} + +const sl::Translation& CameraGL::getVertical() const { + return vertical_; +} + +float 
CameraGL::getZNear() const { + return znear_; +} + +float CameraGL::getZFar() const { + return zfar_; +} + +void CameraGL::updateVectors() { + forward_ = ORIGINAL_FORWARD * rotation_; + up_ = ORIGINAL_UP * rotation_; + right_ = sl::Translation(ORIGINAL_RIGHT * -1.f) * rotation_; +} + +void CameraGL::updateView() { + sl::Transform transformation(rotation_, (offset_ * rotation_) + position_); + view_ = sl::Transform::inverse(transformation); +} + +void CameraGL::updateVPMatrix() { + vpMatrix_ = projection_ * view_; +} diff --git a/depth sensing/fusion/cpp/src/main.cpp b/depth sensing/fusion/cpp/src/main.cpp new file mode 100644 index 00000000..63f428ec --- /dev/null +++ b/depth sensing/fusion/cpp/src/main.cpp @@ -0,0 +1,187 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////// + +// ZED include +#include "ClientPublisher.hpp" +#include "GLViewer.hpp" +#include "utils.hpp" + + +int main(int argc, char **argv) { + +#ifdef _SL_JETSON_ + const bool isJetson = true; +#else + const bool isJetson = false; +#endif + + if (argc != 2) { + // this file should be generated by using the tool ZED360 + std::cout << "Need a Configuration file in input" << std::endl; + return 1; + } + + // Defines the Coordinate system and unit used in this sample + constexpr sl::COORDINATE_SYSTEM COORDINATE_SYSTEM = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; + constexpr sl::UNIT UNIT = sl::UNIT::METER; + + // Read json file containing the configuration of your multicamera setup. + auto configurations = sl::readFusionConfigurationFile(argv[1], COORDINATE_SYSTEM, UNIT); + + if (configurations.empty()) { + std::cout << "Empty configuration File." << std::endl; + return EXIT_FAILURE; + } + + Trigger trigger; + + // Check if the ZED camera should run within the same process or if they are running on the edge. + std::vector clients(configurations.size()); + int id_ = 0; + std::map svo_files; + for (auto conf: configurations) { + // if the ZED camera should run locally, then start a thread to handle it + if(conf.communication_parameters.getType() == sl::CommunicationParameters::COMM_TYPE::INTRA_PROCESS){ + std::cout << "Try to open ZED " < 1); + if (enable_svo_sync) { + std::cout << "Starting SVO sync process..." 
<< std::endl; + std::map cam_idx_to_svo_frame_idx = syncDATA(svo_files); + + for (auto &it : cam_idx_to_svo_frame_idx) { + std::cout << "Setting camera " << it.first << " to frame " << it.second << std::endl; + clients[it.first].setStartSVOPosition(it.second); + } + } + + // start camera threads + for (auto &it: clients) + it.start(); + + // Now that the ZED camera are running, we need to initialize the fusion module + sl::InitFusionParameters init_params; + init_params.coordinate_units = UNIT; + init_params.coordinate_system = COORDINATE_SYSTEM; + init_params.verbose = true; + + // create and initialize it + sl::Fusion fusion; + fusion.init(init_params); + + // subscribe to every cameras of the setup to internally gather their data + std::vector cameras; + for (auto& it : configurations) { + sl::CameraIdentifier uuid(it.serial_number); + // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. + auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose, it.override_gravity); + if (state != sl::FUSION_ERROR_CODE::SUCCESS) + std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . 
" << state << std::endl; + else + cameras.push_back(uuid); + } + + // check that at least one camera is connected + if (cameras.empty()) { + std::cout << "no connections " << std::endl; + return EXIT_FAILURE; + } + + fusion.enablePositionalTracking(); + + // creation of a 3D viewer + GLViewer viewer; + viewer.init(argc, argv); + + std::cout << "Viewer Shortcuts\n" << + "\t- 'q': quit the application\n" << + "\t- 'p': play/pause the GLViewer\n" << + "\t- 'r': switch on/off for raw skeleton display\n" << + "\t- 's': switch on/off for live point cloud display\n" << + "\t- 'c': switch on/off point cloud display with raw color\n" << std::endl; + + // fusion outputs + sl::Bodies fused_bodies; + std::map camera_raw_data; + sl::FusionMetrics metrics; + std::map views; + std::map pointClouds; + sl::Resolution low_res(512,360); + sl::CameraIdentifier fused_camera(0); + + // run the fusion as long as the viewer is available. + while (viewer.isAvailable()) { + trigger.notifyZED(); + + // run the fusion process (which gather data from all camera, sync them and process them) + if (fusion.process() == sl::FUSION_ERROR_CODE::SUCCESS) { + + // for debug, you can retrieve the data sent by each camera + for (auto& id : cameras) { + + sl::Pose pose; + if(fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id) == sl::POSITIONAL_TRACKING_STATE::OK) + viewer.setCameraPose(id.sn, pose.pose_data); + + auto state_view = fusion.retrieveImage(views[id], id, low_res); + auto state_pc = fusion.retrieveMeasure(pointClouds[id], id, sl::MEASURE::XYZBGRA, low_res); + + if (state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS) + viewer.updateCamera(id.sn, views[id], pointClouds[id]); + } + + // get metrics about the fusion process for monitoring purposes + fusion.getProcessMetrics(metrics); + } + // update the 3D view + viewer.updateBodies(fused_bodies, camera_raw_data, metrics); + + while (!viewer.isPlaying() && viewer.isAvailable()) { + sl::sleep_ms(10); + } 
+ } + + viewer.exit(); + + trigger.running = false; + trigger.notifyZED(); + + for (auto &it: clients) + it.stop(); + + fusion.close(); + + return EXIT_SUCCESS; +} diff --git a/global localization/live/cpp/include/display/GLViewer.hpp b/global localization/live/cpp/include/display/GLViewer.hpp index ba5d26e7..b30a53b8 100644 --- a/global localization/live/cpp/include/display/GLViewer.hpp +++ b/global localization/live/cpp/include/display/GLViewer.hpp @@ -78,9 +78,19 @@ class CameraGL { class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/global localization/live/cpp/include/display/GenericDisplay.h b/global localization/live/cpp/include/display/GenericDisplay.h index 89de6ad5..d3834017 100644 --- a/global localization/live/cpp/include/display/GenericDisplay.h +++ b/global localization/live/cpp/include/display/GenericDisplay.h @@ -51,7 +51,7 @@ class GenericDisplay * @param geo_pose geopose to display * @param current_timestamp timestamp of the geopose to display */ - void updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp current_timestamp); + void updateGeoPoseData(sl::GeoPose geo_pose); protected: GLViewer opengl_viewer; diff --git a/global localization/live/cpp/src/display/GLViewer.cpp b/global localization/live/cpp/src/display/GLViewer.cpp index d2bfa105..40c0c5e0 100644 --- a/global localization/live/cpp/src/display/GLViewer.cpp +++ b/global localization/live/cpp/src/display/GLViewer.cpp @@ -107,10 +107,10 @@ void GLViewer::init(int argc, char **argv) { 
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); // Compile and create the shader - mainShader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + mainShader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); mainShader.MVP_Mat = glGetUniformLocation(mainShader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -569,6 +569,10 @@ Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -604,12 +608,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/global localization/live/cpp/src/display/GenericDisplay.cpp b/global localization/live/cpp/src/display/GenericDisplay.cpp index c9980fc4..1d1f8703 100644 --- a/global localization/live/cpp/src/display/GenericDisplay.cpp +++ b/global localization/live/cpp/src/display/GenericDisplay.cpp @@ -51,7 +51,7 @@ void GenericDisplay::updateRawGeoPoseData(sl::GNSSData geo_data) data.close(); } -void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp current_timestamp) +void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose) { // Make the pose available for the Live Server ofstream data; @@ -61,7 +61,7 @@ void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp curre 
data << ","; data << geo_pose.latlng_coordinates.getLongitude(false); data << ","; - data << current_timestamp.getMilliseconds(); + data << geo_pose.timestamp.getMilliseconds(); data.flush(); // flush will do the same thing than "\n" but without additional character data.close(); diff --git a/global localization/live/cpp/src/main.cpp b/global localization/live/cpp/src/main.cpp index 3a3c1a58..e17c88cc 100644 --- a/global localization/live/cpp/src/main.cpp +++ b/global localization/live/cpp/src/main.cpp @@ -125,7 +125,7 @@ int main(int argc, char **argv) if (current_geopose_satus == sl::GNSS_FUSION_STATUS::OK) { // Display it on the Live Server: - viewer.updateGeoPoseData(current_geopose, zed.getTimestamp(sl::TIME_REFERENCE::CURRENT)); + viewer.updateGeoPoseData(current_geopose); } else { diff --git a/global localization/live/python/live.py b/global localization/live/python/live.py index c3b9d3e9..7f05101d 100644 --- a/global localization/live/python/live.py +++ b/global localization/live/python/live.py @@ -124,7 +124,7 @@ def main(): current_geopose = sl.GeoPose() current_geopose_satus = fusion.get_geo_pose(current_geopose) if current_geopose_satus == sl.GNSS_FUSION_STATUS.OK: - viewer.updateGeoPoseData(current_geopose, zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)) + viewer.updateGeoPoseData(current_geopose) """ else: GNSS coordinate system to ZED coordinate system is not initialize yet diff --git a/global localization/playback/cpp/include/display/GLViewer.hpp b/global localization/playback/cpp/include/display/GLViewer.hpp index ba5d26e7..b30a53b8 100644 --- a/global localization/playback/cpp/include/display/GLViewer.hpp +++ b/global localization/playback/cpp/include/display/GLViewer.hpp @@ -78,9 +78,19 @@ class CameraGL { class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; 
+ Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/global localization/playback/cpp/include/display/GenericDisplay.h b/global localization/playback/cpp/include/display/GenericDisplay.h index 89de6ad5..d3834017 100644 --- a/global localization/playback/cpp/include/display/GenericDisplay.h +++ b/global localization/playback/cpp/include/display/GenericDisplay.h @@ -51,7 +51,7 @@ class GenericDisplay * @param geo_pose geopose to display * @param current_timestamp timestamp of the geopose to display */ - void updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp current_timestamp); + void updateGeoPoseData(sl::GeoPose geo_pose); protected: GLViewer opengl_viewer; diff --git a/global localization/playback/cpp/src/display/GLViewer.cpp b/global localization/playback/cpp/src/display/GLViewer.cpp index 06c3af54..d83f751e 100644 --- a/global localization/playback/cpp/src/display/GLViewer.cpp +++ b/global localization/playback/cpp/src/display/GLViewer.cpp @@ -107,10 +107,10 @@ void GLViewer::init(int argc, char **argv) { glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); // Compile and create the shader - mainShader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + mainShader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); mainShader.MVP_Mat = glGetUniformLocation(mainShader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -569,6 +569,10 @@ Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if 
(!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -604,12 +608,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/global localization/playback/cpp/src/display/GenericDisplay.cpp b/global localization/playback/cpp/src/display/GenericDisplay.cpp index c9980fc4..1d1f8703 100644 --- a/global localization/playback/cpp/src/display/GenericDisplay.cpp +++ b/global localization/playback/cpp/src/display/GenericDisplay.cpp @@ -51,7 +51,7 @@ void GenericDisplay::updateRawGeoPoseData(sl::GNSSData geo_data) data.close(); } -void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp current_timestamp) +void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose) { // Make the pose available for the Live Server ofstream data; @@ -61,7 +61,7 @@ void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp curre data << ","; data << geo_pose.latlng_coordinates.getLongitude(false); data << ","; - data << current_timestamp.getMilliseconds(); + data << geo_pose.timestamp.getMilliseconds(); data.flush(); // flush will do the same thing than "\n" but without additional character data.close(); diff --git a/global localization/playback/cpp/src/main.cpp b/global localization/playback/cpp/src/main.cpp index aa04a262..49858498 100644 --- a/global localization/playback/cpp/src/main.cpp +++ b/global localization/playback/cpp/src/main.cpp @@ -204,7 +204,7 @@ int main(int argc, char **argv) { auto current_geopose_status = fusion.getGeoPose(current_geopose); if (current_geopose_status == 
sl::GNSS_FUSION_STATUS::OK) { // Display it on the Live Server: - viewer.updateGeoPoseData(current_geopose, zed.getTimestamp(sl::TIME_REFERENCE::CURRENT)); + viewer.updateGeoPoseData(current_geopose); sl::Transform current_calibration = fusion.getGeoTrackingCalibration(); } diff --git a/global localization/playback/python/display/generic_display.py b/global localization/playback/python/display/generic_display.py index ec668341..0ab3a7bd 100644 --- a/global localization/playback/python/display/generic_display.py +++ b/global localization/playback/python/display/generic_display.py @@ -32,14 +32,14 @@ def updateRawGeoPoseData(self, geo_data): except ImportError: print("An exception was raised: the raw geo-pose data was not sent.") - def updateGeoPoseData(self, geo_pose, current_timestamp): + def updateGeoPoseData(self, geo_pose): try: # Replace this part with the appropriate sending of data to your IoT system f = open('../../map server/data.txt', 'w') f.write("{},{},{}" .format(geo_pose.latlng_coordinates.get_latitude(False), geo_pose.latlng_coordinates.get_longitude(False), - current_timestamp.get_milliseconds())) + geo_pose.timestamp.get_milliseconds())) f.flush() gnss_data = {} gnss_data["longitude"] = geo_pose.latlng_coordinates.get_latitude(False) diff --git a/global localization/playback/python/playback.py b/global localization/playback/python/playback.py index 5284787b..4cdb4564 100644 --- a/global localization/playback/python/playback.py +++ b/global localization/playback/python/playback.py @@ -156,7 +156,7 @@ def main(): current_geopose_satus = fusion.get_geo_pose(current_geopose) if current_geopose_satus == sl.GNSS_FUSION_STATUS.OK: # Display it on the Live Server - viewer.updateGeoPoseData(current_geopose, zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)) + viewer.updateGeoPoseData(current_geopose) _, yaw_std, position_std = fusion.get_current_gnss_calibration_std() if yaw_std != -1: print("GNSS State : ", current_geopose_satus, " : calibration uncertainty 
yaw_std ", yaw_std, diff --git a/global localization/recording/cpp/include/display/GLViewer.hpp b/global localization/recording/cpp/include/display/GLViewer.hpp index ba5d26e7..b30a53b8 100644 --- a/global localization/recording/cpp/include/display/GLViewer.hpp +++ b/global localization/recording/cpp/include/display/GLViewer.hpp @@ -78,9 +78,19 @@ class CameraGL { class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/global localization/recording/cpp/include/display/GenericDisplay.h b/global localization/recording/cpp/include/display/GenericDisplay.h index 89de6ad5..d3834017 100644 --- a/global localization/recording/cpp/include/display/GenericDisplay.h +++ b/global localization/recording/cpp/include/display/GenericDisplay.h @@ -51,7 +51,7 @@ class GenericDisplay * @param geo_pose geopose to display * @param current_timestamp timestamp of the geopose to display */ - void updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp current_timestamp); + void updateGeoPoseData(sl::GeoPose geo_pose); protected: GLViewer opengl_viewer; diff --git a/global localization/recording/cpp/src/display/GLViewer.cpp b/global localization/recording/cpp/src/display/GLViewer.cpp index d2bfa105..40c0c5e0 100644 --- a/global localization/recording/cpp/src/display/GLViewer.cpp +++ b/global localization/recording/cpp/src/display/GLViewer.cpp @@ -107,10 +107,10 @@ void GLViewer::init(int argc, char **argv) { glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); // Compile and create the shader - mainShader.it = 
Shader(VERTEX_SHADER, FRAGMENT_SHADER); + mainShader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); mainShader.MVP_Mat = glGetUniformLocation(mainShader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -569,6 +569,10 @@ Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -604,12 +608,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/global localization/recording/cpp/src/display/GenericDisplay.cpp b/global localization/recording/cpp/src/display/GenericDisplay.cpp index 2c26fb23..55c69ddb 100644 --- a/global localization/recording/cpp/src/display/GenericDisplay.cpp +++ b/global localization/recording/cpp/src/display/GenericDisplay.cpp @@ -52,7 +52,7 @@ void GenericDisplay::updateRawGeoPoseData(sl::GNSSData geo_data) data.close(); } -void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp current_timestamp) +void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose) { // Make the pose available for the Live Server ofstream data; @@ -62,7 +62,7 @@ void GenericDisplay::updateGeoPoseData(sl::GeoPose geo_pose, sl::Timestamp curre data << ","; data << geo_pose.latlng_coordinates.getLongitude(false); 
data << ","; - data << current_timestamp.getMilliseconds(); + data << geo_pose.timestamp.getMilliseconds(); data << "\n"; data.close(); diff --git a/global localization/recording/cpp/src/main.cpp b/global localization/recording/cpp/src/main.cpp index a07ac230..3c221a9c 100644 --- a/global localization/recording/cpp/src/main.cpp +++ b/global localization/recording/cpp/src/main.cpp @@ -143,7 +143,7 @@ int main(int argc, char **argv) { auto current_geopose_satus = fusion.getGeoPose(current_geopose); if (current_geopose_satus == sl::GNSS_FUSION_STATUS::OK) { // Display it on the Live Server: - viewer.updateGeoPoseData(current_geopose, zed.getTimestamp(sl::TIME_REFERENCE::CURRENT)); + viewer.updateGeoPoseData(current_geopose); } else { // GNSS coordinate system to ZED coordinate system is not initialize yet // The initialisation between the coordinates system is basically an optimization problem that diff --git a/global localization/recording/python/display/generic_display.py b/global localization/recording/python/display/generic_display.py index d012fe17..1f90496b 100644 --- a/global localization/recording/python/display/generic_display.py +++ b/global localization/recording/python/display/generic_display.py @@ -32,14 +32,14 @@ def updateRawGeoPoseData(self, geo_data): except ImportError: print("An exception was raised: the raw geo-pose data was not sent.") - def updateGeoPoseData(self, geo_pose, current_timestamp): + def updateGeoPoseData(self, geo_pose): try: # Replace this part with the appropriate sending of data to your IoT system f = open('../../map server/data.txt', 'w') f.write("{},{},{}" .format(geo_pose.latlng_coordinates.get_latitude(False), geo_pose.latlng_coordinates.get_longitude(False), - current_timestamp.get_milliseconds())) + geo_pose.timestamp.get_milliseconds())) f.flush() except ImportError: print("An exception was raised: the geo-pose data was not sent.") diff --git a/global localization/recording/python/recording.py b/global 
localization/recording/python/recording.py index 8d23d68d..f7ee72f1 100644 --- a/global localization/recording/python/recording.py +++ b/global localization/recording/python/recording.py @@ -152,7 +152,7 @@ def main(): current_geopose = sl.GeoPose() current_geopose_satus = fusion.get_geo_pose(current_geopose) if current_geopose_satus == sl.GNSS_FUSION_STATUS.OK: - viewer.updateGeoPoseData(current_geopose, zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)) + viewer.updateGeoPoseData(current_geopose) """ else: GNSS coordinate system to ZED coordinate system is not initialized yet diff --git a/object detection/birds eye viewer/cpp/include/GLViewer.hpp b/object detection/birds eye viewer/cpp/include/GLViewer.hpp index 12526dec..73180952 100644 --- a/object detection/birds eye viewer/cpp/include/GLViewer.hpp +++ b/object detection/birds eye viewer/cpp/include/GLViewer.hpp @@ -104,10 +104,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/object detection/birds eye viewer/cpp/src/GLViewer.cpp b/object detection/birds eye viewer/cpp/src/GLViewer.cpp index 1d1e9434..fa531659 100644 --- a/object detection/birds eye viewer/cpp/src/GLViewer.cpp +++ b/object detection/birds eye viewer/cpp/src/GLViewer.cpp @@ -123,10 +123,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isT isTrackingON_ = isTrackingON; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, 
FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -731,6 +731,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -766,12 +770,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -855,7 +859,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); diff --git a/object detection/birds eye viewer/cpp/src/TrackingViewer.cpp b/object detection/birds eye viewer/cpp/src/TrackingViewer.cpp index 804cf209..7e19f571 100644 --- a/object detection/birds eye viewer/cpp/src/TrackingViewer.cpp +++ b/object detection/birds eye viewer/cpp/src/TrackingViewer.cpp @@ -35,7 +35,7 @@ void TrackingViewer::render_2D(cv::Mat &left_display, sl::float2 img_scale, std: // scaled ROI cv::Rect 
roi(top_left_corner, bottom_right_corner); // Use isInit() to check if mask is available - if (obj.mask.isInit()) { + if (obj.mask.isInit() && obj.mask.getWidth() > 0 && obj.mask.getHeight() > 0) { // Here, obj.mask is the object segmentation mask inside the object bbox, computed on the native resolution // The resize is needed to get the mask on the display resolution cv::Mat tmp_mask; diff --git a/object detection/birds eye viewer/csharp/MainWindow.cs b/object detection/birds eye viewer/csharp/MainWindow.cs index 6f1d4242..6e6480c4 100644 --- a/object detection/birds eye viewer/csharp/MainWindow.cs +++ b/object detection/birds eye viewer/csharp/MainWindow.cs @@ -109,7 +109,6 @@ public MainWindow(string[] args) obj_det_params.enableObjectTracking = true; // the object detection will track objects across multiple images, instead of an image-by-image basis isTrackingON = obj_det_params.enableObjectTracking; obj_det_params.enableSegmentation = false; - obj_det_params.imageSync = true; // the object detection is synchronized to the image obj_det_params.detectionModel = sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE; if (USE_BATCHING) diff --git a/object detection/birds eye viewer/python/object_detection_birds_view.py b/object detection/birds eye viewer/python/object_detection_birds_view.py index a2a2003f..363f674c 100644 --- a/object detection/birds eye viewer/python/object_detection_birds_view.py +++ b/object detection/birds eye viewer/python/object_detection_birds_view.py @@ -209,7 +209,7 @@ def main(): def parse_args(init): - if len(opt.input_svo_file)>0 and opt.input_svo_file.endswith(".svo"): + if len(opt.input_svo_file)>0 and (opt.input_svo_file.endswith(".svo") or opt.input_svo_file.endswith(".svo2")): init.set_from_svo_file(opt.input_svo_file) print("[Sample] Using SVO File input: {0}".format(opt.input_svo_file)) elif len(opt.ip_address)>0 : diff --git a/object detection/concurrent detections/cpp/include/GLViewer.hpp b/object detection/concurrent 
detections/cpp/include/GLViewer.hpp index fdaca9ca..99019139 100644 --- a/object detection/concurrent detections/cpp/include/GLViewer.hpp +++ b/object detection/concurrent detections/cpp/include/GLViewer.hpp @@ -104,10 +104,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/object detection/concurrent detections/cpp/src/GLViewer.cpp b/object detection/concurrent detections/cpp/src/GLViewer.cpp index 43465be3..c6919afd 100644 --- a/object detection/concurrent detections/cpp/src/GLViewer.cpp +++ b/object detection/concurrent detections/cpp/src/GLViewer.cpp @@ -137,10 +137,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isT isTrackingON_ = isTrackingON; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -735,6 +735,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -770,12 
+774,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -859,7 +863,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); diff --git a/object detection/custom detector/cpp/opencv_dnn_yolov4/include/GLViewer.hpp b/object detection/custom detector/cpp/opencv_dnn_yolov4/include/GLViewer.hpp index 414be22c..8fd1bbf3 100644 --- a/object detection/custom detector/cpp/opencv_dnn_yolov4/include/GLViewer.hpp +++ b/object detection/custom detector/cpp/opencv_dnn_yolov4/include/GLViewer.hpp @@ -104,10 +104,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/object detection/custom detector/cpp/opencv_dnn_yolov4/src/GLViewer.cpp b/object detection/custom 
detector/cpp/opencv_dnn_yolov4/src/GLViewer.cpp index b3f31e00..66fd7c02 100644 --- a/object detection/custom detector/cpp/opencv_dnn_yolov4/src/GLViewer.cpp +++ b/object detection/custom detector/cpp/opencv_dnn_yolov4/src/GLViewer.cpp @@ -133,10 +133,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isT isTrackingON_ = isTrackingON; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -733,6 +733,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -768,12 +772,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -857,7 +861,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), 
"u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); diff --git a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/include/GLViewer.hpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/include/GLViewer.hpp index db155f25..cb35b05a 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/include/GLViewer.hpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/include/GLViewer.hpp @@ -104,10 +104,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; @@ -230,10 +239,18 @@ class GLViewer { GLViewer(); ~GLViewer(); bool isAvailable(); + bool isPlaying() const { return play; } + void setPlaying(const bool p) { play = p; } void init(int argc, char **argv, sl::CameraParameters& param, bool isTrackingON); void updateData(sl::Mat& matXYZRGBA, std::vector& objs, sl::Transform &cam_pose); + int getKey() { + int const key{last_key}; + last_key = -1; + return key; + } + void exit(); private: // Rendering loop method called each frame by glutDisplayFunc @@ -298,6 +315,9 @@ class GLViewer { Simple3DObject BBox_faces; Simple3DObject skeletons; Simple3DObject floor_grid; + + bool play{true}; + int last_key{-1}; }; #endif /* __VIEWER_INCLUDE__ */ diff --git a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/GLViewer.cpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/GLViewer.cpp index c7eab2e6..80412ec6 100644 --- a/object detection/custom 
detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/GLViewer.cpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/GLViewer.cpp @@ -133,10 +133,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isT isTrackingON_ = isTrackingON; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -379,9 +379,12 @@ void GLViewer::printText() { void GLViewer::clearInputs() { mouseMotion_[0] = mouseMotion_[1] = 0; mouseWheelPosition_ = 0; - for (unsigned int i = 0; i < 256; ++i) + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; if (keyStates_[i] != KEY_STATE::DOWN) keyStates_[i] = KEY_STATE::FREE; + } } void GLViewer::drawCallback() { @@ -735,6 +738,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -770,12 +777,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -859,7 +866,7 @@ void PointCloud::initialize(sl::Resolution res) { 
checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); diff --git a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/main.cpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/main.cpp index d9985f46..01581e78 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/main.cpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/main.cpp @@ -115,6 +115,7 @@ int main(int argc, char** argv) { if (zed_opt.find(".svo") != std::string::npos) init_parameters.input.setFromSVOFile(zed_opt.c_str()); } + // Open the camera auto returned_state = zed.open(init_parameters); if (returned_state != sl::ERROR_CODE::SUCCESS) { @@ -186,7 +187,7 @@ int main(int argc, char** argv) { tmp.label = (int) it.label; tmp.bounding_box_2d = cvt(it.box); tmp.is_grounded = ((int) it.label == 0); // Only the first class (person) is grounded, that is moving on the floor plane - // others are tracked in full 3D space + // others are tracked in full 3D space objects_in.push_back(tmp); } // Send the custom detected boxes to the ZED diff --git a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/yolo.cpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/yolo.cpp index 59bc82c2..570e09de 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/yolo.cpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx/src/yolo.cpp @@ -14,6 +14,14 @@ inline int clamp(int val, int min, int max) { #define WEIGHTED_NMS +#if NV_TENSORRT_MAJOR >= 10 +#define trt_name_engine_get_binding_name getIOTensorName +#define 
trt_name_engine_get_nb_binding getNbIOTensors +#else +#define trt_name_engine_get_binding_name getBindingName +#define trt_name_engine_get_nb_binding getNbBindings +#endif + std::vector nonMaximumSuppression(const float nmsThresh, std::vector binfo) { auto overlap1D = [](float x1min, float x1max, float x2min, float x2max) -> float { @@ -116,9 +124,16 @@ Yolo::~Yolo() { CUDA_CHECK(cudaFree(d_input)); CUDA_CHECK(cudaFree(d_output)); // Destroy the engine + +#if NV_TENSORRT_MAJOR >= 10 + delete context; + delete engine; + delete runtime; +#else context->destroy(); engine->destroy(); runtime->destroy(); +#endif delete[] h_input; delete[] h_output; @@ -169,7 +184,9 @@ int Yolo::build_engine(std::string onnx_path, std::string engine_path, OptimDim profile->setDimensions(dyn_dim_profile.tensor_name.c_str(), OptProfileSelector::kMAX, dyn_dim_profile.size); config->addOptimizationProfile(profile); +#if NV_TENSORRT_MAJOR < 10 builder->setMaxBatchSize(1); +#endif } auto parser = nvonnxparser::createParser(*network, gLogger); @@ -195,7 +212,18 @@ int Yolo::build_engine(std::string onnx_path, std::string engine_path, OptimDim //////////////// Actual engine building +#if NV_TENSORRT_MAJOR >= 10 + engine = nullptr; + if (builder->isNetworkSupported(*network, *config)) { + std::unique_ptr serializedEngine{builder->buildSerializedNetwork(*network, *config)}; + if (serializedEngine != nullptr && serializedEngine.get() && serializedEngine->size() > 0) { + nvinfer1::IRuntime* infer = createInferRuntime(gLogger); + engine = infer->deserializeCudaEngine(serializedEngine->data(), serializedEngine->size()); + } + } +#else engine = builder->buildEngineWithConfig(*network, *config); +#endif onnx_file_content.clear(); @@ -209,12 +237,19 @@ int Yolo::build_engine(std::string onnx_path, std::string engine_path, OptimDim fwrite(reinterpret_cast (ptr->data()), ptr->size() * sizeof (char), 1, fp); fclose(fp); +#if NV_TENSORRT_MAJOR >= 10 + delete parser; + delete network; + delete config; + 
delete builder; + delete engine; +#else parser->destroy(); network->destroy(); config->destroy(); builder->destroy(); - engine->destroy(); +#endif return 0; } else return 1; @@ -250,24 +285,38 @@ int Yolo::init(std::string engine_name) { if (context == nullptr) return 1; delete[] trtModelStream; - if (engine->getNbBindings() != 2) return 1; + if (engine->trt_name_engine_get_nb_binding() != 2) return 1; - const int bindings = engine->getNbBindings(); + const int bindings = engine->trt_name_engine_get_nb_binding(); for (int i = 0; i < bindings; i++) { - if (engine->bindingIsInput(i)) { - input_binding_name = engine->getBindingName(i); +#if NV_TENSORRT_MAJOR > 8 || (NV_TENSORRT_MAJOR >= 8 && NV_TENSORRT_MINOR > 4) + if (engine->getTensorIOMode(engine->getIOTensorName(i)) == nvinfer1::TensorIOMode::kINPUT) +#else + if (engine->bindingIsInput(i)) +#endif + { + input_binding_name = engine->trt_name_engine_get_binding_name(i); + +#if NV_TENSORRT_MAJOR >= 10 + Dims bind_dim = engine->getTensorShape(engine->getIOTensorName(i)); +#else Dims bind_dim = engine->getBindingDimensions(i); +#endif input_width = bind_dim.d[3]; input_height = bind_dim.d[2]; inputIndex = i; std::cout << "Inference size : " << input_height << "x" << input_width << std::endl; - }//if (engine->getTensorIOMode(engine->getBindingName(i)) == TensorIOMode::kOUTPUT) + }//if (engine->getTensorIOMode(engine->trt_name_engine_get_binding_name(i)) == TensorIOMode::kOUTPUT) else { - output_name = engine->getBindingName(i); + output_name = engine->trt_name_engine_get_binding_name(i); // fill size, allocation must be done externally outputIndex = i; +#if NV_TENSORRT_MAJOR >= 10 + Dims bind_dim = engine->getTensorShape(engine->getIOTensorName(i)); +#else Dims bind_dim = engine->getBindingDimensions(i); +#endif size_t batch = bind_dim.d[0]; if (batch > batch_size) { std::cout << "batch > 1 not supported" << std::endl; @@ -297,7 +346,7 @@ int Yolo::init(std::string engine_name) { h_input = new float[batch_size * 3 * 
input_height * input_width]; h_output = new float[batch_size * output_size]; // In order to bind the buffers, we need to know the names of the input and output tensors. - // Note that indices are guaranteed to be less than IEngine::getNbBindings() + // Note that indices are guaranteed to be less than IEngine::trt_name_engine_get_nb_binding() assert(inputIndex == 0); assert(outputIndex == 1); // Create GPU buffers on device @@ -339,10 +388,16 @@ std::vector Yolo::run(sl::Mat left_sl, int orig_image_h, int orig_imag // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host CUDA_CHECK(cudaMemcpyAsync(d_input, h_input, batch_size * 3 * frame_s * sizeof (float), cudaMemcpyHostToDevice, stream)); +#if (NV_TENSORRT_MAJOR < 8) || (NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR < 5) std::vector d_buffers_nvinfer(2); d_buffers_nvinfer[inputIndex] = d_input; d_buffers_nvinfer[outputIndex] = d_output; context->enqueueV2(&d_buffers_nvinfer[0], stream, nullptr); +#else + context->setTensorAddress(input_binding_name.c_str(), d_input); + context->setTensorAddress(output_name.c_str(), d_output); + context->enqueueV3(stream); +#endif CUDA_CHECK(cudaMemcpyAsync(h_output, d_output, batch_size * output_size * sizeof (float), cudaMemcpyDeviceToHost, stream)); cudaStreamSynchronize(stream); diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/CMakeLists.txt b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/CMakeLists.txt similarity index 76% rename from object detection/custom detector/cpp/tensorrt_yolov5_v5.0/CMakeLists.txt rename to object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/CMakeLists.txt index eacb4556..d1308d68 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/CMakeLists.txt +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.5) -PROJECT(yolov5_zed) 
+PROJECT(yolo_onnx_zed) set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -10,45 +10,41 @@ if (NOT LINK_SHARED_ZED AND MSVC) message(FATAL_ERROR "LINK_SHARED_ZED OFF : ZED SDK static libraries not available on Windows") endif() -SET(SPECIAL_OS_LIBS "") - -find_package(ZED 3 REQUIRED) +find_package(ZED 4 REQUIRED) find_package(OpenCV REQUIRED) +find_package(CUDA REQUIRED) find_package(GLUT REQUIRED) find_package(GLEW REQUIRED) +SET(OpenGL_GL_PREFERENCE GLVND) find_package(OpenGL REQUIRED) -find_package(CUDA REQUIRED) -IF(NOT WIN32) - SET(SPECIAL_OS_LIBS "pthread" "X11") - add_definitions(-Wno-write-strings) -ENDIF() include_directories(${OpenCV_INCLUDE_DIRS}) +include_directories(${CUDA_INCLUDE_DIRS}) include_directories(${ZED_INCLUDE_DIRS}) include_directories(${GLEW_INCLUDE_DIRS}) include_directories(${GLUT_INCLUDE_DIR}) -include_directories(${CUDA_INCLUDE_DIRS}) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) link_directories(${GLEW_LIBRARY_DIRS}) link_directories(${GLUT_LIBRARY_DIRS}) link_directories(${OpenGL_LIBRARY_DIRS}) -link_directories(${CUDA_LIBRARY_DIRS}) link_directories(${OpenCV_LIBRARY_DIRS}) -# cuda -include_directories(/usr/local/cuda/include) -link_directories(/usr/local/cuda/lib64) -# tensorrt -include_directories(/usr/include/x86_64-linux-gnu/) -link_directories(/usr/lib/x86_64-linux-gnu/) + +IF(NOT WIN32) + SET(SPECIAL_OS_LIBS "pthread") + + IF (CMAKE_SYSTEM_PROCESSOR MATCHES aarch64) + add_definitions(-DJETSON_STYLE) + ENDIF() +ENDIF() FILE(GLOB_RECURSE SRC_FILES src/*.c*) FILE(GLOB_RECURSE HDR_FILES include/*.h*) -cuda_add_executable(${PROJECT_NAME} ${HDR_FILES} ${SRC_FILES}) -add_definitions(-O3 -D_MWAITXINTRIN_H_INCLUDED -Wno-deprecated-declarations) +ADD_EXECUTABLE(${PROJECT_NAME} ${HDR_FILES} ${SRC_FILES}) if (LINK_SHARED_ZED) SET(ZED_LIBS ${ZED_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_CUDART_LIBRARY}) @@ -56,8 +52,6 @@ else() SET(ZED_LIBS 
${ZED_STATIC_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_LIBRARY}) endif() -SET(TRT_LIBS nvinfer) - target_link_libraries(${PROJECT_NAME} ${TRT_LIBS} ${SPECIAL_OS_LIBS} diff --git a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/README.md b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/README.md new file mode 100644 index 00000000..6cb303d9 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/README.md @@ -0,0 +1,38 @@ +# Yolov8 / v6 / v5 custom ONNX ran in the ZED SDK + +This sample is designed to run a state of the art object detection model using the ZED SDK and optimizing your model with the highly optimized TensorRT framework. Internally, the ZED SDK takes its images, run inference on it to obtain 2D box detections and extract 3D informations (localization, 3D bounding boxes) and tracking. + +This sample shows how to pass your custom YOLO-like onnx model to the ZED SDK. + +A custom detector can be trained with the same architecture. These tutorials walk you through the workflow of training a custom detector : + +- Yolov8 https://docs.ultralytics.com/modes/train +- Yolov6 https://github.com/meituan/YOLOv6/blob/main/docs/Train_custom_data.md +- Yolov5 https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data + +## Getting Started + + - Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/) + - Check the [Documentation](https://www.stereolabs.com/docs/) + - [TensorRT Documentation](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html) + +## Workflow + +This sample is expecting a TensorRT engine, optimized from an ONNX model. The ONNX model can be exported from Pytorch using the original YOLO code. Please refer to the section corresponding to the needed version. 
+ +### Build the sample + + - Build for [Windows](https://www.stereolabs.com/docs/app-development/cpp/windows/) + - Build for [Linux/Jetson](https://www.stereolabs.com/docs/app-development/cpp/linux/) + +### Running the sample with the engine generated + +```sh +./yolo_onnx_zed [.onnx] [zed camera id / optional svo filepath] + +# For example yolov8n +./yolo_onnx_zed yolov8n.onnx 0 # 0 for zed camera id 0 + +# With an SVO file +./yolo_onnx_zed yolov8n.onnx ./foo.svo +``` diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/GLViewer.hpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/include/GLViewer.hpp similarity index 90% rename from object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/GLViewer.hpp rename to object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/include/GLViewer.hpp index 414be22c..96a1fc46 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/GLViewer.hpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/include/GLViewer.hpp @@ -14,7 +14,7 @@ #include -#include "utils.hpp" +#include "utils.h" #ifndef M_PI @@ -104,10 +104,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; @@ -230,9 +239,17 @@ class GLViewer { GLViewer(); ~GLViewer(); bool isAvailable(); + bool isPlaying() const { return play; } + void setPlaying(const bool p) { play = p; } - void init(int argc, char **argv, sl::CameraParameters& 
param, bool isTrackingON); - void updateData(sl::Mat& matXYZRGBA, std::vector& objs, sl::Transform &cam_pose); + void init(int argc, char **argv, const sl::CameraParameters& param, bool isTrackingON); + void updateData(sl::Mat& matXYZRGBA, std::vector& objs, sl::Transform &cam_pose, const sl::CustomObjectDetectionRuntimeParameters& obj_runtime_params); + + int getKey() { + int const key{last_key}; + last_key = -1; + return key; + } void exit(); private: @@ -298,6 +315,9 @@ class GLViewer { Simple3DObject BBox_faces; Simple3DObject skeletons; Simple3DObject floor_grid; + + bool play{true}; + int last_key{-1}; }; #endif /* __VIEWER_INCLUDE__ */ diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/utils.hpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/include/utils.h similarity index 53% rename from object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/utils.hpp rename to object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/include/utils.h index a96504f8..efbc1d05 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/utils.hpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/include/utils.h @@ -1,23 +1,52 @@ -#ifndef DRAWING_UTILS_HPP -#define DRAWING_UTILS_HPP - -#include +#ifndef TRTX_YOLOV5_UTILS_H_ +#define TRTX_YOLOV5_UTILS_H_ #include +#include #include -float const id_colors[5][3] = { - { 232.0f, 176.0f, 59.0f}, - { 175.0f, 208.0f, 25.0f}, - { 102.0f, 205.0f, 105.0f}, - { 185.0f, 0.0f, 255.0f}, - { 99.0f, 107.0f, 252.0f} -}; +static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) { + int w, h, x, y; + float r_w = input_w / (img.cols * 1.0); + float r_h = input_h / (img.rows * 1.0); + if (r_h > r_w) { + w = input_w; + h = r_w * img.rows; + x = 0; + y = (input_h - h) / 2; + } else { + w = r_h * img.cols; + h = input_h; + x = (input_w - w) / 2; + y = 0; + } + cv::Mat re(h, w, CV_8UC3); + 
cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR); + cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128)); + re.copyTo(out(cv::Rect(x, y, re.cols, re.rows))); + return out; +} + +std::vector> const CLASS_COLORS = { + {0, 114, 189}, {217, 83, 25}, {237, 177, 32}, {126, 47, 142}, {119, 172, 48}, {77, 190, 238}, + {162, 20, 47}, {76, 76, 76}, {153, 153, 153}, {255, 0, 0}, {255, 128, 0}, {191, 191, 0}, + {0, 255, 0}, {0, 0, 255}, {170, 0, 255}, {85, 85, 0}, {85, 170, 0}, {85, 255, 0}, + {170, 85, 0}, {170, 170, 0}, {170, 255, 0}, {255, 85, 0}, {255, 170, 0}, {255, 255, 0}, + {0, 85, 128}, {0, 170, 128}, {0, 255, 128}, {85, 0, 128}, {85, 85, 128}, {85, 170, 128}, + {85, 255, 128}, {170, 0, 128}, {170, 85, 128}, {170, 170, 128}, {170, 255, 128}, {255, 0, 128}, + {255, 85, 128}, {255, 170, 128}, {255, 255, 128}, {0, 85, 255}, {0, 170, 255}, {0, 255, 255}, + {85, 0, 255}, {85, 85, 255}, {85, 170, 255}, {85, 255, 255}, {170, 0, 255}, {170, 85, 255}, + {170, 170, 255}, {170, 255, 255}, {255, 0, 255}, {255, 85, 255}, {255, 170, 255}, {85, 0, 0}, + {128, 0, 0}, {170, 0, 0}, {212, 0, 0}, {255, 0, 0}, {0, 43, 0}, {0, 85, 0}, + {0, 128, 0}, {0, 170, 0}, {0, 212, 0}, {0, 255, 0}, {0, 0, 43}, {0, 0, 85}, + {0, 0, 128}, {0, 0, 170}, {0, 0, 212}, {0, 0, 255}, {0, 0, 0}, {36, 36, 36}, + {73, 73, 73}, {109, 109, 109}, {146, 146, 146}, {182, 182, 182}, {219, 219, 219}, {0, 114, 189}, + {80, 183, 189}, {128, 128, 0}}; inline cv::Scalar generateColorID_u(int idx) { if (idx < 0) return cv::Scalar(236, 184, 36, 255); - int color_idx = idx % 5; - return cv::Scalar(id_colors[color_idx][0], id_colors[color_idx][1], id_colors[color_idx][2], 255); + int color_idx = idx % CLASS_COLORS.size(); + return cv::Scalar(CLASS_COLORS[color_idx][0], CLASS_COLORS[color_idx][1], CLASS_COLORS[color_idx][2], 255); } inline sl::float4 generateColorID_f(int idx) { @@ -79,7 +108,7 @@ inline void drawVerticalLine( cv::line(left_display, pt4, end_pt, clr, thickness); } -inline cv::Mat 
slMat2cvMat(sl::Mat& input) { +inline cv::Mat slMat2cvMat(sl::Mat const& input) { // Mapping between MAT_TYPE and CV_TYPE int cv_type = -1; switch (input.getDataType()) { @@ -105,5 +134,13 @@ inline cv::Mat slMat2cvMat(sl::Mat& input) { return cv::Mat(input.getHeight(), input.getWidth(), cv_type, input.getPtr(sl::MEM::CPU)); } +inline bool readFile(std::string filename, std::vector &file_content) { + // open the file: + std::ifstream instream(filename, std::ios::in | std::ios::binary); + if (!instream.is_open()) return true; + file_content = std::vector((std::istreambuf_iterator(instream)), std::istreambuf_iterator()); + return false; +} + +#endif // TRTX_YOLOV5_UTILS_H_ -#endif diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/GLViewer.cpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/src/GLViewer.cpp similarity index 95% rename from object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/GLViewer.cpp rename to object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/src/GLViewer.cpp index c7eab2e6..716e60e8 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/GLViewer.cpp +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/src/GLViewer.cpp @@ -105,7 +105,7 @@ void CloseFunc(void) { currentInstance_->exit(); } -void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isTrackingON) { +void GLViewer::init(int argc, char **argv, const sl::CameraParameters& param, bool isTrackingON) { glutInit(&argc, argv); int wnd_w = glutGet(GLUT_SCREEN_WIDTH); int wnd_h = glutGet(GLUT_SCREEN_HEIGHT); @@ -133,10 +133,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isT isTrackingON_ = isTrackingON; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - 
shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -191,7 +191,7 @@ void GLViewer::render() { } } -void GLViewer::updateData(sl::Mat &matXYZRGBA, std::vector &objs, sl::Transform& pose) { +void GLViewer::updateData(sl::Mat &matXYZRGBA, std::vector &objs, sl::Transform& pose, const sl::CustomObjectDetectionRuntimeParameters& obj_runtime_params) { mtx.lock(); pointCloud_.pushNewPC(matXYZRGBA); BBox_edges.clear(); @@ -203,8 +203,21 @@ void GLViewer::updateData(sl::Mat &matXYZRGBA, std::vector &objs cam_pose.setTranslation(tr_0); for (unsigned int i = 0; i < objs.size(); i++) { - if (renderObject(objs[i], isTrackingON_)) - { + bool show = renderObject(objs[i], isTrackingON_); + + // We can still display the object even if it is tracked and static, even if its state is SEARCHING + if (!show && isTrackingON_ && objs[i].tracking_state == sl::OBJECT_TRACKING_STATE::SEARCHING) { + if (obj_runtime_params.object_class_detection_properties.find(objs[i].raw_label) != obj_runtime_params.object_class_detection_properties.end()) { + if (obj_runtime_params.object_class_detection_properties.at(objs[i].raw_label).is_static) + show = true; + } + else { + if (obj_runtime_params.object_detection_properties.is_static) + show = true; + } + } + + if (show) { auto bb_ = objs[i].bounding_box; if (!bb_.empty()) { auto clr_class = getColorClass((int) objs[i].raw_label); @@ -379,9 +392,12 @@ void GLViewer::printText() { void GLViewer::clearInputs() { mouseMotion_[0] = mouseMotion_[1] = 0; mouseWheelPosition_ = 0; - for (unsigned int i = 0; i < 256; ++i) + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; if (keyStates_[i] != KEY_STATE::DOWN) keyStates_[i] = KEY_STATE::FREE; + } } void GLViewer::drawCallback() { @@ -735,6 +751,10 @@ sl::Transform Simple3DObject::getModelMatrix() const 
{ } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -770,12 +790,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -859,7 +879,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); diff --git a/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/src/main.cpp b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/src/main.cpp new file mode 100644 index 00000000..c8d98dcd --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov5-v6-v8_onnx_internal/src/main.cpp @@ -0,0 +1,165 @@ +#include +#include +#include +#include "utils.h" + +#include "GLViewer.hpp" + +#include + + +static void draw_objects(const cv::Mat& image, + cv::Mat &res, + const sl::Objects& objs, + const std::vector>& colors, + const bool isTrackingON) +{ + res = image.clone(); + cv::Mat mask{image.clone()}; + for (sl::ObjectData const& obj : objs.object_list) { + if (!renderObject(obj, isTrackingON)) + continue; + size_t const idx_color{obj.id % colors.size()}; + 
cv::Scalar const color{cv::Scalar(colors[idx_color][0U], colors[idx_color][1U], colors[idx_color][2U])}; + + cv::Rect const rect{static_cast(obj.bounding_box_2d[0U].x), + static_cast(obj.bounding_box_2d[0U].y), + static_cast(obj.bounding_box_2d[1U].x - obj.bounding_box_2d[0U].x), + static_cast(obj.bounding_box_2d[2U].y - obj.bounding_box_2d[0U].y)}; + cv::rectangle(res, rect, color, 2); + + char text[256U]; + sprintf(text, "Class %d - %.1f%%", obj.raw_label, obj.confidence); + if (obj.mask.isInit() && obj.mask.getWidth() > 0U && obj.mask.getHeight() > 0U) { + const cv::Mat obj_mask = slMat2cvMat(obj.mask); + mask(rect).setTo(color, obj_mask); + } + + int baseLine{0}; + const cv::Size label_size{cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine)}; + + const int x{rect.x}; + const int y{std::min(rect.y + 1, res.rows)}; + + cv::rectangle(res, cv::Rect(x, y, label_size.width, label_size.height + baseLine), {0, 0, 255}, -1); + cv::putText(res, text, cv::Point(x, y + label_size.height), cv::FONT_HERSHEY_SIMPLEX, 0.4, {255, 255, 255}, 1); + } + cv::addWeighted(res, 0.5, mask, 0.8, 1, res); +} + +void print(const std::string& msg_prefix, const sl::ERROR_CODE err_code, const std::string& msg_suffix) { + std::cout << "[Sample] "; + if (err_code != sl::ERROR_CODE::SUCCESS) + std::cout << "[Error] "; + std::cout << msg_prefix << " "; + if (err_code != sl::ERROR_CODE::SUCCESS) { + std::cout << " | " << toString(err_code) << " : "; + std::cout << toVerbose(err_code); + } + if (!msg_suffix.empty()) + std::cout << " " << msg_suffix; + std::cout << std::endl; +} + +int main(int argc, char** argv) { + if (argc == 1) { + std::cout << "Usage: ./yolo_onnx_zed yolov8s.onnx " << std::endl; + return 0; + } + + /// Opening the ZED camera before the model deserialization to avoid cuda context issue + sl::Camera zed; + sl::InitParameters init_parameters; + init_parameters.sdk_verbose = true; + init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA; + 
init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; // OpenGL's coordinate system is right_handed + + if (argc > 2) { + const std::string zed_opt = argv[2]; + if (zed_opt.find(".svo") != std::string::npos) + init_parameters.input.setFromSVOFile(zed_opt.c_str()); + } + + // Open the camera + const sl::ERROR_CODE open_ret = zed.open(init_parameters); + if (open_ret != sl::ERROR_CODE::SUCCESS) { + print("Camera Open", open_ret, "Exit program."); + return EXIT_FAILURE; + } + + // Enable Positional tracking + zed.enablePositionalTracking(); + + // Enable Custom OD + constexpr bool enable_tracking = true; + sl::ObjectDetectionParameters detection_parameters; + detection_parameters.enable_tracking = enable_tracking; + detection_parameters.enable_segmentation = false; + detection_parameters.detection_model = sl::OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS; + detection_parameters.custom_onnx_file.set(argv[1U]); + detection_parameters.custom_onnx_dynamic_input_shape = sl::Resolution(320, 320); // Provide resolution for dynamic shape model + const sl::ERROR_CODE od_ret = zed.enableObjectDetection(detection_parameters); + if (od_ret != sl::ERROR_CODE::SUCCESS) { + print("enableObjectDetection", od_ret, "\nExit program."); + zed.close(); + return EXIT_FAILURE; + } + + // Camera config + const sl::CameraConfiguration camera_config = zed.getCameraInformation().camera_configuration; + const sl::Resolution pc_resolution(std::min((int) camera_config.resolution.width, 720), std::min((int) camera_config.resolution.height, 404)); + const sl::CameraConfiguration camera_info = zed.getCameraInformation(pc_resolution).camera_configuration; + + // Create OpenGL Viewer + GLViewer viewer; + viewer.init(argc, argv, camera_info.calibration_parameters.left_cam, enable_tracking); + + // Prepare SDK's output retrieval + const sl::Resolution display_resolution = zed.getCameraInformation().camera_configuration.resolution; + sl::Mat left_sl, point_cloud; + cv::Mat 
left_cv; + sl::CustomObjectDetectionRuntimeParameters objectTracker_parameters_rt; + // All classes parameters + objectTracker_parameters_rt.object_detection_properties.detection_confidence_threshold = 20.f; + // objectTracker_parameters_rt.object_detection_properties.is_static = true; + // objectTracker_parameters_rt.object_detection_properties.tracking_timeout = 100.f; + // // Per classes paramters override + // objectTracker_parameters_rt.object_class_detection_properties[0U].detection_confidence_threshold = 80.f; + // objectTracker_parameters_rt.object_class_detection_properties[1U].min_box_width_normalized = 0.01f; + // objectTracker_parameters_rt.object_class_detection_properties[1U].max_box_width_normalized = 0.5f; + // objectTracker_parameters_rt.object_class_detection_properties[1U].min_box_height_normalized = 0.01f; + // objectTracker_parameters_rt.object_class_detection_properties[1U].max_box_height_normalized = 0.5f; + + sl::Objects objects; + sl::Pose cam_w_pose; + cam_w_pose.pose_data.setIdentity(); + + // Main loop + while (viewer.isAvailable()) { + if (zed.grab() == sl::ERROR_CODE::SUCCESS) { + + // Get image for display + zed.retrieveImage(left_sl, sl::VIEW::LEFT); + left_cv = slMat2cvMat(left_sl); + + // Retrieve the tracked objects, with 2D and 3D attributes + zed.retrieveObjects(objects, objectTracker_parameters_rt); + + // GL Viewer + zed.retrieveMeasure(point_cloud, sl::MEASURE::XYZRGBA, sl::MEM::GPU, pc_resolution); + zed.getPosition(cam_w_pose, sl::REFERENCE_FRAME::WORLD); + viewer.updateData(point_cloud, objects.object_list, cam_w_pose.pose_data, objectTracker_parameters_rt); + + // Displaying the SDK objects + draw_objects(left_cv, left_cv, objects, CLASS_COLORS, enable_tracking); + cv::imshow("ZED retrieved Objects", left_cv); + int const key{cv::waitKey(10)}; + if (key == 'q' || key == 'Q' || key == 27) + break; + } + } + + viewer.exit(); + + return 0; +} diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/LICENSE 
b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/LICENSE deleted file mode 100644 index e0718cfd..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019-2020 Wang Xinyu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/README.md b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/README.md deleted file mode 100644 index edf7ba90..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Yolov5 with ZED Custom Box input - -This sample is designed to run a state of the art object detection model using the highly optimized TensorRT framework. The image are taken from the ZED SDK, and the 2D box detections are then ingested into the ZED SDK to extract 3D informations (localization, 3D bounding boxes) and tracking. 
- -This sample is a fork from [wang-xinyu/tensorrtx](https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5). The Pytorch implementation is [ultralytics/yolov5](https://github.com/ultralytics/yolov5). The default model was trained on COCO dataset (80 classes), a custom detector can be trained with the same architecture, it requires a configuration tweak (see below). This tutorial walk you through the workflow of training a custom detector https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data. - - -## Getting Started - - - Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/) - - Check the [Documentation](https://www.stereolabs.com/docs/) - - [TensorRT Documentation](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html) - -## Different versions of yolov5 - -- For yolov5 v5.0, download .pt from [yolov5 release v5.0](https://github.com/ultralytics/yolov5/releases/tag/v5.0), `git clone -b v5.0 https://github.com/ultralytics/yolov5.git` - -## Config - -- Choose the model s/m/l/x/s6/m6/l6/x6 from command line arguments. -- Input shape defined in yololayer.h **DO NOT FORGET TO CHANGE INPUT HEIGHT AND WIDTH, IF USING CUSTOM MODEL** -- Number of classes defined in yololayer.h, **DO NOT FORGET TO ADAPT THIS, IF USING CUSTOM MODEL** -- FP16/FP32 can be selected by the macro in yolov5.cpp, FP16 is faster if the GPU support it (all jetsons or GeForce RTX cards), -- GPU id can be selected by the macro in yolov5.cpp -- NMS thresh in yolov5.cpp -- BBox confidence thresh in yolov5.cpp - -## Using the sample - - -### 1. (Optional for first run) Generate .wts from pytorch with .pt - -**This file has already been generated and can be downloaded [here](https://download.stereolabs.com/sample_custom_objects/yolov5s.wts.zip)** (and needs to be unzipped) to run the sample. - -This procedure can be applied to other models (such as `l` or `m` variants) or custom dataset trained model. 
- -The goal is to export the PyTorch model `.pt` into a easily readable weight file `.wts`. - -```sh -git clone -b v5.0 https://github.com/ultralytics/yolov5.git - -# Download the pretrained weight file -wget https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt -cp gen_wts.py {ultralytics}/yolov5 -cd {ultralytics}/yolov5 - -python gen_wts.py -w yolov5s.pt -# a file 'yolov5s.wts' will be generated. -``` - - -### 2. Build the sample - -If a custom model is used, let's say trained on another dataset than COCO, with a different number of classes, `CLASS_NUM` , `INPUT_H` and `INPUT_W` in yololayer.h must be updated accordingly. - - - Build for [Windows](https://www.stereolabs.com/docs/app-development/cpp/windows/) - - Build for [Linux/Jetson](https://www.stereolabs.com/docs/app-development/cpp/linux/) - - -### 3. Generate the TensorRT engine - -TensorRT apply heavy optimisation by processing the network structure itself and benchmarking all the available implementation of each inference function to take the fastest. The result in the inference engine. This process can take a few minutes so we usually want to generate it the first time than saving it for later reload. This step should be done at each model or weight change, but only once. - -```sh -./yolov5_zed -s [.wts] [.engine] [s/m/l/x/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file - -# For example yolov5s -./yolov5_zed -s yolov5s.wts yolov5s.engine s -# a file 'yolov5s.engine' will be generated. - -# For example Custom model with depth_multiple=0.17, width_multiple=0.25 in yolov5.yaml -./yolov5_zed -s yolov5_custom.wts yolov5_custom.engine c 0.17 0.25 -# a file 'yolov5_custom.engine' will be generated. -``` - -### 4. 
Running the sample with the engine generated - -```sh -./yolov5_zed -d [.engine] [zed camera id / optional svo filepath] // deserialize and run inference - -# For example yolov5s -./yolov5_zed -d yolov5s.engine 0 # 0 for zed camera id 0 - -# With an SVO file -./yolov5_zed -d yolov5.engine ./foo.svo -``` diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/gen_wts.py b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/gen_wts.py deleted file mode 100644 index 501fd638..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/gen_wts.py +++ /dev/null @@ -1,21 +0,0 @@ -import torch -import struct -import sys -from utils.torch_utils import select_device - -# Initialize -device = select_device('cpu') -pt_file = sys.argv[1] -# Load model -model = torch.load(pt_file, map_location=device)['model'].float() # load to FP32 -model.to(device).eval() - -with open(pt_file.split('.')[0] + '.wts', 'w') as f: - f.write('{}\n'.format(len(model.state_dict().keys()))) - for k, v in model.state_dict().items(): - vr = v.reshape(-1).cpu().numpy() - f.write('{} {} '.format(k, len(vr))) - for vv in vr: - f.write(' ') - f.write(struct.pack('>f',float(vv)).hex()) - f.write('\n') diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/calibrator.h b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/calibrator.h deleted file mode 100644 index 27f2d377..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/calibrator.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef ENTROPY_CALIBRATOR_H -#define ENTROPY_CALIBRATOR_H - -#include "NvInfer.h" -#include -#include - -#if NV_TENSORRT_MAJOR >= 8 -#define TRT_NOEXCEPT noexcept -#else -#define TRT_NOEXCEPT -#endif - -//! \class Int8EntropyCalibrator2 -//! -//! \brief Implements Entropy calibrator 2. -//! CalibrationAlgoType is kENTROPY_CALIBRATION_2. -//! 
-class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 -{ -public: - Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true); - - virtual ~Int8EntropyCalibrator2(); - int getBatchSize() const TRT_NOEXCEPT override; - bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override; - const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override; - void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override; - -private: - int batchsize_; - int input_w_; - int input_h_; - int img_idx_; - std::string img_dir_; - std::vector img_files_; - size_t input_count_; - std::string calib_table_name_; - const char* input_blob_name_; - bool read_cache_; - void* device_input_; - std::vector calib_cache_; -}; - -#endif // ENTROPY_CALIBRATOR_H diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/common.hpp b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/common.hpp deleted file mode 100644 index 2e1a1e8b..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/common.hpp +++ /dev/null @@ -1,306 +0,0 @@ -#ifndef YOLOV5_COMMON_H_ -#define YOLOV5_COMMON_H_ - -#include -#include -#include -#include -#include -#include "NvInfer.h" -#include "yololayer.h" - -using namespace nvinfer1; - -cv::Rect get_rect(cv::Mat& img, float bbox[4]) { - int l, r, t, b; - float r_w = Yolo::INPUT_W / (img.cols * 1.0); - float r_h = Yolo::INPUT_H / (img.rows * 1.0); - if (r_h > r_w) { - l = bbox[0] - bbox[2] / 2.f; - r = bbox[0] + bbox[2] / 2.f; - t = bbox[1] - bbox[3] / 2.f - (Yolo::INPUT_H - r_w * img.rows) / 2; - b = bbox[1] + bbox[3] / 2.f - (Yolo::INPUT_H - r_w * img.rows) / 2; - l = l / r_w; - r = r / r_w; - t = t / r_w; - b = b / r_w; - } else { - l = bbox[0] - bbox[2] / 2.f - (Yolo::INPUT_W - r_h * img.cols) / 2; - r = bbox[0] + bbox[2] / 2.f - 
(Yolo::INPUT_W - r_h * img.cols) / 2; - t = bbox[1] - bbox[3] / 2.f; - b = bbox[1] + bbox[3] / 2.f; - l = l / r_h; - r = r / r_h; - t = t / r_h; - b = b / r_h; - } - return cv::Rect(l, t, r - l, b - t); -} - -float iou(float lbox[4], float rbox[4]) { - float interBox[] = { - (std::max)(lbox[0] - lbox[2] / 2.f , rbox[0] - rbox[2] / 2.f), //left - (std::min)(lbox[0] + lbox[2] / 2.f , rbox[0] + rbox[2] / 2.f), //right - (std::max)(lbox[1] - lbox[3] / 2.f , rbox[1] - rbox[3] / 2.f), //top - (std::min)(lbox[1] + lbox[3] / 2.f , rbox[1] + rbox[3] / 2.f), //bottom - }; - - if (interBox[2] > interBox[3] || interBox[0] > interBox[1]) - return 0.0f; - - float interBoxS = (interBox[1] - interBox[0])*(interBox[3] - interBox[2]); - return interBoxS / (lbox[2] * lbox[3] + rbox[2] * rbox[3] - interBoxS); -} - -bool cmp(const Yolo::Detection& a, const Yolo::Detection& b) { - return a.conf > b.conf; -} - -void nms(std::vector& res, float *output, float conf_thresh, float nms_thresh = 0.5) { - int det_size = sizeof(Yolo::Detection) / sizeof(float); - std::map> m; - for (int i = 0; i < output[0] && i < Yolo::MAX_OUTPUT_BBOX_COUNT; i++) { - if (output[1 + det_size * i + 4] <= conf_thresh) continue; - Yolo::Detection det; - memcpy(&det, &output[1 + det_size * i], det_size * sizeof(float)); - if (m.count(det.class_id) == 0) m.emplace(det.class_id, std::vector()); - m[det.class_id].push_back(det); - } - for (auto it = m.begin(); it != m.end(); it++) { - //std::cout << it->second[0].class_id << " --- " << std::endl; - auto& dets = it->second; - std::sort(dets.begin(), dets.end(), cmp); - for (size_t m = 0; m < dets.size(); ++m) { - auto& item = dets[m]; - res.push_back(item); - for (size_t n = m + 1; n < dets.size(); ++n) { - if (iou(item.bbox, dets[n].bbox) > nms_thresh) { - dets.erase(dets.begin() + n); - --n; - } - } - } - } -} - -// TensorRT weight files have a simple space delimited format: -// [type] [size] -std::map loadWeights(const std::string file) { - std::cout << "Loading 
weights: " << file << std::endl; - std::map weightMap; - - // Open weights file - std::ifstream input(file); - assert(input.is_open() && "Unable to load weight file. please check if the .wts file path is right!!!!!!"); - - // Read number of weight blobs - int32_t count; - input >> count; - assert(count > 0 && "Invalid weight map file."); - - while (count--) - { - Weights wt{ DataType::kFLOAT, nullptr, 0 }; - uint32_t size; - - // Read name and type of blob - std::string name; - input >> name >> std::dec >> size; - wt.type = DataType::kFLOAT; - - // Load blob - uint32_t* val = reinterpret_cast(malloc(sizeof(val) * size)); - for (uint32_t x = 0, y = size; x < y; ++x) - { - input >> std::hex >> val[x]; - } - wt.values = val; - - wt.count = size; - weightMap[name] = wt; - } - - return weightMap; -} - -IScaleLayer* addBatchNorm2d(INetworkDefinition *network, std::map& weightMap, ITensor& input, std::string lname, float eps) { - float *gamma = (float*)weightMap[lname + ".weight"].values; - float *beta = (float*)weightMap[lname + ".bias"].values; - float *mean = (float*)weightMap[lname + ".running_mean"].values; - float *var = (float*)weightMap[lname + ".running_var"].values; - int len = weightMap[lname + ".running_var"].count; - - float *scval = reinterpret_cast(malloc(sizeof(float) * len)); - for (int i = 0; i < len; i++) { - scval[i] = gamma[i] / sqrt(var[i] + eps); - } - Weights scale{ DataType::kFLOAT, scval, len }; - - float *shval = reinterpret_cast(malloc(sizeof(float) * len)); - for (int i = 0; i < len; i++) { - shval[i] = beta[i] - mean[i] * gamma[i] / sqrt(var[i] + eps); - } - Weights shift{ DataType::kFLOAT, shval, len }; - - float *pval = reinterpret_cast(malloc(sizeof(float) * len)); - for (int i = 0; i < len; i++) { - pval[i] = 1.0; - } - Weights power{ DataType::kFLOAT, pval, len }; - - weightMap[lname + ".scale"] = scale; - weightMap[lname + ".shift"] = shift; - weightMap[lname + ".power"] = power; - IScaleLayer* scale_1 = network->addScale(input, 
ScaleMode::kCHANNEL, shift, scale, power); - assert(scale_1); - return scale_1; -} - -ILayer* convBlock(INetworkDefinition *network, std::map& weightMap, ITensor& input, int outch, int ksize, int s, int g, std::string lname) { - Weights emptywts{ DataType::kFLOAT, nullptr, 0 }; - int p = ksize / 2; - IConvolutionLayer* conv1 = network->addConvolutionNd(input, outch, DimsHW{ ksize, ksize }, weightMap[lname + ".conv.weight"], emptywts); - assert(conv1); - conv1->setStrideNd(DimsHW{ s, s }); - conv1->setPaddingNd(DimsHW{ p, p }); - conv1->setNbGroups(g); - IScaleLayer* bn1 = addBatchNorm2d(network, weightMap, *conv1->getOutput(0), lname + ".bn", 1e-3); - - // silu = x * sigmoid - auto sig = network->addActivation(*bn1->getOutput(0), ActivationType::kSIGMOID); - assert(sig); - auto ew = network->addElementWise(*bn1->getOutput(0), *sig->getOutput(0), ElementWiseOperation::kPROD); - assert(ew); - return ew; -} - -ILayer* focus(INetworkDefinition *network, std::map& weightMap, ITensor& input, int inch, int outch, int ksize, std::string lname) { - ISliceLayer *s1 = network->addSlice(input, Dims3{ 0, 0, 0 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ISliceLayer *s2 = network->addSlice(input, Dims3{ 0, 1, 0 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ISliceLayer *s3 = network->addSlice(input, Dims3{ 0, 0, 1 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ISliceLayer *s4 = network->addSlice(input, Dims3{ 0, 1, 1 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ITensor* inputTensors[] = { s1->getOutput(0), s2->getOutput(0), s3->getOutput(0), s4->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 4); - auto conv = convBlock(network, weightMap, *cat->getOutput(0), outch, ksize, 1, 1, lname + ".conv"); - return conv; -} - -ILayer* bottleneck(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, bool shortcut, int 
g, float e, std::string lname) { - auto cv1 = convBlock(network, weightMap, input, (int)((float)c2 * e), 1, 1, 1, lname + ".cv1"); - auto cv2 = convBlock(network, weightMap, *cv1->getOutput(0), c2, 3, 1, g, lname + ".cv2"); - if (shortcut && c1 == c2) { - auto ew = network->addElementWise(input, *cv2->getOutput(0), ElementWiseOperation::kSUM); - return ew; - } - return cv2; -} - -ILayer* bottleneckCSP(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) { - Weights emptywts{ DataType::kFLOAT, nullptr, 0 }; - int c_ = (int)((float)c2 * e); - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - auto cv2 = network->addConvolutionNd(input, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv2.weight"], emptywts); - ITensor *y1 = cv1->getOutput(0); - for (int i = 0; i < n; i++) { - auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." + std::to_string(i)); - y1 = b->getOutput(0); - } - auto cv3 = network->addConvolutionNd(*y1, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv3.weight"], emptywts); - - ITensor* inputTensors[] = { cv3->getOutput(0), cv2->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 2); - - IScaleLayer* bn = addBatchNorm2d(network, weightMap, *cat->getOutput(0), lname + ".bn", 1e-4); - auto lr = network->addActivation(*bn->getOutput(0), ActivationType::kLEAKY_RELU); - lr->setAlpha(0.1); - - auto cv4 = convBlock(network, weightMap, *lr->getOutput(0), c2, 1, 1, 1, lname + ".cv4"); - return cv4; -} - -ILayer* C3(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) { - int c_ = (int)((float)c2 * e); - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - auto cv2 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv2"); - ITensor *y1 = cv1->getOutput(0); - for (int i = 0; i < n; i++) { - 
auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." + std::to_string(i)); - y1 = b->getOutput(0); - } - - ITensor* inputTensors[] = { y1, cv2->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 2); - - auto cv3 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv3"); - return cv3; -} - -ILayer* SPP(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int k1, int k2, int k3, std::string lname) { - int c_ = c1 / 2; - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - - auto pool1 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k1, k1 }); - pool1->setPaddingNd(DimsHW{ k1 / 2, k1 / 2 }); - pool1->setStrideNd(DimsHW{ 1, 1 }); - auto pool2 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k2, k2 }); - pool2->setPaddingNd(DimsHW{ k2 / 2, k2 / 2 }); - pool2->setStrideNd(DimsHW{ 1, 1 }); - auto pool3 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k3, k3 }); - pool3->setPaddingNd(DimsHW{ k3 / 2, k3 / 2 }); - pool3->setStrideNd(DimsHW{ 1, 1 }); - - ITensor* inputTensors[] = { cv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 4); - - auto cv2 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv2"); - return cv2; -} - -std::vector> getAnchors(std::map& weightMap, std::string lname) { - std::vector> anchors; - Weights wts = weightMap[lname + ".anchor_grid"]; - int anchor_len = Yolo::CHECK_COUNT * 2; - for (int i = 0; i < wts.count / anchor_len; i++) { - auto *p = (const float*)wts.values + i * anchor_len; - std::vector anchor(p, p + anchor_len); - anchors.push_back(anchor); - } - return anchors; -} - -IPluginV2Layer* addYoLoLayer(INetworkDefinition *network, std::map& weightMap, std::string lname, std::vector dets) { - auto creator = 
getPluginRegistry()->getPluginCreator("YoloLayer_TRT", "1"); - auto anchors = getAnchors(weightMap, lname); - PluginField plugin_fields[2]; - int netinfo[4] = {Yolo::CLASS_NUM, Yolo::INPUT_W, Yolo::INPUT_H, Yolo::MAX_OUTPUT_BBOX_COUNT}; - plugin_fields[0].data = netinfo; - plugin_fields[0].length = 4; - plugin_fields[0].name = "netinfo"; - plugin_fields[0].type = PluginFieldType::kFLOAT32; - int scale = 8; - std::vector kernels; - for (size_t i = 0; i < anchors.size(); i++) { - Yolo::YoloKernel kernel; - kernel.width = Yolo::INPUT_W / scale; - kernel.height = Yolo::INPUT_H / scale; - memcpy(kernel.anchors, &anchors[i][0], anchors[i].size() * sizeof(float)); - kernels.push_back(kernel); - scale *= 2; - } - plugin_fields[1].data = &kernels[0]; - plugin_fields[1].length = kernels.size(); - plugin_fields[1].name = "kernels"; - plugin_fields[1].type = PluginFieldType::kFLOAT32; - PluginFieldCollection plugin_data; - plugin_data.nbFields = 2; - plugin_data.fields = plugin_fields; - IPluginV2 *plugin_obj = creator->createPlugin("yololayer", &plugin_data); - std::vector input_tensors; - for (auto det: dets) { - input_tensors.push_back(det->getOutput(0)); - } - auto yolo = network->addPluginV2(&input_tensors[0], input_tensors.size(), *plugin_obj); - return yolo; -} -#endif - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/cuda_utils.h b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/cuda_utils.h deleted file mode 100644 index 8fbd3199..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/cuda_utils.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef TRTX_CUDA_UTILS_H_ -#define TRTX_CUDA_UTILS_H_ - -#include - -#ifndef CUDA_CHECK -#define CUDA_CHECK(callstr)\ - {\ - cudaError_t error_code = callstr;\ - if (error_code != cudaSuccess) {\ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\ - assert(0);\ - }\ - } -#endif // CUDA_CHECK - -#endif // TRTX_CUDA_UTILS_H_ - diff --git 
a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/logging.h b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/logging.h deleted file mode 100644 index 1339ee2f..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/logging.h +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TENSORRT_LOGGING_H -#define TENSORRT_LOGGING_H - -#include "NvInferRuntimeCommon.h" -#include -#include -#include -#include -#include -#include -#include - -#if NV_TENSORRT_MAJOR >= 8 -#define TRT_NOEXCEPT noexcept -#else -#define TRT_NOEXCEPT -#endif - -using Severity = nvinfer1::ILogger::Severity; - -class LogStreamConsumerBuffer : public std::stringbuf -{ -public: - LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog) - : mOutput(stream) - , mPrefix(prefix) - , mShouldLog(shouldLog) - { - } - - LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other) - : mOutput(other.mOutput) - { - } - - ~LogStreamConsumerBuffer() - { - // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence - // std::streambuf::pptr() gives a pointer to the current position of the output sequence - // if the pointer to the beginning is not equal to the pointer to the current position, - // call putOutput() to log the output to the stream - if (pbase() != 
pptr()) - { - putOutput(); - } - } - - // synchronizes the stream buffer and returns 0 on success - // synchronizing the stream buffer consists of inserting the buffer contents into the stream, - // resetting the buffer and flushing the stream - virtual int sync() - { - putOutput(); - return 0; - } - - void putOutput() - { - if (mShouldLog) - { - // prepend timestamp - std::time_t timestamp = std::time(nullptr); - tm* tm_local = std::localtime(×tamp); - std::cout << "["; - std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/"; - std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] "; - // std::stringbuf::str() gets the string contents of the buffer - // insert the buffer contents pre-appended by the appropriate prefix into the stream - mOutput << mPrefix << str(); - // set the buffer to empty - str(""); - // flush the stream - mOutput.flush(); - } - } - - void setShouldLog(bool shouldLog) - { - mShouldLog = shouldLog; - } - -private: - std::ostream& mOutput; - std::string mPrefix; - bool mShouldLog; -}; - -//! -//! \class LogStreamConsumerBase -//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer -//! -class LogStreamConsumerBase -{ -public: - LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog) - : mBuffer(stream, prefix, shouldLog) - { - } - -protected: - LogStreamConsumerBuffer mBuffer; -}; - -//! -//! \class LogStreamConsumer -//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages. -//! Order of base classes is LogStreamConsumerBase and then std::ostream. -//! 
This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field -//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream. -//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream. -//! Please do not change the order of the parent classes. -//! -class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream -{ -public: - //! \brief Creates a LogStreamConsumer which logs messages with level severity. - //! Reportable severity determines if the messages are severe enough to be logged. - LogStreamConsumer(Severity reportableSeverity, Severity severity) - : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity) - , std::ostream(&mBuffer) // links the stream buffer with the stream - , mShouldLog(severity <= reportableSeverity) - , mSeverity(severity) - { - } - - LogStreamConsumer(LogStreamConsumer&& other) - : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog) - , std::ostream(&mBuffer) // links the stream buffer with the stream - , mShouldLog(other.mShouldLog) - , mSeverity(other.mSeverity) - { - } - - void setReportableSeverity(Severity reportableSeverity) - { - mShouldLog = mSeverity <= reportableSeverity; - mBuffer.setShouldLog(mShouldLog); - } - -private: - static std::ostream& severityOstream(Severity severity) - { - return severity >= Severity::kINFO ? std::cout : std::cerr; - } - - static std::string severityPrefix(Severity severity) - { - switch (severity) - { - case Severity::kINTERNAL_ERROR: return "[F] "; - case Severity::kERROR: return "[E] "; - case Severity::kWARNING: return "[W] "; - case Severity::kINFO: return "[I] "; - case Severity::kVERBOSE: return "[V] "; - default: assert(0); return ""; - } - } - - bool mShouldLog; - Severity mSeverity; -}; - -//! \class Logger -//! -//! 
\brief Class which manages logging of TensorRT tools and samples -//! -//! \details This class provides a common interface for TensorRT tools and samples to log information to the console, -//! and supports logging two types of messages: -//! -//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal) -//! - Test pass/fail messages -//! -//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is -//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location. -//! -//! In the future, this class could be extended to support dumping test results to a file in some standard format -//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run). -//! -//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger -//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT -//! library and messages coming from the sample. -//! -//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the -//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger -//! object. - -class Logger : public nvinfer1::ILogger -{ -public: - Logger(Severity severity = Severity::kWARNING) - : mReportableSeverity(severity) - { - } - - //! - //! \enum TestResult - //! \brief Represents the state of a given test - //! - enum class TestResult - { - kRUNNING, //!< The test is running - kPASSED, //!< The test passed - kFAILED, //!< The test failed - kWAIVED //!< The test was waived - }; - - //! - //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger - //! \return The nvinfer1::ILogger associated with this Logger - //! - //! 
TODO Once all samples are updated to use this method to register the logger with TensorRT, - //! we can eliminate the inheritance of Logger from ILogger - //! - nvinfer1::ILogger& getTRTLogger() - { - return *this; - } - - //! - //! \brief Implementation of the nvinfer1::ILogger::log() virtual method - //! - //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the - //! inheritance from nvinfer1::ILogger - //! - void log(Severity severity, const char* msg) TRT_NOEXCEPT override - { - LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; - } - - //! - //! \brief Method for controlling the verbosity of logging output - //! - //! \param severity The logger will only emit messages that have severity of this level or higher. - //! - void setReportableSeverity(Severity severity) - { - mReportableSeverity = severity; - } - - //! - //! \brief Opaque handle that holds logging information for a particular test - //! - //! This object is an opaque handle to information used by the Logger to print test results. - //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used - //! with Logger::reportTest{Start,End}(). - //! - class TestAtom - { - public: - TestAtom(TestAtom&&) = default; - - private: - friend class Logger; - - TestAtom(bool started, const std::string& name, const std::string& cmdline) - : mStarted(started) - , mName(name) - , mCmdline(cmdline) - { - } - - bool mStarted; - std::string mName; - std::string mCmdline; - }; - - //! - //! \brief Define a test for logging - //! - //! \param[in] name The name of the test. This should be a string starting with - //! "TensorRT" and containing dot-separated strings containing - //! the characters [A-Za-z0-9_]. - //! For example, "TensorRT.sample_googlenet" - //! \param[in] cmdline The command line used to reproduce the test - // - //! 
\return a TestAtom that can be used in Logger::reportTest{Start,End}(). - //! - static TestAtom defineTest(const std::string& name, const std::string& cmdline) - { - return TestAtom(false, name, cmdline); - } - - //! - //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments - //! as input - //! - //! \param[in] name The name of the test - //! \param[in] argc The number of command-line arguments - //! \param[in] argv The array of command-line arguments (given as C strings) - //! - //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). - static TestAtom defineTest(const std::string& name, int argc, char const* const* argv) - { - auto cmdline = genCmdlineString(argc, argv); - return defineTest(name, cmdline); - } - - //! - //! \brief Report that a test has started. - //! - //! \pre reportTestStart() has not been called yet for the given testAtom - //! - //! \param[in] testAtom The handle to the test that has started - //! - static void reportTestStart(TestAtom& testAtom) - { - reportTestResult(testAtom, TestResult::kRUNNING); - assert(!testAtom.mStarted); - testAtom.mStarted = true; - } - - //! - //! \brief Report that a test has ended. - //! - //! \pre reportTestStart() has been called for the given testAtom - //! - //! \param[in] testAtom The handle to the test that has ended - //! \param[in] result The result of the test. Should be one of TestResult::kPASSED, - //! TestResult::kFAILED, TestResult::kWAIVED - //! 
- static void reportTestEnd(const TestAtom& testAtom, TestResult result) - { - assert(result != TestResult::kRUNNING); - assert(testAtom.mStarted); - reportTestResult(testAtom, result); - } - - static int reportPass(const TestAtom& testAtom) - { - reportTestEnd(testAtom, TestResult::kPASSED); - return EXIT_SUCCESS; - } - - static int reportFail(const TestAtom& testAtom) - { - reportTestEnd(testAtom, TestResult::kFAILED); - return EXIT_FAILURE; - } - - static int reportWaive(const TestAtom& testAtom) - { - reportTestEnd(testAtom, TestResult::kWAIVED); - return EXIT_SUCCESS; - } - - static int reportTest(const TestAtom& testAtom, bool pass) - { - return pass ? reportPass(testAtom) : reportFail(testAtom); - } - - Severity getReportableSeverity() const - { - return mReportableSeverity; - } - -private: - //! - //! \brief returns an appropriate string for prefixing a log message with the given severity - //! - static const char* severityPrefix(Severity severity) - { - switch (severity) - { - case Severity::kINTERNAL_ERROR: return "[F] "; - case Severity::kERROR: return "[E] "; - case Severity::kWARNING: return "[W] "; - case Severity::kINFO: return "[I] "; - case Severity::kVERBOSE: return "[V] "; - default: assert(0); return ""; - } - } - - //! - //! \brief returns an appropriate string for prefixing a test result message with the given result - //! - static const char* testResultString(TestResult result) - { - switch (result) - { - case TestResult::kRUNNING: return "RUNNING"; - case TestResult::kPASSED: return "PASSED"; - case TestResult::kFAILED: return "FAILED"; - case TestResult::kWAIVED: return "WAIVED"; - default: assert(0); return ""; - } - } - - //! - //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity - //! - static std::ostream& severityOstream(Severity severity) - { - return severity >= Severity::kINFO ? std::cout : std::cerr; - } - - //! - //! \brief method that implements logging test results - //! 
- static void reportTestResult(const TestAtom& testAtom, TestResult result) - { - severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " - << testAtom.mCmdline << std::endl; - } - - //! - //! \brief generate a command line string from the given (argc, argv) values - //! - static std::string genCmdlineString(int argc, char const* const* argv) - { - std::stringstream ss; - for (int i = 0; i < argc; i++) - { - if (i > 0) - ss << " "; - ss << argv[i]; - } - return ss.str(); - } - - Severity mReportableSeverity; -}; - -namespace -{ - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE -//! -//! Example usage: -//! -//! LOG_VERBOSE(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_VERBOSE(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO -//! -//! Example usage: -//! -//! LOG_INFO(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_INFO(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING -//! -//! Example usage: -//! -//! LOG_WARN(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_WARN(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR -//! -//! Example usage: -//! -//! LOG_ERROR(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_ERROR(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); -} - -//! -//! 
\brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR -// ("fatal" severity) -//! -//! Example usage: -//! -//! LOG_FATAL(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_FATAL(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); -} - -} // anonymous namespace - -#endif // TENSORRT_LOGGING_H diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/utils.h b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/utils.h deleted file mode 100644 index fe50d039..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/utils.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef TRTX_YOLOV5_UTILS_H_ -#define TRTX_YOLOV5_UTILS_H_ - -#include -#include - -static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) { - int w, h, x, y; - float r_w = input_w / (img.cols*1.0); - float r_h = input_h / (img.rows*1.0); - if (r_h > r_w) { - w = input_w; - h = r_w * img.rows; - x = 0; - y = (input_h - h) / 2; - } else { - w = r_h * img.cols; - h = input_h; - x = (input_w - w) / 2; - y = 0; - } - cv::Mat re(h, w, CV_8UC3); - cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR); - cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128)); - re.copyTo(out(cv::Rect(x, y, re.cols, re.rows))); - return out; -} - -static inline int read_files_in_dir(const char *p_dir_name, std::vector &file_names) { - DIR *p_dir = opendir(p_dir_name); - if (p_dir == nullptr) { - return -1; - } - - struct dirent* p_file = nullptr; - while ((p_file = readdir(p_dir)) != nullptr) { - if (strcmp(p_file->d_name, ".") != 0 && - strcmp(p_file->d_name, "..") != 0) { - //std::string cur_file_name(p_dir_name); - //cur_file_name += "/"; - //cur_file_name += p_file->d_name; - std::string cur_file_name(p_file->d_name); - file_names.push_back(cur_file_name); - } - } - - closedir(p_dir); - return 0; -} - -#endif // 
TRTX_YOLOV5_UTILS_H_ - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/utils.hpp b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/utils.hpp deleted file mode 100644 index a96504f8..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/utils.hpp +++ /dev/null @@ -1,109 +0,0 @@ -#ifndef DRAWING_UTILS_HPP -#define DRAWING_UTILS_HPP - -#include - -#include -#include - -float const id_colors[5][3] = { - { 232.0f, 176.0f, 59.0f}, - { 175.0f, 208.0f, 25.0f}, - { 102.0f, 205.0f, 105.0f}, - { 185.0f, 0.0f, 255.0f}, - { 99.0f, 107.0f, 252.0f} -}; - -inline cv::Scalar generateColorID_u(int idx) { - if (idx < 0) return cv::Scalar(236, 184, 36, 255); - int color_idx = idx % 5; - return cv::Scalar(id_colors[color_idx][0], id_colors[color_idx][1], id_colors[color_idx][2], 255); -} - -inline sl::float4 generateColorID_f(int idx) { - auto clr_u = generateColorID_u(idx); - return sl::float4(static_cast (clr_u.val[0]) / 255.f, static_cast (clr_u.val[1]) / 255.f, static_cast (clr_u.val[2]) / 255.f, 1.f); -} - -inline bool renderObject(const sl::ObjectData& i, const bool isTrackingON) { - if (isTrackingON) - return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK); - else - return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK || i.tracking_state == sl::OBJECT_TRACKING_STATE::OFF); -} - -float const class_colors[6][3] = { - { 44.0f, 117.0f, 255.0f}, // PEOPLE - { 255.0f, 0.0f, 255.0f}, // VEHICLE - { 0.0f, 0.0f, 255.0f}, - { 0.0f, 255.0f, 255.0f}, - { 0.0f, 255.0f, 0.0f}, - { 255.0f, 255.0f, 255.0f} -}; - -inline sl::float4 getColorClass(int idx) { - idx = std::min(5, idx); - sl::float4 clr(class_colors[idx][0], class_colors[idx][1], class_colors[idx][2], 1.f); - return clr / 255.f; -} - -template -inline uchar _applyFading(T val, float current_alpha, double current_clr) { - return static_cast (current_alpha * current_clr + (1.0 - current_alpha) * val); -} - -inline cv::Vec4b applyFading(cv::Scalar val, 
float current_alpha, cv::Scalar current_clr) { - cv::Vec4b out; - out[0] = _applyFading(val.val[0], current_alpha, current_clr.val[0]); - out[1] = _applyFading(val.val[1], current_alpha, current_clr.val[1]); - out[2] = _applyFading(val.val[2], current_alpha, current_clr.val[2]); - out[3] = 255; - return out; -} - -inline void drawVerticalLine( - cv::Mat &left_display, - cv::Point2i start_pt, - cv::Point2i end_pt, - cv::Scalar clr, - int thickness) { - int n_steps = 7; - cv::Point2i pt1, pt4; - pt1.x = ((n_steps - 1) * start_pt.x + end_pt.x) / n_steps; - pt1.y = ((n_steps - 1) * start_pt.y + end_pt.y) / n_steps; - - pt4.x = (start_pt.x + (n_steps - 1) * end_pt.x) / n_steps; - pt4.y = (start_pt.y + (n_steps - 1) * end_pt.y) / n_steps; - - cv::line(left_display, start_pt, pt1, clr, thickness); - cv::line(left_display, pt4, end_pt, clr, thickness); -} - -inline cv::Mat slMat2cvMat(sl::Mat& input) { - // Mapping between MAT_TYPE and CV_TYPE - int cv_type = -1; - switch (input.getDataType()) { - case sl::MAT_TYPE::F32_C1: cv_type = CV_32FC1; - break; - case sl::MAT_TYPE::F32_C2: cv_type = CV_32FC2; - break; - case sl::MAT_TYPE::F32_C3: cv_type = CV_32FC3; - break; - case sl::MAT_TYPE::F32_C4: cv_type = CV_32FC4; - break; - case sl::MAT_TYPE::U8_C1: cv_type = CV_8UC1; - break; - case sl::MAT_TYPE::U8_C2: cv_type = CV_8UC2; - break; - case sl::MAT_TYPE::U8_C3: cv_type = CV_8UC3; - break; - case sl::MAT_TYPE::U8_C4: cv_type = CV_8UC4; - break; - default: break; - } - - return cv::Mat(input.getHeight(), input.getWidth(), cv_type, input.getPtr(sl::MEM::CPU)); -} - - -#endif diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/yololayer.h b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/yololayer.h deleted file mode 100644 index 7224bde4..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/include/yololayer.h +++ /dev/null @@ -1,146 +0,0 @@ -#ifndef _YOLO_LAYER_H -#define _YOLO_LAYER_H - -#include -#include 
-#include "NvInfer.h" - - -#if NV_TENSORRT_MAJOR >= 8 -#define TRT_NOEXCEPT noexcept -#define TRT_CONST_ENQUEUE const -#else -#define TRT_NOEXCEPT -#define TRT_CONST_ENQUEUE -#endif - -namespace Yolo -{ - static constexpr int CHECK_COUNT = 3; - static constexpr float IGNORE_THRESH = 0.1f; - struct YoloKernel - { - int width; - int height; - float anchors[CHECK_COUNT * 2]; - }; - static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000; - static constexpr int CLASS_NUM = 80; - static constexpr int INPUT_H = 640; // yolov5's input height and width must be divisible by 32. - static constexpr int INPUT_W = 640; - - static constexpr int LOCATIONS = 4; - struct alignas(float) Detection { - //center_x center_y w h - float bbox[LOCATIONS]; - float conf; // bbox_conf * cls_conf - float class_id; - }; -} - -namespace nvinfer1 -{ - class YoloLayerPlugin : public IPluginV2IOExt - { - public: - YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector& vYoloKernel); - YoloLayerPlugin(const void* data, size_t length); - ~YoloLayerPlugin(); - - int getNbOutputs() const TRT_NOEXCEPT override - { - return 1; - } - - Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override; - - int initialize() TRT_NOEXCEPT override; - - virtual void terminate() TRT_NOEXCEPT override {}; - - virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; } - - virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override; - - virtual size_t getSerializationSize() const TRT_NOEXCEPT override; - - virtual void serialize(void* buffer) const TRT_NOEXCEPT override; - - bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override { - return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT; - } - - const char* getPluginType() 
const TRT_NOEXCEPT override; - - const char* getPluginVersion() const TRT_NOEXCEPT override; - - void destroy() TRT_NOEXCEPT override; - - IPluginV2IOExt* clone() const TRT_NOEXCEPT override; - - void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override; - - const char* getPluginNamespace() const TRT_NOEXCEPT override; - - DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override; - - bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override; - - bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override; - - void attachToContext( - cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override; - - void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override; - - void detachFromContext() TRT_NOEXCEPT override; - - private: - void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1); - int mThreadCount = 256; - const char* mPluginNamespace; - int mKernelCount; - int mClassCount; - int mYoloV5NetWidth; - int mYoloV5NetHeight; - int mMaxOutObject; - std::vector mYoloKernel; - void** mAnchor; - }; - - class YoloPluginCreator : public IPluginCreator - { - public: - YoloPluginCreator(); - - ~YoloPluginCreator() override = default; - - const char* getPluginName() const TRT_NOEXCEPT override; - - const char* getPluginVersion() const TRT_NOEXCEPT override; - - const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override; - - IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override; - - IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override; - - void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override - { - mNamespace = libNamespace; - 
} - - const char* getPluginNamespace() const TRT_NOEXCEPT override - { - return mNamespace.c_str(); - } - - private: - std::string mNamespace; - static PluginFieldCollection mFC; - static std::vector mPluginAttributes; - }; - REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); -}; - -#endif diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/calibrator.cpp b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/calibrator.cpp deleted file mode 100644 index 2e2f5866..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/calibrator.cpp +++ /dev/null @@ -1,80 +0,0 @@ -#include -#include -#include -#include -#include "calibrator.h" -#include "cuda_utils.h" -#include "utils.h" - -Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache) - : batchsize_(batchsize) - , input_w_(input_w) - , input_h_(input_h) - , img_idx_(0) - , img_dir_(img_dir) - , calib_table_name_(calib_table_name) - , input_blob_name_(input_blob_name) - , read_cache_(read_cache) -{ - input_count_ = 3 * input_w * input_h * batchsize; - CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float))); - read_files_in_dir(img_dir, img_files_); -} - -Int8EntropyCalibrator2::~Int8EntropyCalibrator2() -{ - CUDA_CHECK(cudaFree(device_input_)); -} - -int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT -{ - return batchsize_; -} - -bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT -{ - if (img_idx_ + batchsize_ > (int)img_files_.size()) { - return false; - } - - std::vector input_imgs_; - for (int i = img_idx_; i < img_idx_ + batchsize_; i++) { - std::cout << img_files_[i] << " " << i << std::endl; - cv::Mat temp = cv::imread(img_dir_ + img_files_[i]); - if (temp.empty()){ - std::cerr << "Fatal error: image cannot open!" 
<< std::endl; - return false; - } - cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_); - input_imgs_.push_back(pr_img); - } - img_idx_ += batchsize_; - cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false); - - CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice)); - assert(!strcmp(names[0], input_blob_name_)); - bindings[0] = device_input_; - return true; -} - -const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT -{ - std::cout << "reading calib cache: " << calib_table_name_ << std::endl; - calib_cache_.clear(); - std::ifstream input(calib_table_name_, std::ios::binary); - input >> std::noskipws; - if (read_cache_ && input.good()) - { - std::copy(std::istream_iterator(input), std::istream_iterator(), std::back_inserter(calib_cache_)); - } - length = calib_cache_.size(); - return length ? calib_cache_.data() : nullptr; -} - -void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT -{ - std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl; - std::ofstream output(calib_table_name_, std::ios::binary); - output.write(reinterpret_cast(cache), length); -} - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/yololayer.cu b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/yololayer.cu deleted file mode 100644 index 4c042a66..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/yololayer.cu +++ /dev/null @@ -1,313 +0,0 @@ -#include -#include -#include -#include "yololayer.h" -#include "cuda_utils.h" - -namespace Tn -{ - template - void write(char*& buffer, const T& val) - { - *reinterpret_cast(buffer) = val; - buffer += sizeof(T); - } - - template - void read(const char*& buffer, T& val) - { - val = *reinterpret_cast(buffer); - buffer += sizeof(T); - } -} - -using 
namespace Yolo; - -namespace nvinfer1 -{ - YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector& vYoloKernel) - { - mClassCount = classCount; - mYoloV5NetWidth = netWidth; - mYoloV5NetHeight = netHeight; - mMaxOutObject = maxOut; - mYoloKernel = vYoloKernel; - mKernelCount = vYoloKernel.size(); - - CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); - size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; - for (int ii = 0; ii < mKernelCount; ii++) - { - CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); - const auto& yolo = mYoloKernel[ii]; - CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); - } - } - YoloLayerPlugin::~YoloLayerPlugin() - { - for (int ii = 0; ii < mKernelCount; ii++) - { - CUDA_CHECK(cudaFree(mAnchor[ii])); - } - CUDA_CHECK(cudaFreeHost(mAnchor)); - } - - // create the plugin at runtime from a byte stream - YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) - { - using namespace Tn; - const char *d = reinterpret_cast(data), *a = d; - read(d, mClassCount); - read(d, mThreadCount); - read(d, mKernelCount); - read(d, mYoloV5NetWidth); - read(d, mYoloV5NetHeight); - read(d, mMaxOutObject); - mYoloKernel.resize(mKernelCount); - auto kernelSize = mKernelCount * sizeof(YoloKernel); - memcpy(mYoloKernel.data(), d, kernelSize); - d += kernelSize; - CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); - size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; - for (int ii = 0; ii < mKernelCount; ii++) - { - CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); - const auto& yolo = mYoloKernel[ii]; - CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); - } - assert(d == a + length); - } - - void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT - { - using namespace Tn; - char* d = static_cast(buffer), *a = d; - write(d, mClassCount); - write(d, mThreadCount); - write(d, mKernelCount); - write(d, 
mYoloV5NetWidth); - write(d, mYoloV5NetHeight); - write(d, mMaxOutObject); - auto kernelSize = mKernelCount * sizeof(YoloKernel); - memcpy(d, mYoloKernel.data(), kernelSize); - d += kernelSize; - - assert(d == a + getSerializationSize()); - } - - size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT - { - return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); - } - - int YoloLayerPlugin::initialize() TRT_NOEXCEPT - { - return 0; - } - - Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT - { - //output the result to channel - int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float); - - return Dims3(totalsize + 1, 1, 1); - } - - // Set plugin namespace - void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT - { - mPluginNamespace = pluginNamespace; - } - - const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT - { - return mPluginNamespace; - } - - // Return the DataType of the plugin output at the requested index - DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT - { - return DataType::kFLOAT; - } - - // Return true if output tensor is broadcast across a batch. - bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT - { - return false; - } - - // Return true if plugin can use input that is broadcast across batch without replication. 
- bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT - { - return false; - } - - void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT - { - } - - // Attach the plugin object to an execution context and grant the plugin the access to some context resource. - void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT - { - } - - // Detach the plugin object from its execution context. - void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} - - const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT - { - return "YoloLayer_TRT"; - } - - const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT - { - return "1"; - } - - void YoloLayerPlugin::destroy() TRT_NOEXCEPT - { - delete this; - } - - // Clone the plugin - IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT - { - YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); - p->setPluginNamespace(mPluginNamespace); - return p; - } - - __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; - - __global__ void CalDetection(const float *input, float *output, int noElements, - const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem) - { - - int idx = threadIdx.x + blockDim.x * blockIdx.x; - if (idx >= noElements) return; - - int total_grid = yoloWidth * yoloHeight; - int bnIdx = idx / total_grid; - idx = idx - total_grid * bnIdx; - int info_len_i = 5 + classes; - const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); - - for (int k = 0; k < CHECK_COUNT; ++k) { - float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); - if (box_prob < IGNORE_THRESH) continue; - int 
class_id = 0; - float max_cls_prob = 0.0; - for (int i = 5; i < info_len_i; ++i) { - float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); - if (p > max_cls_prob) { - max_cls_prob = p; - class_id = i - 5; - } - } - float *res_count = output + bnIdx * outputElem; - int count = (int)atomicAdd(res_count, 1); - if (count >= maxoutobject) return; - char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection); - Detection *det = (Detection*)(data); - - int row = idx / yoloWidth; - int col = idx % yoloWidth; - - //Location - // pytorch: - // y = x[i].sigmoid() - // y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy - // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth - det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; - det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; - - // W: (Pw * e^tw) / FeaturemapW * netwidth - // v5: https://github.com/ultralytics/yolov5/issues/471 - det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); - det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; - det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); - det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; - det->conf = box_prob * max_cls_prob; - det->class_id = class_id; - } - } - - void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) - { - int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float); - for (int idx = 0; idx < batchSize; ++idx) { - CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float))); - } - int numElem = 0; - for (unsigned int i = 0; i < mYoloKernel.size(); ++i) { - const auto& yolo = 
mYoloKernel[i]; - numElem = yolo.width * yolo.height * batchSize; - if (numElem < mThreadCount) mThreadCount = numElem; - - //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight); - CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > - (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); - } - } - - - int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT - { - forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); - return 0; - } - - PluginFieldCollection YoloPluginCreator::mFC{}; - std::vector YoloPluginCreator::mPluginAttributes; - - YoloPluginCreator::YoloPluginCreator() - { - mPluginAttributes.clear(); - - mFC.nbFields = mPluginAttributes.size(); - mFC.fields = mPluginAttributes.data(); - } - - const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT - { - return "YoloLayer_TRT"; - } - - const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT - { - return "1"; - } - - const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT - { - return &mFC; - } - - IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT - { - assert(fc->nbFields == 2); - assert(strcmp(fc->fields[0].name, "netinfo") == 0); - assert(strcmp(fc->fields[1].name, "kernels") == 0); - int *p_netinfo = (int*)(fc->fields[0].data); - int class_count = p_netinfo[0]; - int input_w = p_netinfo[1]; - int input_h = p_netinfo[2]; - int max_output_object_count = p_netinfo[3]; - std::vector kernels(fc->fields[1].length); - memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); - YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); - 
obj->setPluginNamespace(mNamespace.c_str()); - return obj; - } - - IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT - { - // This object will be deleted when the network is destroyed, which will - // call YoloLayerPlugin::destroy() - YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); - obj->setPluginNamespace(mNamespace.c_str()); - return obj; - } -} - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/yolov5.cpp b/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/yolov5.cpp deleted file mode 100644 index b4445ed7..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v5.0/src/yolov5.cpp +++ /dev/null @@ -1,523 +0,0 @@ -#include -#include -#include -#include "cuda_utils.h" -#include "logging.h" -#include "common.hpp" -#include "utils.h" -#include "calibrator.h" -#include "GLViewer.hpp" - -#include - -#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32 -#define DEVICE 0 // GPU id -#define NMS_THRESH 0.4 -#define CONF_THRESH 0.5 -#define BATCH_SIZE 1 - -// stuff we know about the network and the input/output blobs -static const int INPUT_H = Yolo::INPUT_H; -static const int INPUT_W = Yolo::INPUT_W; -static const int CLASS_NUM = Yolo::CLASS_NUM; -static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof (Yolo::Detection) / sizeof (float) + 1; // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1 -const char* INPUT_BLOB_NAME = "data"; -const char* OUTPUT_BLOB_NAME = "prob"; -static Logger gLogger; - -static int get_width(int x, float gw, int divisor = 8) { - return int(ceil((x * gw) / divisor)) * divisor; -} - -static int get_depth(int x, float gd) { - if (x == 1) return 1; - int r = round(x * gd); - if (x * gd - int(x * gd) == 0.5 && (int(x * gd) % 2) == 0) { - --r; - } - return std::max(r, 1); -} - -ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* 
builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) { - INetworkDefinition* network = builder->createNetworkV2(0U); - - // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME - ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{3, INPUT_H, INPUT_W}); - assert(data); - - std::map weightMap = loadWeights(wts_name); - - /* ------ yolov5 backbone------ */ - auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0"); - auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1"); - auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2"); - auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3"); - auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4"); - auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5"); - auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6"); - auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7"); - auto spp8 = SPP(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, 9, 13, "model.8"); - - /* ------ yolov5 head ------ */ - auto bottleneck_csp9 = C3(network, weightMap, *spp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.9"); - auto conv10 = convBlock(network, weightMap, *bottleneck_csp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10"); - - auto upsample11 = network->addResize(*conv10->getOutput(0)); - assert(upsample11); - 
upsample11->setResizeMode(ResizeMode::kNEAREST); - upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions()); - - ITensor * inputTensors12[] = {upsample11->getOutput(0), bottleneck_csp6->getOutput(0)}; - auto cat12 = network->addConcatenation(inputTensors12, 2); - auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13"); - auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14"); - - auto upsample15 = network->addResize(*conv14->getOutput(0)); - assert(upsample15); - upsample15->setResizeMode(ResizeMode::kNEAREST); - upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions()); - - ITensor * inputTensors16[] = {upsample15->getOutput(0), bottleneck_csp4->getOutput(0)}; - auto cat16 = network->addConcatenation(inputTensors16, 2); - - auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17"); - - /* ------ detect ------ */ - IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]); - auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18"); - ITensor * inputTensors19[] = {conv18->getOutput(0), conv14->getOutput(0)}; - auto cat19 = network->addConcatenation(inputTensors19, 2); - auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20"); - IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]); - auto conv21 = convBlock(network, weightMap, 
*bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21"); - ITensor * inputTensors22[] = {conv21->getOutput(0), conv10->getOutput(0)}; - auto cat22 = network->addConcatenation(inputTensors22, 2); - auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23"); - IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]); - - auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector{det0, det1, det2}); - yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME); - network->markOutput(*yolo->getOutput(0)); - - // Build engine - builder->setMaxBatchSize(maxBatchSize); - config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB -#if defined(USE_FP16) - config->setFlag(BuilderFlag::kFP16); -#elif defined(USE_INT8) - std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl; - assert(builder->platformHasFastInt8()); - config->setFlag(BuilderFlag::kINT8); - Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME); - config->setInt8Calibrator(calibrator); -#endif - - std::cout << "Building engine, please wait for a while..." << std::endl; - ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config); - std::cout << "Build engine successfully!" 
<< std::endl; - - // Don't need the network any more - network->destroy(); - - // Release host memory - for (auto& mem : weightMap) { - free((void*) (mem.second.values)); - } - - return engine; -} - -ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) { - INetworkDefinition* network = builder->createNetworkV2(0U); - - // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME - ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{3, INPUT_H, INPUT_W}); - assert(data); - - std::map weightMap = loadWeights(wts_name); - - /* ------ yolov5 backbone------ */ - auto focus0 = focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0"); - auto conv1 = convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1"); - auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2"); - auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3"); - auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4"); - auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5"); - auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6"); - auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7"); - auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8"); - auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9"); - auto spp10 = SPP(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), 3, 5, 
7, "model.10"); - auto c3_11 = C3(network, weightMap, *spp10->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.11"); - - /* ------ yolov5 head ------ */ - auto conv12 = convBlock(network, weightMap, *c3_11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12"); - auto upsample13 = network->addResize(*conv12->getOutput(0)); - assert(upsample13); - upsample13->setResizeMode(ResizeMode::kNEAREST); - upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions()); - ITensor * inputTensors14[] = {upsample13->getOutput(0), c3_8->getOutput(0)}; - auto cat14 = network->addConcatenation(inputTensors14, 2); - auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15"); - - auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16"); - auto upsample17 = network->addResize(*conv16->getOutput(0)); - assert(upsample17); - upsample17->setResizeMode(ResizeMode::kNEAREST); - upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions()); - ITensor * inputTensors18[] = {upsample17->getOutput(0), c3_6->getOutput(0)}; - auto cat18 = network->addConcatenation(inputTensors18, 2); - auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19"); - - auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20"); - auto upsample21 = network->addResize(*conv20->getOutput(0)); - assert(upsample21); - upsample21->setResizeMode(ResizeMode::kNEAREST); - upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions()); - ITensor * inputTensors21[] = {upsample21->getOutput(0), c3_4->getOutput(0)}; - auto cat22 = network->addConcatenation(inputTensors21, 2); - auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 
0.5, "model.23"); - - auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24"); - ITensor * inputTensors25[] = {conv24->getOutput(0), conv20->getOutput(0)}; - auto cat25 = network->addConcatenation(inputTensors25, 2); - auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26"); - - auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27"); - ITensor * inputTensors28[] = {conv27->getOutput(0), conv16->getOutput(0)}; - auto cat28 = network->addConcatenation(inputTensors28, 2); - auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29"); - - auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30"); - ITensor * inputTensors31[] = {conv30->getOutput(0), conv12->getOutput(0)}; - auto cat31 = network->addConcatenation(inputTensors31, 2); - auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32"); - - /* ------ detect ------ */ - IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]); - IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]); - IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]); - IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]); - 
- auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector{det0, det1, det2, det3}); - yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME); - network->markOutput(*yolo->getOutput(0)); - - // Build engine - builder->setMaxBatchSize(maxBatchSize); - config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB -#if defined(USE_FP16) - config->setFlag(BuilderFlag::kFP16); -#elif defined(USE_INT8) - std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl; - assert(builder->platformHasFastInt8()); - config->setFlag(BuilderFlag::kINT8); - Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME); - config->setInt8Calibrator(calibrator); -#endif - - std::cout << "Building engine, please wait for a while..." << std::endl; - ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config); - std::cout << "Build engine successfully!" << std::endl; - - // Don't need the network any more - network->destroy(); - - // Release host memory - for (auto& mem : weightMap) { - free((void*) (mem.second.values)); - } - - return engine; -} - -void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) { - // Create builder - IBuilder* builder = createInferBuilder(gLogger); - IBuilderConfig* config = builder->createBuilderConfig(); - - // Create model to populate the network, then set the outputs and create an engine - ICudaEngine *engine = nullptr; - if (is_p6) { - engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name); - } else { - engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name); - } - assert(engine != nullptr); - - // Serialize the engine - (*modelStream) = engine->serialize(); - - // Close everything down - engine->destroy(); - config->destroy(); - builder->destroy(); -} - -void 
doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) { - // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host - CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof (float), cudaMemcpyHostToDevice, stream)); - context.enqueue(batchSize, buffers, stream, nullptr); - CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof (float), cudaMemcpyDeviceToHost, stream)); - cudaStreamSynchronize(stream); -} - -bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw) { - if (std::string(argv[1]) == "-s" && (argc == 5 || argc == 7)) { - wts = std::string(argv[2]); - engine = std::string(argv[3]); - auto net = std::string(argv[4]); - if (net[0] == 's') { - gd = 0.33; - gw = 0.50; - } else if (net[0] == 'm') { - gd = 0.67; - gw = 0.75; - } else if (net[0] == 'l') { - gd = 1.0; - gw = 1.0; - } else if (net[0] == 'x') { - gd = 1.33; - gw = 1.25; - } else if (net[0] == 'c' && argc == 7) { - gd = atof(argv[5]); - gw = atof(argv[6]); - } else { - return false; - } - if (net.size() == 2 && net[1] == '6') { - is_p6 = true; - } - } else if (std::string(argv[1]) == "-d") { - engine = std::string(argv[2]); - } else { - return false; - } - return true; -} - -void print(std::string msg_prefix, sl::ERROR_CODE err_code, std::string msg_suffix) { - std::cout << "[Sample] "; - if (err_code != sl::ERROR_CODE::SUCCESS) - std::cout << "[Error] "; - std::cout << msg_prefix << " "; - if (err_code != sl::ERROR_CODE::SUCCESS) { - std::cout << " | " << toString(err_code) << " : "; - std::cout << toVerbose(err_code); - } - if (!msg_suffix.empty()) - std::cout << " " << msg_suffix; - std::cout << std::endl; -} - -std::vector cvt(const cv::Rect &bbox_in){ - std::vector bbox_out(4); - bbox_out[0] = sl::uint2(bbox_in.x, bbox_in.y); - bbox_out[1] = sl::uint2(bbox_in.x + 
bbox_in.width, bbox_in.y); - bbox_out[2] = sl::uint2(bbox_in.x + bbox_in.width, bbox_in.y + bbox_in.height); - bbox_out[3] = sl::uint2(bbox_in.x, bbox_in.y + bbox_in.height); - return bbox_out; -} - -int main(int argc, char** argv) { - - std::string wts_name = ""; - std::string engine_name = ""; - bool is_p6 = false; - float gd = 0.0f, gw = 0.0f; - if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw)) { - std::cerr << "arguments not right!" << std::endl; - std::cerr << "./yolov5 -s [.wts] [.engine] [s/m/l/x/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file" << std::endl; - std::cerr << "./yolov5 -d [.engine] [zed camera id / optional svo filepath] // deserialize plan file and run inference" << std::endl; - return -1; - } - - // create a model using the API directly and serialize it to a stream - if (!wts_name.empty()) { - cudaSetDevice(DEVICE); - IHostMemory * modelStream{ nullptr}; - APIToModel(BATCH_SIZE, &modelStream, is_p6, gd, gw, wts_name); - assert(modelStream != nullptr); - std::ofstream p(engine_name, std::ios::binary); - if (!p) { - std::cerr << "could not open plan output file" << std::endl; - return -1; - } - p.write(reinterpret_cast (modelStream->data()), modelStream->size()); - modelStream->destroy(); - return 0; - } - - /// Opening the ZED camera before the model deserialization to avoid cuda context issue - sl::Camera zed; - sl::InitParameters init_parameters; - init_parameters.sdk_verbose = true; - init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA; - init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; // OpenGL's coordinate system is right_handed - - if (argc > 3) { - std::string zed_opt = argv[3]; - if (zed_opt.find(".svo") != std::string::npos) - init_parameters.input.setFromSVOFile(zed_opt.c_str()); - } - // Open the camera - auto returned_state = zed.open(init_parameters); - if (returned_state != sl::ERROR_CODE::SUCCESS) { - print("Camera Open", returned_state, "Exit program."); - return 
EXIT_FAILURE; - } - zed.enablePositionalTracking(); - // Custom OD - sl::ObjectDetectionParameters detection_parameters; - detection_parameters.enable_tracking = true; - detection_parameters.enable_segmentation = false; // designed to give person pixel mask - detection_parameters.detection_model = sl::OBJECT_DETECTION_MODEL::CUSTOM_BOX_OBJECTS; - returned_state = zed.enableObjectDetection(detection_parameters); - if (returned_state != sl::ERROR_CODE::SUCCESS) { - print("enableObjectDetection", returned_state, "\nExit program."); - zed.close(); - return EXIT_FAILURE; - } - auto camera_config = zed.getCameraInformation().camera_configuration; - sl::Resolution pc_resolution(std::min((int) camera_config.resolution.width, 720), std::min((int) camera_config.resolution.height, 404)); - auto camera_info = zed.getCameraInformation(pc_resolution).camera_configuration; - // Create OpenGL Viewer - GLViewer viewer; - viewer.init(argc, argv, camera_info.calibration_parameters.left_cam, true); - // --------- - - - // deserialize the .engine and run inference - std::ifstream file(engine_name, std::ios::binary); - if (!file.good()) { - std::cerr << "read " << engine_name << " error!" 
<< std::endl; - return -1; - } - char *trtModelStream = nullptr; - size_t size = 0; - file.seekg(0, file.end); - size = file.tellg(); - file.seekg(0, file.beg); - trtModelStream = new char[size]; - assert(trtModelStream); - file.read(trtModelStream, size); - file.close(); - - // prepare input data --------------------------- - static float data[BATCH_SIZE * 3 * INPUT_H * INPUT_W]; - static float prob[BATCH_SIZE * OUTPUT_SIZE]; - IRuntime* runtime = createInferRuntime(gLogger); - assert(runtime != nullptr); - ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size); - assert(engine != nullptr); - IExecutionContext* context = engine->createExecutionContext(); - assert(context != nullptr); - delete[] trtModelStream; - assert(engine->getNbBindings() == 2); - void* buffers[2]; - // In order to bind the buffers, we need to know the names of the input and output tensors. - // Note that indices are guaranteed to be less than IEngine::getNbBindings() - const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME); - const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME); - assert(inputIndex == 0); - assert(outputIndex == 1); - // Create GPU buffers on device - CUDA_CHECK(cudaMalloc(&buffers[inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof (float))); - CUDA_CHECK(cudaMalloc(&buffers[outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof (float))); - // Create stream - cudaStream_t stream; - CUDA_CHECK(cudaStreamCreate(&stream)); - - assert(BATCH_SIZE == 1); // This sample only support batch 1 for now - - sl::Mat left_sl, point_cloud; - cv::Mat left_cv_rgb; - sl::ObjectDetectionRuntimeParameters objectTracker_parameters_rt; - sl::Objects objects; - sl::Pose cam_w_pose; - cam_w_pose.pose_data.setIdentity(); - - while (viewer.isAvailable()) { - if (zed.grab() == sl::ERROR_CODE::SUCCESS) { - - zed.retrieveImage(left_sl, sl::VIEW::LEFT); - - // Preparing inference - cv::Mat left_cv_rgba = slMat2cvMat(left_sl); - cv::cvtColor(left_cv_rgba, 
left_cv_rgb, cv::COLOR_BGRA2BGR); - if (left_cv_rgb.empty()) continue; - cv::Mat pr_img = preprocess_img(left_cv_rgb, INPUT_W, INPUT_H); // letterbox BGR to RGB - int i = 0; - int batch = 0; - for (int row = 0; row < INPUT_H; ++row) { - uchar* uc_pixel = pr_img.data + row * pr_img.step; - for (int col = 0; col < INPUT_W; ++col) { - data[batch * 3 * INPUT_H * INPUT_W + i] = (float) uc_pixel[2] / 255.0; - data[batch * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float) uc_pixel[1] / 255.0; - data[batch * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float) uc_pixel[0] / 255.0; - uc_pixel += 3; - ++i; - } - } - - // Running inference - doInference(*context, stream, buffers, data, prob, BATCH_SIZE); - std::vector> batch_res(BATCH_SIZE); - auto& res = batch_res[batch]; - nms(res, &prob[batch * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH); - - // Preparing for ZED SDK ingesting - std::vector objects_in; - for (auto &it : res) { - sl::CustomBoxObjectData tmp; - cv::Rect r = get_rect(left_cv_rgb, it.bbox); - // Fill the detections into the correct format - tmp.unique_object_id = sl::generate_unique_id(); - tmp.probability = it.conf; - tmp.label = (int) it.class_id; - tmp.bounding_box_2d = cvt(r); - tmp.is_grounded = ((int) it.class_id == 0); // Only the first class (person) is grounded, that is moving on the floor plane - // others are tracked in full 3D space - objects_in.push_back(tmp); - } - // Send the custom detected boxes to the ZED - zed.ingestCustomBoxObjects(objects_in); - - - // Displaying 'raw' objects - for (size_t j = 0; j < res.size(); j++) { - cv::Rect r = get_rect(left_cv_rgb, res[j].bbox); - cv::rectangle(left_cv_rgb, r, cv::Scalar(0x27, 0xC1, 0x36), 2); - cv::putText(left_cv_rgb, std::to_string((int) res[j].class_id), cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2); - } - cv::imshow("Objects", left_cv_rgb); - cv::waitKey(10); - - // Retrieve the tracked objects, with 2D and 3D attributes - 
zed.retrieveObjects(objects, objectTracker_parameters_rt); - // GL Viewer - zed.retrieveMeasure(point_cloud, sl::MEASURE::XYZRGBA, sl::MEM::GPU, pc_resolution); - zed.getPosition(cam_w_pose, sl::REFERENCE_FRAME::WORLD); - viewer.updateData(point_cloud, objects.object_list, cam_w_pose.pose_data); - } - } - - // Release stream and buffers - cudaStreamDestroy(stream); - CUDA_CHECK(cudaFree(buffers[inputIndex])); - CUDA_CHECK(cudaFree(buffers[outputIndex])); - // Destroy the engine - context->destroy(); - engine->destroy(); - runtime->destroy(); - viewer.exit(); - - return 0; -} diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/LICENSE b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/LICENSE deleted file mode 100644 index e0718cfd..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019-2020 Wang Xinyu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/README.md b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/README.md deleted file mode 100644 index 78bdb0b7..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# Yolov5 with ZED Custom Box input - -This sample is designed to run a state of the art object detection model using the highly optimized TensorRT framework. The image are taken from the ZED SDK, and the 2D box detections are then ingested into the ZED SDK to extract 3D informations (localization, 3D bounding boxes) and tracking. - -This sample is a fork from [wang-xinyu/tensorrtx](https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5). The Pytorch implementation is [ultralytics/yolov5](https://github.com/ultralytics/yolov5). The default model was trained on COCO dataset (80 classes), a custom detector can be trained with the same architecture, it requires a configuration tweak (see below). This tutorial walk you through the workflow of training a custom detector https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data. - - -## Getting Started - - - Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/) - - Check the [Documentation](https://www.stereolabs.com/docs/) - - [TensorRT Documentation](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html) - -## Different versions of yolov5 - -- For yolov5 v6.0, download .pt from [yolov5 release v6.0](https://github.com/ultralytics/yolov5/releases/tag/v6.0), `git clone -b v6.0 https://github.com/ultralytics/yolov5.git` - -## Config - -- Choose the model n/s/m/l/x/n6/s6/m6/l6/x6 from command line arguments. 
-- Input shape defined in yololayer.h **DO NOT FORGET TO CHANGE INPUT HEIGHT AND WIDTH, IF USING CUSTOM MODEL** -- Number of classes defined in yololayer.h, **DO NOT FORGET TO ADAPT THIS, IF USING CUSTOM MODEL** -- FP16/FP32 can be selected by the macro in yolov5.cpp, FP16 is faster if the GPU support it (all jetsons or GeForce RTX cards), -- GPU id can be selected by the macro in yolov5.cpp -- NMS thresh in yolov5.cpp -- BBox confidence thresh in yolov5.cpp - -## Using the sample - - -### 1. (Optional for first run) Generate .wts from pytorch with .pt - -**This file has already been generated and can be downloaded [here](https://download.stereolabs.com/sample_custom_objects/yolov5s_v6.0.wts.zip)** (and needs to be unzipped) to run the sample. - -This procedure can be applied to other models (such as `l` or `m` variants) or custom dataset trained model. - -The goal is to export the PyTorch model `.pt` into a easily readable weight file `.wts`. - -```sh -git clone -b v6.0 https://github.com/ultralytics/yolov5.git - -# Download the pretrained weight file -wget https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.pt -cp gen_wts.py {ultralytics}/yolov5 -cd {ultralytics}/yolov5 - -# convert .pt to .wts using gen_wts.py file -python gen_wts.py -w yolov5s.pt -# a file 'yolov5s.wts' will be generated. -``` - - -### 2. Build the sample - -If a custom model is used, let's say trained on another dataset than COCO, with a different number of classes, `CLASS_NUM` , `INPUT_H` and `INPUT_W` in yololayer.h must be updated accordingly. - - - Build for [Windows](https://www.stereolabs.com/docs/app-development/cpp/windows/) - - Build for [Linux/Jetson](https://www.stereolabs.com/docs/app-development/cpp/linux/) - - -### 3. Generate the TensorRT engine - -TensorRT apply heavy optimisation by processing the network structure itself and benchmarking all the available implementation of each inference function to take the fastest. The result in the inference engine. 
This process can take a few minutes so we usually want to generate it the first time than saving it for later reload. This step should be done at each model or weight change, but only once. - -```sh -./yolov5_zed -s [.wts] [.engine] [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file - -# For example yolov5s -./yolov5_zed -s yolov5s.wts yolov5s.engine s -# a file 'yolov5s.engine' will be generated. - -# For example Custom model with depth_multiple=0.17, width_multiple=0.25 in yolov5.yaml -./yolov5_zed -s yolov5_custom.wts yolov5_custom.engine c 0.17 0.25 -# a file 'yolov5_custom.engine' will be generated. -``` - -### 4. Running the sample with the engine generated - -```sh -./yolov5_zed -d [.engine] [zed camera id / optional svo filepath] // deserialize and run inference - -# For example yolov5s -./yolov5_zed -d yolov5s.engine 0 # 0 for zed camera id 0 - -# With an SVO file -./yolov5_zed -d yolov5.engine ./foo.svo -``` diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/gen_wts.py b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/gen_wts.py deleted file mode 100644 index 5ae94973..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/gen_wts.py +++ /dev/null @@ -1,49 +0,0 @@ -import sys -import argparse -import os -import struct -import torch -from utils.torch_utils import select_device - - -def parse_args(): - parser = argparse.ArgumentParser(description='Convert .pt file to .wts') - parser.add_argument('-w', '--weights', required=True, help='Input weights (.pt) file path (required)') - parser.add_argument('-o', '--output', help='Output (.wts) file path (optional)') - args = parser.parse_args() - if not os.path.isfile(args.weights): - raise SystemExit('Invalid input file') - if not args.output: - args.output = os.path.splitext(args.weights)[0] + '.wts' - elif os.path.isdir(args.output): - args.output = os.path.join( - args.output, - os.path.splitext(os.path.basename(args.weights))[0] + '.wts') - 
return args.weights, args.output - - -pt_file, wts_file = parse_args() - -# Initialize -device = select_device('cpu') -# Load model -model = torch.load(pt_file, map_location=device) # load to FP32 -model = model['ema' if model.get('ema') else 'model'].float() - -# update anchor_grid info -anchor_grid = model.model[-1].anchors * model.model[-1].stride[...,None,None] -# model.model[-1].anchor_grid = anchor_grid -delattr(model.model[-1], 'anchor_grid') # model.model[-1] is detect layer -model.model[-1].register_buffer("anchor_grid",anchor_grid) #The parameters are saved in the OrderDict through the "register_buffer" method, and then saved to the weight. - -model.to(device).eval() - -with open(wts_file, 'w') as f: - f.write('{}\n'.format(len(model.state_dict().keys()))) - for k, v in model.state_dict().items(): - vr = v.reshape(-1).cpu().numpy() - f.write('{} {} '.format(k, len(vr))) - for vv in vr: - f.write(' ') - f.write(struct.pack('>f' ,float(vv)).hex()) - f.write('\n') diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/calibrator.h b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/calibrator.h deleted file mode 100644 index 81e873b1..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/calibrator.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifdef ENABLE_INT8_CALIBRATOR - -#ifndef ENTROPY_CALIBRATOR_H -#define ENTROPY_CALIBRATOR_H - -#include "NvInfer.h" -#include -#include - -#if NV_TENSORRT_MAJOR >= 8 -#define TRT_NOEXCEPT noexcept -#else -#define TRT_NOEXCEPT -#endif - -//! \class Int8EntropyCalibrator2 -//! -//! \brief Implements Entropy calibrator 2. -//! CalibrationAlgoType is kENTROPY_CALIBRATION_2. -//! 
-class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 -{ -public: - Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true); - - virtual ~Int8EntropyCalibrator2(); - int getBatchSize() const TRT_NOEXCEPT override; - bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override; - const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override; - void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override; - -private: - int batchsize_; - int input_w_; - int input_h_; - int img_idx_; - std::string img_dir_; - std::vector img_files_; - size_t input_count_; - std::string calib_table_name_; - const char* input_blob_name_; - bool read_cache_; - void* device_input_; - std::vector calib_cache_; -}; - -#endif // ENTROPY_CALIBRATOR_H -#endif \ No newline at end of file diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/common.hpp b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/common.hpp deleted file mode 100644 index ed84f0bf..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/common.hpp +++ /dev/null @@ -1,328 +0,0 @@ -#ifndef YOLOV5_COMMON_H_ -#define YOLOV5_COMMON_H_ - -#include -#include -#include -#include -#include -#include "NvInfer.h" -#include "yololayer.h" - -using namespace nvinfer1; - -cv::Rect get_rect(cv::Mat& img, float bbox[4]) { - float l, r, t, b; - float r_w = Yolo::INPUT_W / (img.cols * 1.0); - float r_h = Yolo::INPUT_H / (img.rows * 1.0); - if (r_h > r_w) { - l = bbox[0] - bbox[2] / 2.f; - r = bbox[0] + bbox[2] / 2.f; - t = bbox[1] - bbox[3] / 2.f - (Yolo::INPUT_H - r_w * img.rows) / 2; - b = bbox[1] + bbox[3] / 2.f - (Yolo::INPUT_H - r_w * img.rows) / 2; - l = l / r_w; - r = r / r_w; - t = t / r_w; - b = b / r_w; - } else { - l = bbox[0] - bbox[2] / 2.f - (Yolo::INPUT_W - r_h * img.cols) / 
2; - r = bbox[0] + bbox[2] / 2.f - (Yolo::INPUT_W - r_h * img.cols) / 2; - t = bbox[1] - bbox[3] / 2.f; - b = bbox[1] + bbox[3] / 2.f; - l = l / r_h; - r = r / r_h; - t = t / r_h; - b = b / r_h; - } - return cv::Rect(round(l), round(t), round(r - l), round(b - t)); -} - -float iou(float lbox[4], float rbox[4]) { - float interBox[] = { - (std::max)(lbox[0] - lbox[2] / 2.f , rbox[0] - rbox[2] / 2.f), //left - (std::min)(lbox[0] + lbox[2] / 2.f , rbox[0] + rbox[2] / 2.f), //right - (std::max)(lbox[1] - lbox[3] / 2.f , rbox[1] - rbox[3] / 2.f), //top - (std::min)(lbox[1] + lbox[3] / 2.f , rbox[1] + rbox[3] / 2.f), //bottom - }; - - if (interBox[2] > interBox[3] || interBox[0] > interBox[1]) - return 0.0f; - - float interBoxS = (interBox[1] - interBox[0])*(interBox[3] - interBox[2]); - return interBoxS / (lbox[2] * lbox[3] + rbox[2] * rbox[3] - interBoxS); -} - -bool cmp(const Yolo::Detection& a, const Yolo::Detection& b) { - return a.conf > b.conf; -} - -void nms(std::vector& res, float *output, float conf_thresh, float nms_thresh = 0.5) { - int det_size = sizeof(Yolo::Detection) / sizeof(float); - std::map> m; - for (int i = 0; i < output[0] && i < Yolo::MAX_OUTPUT_BBOX_COUNT; i++) { - if (output[1 + det_size * i + 4] <= conf_thresh) continue; - Yolo::Detection det; - memcpy(&det, &output[1 + det_size * i], det_size * sizeof(float)); - if (m.count(det.class_id) == 0) m.emplace(det.class_id, std::vector()); - m[det.class_id].push_back(det); - } - for (auto it = m.begin(); it != m.end(); it++) { - //std::cout << it->second[0].class_id << " --- " << std::endl; - auto& dets = it->second; - std::sort(dets.begin(), dets.end(), cmp); - for (size_t m = 0; m < dets.size(); ++m) { - auto& item = dets[m]; - res.push_back(item); - for (size_t n = m + 1; n < dets.size(); ++n) { - if (iou(item.bbox, dets[n].bbox) > nms_thresh) { - dets.erase(dets.begin() + n); - --n; - } - } - } - } -} - -// TensorRT weight files have a simple space delimited format: -// [type] [size] -std::map 
loadWeights(const std::string file) { - std::cout << "Loading weights: " << file << std::endl; - std::map weightMap; - - // Open weights file - std::ifstream input(file); - assert(input.is_open() && "Unable to load weight file. please check if the .wts file path is right!!!!!!"); - - // Read number of weight blobs - int32_t count; - input >> count; - assert(count > 0 && "Invalid weight map file."); - - while (count--) - { - Weights wt{ DataType::kFLOAT, nullptr, 0 }; - uint32_t size; - - // Read name and type of blob - std::string name; - input >> name >> std::dec >> size; - wt.type = DataType::kFLOAT; - - // Load blob - uint32_t* val = reinterpret_cast(malloc(sizeof(val) * size)); - for (uint32_t x = 0, y = size; x < y; ++x) - { - input >> std::hex >> val[x]; - } - wt.values = val; - - wt.count = size; - weightMap[name] = wt; - } - - return weightMap; -} - -IScaleLayer* addBatchNorm2d(INetworkDefinition *network, std::map& weightMap, ITensor& input, std::string lname, float eps) { - float *gamma = (float*)weightMap[lname + ".weight"].values; - float *beta = (float*)weightMap[lname + ".bias"].values; - float *mean = (float*)weightMap[lname + ".running_mean"].values; - float *var = (float*)weightMap[lname + ".running_var"].values; - int len = weightMap[lname + ".running_var"].count; - - float *scval = reinterpret_cast(malloc(sizeof(float) * len)); - for (int i = 0; i < len; i++) { - scval[i] = gamma[i] / sqrt(var[i] + eps); - } - Weights scale{ DataType::kFLOAT, scval, len }; - - float *shval = reinterpret_cast(malloc(sizeof(float) * len)); - for (int i = 0; i < len; i++) { - shval[i] = beta[i] - mean[i] * gamma[i] / sqrt(var[i] + eps); - } - Weights shift{ DataType::kFLOAT, shval, len }; - - float *pval = reinterpret_cast(malloc(sizeof(float) * len)); - for (int i = 0; i < len; i++) { - pval[i] = 1.0; - } - Weights power{ DataType::kFLOAT, pval, len }; - - weightMap[lname + ".scale"] = scale; - weightMap[lname + ".shift"] = shift; - weightMap[lname + ".power"] = 
power; - IScaleLayer* scale_1 = network->addScale(input, ScaleMode::kCHANNEL, shift, scale, power); - assert(scale_1); - return scale_1; -} - -ILayer* convBlock(INetworkDefinition *network, std::map& weightMap, ITensor& input, int outch, int ksize, int s, int g, std::string lname) { - Weights emptywts{ DataType::kFLOAT, nullptr, 0 }; - int p = ksize / 3; - IConvolutionLayer* conv1 = network->addConvolutionNd(input, outch, DimsHW{ ksize, ksize }, weightMap[lname + ".conv.weight"], emptywts); - assert(conv1); - conv1->setStrideNd(DimsHW{ s, s }); - conv1->setPaddingNd(DimsHW{ p, p }); - conv1->setNbGroups(g); - IScaleLayer* bn1 = addBatchNorm2d(network, weightMap, *conv1->getOutput(0), lname + ".bn", 1e-3); - - // silu = x * sigmoid - auto sig = network->addActivation(*bn1->getOutput(0), ActivationType::kSIGMOID); - assert(sig); - auto ew = network->addElementWise(*bn1->getOutput(0), *sig->getOutput(0), ElementWiseOperation::kPROD); - assert(ew); - return ew; -} - -ILayer* focus(INetworkDefinition *network, std::map& weightMap, ITensor& input, int inch, int outch, int ksize, std::string lname) { - ISliceLayer *s1 = network->addSlice(input, Dims3{ 0, 0, 0 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ISliceLayer *s2 = network->addSlice(input, Dims3{ 0, 1, 0 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ISliceLayer *s3 = network->addSlice(input, Dims3{ 0, 0, 1 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ISliceLayer *s4 = network->addSlice(input, Dims3{ 0, 1, 1 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 }); - ITensor* inputTensors[] = { s1->getOutput(0), s2->getOutput(0), s3->getOutput(0), s4->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 4); - auto conv = convBlock(network, weightMap, *cat->getOutput(0), outch, ksize, 1, 1, lname + ".conv"); - return conv; -} - -ILayer* bottleneck(INetworkDefinition *network, std::map& 
weightMap, ITensor& input, int c1, int c2, bool shortcut, int g, float e, std::string lname) { - auto cv1 = convBlock(network, weightMap, input, (int)((float)c2 * e), 1, 1, 1, lname + ".cv1"); - auto cv2 = convBlock(network, weightMap, *cv1->getOutput(0), c2, 3, 1, g, lname + ".cv2"); - if (shortcut && c1 == c2) { - auto ew = network->addElementWise(input, *cv2->getOutput(0), ElementWiseOperation::kSUM); - return ew; - } - return cv2; -} - -ILayer* bottleneckCSP(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) { - Weights emptywts{ DataType::kFLOAT, nullptr, 0 }; - int c_ = (int)((float)c2 * e); - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - auto cv2 = network->addConvolutionNd(input, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv2.weight"], emptywts); - ITensor *y1 = cv1->getOutput(0); - for (int i = 0; i < n; i++) { - auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." 
+ std::to_string(i)); - y1 = b->getOutput(0); - } - auto cv3 = network->addConvolutionNd(*y1, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv3.weight"], emptywts); - - ITensor* inputTensors[] = { cv3->getOutput(0), cv2->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 2); - - IScaleLayer* bn = addBatchNorm2d(network, weightMap, *cat->getOutput(0), lname + ".bn", 1e-4); - auto lr = network->addActivation(*bn->getOutput(0), ActivationType::kLEAKY_RELU); - lr->setAlpha(0.1); - - auto cv4 = convBlock(network, weightMap, *lr->getOutput(0), c2, 1, 1, 1, lname + ".cv4"); - return cv4; -} - -ILayer* C3(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) { - int c_ = (int)((float)c2 * e); - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - auto cv2 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv2"); - ITensor *y1 = cv1->getOutput(0); - for (int i = 0; i < n; i++) { - auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." 
+ std::to_string(i)); - y1 = b->getOutput(0); - } - - ITensor* inputTensors[] = { y1, cv2->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 2); - - auto cv3 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv3"); - return cv3; -} - -ILayer* SPP(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int k1, int k2, int k3, std::string lname) { - int c_ = c1 / 2; - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - - auto pool1 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k1, k1 }); - pool1->setPaddingNd(DimsHW{ k1 / 2, k1 / 2 }); - pool1->setStrideNd(DimsHW{ 1, 1 }); - auto pool2 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k2, k2 }); - pool2->setPaddingNd(DimsHW{ k2 / 2, k2 / 2 }); - pool2->setStrideNd(DimsHW{ 1, 1 }); - auto pool3 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k3, k3 }); - pool3->setPaddingNd(DimsHW{ k3 / 2, k3 / 2 }); - pool3->setStrideNd(DimsHW{ 1, 1 }); - - ITensor* inputTensors[] = { cv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 4); - - auto cv2 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv2"); - return cv2; -} - -// SPPF -ILayer* SPPF(INetworkDefinition *network, std::map& weightMap, ITensor& input, int c1, int c2, int k, std::string lname) { - int c_ = c1 / 2; - auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1"); - - auto pool1 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k, k }); - pool1->setPaddingNd(DimsHW{ k / 2, k / 2 }); - pool1->setStrideNd(DimsHW{ 1, 1 }); - auto pool2 = network->addPoolingNd(*pool1->getOutput(0), PoolingType::kMAX, DimsHW{ k, k }); - pool2->setPaddingNd(DimsHW{ k / 2, k / 2 }); - pool2->setStrideNd(DimsHW{ 1, 1 }); - auto pool3 = 
network->addPoolingNd(*pool2->getOutput(0), PoolingType::kMAX, DimsHW{ k, k }); - pool3->setPaddingNd(DimsHW{ k / 2, k / 2 }); - pool3->setStrideNd(DimsHW{ 1, 1 }); - ITensor* inputTensors[] = { cv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0) }; - auto cat = network->addConcatenation(inputTensors, 4); - auto cv2 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv2"); - return cv2; -} - -// - -std::vector> getAnchors(std::map& weightMap, std::string lname) { - std::vector> anchors; - Weights wts = weightMap[lname + ".anchor_grid"]; - int anchor_len = Yolo::CHECK_COUNT * 2; - for (int i = 0; i < wts.count / anchor_len; i++) { - auto *p = (const float*)wts.values + i * anchor_len; - std::vector anchor(p, p + anchor_len); - anchors.push_back(anchor); - } - return anchors; -} - -IPluginV2Layer* addYoLoLayer(INetworkDefinition *network, std::map& weightMap, std::string lname, std::vector dets) { - auto creator = getPluginRegistry()->getPluginCreator("YoloLayer_TRT", "1"); - auto anchors = getAnchors(weightMap, lname); - PluginField plugin_fields[2]; - int netinfo[4] = {Yolo::CLASS_NUM, Yolo::INPUT_W, Yolo::INPUT_H, Yolo::MAX_OUTPUT_BBOX_COUNT}; - plugin_fields[0].data = netinfo; - plugin_fields[0].length = 4; - plugin_fields[0].name = "netinfo"; - plugin_fields[0].type = PluginFieldType::kFLOAT32; - int scale = 8; - std::vector kernels; - for (size_t i = 0; i < anchors.size(); i++) { - Yolo::YoloKernel kernel; - kernel.width = Yolo::INPUT_W / scale; - kernel.height = Yolo::INPUT_H / scale; - memcpy(kernel.anchors, &anchors[i][0], anchors[i].size() * sizeof(float)); - kernels.push_back(kernel); - scale *= 2; - } - plugin_fields[1].data = &kernels[0]; - plugin_fields[1].length = kernels.size(); - plugin_fields[1].name = "kernels"; - plugin_fields[1].type = PluginFieldType::kFLOAT32; - PluginFieldCollection plugin_data; - plugin_data.nbFields = 2; - plugin_data.fields = plugin_fields; - IPluginV2 *plugin_obj = 
creator->createPlugin("yololayer", &plugin_data); - std::vector input_tensors; - for (auto det: dets) { - input_tensors.push_back(det->getOutput(0)); - } - auto yolo = network->addPluginV2(&input_tensors[0], input_tensors.size(), *plugin_obj); - return yolo; -} -#endif - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/cuda_utils.h b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/cuda_utils.h deleted file mode 100644 index 8fbd3199..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/cuda_utils.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef TRTX_CUDA_UTILS_H_ -#define TRTX_CUDA_UTILS_H_ - -#include - -#ifndef CUDA_CHECK -#define CUDA_CHECK(callstr)\ - {\ - cudaError_t error_code = callstr;\ - if (error_code != cudaSuccess) {\ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\ - assert(0);\ - }\ - } -#endif // CUDA_CHECK - -#endif // TRTX_CUDA_UTILS_H_ - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/logging.h b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/logging.h deleted file mode 100644 index 1339ee2f..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/logging.h +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef TENSORRT_LOGGING_H -#define TENSORRT_LOGGING_H - -#include "NvInferRuntimeCommon.h" -#include -#include -#include -#include -#include -#include -#include - -#if NV_TENSORRT_MAJOR >= 8 -#define TRT_NOEXCEPT noexcept -#else -#define TRT_NOEXCEPT -#endif - -using Severity = nvinfer1::ILogger::Severity; - -class LogStreamConsumerBuffer : public std::stringbuf -{ -public: - LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog) - : mOutput(stream) - , mPrefix(prefix) - , mShouldLog(shouldLog) - { - } - - LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other) - : mOutput(other.mOutput) - { - } - - ~LogStreamConsumerBuffer() - { - // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence - // std::streambuf::pptr() gives a pointer to the current position of the output sequence - // if the pointer to the beginning is not equal to the pointer to the current position, - // call putOutput() to log the output to the stream - if (pbase() != pptr()) - { - putOutput(); - } - } - - // synchronizes the stream buffer and returns 0 on success - // synchronizing the stream buffer consists of inserting the buffer contents into the stream, - // resetting the buffer and flushing the stream - virtual int sync() - { - putOutput(); - return 0; - } - - void putOutput() - { - if (mShouldLog) - { - // prepend timestamp - std::time_t timestamp = std::time(nullptr); - tm* tm_local = std::localtime(×tamp); - std::cout << "["; - std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/"; - std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":"; - std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] "; - // 
std::stringbuf::str() gets the string contents of the buffer - // insert the buffer contents pre-appended by the appropriate prefix into the stream - mOutput << mPrefix << str(); - // set the buffer to empty - str(""); - // flush the stream - mOutput.flush(); - } - } - - void setShouldLog(bool shouldLog) - { - mShouldLog = shouldLog; - } - -private: - std::ostream& mOutput; - std::string mPrefix; - bool mShouldLog; -}; - -//! -//! \class LogStreamConsumerBase -//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer -//! -class LogStreamConsumerBase -{ -public: - LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog) - : mBuffer(stream, prefix, shouldLog) - { - } - -protected: - LogStreamConsumerBuffer mBuffer; -}; - -//! -//! \class LogStreamConsumer -//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages. -//! Order of base classes is LogStreamConsumerBase and then std::ostream. -//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field -//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream. -//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream. -//! Please do not change the order of the parent classes. -//! -class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream -{ -public: - //! \brief Creates a LogStreamConsumer which logs messages with level severity. - //! Reportable severity determines if the messages are severe enough to be logged. 
- LogStreamConsumer(Severity reportableSeverity, Severity severity) - : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity) - , std::ostream(&mBuffer) // links the stream buffer with the stream - , mShouldLog(severity <= reportableSeverity) - , mSeverity(severity) - { - } - - LogStreamConsumer(LogStreamConsumer&& other) - : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog) - , std::ostream(&mBuffer) // links the stream buffer with the stream - , mShouldLog(other.mShouldLog) - , mSeverity(other.mSeverity) - { - } - - void setReportableSeverity(Severity reportableSeverity) - { - mShouldLog = mSeverity <= reportableSeverity; - mBuffer.setShouldLog(mShouldLog); - } - -private: - static std::ostream& severityOstream(Severity severity) - { - return severity >= Severity::kINFO ? std::cout : std::cerr; - } - - static std::string severityPrefix(Severity severity) - { - switch (severity) - { - case Severity::kINTERNAL_ERROR: return "[F] "; - case Severity::kERROR: return "[E] "; - case Severity::kWARNING: return "[W] "; - case Severity::kINFO: return "[I] "; - case Severity::kVERBOSE: return "[V] "; - default: assert(0); return ""; - } - } - - bool mShouldLog; - Severity mSeverity; -}; - -//! \class Logger -//! -//! \brief Class which manages logging of TensorRT tools and samples -//! -//! \details This class provides a common interface for TensorRT tools and samples to log information to the console, -//! and supports logging two types of messages: -//! -//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal) -//! - Test pass/fail messages -//! -//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is -//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location. -//! -//! 
In the future, this class could be extended to support dumping test results to a file in some standard format -//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run). -//! -//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger -//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT -//! library and messages coming from the sample. -//! -//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the -//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger -//! object. - -class Logger : public nvinfer1::ILogger -{ -public: - Logger(Severity severity = Severity::kWARNING) - : mReportableSeverity(severity) - { - } - - //! - //! \enum TestResult - //! \brief Represents the state of a given test - //! - enum class TestResult - { - kRUNNING, //!< The test is running - kPASSED, //!< The test passed - kFAILED, //!< The test failed - kWAIVED //!< The test was waived - }; - - //! - //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger - //! \return The nvinfer1::ILogger associated with this Logger - //! - //! TODO Once all samples are updated to use this method to register the logger with TensorRT, - //! we can eliminate the inheritance of Logger from ILogger - //! - nvinfer1::ILogger& getTRTLogger() - { - return *this; - } - - //! - //! \brief Implementation of the nvinfer1::ILogger::log() virtual method - //! - //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the - //! inheritance from nvinfer1::ILogger - //! 
- void log(Severity severity, const char* msg) TRT_NOEXCEPT override - { - LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; - } - - //! - //! \brief Method for controlling the verbosity of logging output - //! - //! \param severity The logger will only emit messages that have severity of this level or higher. - //! - void setReportableSeverity(Severity severity) - { - mReportableSeverity = severity; - } - - //! - //! \brief Opaque handle that holds logging information for a particular test - //! - //! This object is an opaque handle to information used by the Logger to print test results. - //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used - //! with Logger::reportTest{Start,End}(). - //! - class TestAtom - { - public: - TestAtom(TestAtom&&) = default; - - private: - friend class Logger; - - TestAtom(bool started, const std::string& name, const std::string& cmdline) - : mStarted(started) - , mName(name) - , mCmdline(cmdline) - { - } - - bool mStarted; - std::string mName; - std::string mCmdline; - }; - - //! - //! \brief Define a test for logging - //! - //! \param[in] name The name of the test. This should be a string starting with - //! "TensorRT" and containing dot-separated strings containing - //! the characters [A-Za-z0-9_]. - //! For example, "TensorRT.sample_googlenet" - //! \param[in] cmdline The command line used to reproduce the test - // - //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). - //! - static TestAtom defineTest(const std::string& name, const std::string& cmdline) - { - return TestAtom(false, name, cmdline); - } - - //! - //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments - //! as input - //! - //! \param[in] name The name of the test - //! \param[in] argc The number of command-line arguments - //! 
\param[in] argv The array of command-line arguments (given as C strings) - //! - //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). - static TestAtom defineTest(const std::string& name, int argc, char const* const* argv) - { - auto cmdline = genCmdlineString(argc, argv); - return defineTest(name, cmdline); - } - - //! - //! \brief Report that a test has started. - //! - //! \pre reportTestStart() has not been called yet for the given testAtom - //! - //! \param[in] testAtom The handle to the test that has started - //! - static void reportTestStart(TestAtom& testAtom) - { - reportTestResult(testAtom, TestResult::kRUNNING); - assert(!testAtom.mStarted); - testAtom.mStarted = true; - } - - //! - //! \brief Report that a test has ended. - //! - //! \pre reportTestStart() has been called for the given testAtom - //! - //! \param[in] testAtom The handle to the test that has ended - //! \param[in] result The result of the test. Should be one of TestResult::kPASSED, - //! TestResult::kFAILED, TestResult::kWAIVED - //! - static void reportTestEnd(const TestAtom& testAtom, TestResult result) - { - assert(result != TestResult::kRUNNING); - assert(testAtom.mStarted); - reportTestResult(testAtom, result); - } - - static int reportPass(const TestAtom& testAtom) - { - reportTestEnd(testAtom, TestResult::kPASSED); - return EXIT_SUCCESS; - } - - static int reportFail(const TestAtom& testAtom) - { - reportTestEnd(testAtom, TestResult::kFAILED); - return EXIT_FAILURE; - } - - static int reportWaive(const TestAtom& testAtom) - { - reportTestEnd(testAtom, TestResult::kWAIVED); - return EXIT_SUCCESS; - } - - static int reportTest(const TestAtom& testAtom, bool pass) - { - return pass ? reportPass(testAtom) : reportFail(testAtom); - } - - Severity getReportableSeverity() const - { - return mReportableSeverity; - } - -private: - //! - //! \brief returns an appropriate string for prefixing a log message with the given severity - //! 
- static const char* severityPrefix(Severity severity) - { - switch (severity) - { - case Severity::kINTERNAL_ERROR: return "[F] "; - case Severity::kERROR: return "[E] "; - case Severity::kWARNING: return "[W] "; - case Severity::kINFO: return "[I] "; - case Severity::kVERBOSE: return "[V] "; - default: assert(0); return ""; - } - } - - //! - //! \brief returns an appropriate string for prefixing a test result message with the given result - //! - static const char* testResultString(TestResult result) - { - switch (result) - { - case TestResult::kRUNNING: return "RUNNING"; - case TestResult::kPASSED: return "PASSED"; - case TestResult::kFAILED: return "FAILED"; - case TestResult::kWAIVED: return "WAIVED"; - default: assert(0); return ""; - } - } - - //! - //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity - //! - static std::ostream& severityOstream(Severity severity) - { - return severity >= Severity::kINFO ? std::cout : std::cerr; - } - - //! - //! \brief method that implements logging test results - //! - static void reportTestResult(const TestAtom& testAtom, TestResult result) - { - severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " - << testAtom.mCmdline << std::endl; - } - - //! - //! \brief generate a command line string from the given (argc, argv) values - //! - static std::string genCmdlineString(int argc, char const* const* argv) - { - std::stringstream ss; - for (int i = 0; i < argc; i++) - { - if (i > 0) - ss << " "; - ss << argv[i]; - } - return ss.str(); - } - - Severity mReportableSeverity; -}; - -namespace -{ - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE -//! -//! Example usage: -//! -//! LOG_VERBOSE(logger) << "hello world" << std::endl; -//! 
-inline LogStreamConsumer LOG_VERBOSE(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO -//! -//! Example usage: -//! -//! LOG_INFO(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_INFO(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING -//! -//! Example usage: -//! -//! LOG_WARN(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_WARN(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR -//! -//! Example usage: -//! -//! LOG_ERROR(logger) << "hello world" << std::endl; -//! -inline LogStreamConsumer LOG_ERROR(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); -} - -//! -//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR -// ("fatal" severity) -//! -//! Example usage: -//! -//! LOG_FATAL(logger) << "hello world" << std::endl; -//! 
-inline LogStreamConsumer LOG_FATAL(const Logger& logger) -{ - return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); -} - -} // anonymous namespace - -#endif // TENSORRT_LOGGING_H diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/utils.h b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/utils.h deleted file mode 100644 index fe50d039..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/utils.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef TRTX_YOLOV5_UTILS_H_ -#define TRTX_YOLOV5_UTILS_H_ - -#include -#include - -static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) { - int w, h, x, y; - float r_w = input_w / (img.cols*1.0); - float r_h = input_h / (img.rows*1.0); - if (r_h > r_w) { - w = input_w; - h = r_w * img.rows; - x = 0; - y = (input_h - h) / 2; - } else { - w = r_h * img.cols; - h = input_h; - x = (input_w - w) / 2; - y = 0; - } - cv::Mat re(h, w, CV_8UC3); - cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR); - cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128)); - re.copyTo(out(cv::Rect(x, y, re.cols, re.rows))); - return out; -} - -static inline int read_files_in_dir(const char *p_dir_name, std::vector &file_names) { - DIR *p_dir = opendir(p_dir_name); - if (p_dir == nullptr) { - return -1; - } - - struct dirent* p_file = nullptr; - while ((p_file = readdir(p_dir)) != nullptr) { - if (strcmp(p_file->d_name, ".") != 0 && - strcmp(p_file->d_name, "..") != 0) { - //std::string cur_file_name(p_dir_name); - //cur_file_name += "/"; - //cur_file_name += p_file->d_name; - std::string cur_file_name(p_file->d_name); - file_names.push_back(cur_file_name); - } - } - - closedir(p_dir); - return 0; -} - -#endif // TRTX_YOLOV5_UTILS_H_ - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/yololayer.h b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/yololayer.h deleted file mode 100644 
index 7224bde4..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/yololayer.h +++ /dev/null @@ -1,146 +0,0 @@ -#ifndef _YOLO_LAYER_H -#define _YOLO_LAYER_H - -#include -#include -#include "NvInfer.h" - - -#if NV_TENSORRT_MAJOR >= 8 -#define TRT_NOEXCEPT noexcept -#define TRT_CONST_ENQUEUE const -#else -#define TRT_NOEXCEPT -#define TRT_CONST_ENQUEUE -#endif - -namespace Yolo -{ - static constexpr int CHECK_COUNT = 3; - static constexpr float IGNORE_THRESH = 0.1f; - struct YoloKernel - { - int width; - int height; - float anchors[CHECK_COUNT * 2]; - }; - static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000; - static constexpr int CLASS_NUM = 80; - static constexpr int INPUT_H = 640; // yolov5's input height and width must be divisible by 32. - static constexpr int INPUT_W = 640; - - static constexpr int LOCATIONS = 4; - struct alignas(float) Detection { - //center_x center_y w h - float bbox[LOCATIONS]; - float conf; // bbox_conf * cls_conf - float class_id; - }; -} - -namespace nvinfer1 -{ - class YoloLayerPlugin : public IPluginV2IOExt - { - public: - YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector& vYoloKernel); - YoloLayerPlugin(const void* data, size_t length); - ~YoloLayerPlugin(); - - int getNbOutputs() const TRT_NOEXCEPT override - { - return 1; - } - - Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override; - - int initialize() TRT_NOEXCEPT override; - - virtual void terminate() TRT_NOEXCEPT override {}; - - virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; } - - virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override; - - virtual size_t getSerializationSize() const TRT_NOEXCEPT override; - - virtual void serialize(void* buffer) const TRT_NOEXCEPT override; - - bool supportsFormatCombination(int pos, const 
PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override { - return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT; - } - - const char* getPluginType() const TRT_NOEXCEPT override; - - const char* getPluginVersion() const TRT_NOEXCEPT override; - - void destroy() TRT_NOEXCEPT override; - - IPluginV2IOExt* clone() const TRT_NOEXCEPT override; - - void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override; - - const char* getPluginNamespace() const TRT_NOEXCEPT override; - - DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override; - - bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override; - - bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override; - - void attachToContext( - cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override; - - void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override; - - void detachFromContext() TRT_NOEXCEPT override; - - private: - void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1); - int mThreadCount = 256; - const char* mPluginNamespace; - int mKernelCount; - int mClassCount; - int mYoloV5NetWidth; - int mYoloV5NetHeight; - int mMaxOutObject; - std::vector mYoloKernel; - void** mAnchor; - }; - - class YoloPluginCreator : public IPluginCreator - { - public: - YoloPluginCreator(); - - ~YoloPluginCreator() override = default; - - const char* getPluginName() const TRT_NOEXCEPT override; - - const char* getPluginVersion() const TRT_NOEXCEPT override; - - const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override; - - IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override; - - IPluginV2IOExt* 
deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override; - - void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override - { - mNamespace = libNamespace; - } - - const char* getPluginNamespace() const TRT_NOEXCEPT override - { - return mNamespace.c_str(); - } - - private: - std::string mNamespace; - static PluginFieldCollection mFC; - static std::vector mPluginAttributes; - }; - REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); -}; - -#endif diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/calibrator.cpp b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/calibrator.cpp deleted file mode 100644 index 6fae5d66..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/calibrator.cpp +++ /dev/null @@ -1,83 +0,0 @@ -#ifdef ENABLE_INT8_CALIBRATOR - -#include -#include -#include -#include -#include "calibrator.h" -#include "cuda_utils.h" -#include "utils.h" - -Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache) - : batchsize_(batchsize) - , input_w_(input_w) - , input_h_(input_h) - , img_idx_(0) - , img_dir_(img_dir) - , calib_table_name_(calib_table_name) - , input_blob_name_(input_blob_name) - , read_cache_(read_cache) -{ - input_count_ = 3 * input_w * input_h * batchsize; - CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float))); - read_files_in_dir(img_dir, img_files_); -} - -Int8EntropyCalibrator2::~Int8EntropyCalibrator2() -{ - CUDA_CHECK(cudaFree(device_input_)); -} - -int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT -{ - return batchsize_; -} - -bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT -{ - if (img_idx_ + batchsize_ > (int)img_files_.size()) { - return false; - } - - std::vector input_imgs_; - for (int i = img_idx_; i < img_idx_ + 
batchsize_; i++) { - std::cout << img_files_[i] << " " << i << std::endl; - cv::Mat temp = cv::imread(img_dir_ + img_files_[i]); - if (temp.empty()){ - std::cerr << "Fatal error: image cannot open!" << std::endl; - return false; - } - cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_); - input_imgs_.push_back(pr_img); - } - img_idx_ += batchsize_; - cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false); - - CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice)); - assert(!strcmp(names[0], input_blob_name_)); - bindings[0] = device_input_; - return true; -} - -const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT -{ - std::cout << "reading calib cache: " << calib_table_name_ << std::endl; - calib_cache_.clear(); - std::ifstream input(calib_table_name_, std::ios::binary); - input >> std::noskipws; - if (read_cache_ && input.good()) - { - std::copy(std::istream_iterator(input), std::istream_iterator(), std::back_inserter(calib_cache_)); - } - length = calib_cache_.size(); - return length ? 
calib_cache_.data() : nullptr; -} - -void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT -{ - std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl; - std::ofstream output(calib_table_name_, std::ios::binary); - output.write(reinterpret_cast(cache), length); -} - -#endif \ No newline at end of file diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/yololayer.cu b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/yololayer.cu deleted file mode 100644 index 4c042a66..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/yololayer.cu +++ /dev/null @@ -1,313 +0,0 @@ -#include -#include -#include -#include "yololayer.h" -#include "cuda_utils.h" - -namespace Tn -{ - template - void write(char*& buffer, const T& val) - { - *reinterpret_cast(buffer) = val; - buffer += sizeof(T); - } - - template - void read(const char*& buffer, T& val) - { - val = *reinterpret_cast(buffer); - buffer += sizeof(T); - } -} - -using namespace Yolo; - -namespace nvinfer1 -{ - YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector& vYoloKernel) - { - mClassCount = classCount; - mYoloV5NetWidth = netWidth; - mYoloV5NetHeight = netHeight; - mMaxOutObject = maxOut; - mYoloKernel = vYoloKernel; - mKernelCount = vYoloKernel.size(); - - CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); - size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; - for (int ii = 0; ii < mKernelCount; ii++) - { - CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); - const auto& yolo = mYoloKernel[ii]; - CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); - } - } - YoloLayerPlugin::~YoloLayerPlugin() - { - for (int ii = 0; ii < mKernelCount; ii++) - { - CUDA_CHECK(cudaFree(mAnchor[ii])); - } - CUDA_CHECK(cudaFreeHost(mAnchor)); - } - - // create the plugin at runtime from a byte stream 
- YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) - { - using namespace Tn; - const char *d = reinterpret_cast(data), *a = d; - read(d, mClassCount); - read(d, mThreadCount); - read(d, mKernelCount); - read(d, mYoloV5NetWidth); - read(d, mYoloV5NetHeight); - read(d, mMaxOutObject); - mYoloKernel.resize(mKernelCount); - auto kernelSize = mKernelCount * sizeof(YoloKernel); - memcpy(mYoloKernel.data(), d, kernelSize); - d += kernelSize; - CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); - size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; - for (int ii = 0; ii < mKernelCount; ii++) - { - CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); - const auto& yolo = mYoloKernel[ii]; - CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); - } - assert(d == a + length); - } - - void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT - { - using namespace Tn; - char* d = static_cast(buffer), *a = d; - write(d, mClassCount); - write(d, mThreadCount); - write(d, mKernelCount); - write(d, mYoloV5NetWidth); - write(d, mYoloV5NetHeight); - write(d, mMaxOutObject); - auto kernelSize = mKernelCount * sizeof(YoloKernel); - memcpy(d, mYoloKernel.data(), kernelSize); - d += kernelSize; - - assert(d == a + getSerializationSize()); - } - - size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT - { - return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); - } - - int YoloLayerPlugin::initialize() TRT_NOEXCEPT - { - return 0; - } - - Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT - { - //output the result to channel - int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float); - - return Dims3(totalsize + 1, 1, 1); - } - - // Set plugin namespace - void YoloLayerPlugin::setPluginNamespace(const char* 
pluginNamespace) TRT_NOEXCEPT - { - mPluginNamespace = pluginNamespace; - } - - const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT - { - return mPluginNamespace; - } - - // Return the DataType of the plugin output at the requested index - DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT - { - return DataType::kFLOAT; - } - - // Return true if output tensor is broadcast across a batch. - bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT - { - return false; - } - - // Return true if plugin can use input that is broadcast across batch without replication. - bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT - { - return false; - } - - void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT - { - } - - // Attach the plugin object to an execution context and grant the plugin the access to some context resource. - void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT - { - } - - // Detach the plugin object from its execution context. 
- void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} - - const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT - { - return "YoloLayer_TRT"; - } - - const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT - { - return "1"; - } - - void YoloLayerPlugin::destroy() TRT_NOEXCEPT - { - delete this; - } - - // Clone the plugin - IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT - { - YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); - p->setPluginNamespace(mPluginNamespace); - return p; - } - - __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; - - __global__ void CalDetection(const float *input, float *output, int noElements, - const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem) - { - - int idx = threadIdx.x + blockDim.x * blockIdx.x; - if (idx >= noElements) return; - - int total_grid = yoloWidth * yoloHeight; - int bnIdx = idx / total_grid; - idx = idx - total_grid * bnIdx; - int info_len_i = 5 + classes; - const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); - - for (int k = 0; k < CHECK_COUNT; ++k) { - float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); - if (box_prob < IGNORE_THRESH) continue; - int class_id = 0; - float max_cls_prob = 0.0; - for (int i = 5; i < info_len_i; ++i) { - float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); - if (p > max_cls_prob) { - max_cls_prob = p; - class_id = i - 5; - } - } - float *res_count = output + bnIdx * outputElem; - int count = (int)atomicAdd(res_count, 1); - if (count >= maxoutobject) return; - char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection); - Detection *det = (Detection*)(data); - - int row = idx / yoloWidth; - int col = idx % yoloWidth; - - //Location - // pytorch: - // y 
= x[i].sigmoid() - // y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy - // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth - det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; - det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; - - // W: (Pw * e^tw) / FeaturemapW * netwidth - // v5: https://github.com/ultralytics/yolov5/issues/471 - det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); - det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; - det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); - det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; - det->conf = box_prob * max_cls_prob; - det->class_id = class_id; - } - } - - void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) - { - int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float); - for (int idx = 0; idx < batchSize; ++idx) { - CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float))); - } - int numElem = 0; - for (unsigned int i = 0; i < mYoloKernel.size(); ++i) { - const auto& yolo = mYoloKernel[i]; - numElem = yolo.width * yolo.height * batchSize; - if (numElem < mThreadCount) mThreadCount = numElem; - - //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight); - CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > - (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); - } - } - - - int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) 
TRT_NOEXCEPT - { - forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); - return 0; - } - - PluginFieldCollection YoloPluginCreator::mFC{}; - std::vector YoloPluginCreator::mPluginAttributes; - - YoloPluginCreator::YoloPluginCreator() - { - mPluginAttributes.clear(); - - mFC.nbFields = mPluginAttributes.size(); - mFC.fields = mPluginAttributes.data(); - } - - const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT - { - return "YoloLayer_TRT"; - } - - const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT - { - return "1"; - } - - const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT - { - return &mFC; - } - - IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT - { - assert(fc->nbFields == 2); - assert(strcmp(fc->fields[0].name, "netinfo") == 0); - assert(strcmp(fc->fields[1].name, "kernels") == 0); - int *p_netinfo = (int*)(fc->fields[0].data); - int class_count = p_netinfo[0]; - int input_w = p_netinfo[1]; - int input_h = p_netinfo[2]; - int max_output_object_count = p_netinfo[3]; - std::vector kernels(fc->fields[1].length); - memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); - YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); - obj->setPluginNamespace(mNamespace.c_str()); - return obj; - } - - IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT - { - // This object will be deleted when the network is destroyed, which will - // call YoloLayerPlugin::destroy() - YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); - obj->setPluginNamespace(mNamespace.c_str()); - return obj; - } -} - diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/yolov5.cpp b/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/yolov5.cpp deleted 
file mode 100644 index e945154d..00000000 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/yolov5.cpp +++ /dev/null @@ -1,529 +0,0 @@ -#include -#include -#include -#include "cuda_utils.h" -#include "logging.h" -#include "common.hpp" -#include "utils.h" -#include "calibrator.h" -#include "GLViewer.hpp" - -#include - -#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32 -#define DEVICE 0 // GPU id -#define NMS_THRESH 0.4 -#define CONF_THRESH 0.5 -#define BATCH_SIZE 1 - -// stuff we know about the network and the input/output blobs -static const int INPUT_H = Yolo::INPUT_H; -static const int INPUT_W = Yolo::INPUT_W; -static const int CLASS_NUM = Yolo::CLASS_NUM; -static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof (Yolo::Detection) / sizeof (float) + 1; // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1 -const char* INPUT_BLOB_NAME = "data"; -const char* OUTPUT_BLOB_NAME = "prob"; -static Logger gLogger; - -static int get_width(int x, float gw, int divisor = 8) { - return int(ceil((x * gw) / divisor)) * divisor; -} - -static int get_depth(int x, float gd) { - if (x == 1) return 1; - int r = round(x * gd); - if (x * gd - int(x * gd) == 0.5 && (int(x * gd) % 2) == 0) { - --r; - } - return std::max(r, 1); -} - -ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) { - INetworkDefinition* network = builder->createNetworkV2(0U); - - // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME - ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{3, INPUT_H, INPUT_W}); - assert(data); - - std::map weightMap = loadWeights(wts_name); - - /* ------ yolov5 backbone------ */ - auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0"); - assert(conv0); - auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 
1, "model.1"); - auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2"); - auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3"); - auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4"); - auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5"); - auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6"); - auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7"); - auto bottleneck_csp8 = C3(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.8"); - auto spp9 = SPPF(network, weightMap, *bottleneck_csp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.9"); - - /* ------ yolov5 head ------ */ - auto conv10 = convBlock(network, weightMap, *spp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10"); - - auto upsample11 = network->addResize(*conv10->getOutput(0)); - assert(upsample11); - upsample11->setResizeMode(ResizeMode::kNEAREST); - upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions()); - - ITensor * inputTensors12[] = {upsample11->getOutput(0), bottleneck_csp6->getOutput(0)}; - auto cat12 = network->addConcatenation(inputTensors12, 2); - auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13"); - auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14"); - - auto upsample15 = network->addResize(*conv14->getOutput(0)); - 
assert(upsample15); - upsample15->setResizeMode(ResizeMode::kNEAREST); - upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions()); - - ITensor * inputTensors16[] = {upsample15->getOutput(0), bottleneck_csp4->getOutput(0)}; - auto cat16 = network->addConcatenation(inputTensors16, 2); - - auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17"); - - /* ------ detect ------ */ - IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]); - auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18"); - ITensor * inputTensors19[] = {conv18->getOutput(0), conv14->getOutput(0)}; - auto cat19 = network->addConcatenation(inputTensors19, 2); - auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20"); - IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]); - auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21"); - ITensor * inputTensors22[] = {conv21->getOutput(0), conv10->getOutput(0)}; - auto cat22 = network->addConcatenation(inputTensors22, 2); - auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23"); - IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]); - - auto yolo = addYoLoLayer(network, weightMap, "model.24", 
std::vector{det0, det1, det2}); - yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME); - network->markOutput(*yolo->getOutput(0)); - - // Build engine - builder->setMaxBatchSize(maxBatchSize); - config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB -#if defined(USE_FP16) - config->setFlag(BuilderFlag::kFP16); -#elif defined(USE_INT8) - std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl; - assert(builder->platformHasFastInt8()); - config->setFlag(BuilderFlag::kINT8); - Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME); - config->setInt8Calibrator(calibrator); -#endif - - std::cout << "Building engine, please wait for a while..." << std::endl; - ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config); - std::cout << "Build engine successfully!" << std::endl; - - // Don't need the network any more - network->destroy(); - - // Release host memory - for (auto& mem : weightMap) { - free((void*) (mem.second.values)); - } - - return engine; -} - -//v6.0 -ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) { - INetworkDefinition* network = builder->createNetworkV2(0U); - - // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME - ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{3, INPUT_H, INPUT_W}); - assert(data); - - std::map weightMap = loadWeights(wts_name); - - /* ------ yolov5 backbone------ */ - auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0"); - auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1"); - auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2"); - auto conv3 = convBlock(network, 
weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3"); - auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4"); - auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5"); - auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6"); - auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7"); - auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8"); - auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9"); - auto c3_10 = C3(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.10"); - auto sppf11 = SPPF(network, weightMap, *c3_10->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.11"); - - /* ------ yolov5 head ------ */ - auto conv12 = convBlock(network, weightMap, *sppf11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12"); - auto upsample13 = network->addResize(*conv12->getOutput(0)); - assert(upsample13); - upsample13->setResizeMode(ResizeMode::kNEAREST); - upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions()); - ITensor * inputTensors14[] = {upsample13->getOutput(0), c3_8->getOutput(0)}; - auto cat14 = network->addConcatenation(inputTensors14, 2); - auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15"); - - auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16"); - auto upsample17 = network->addResize(*conv16->getOutput(0)); - assert(upsample17); - upsample17->setResizeMode(ResizeMode::kNEAREST); - 
upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions()); - ITensor * inputTensors18[] = {upsample17->getOutput(0), c3_6->getOutput(0)}; - auto cat18 = network->addConcatenation(inputTensors18, 2); - auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19"); - - auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20"); - auto upsample21 = network->addResize(*conv20->getOutput(0)); - assert(upsample21); - upsample21->setResizeMode(ResizeMode::kNEAREST); - upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions()); - ITensor * inputTensors21[] = {upsample21->getOutput(0), c3_4->getOutput(0)}; - auto cat22 = network->addConcatenation(inputTensors21, 2); - auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23"); - - auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24"); - ITensor * inputTensors25[] = {conv24->getOutput(0), conv20->getOutput(0)}; - auto cat25 = network->addConcatenation(inputTensors25, 2); - auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26"); - - auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27"); - ITensor * inputTensors28[] = {conv27->getOutput(0), conv16->getOutput(0)}; - auto cat28 = network->addConcatenation(inputTensors28, 2); - auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29"); - - auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30"); - ITensor * inputTensors31[] = {conv30->getOutput(0), conv12->getOutput(0)}; - auto cat31 = 
network->addConcatenation(inputTensors31, 2); - auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32"); - - /* ------ detect ------ */ - IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]); - IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]); - IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]); - IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW { - 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]); - - auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector{det0, det1, det2, det3}); - yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME); - network->markOutput(*yolo->getOutput(0)); - - // Build engine - builder->setMaxBatchSize(maxBatchSize); - config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB -#if defined(USE_FP16) - config->setFlag(BuilderFlag::kFP16); -#elif defined(USE_INT8) - std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl; - assert(builder->platformHasFastInt8()); - config->setFlag(BuilderFlag::kINT8); - Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME); - config->setInt8Calibrator(calibrator); -#endif - - std::cout << "Building engine, please wait for a while..." << std::endl; - ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config); - std::cout << "Build engine successfully!" 
<< std::endl; - - // Don't need the network any more - network->destroy(); - - // Release host memory - for (auto& mem : weightMap) { - free((void*) (mem.second.values)); - } - - return engine; -} - -void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) { - // Create builder - IBuilder* builder = createInferBuilder(gLogger); - IBuilderConfig* config = builder->createBuilderConfig(); - - // Create model to populate the network, then set the outputs and create an engine - ICudaEngine *engine = nullptr; - if (is_p6) { - engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name); - } else { - engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name); - } - assert(engine != nullptr); - - // Serialize the engine - (*modelStream) = engine->serialize(); - - // Close everything down - engine->destroy(); - config->destroy(); - builder->destroy(); -} - -void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) { - // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host - CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof (float), cudaMemcpyHostToDevice, stream)); - context.enqueue(batchSize, buffers, stream, nullptr); - CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof (float), cudaMemcpyDeviceToHost, stream)); - cudaStreamSynchronize(stream); -} - -bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw) { - if (argc < 4) return false; - if (std::string(argv[1]) == "-s" && (argc == 5 || argc == 7)) { - wts = std::string(argv[2]); - engine = std::string(argv[3]); - auto net = std::string(argv[4]); - if (net[0] == 'n') { - gd = 0.33; - gw = 0.25; - } else if (net[0] == 's') { - gd = 0.33; - gw = 0.50; - } else 
if (net[0] == 'm') { - gd = 0.67; - gw = 0.75; - } else if (net[0] == 'l') { - gd = 1.0; - gw = 1.0; - } else if (net[0] == 'x') { - gd = 1.33; - gw = 1.25; - } else if (net[0] == 'c' && argc == 7) { - gd = atof(argv[5]); - gw = atof(argv[6]); - } else { - return false; - } - if (net.size() == 2 && net[1] == '6') { - is_p6 = true; - } - } else if (std::string(argv[1]) == "-d") { - engine = std::string(argv[2]); - } else { - return false; - } - return true; -} - -void print(std::string msg_prefix, sl::ERROR_CODE err_code, std::string msg_suffix) { - std::cout << "[Sample] "; - if (err_code != sl::ERROR_CODE::SUCCESS) - std::cout << "[Error] "; - std::cout << msg_prefix << " "; - if (err_code != sl::ERROR_CODE::SUCCESS) { - std::cout << " | " << toString(err_code) << " : "; - std::cout << toVerbose(err_code); - } - if (!msg_suffix.empty()) - std::cout << " " << msg_suffix; - std::cout << std::endl; -} - -std::vector cvt(const cv::Rect &bbox_in){ - std::vector bbox_out(4); - bbox_out[0] = sl::uint2(bbox_in.x, bbox_in.y); - bbox_out[1] = sl::uint2(bbox_in.x + bbox_in.width, bbox_in.y); - bbox_out[2] = sl::uint2(bbox_in.x + bbox_in.width, bbox_in.y + bbox_in.height); - bbox_out[3] = sl::uint2(bbox_in.x, bbox_in.y + bbox_in.height); - return bbox_out; -} - -int main(int argc, char** argv) { - - std::string wts_name = ""; - std::string engine_name = ""; - bool is_p6 = false; - float gd = 0.0f, gw = 0.0f; - if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw)) { - std::cerr << "arguments not right!" 
<< std::endl; - std::cerr << "./yolov5 -s [.wts] [.engine] [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file" << std::endl; - std::cerr << "./yolov5 -d [.engine] [zed camera id / optional svo filepath] // deserialize plan file and run inference" << std::endl; - return -1; - } - - // create a model using the API directly and serialize it to a stream - if (!wts_name.empty()) { - cudaSetDevice(DEVICE); - IHostMemory * modelStream{ nullptr}; - APIToModel(BATCH_SIZE, &modelStream, is_p6, gd, gw, wts_name); - assert(modelStream != nullptr); - std::ofstream p(engine_name, std::ios::binary); - if (!p) { - std::cerr << "could not open plan output file" << std::endl; - return -1; - } - p.write(reinterpret_cast (modelStream->data()), modelStream->size()); - modelStream->destroy(); - return 0; - } - - /// Opening the ZED camera before the model deserialization to avoid cuda context issue - sl::Camera zed; - sl::InitParameters init_parameters; - init_parameters.sdk_verbose = true; - init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA; - init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; // OpenGL's coordinate system is right_handed - - if (argc > 3) { - std::string zed_opt = argv[3]; - if (zed_opt.find(".svo") != std::string::npos) - init_parameters.input.setFromSVOFile(zed_opt.c_str()); - } - // Open the camera - auto returned_state = zed.open(init_parameters); - if (returned_state != sl::ERROR_CODE::SUCCESS) { - print("Camera Open", returned_state, "Exit program."); - return EXIT_FAILURE; - } - zed.enablePositionalTracking(); - // Custom OD - sl::ObjectDetectionParameters detection_parameters; - detection_parameters.enable_tracking = true; - detection_parameters.enable_segmentation = false; // designed to give person pixel mask - detection_parameters.detection_model = sl::OBJECT_DETECTION_MODEL::CUSTOM_BOX_OBJECTS; - returned_state = zed.enableObjectDetection(detection_parameters); - if (returned_state != 
sl::ERROR_CODE::SUCCESS) { - print("enableObjectDetection", returned_state, "\nExit program."); - zed.close(); - return EXIT_FAILURE; - } - auto camera_config = zed.getCameraInformation().camera_configuration; - sl::Resolution pc_resolution(std::min((int) camera_config.resolution.width, 720), std::min((int) camera_config.resolution.height, 404)); - auto camera_info = zed.getCameraInformation(pc_resolution).camera_configuration; - // Create OpenGL Viewer - GLViewer viewer; - viewer.init(argc, argv, camera_info.calibration_parameters.left_cam, true); - // --------- - - - // deserialize the .engine and run inference - std::ifstream file(engine_name, std::ios::binary); - if (!file.good()) { - std::cerr << "read " << engine_name << " error!" << std::endl; - return -1; - } - char *trtModelStream = nullptr; - size_t size = 0; - file.seekg(0, file.end); - size = file.tellg(); - file.seekg(0, file.beg); - trtModelStream = new char[size]; - assert(trtModelStream); - file.read(trtModelStream, size); - file.close(); - - // prepare input data --------------------------- - static float data[BATCH_SIZE * 3 * INPUT_H * INPUT_W]; - static float prob[BATCH_SIZE * OUTPUT_SIZE]; - IRuntime* runtime = createInferRuntime(gLogger); - assert(runtime != nullptr); - ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size); - assert(engine != nullptr); - IExecutionContext* context = engine->createExecutionContext(); - assert(context != nullptr); - delete[] trtModelStream; - assert(engine->getNbBindings() == 2); - void* buffers[2]; - // In order to bind the buffers, we need to know the names of the input and output tensors. 
- // Note that indices are guaranteed to be less than IEngine::getNbBindings() - const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME); - const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME); - assert(inputIndex == 0); - assert(outputIndex == 1); - // Create GPU buffers on device - CUDA_CHECK(cudaMalloc(&buffers[inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof (float))); - CUDA_CHECK(cudaMalloc(&buffers[outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof (float))); - // Create stream - cudaStream_t stream; - CUDA_CHECK(cudaStreamCreate(&stream)); - - assert(BATCH_SIZE == 1); // This sample only support batch 1 for now - - sl::Mat left_sl, point_cloud; - cv::Mat left_cv_rgb; - sl::ObjectDetectionRuntimeParameters objectTracker_parameters_rt; - sl::Objects objects; - sl::Pose cam_w_pose; - cam_w_pose.pose_data.setIdentity(); - - while (viewer.isAvailable()) { - if (zed.grab() == sl::ERROR_CODE::SUCCESS) { - - zed.retrieveImage(left_sl, sl::VIEW::LEFT); - - // Preparing inference - cv::Mat left_cv_rgba = slMat2cvMat(left_sl); - cv::cvtColor(left_cv_rgba, left_cv_rgb, cv::COLOR_BGRA2BGR); - if (left_cv_rgb.empty()) continue; - cv::Mat pr_img = preprocess_img(left_cv_rgb, INPUT_W, INPUT_H); // letterbox BGR to RGB - int i = 0; - int batch = 0; - for (int row = 0; row < INPUT_H; ++row) { - uchar* uc_pixel = pr_img.data + row * pr_img.step; - for (int col = 0; col < INPUT_W; ++col) { - data[batch * 3 * INPUT_H * INPUT_W + i] = (float) uc_pixel[2] / 255.0; - data[batch * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float) uc_pixel[1] / 255.0; - data[batch * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float) uc_pixel[0] / 255.0; - uc_pixel += 3; - ++i; - } - } - - // Running inference - doInference(*context, stream, buffers, data, prob, BATCH_SIZE); - std::vector> batch_res(BATCH_SIZE); - auto& res = batch_res[batch]; - nms(res, &prob[batch * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH); - - // Preparing for ZED SDK ingesting - 
std::vector objects_in; - for (auto &it : res) { - sl::CustomBoxObjectData tmp; - cv::Rect r = get_rect(left_cv_rgb, it.bbox); - // Fill the detections into the correct format - tmp.unique_object_id = sl::generate_unique_id(); - tmp.probability = it.conf; - tmp.label = (int) it.class_id; - tmp.bounding_box_2d = cvt(r); - tmp.is_grounded = ((int) it.class_id == 0); // Only the first class (person) is grounded, that is moving on the floor plane - // others are tracked in full 3D space - objects_in.push_back(tmp); - } - // Send the custom detected boxes to the ZED - zed.ingestCustomBoxObjects(objects_in); - - - // Displaying 'raw' objects - for (size_t j = 0; j < res.size(); j++) { - cv::Rect r = get_rect(left_cv_rgb, res[j].bbox); - cv::rectangle(left_cv_rgb, r, cv::Scalar(0x27, 0xC1, 0x36), 2); - cv::putText(left_cv_rgb, std::to_string((int) res[j].class_id), cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2); - } - cv::imshow("Objects", left_cv_rgb); - cv::waitKey(10); - - // Retrieve the tracked objects, with 2D and 3D attributes - zed.retrieveObjects(objects, objectTracker_parameters_rt); - // GL Viewer - zed.retrieveMeasure(point_cloud, sl::MEASURE::XYZRGBA, sl::MEM::GPU, pc_resolution); - zed.getPosition(cam_w_pose, sl::REFERENCE_FRAME::WORLD); - viewer.updateData(point_cloud, objects.object_list, cam_w_pose.pose_data); - } - } - - // Release stream and buffers - cudaStreamDestroy(stream); - CUDA_CHECK(cudaFree(buffers[inputIndex])); - CUDA_CHECK(cudaFree(buffers[outputIndex])); - // Destroy the engine - context->destroy(); - engine->destroy(); - runtime->destroy(); - viewer.exit(); - - return 0; -} diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/CMakeLists.txt b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/CMakeLists.txt similarity index 87% rename from object detection/custom detector/cpp/tensorrt_yolov5_v6.0/CMakeLists.txt rename to object detection/custom 
detector/cpp/tensorrt_yolov8_seg_onnx/CMakeLists.txt index 1d8aab75..7a38533e 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/CMakeLists.txt +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/CMakeLists.txt @@ -1,9 +1,11 @@ cmake_minimum_required(VERSION 3.5) -PROJECT(yolov5_zed) +PROJECT(yolov8_seg_onnx_zed) set(CMAKE_CXX_STANDARD 14) set(CMAKE_CXX_STANDARD_REQUIRED ON) +# set(CMAKE_BUILD_TYPE RelWithDebInfo) + option(LINK_SHARED_ZED "Link with the ZED SDK shared executable" ON) if (NOT LINK_SHARED_ZED AND MSVC) @@ -18,10 +20,11 @@ IF (ENABLE_INT8_CALIBRATOR) ADD_DEFINITIONS(-DENABLE_INT8_CALIBRATOR) ENDIF() -find_package(ZED 3 REQUIRED) +find_package(ZED 4 REQUIRED) find_package(OpenCV REQUIRED) find_package(GLUT REQUIRED) find_package(GLEW REQUIRED) +SET(OpenGL_GL_PREFERENCE GLVND) find_package(OpenGL REQUIRED) find_package(CUDA REQUIRED) @@ -53,16 +56,19 @@ link_directories(/usr/lib/x86_64-linux-gnu/) FILE(GLOB_RECURSE SRC_FILES src/*.c*) FILE(GLOB_RECURSE HDR_FILES include/*.h*) -cuda_add_executable(${PROJECT_NAME} ${HDR_FILES} ${SRC_FILES}) +ADD_EXECUTABLE(${PROJECT_NAME} ${HDR_FILES} ${SRC_FILES}) add_definitions(-O3 -D_MWAITXINTRIN_H_INCLUDED -Wno-deprecated-declarations) +# Add debugging-specific flags +# target_compile_options(${PROJECT_NAME} PRIVATE -g) + if (LINK_SHARED_ZED) SET(ZED_LIBS ${ZED_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_CUDART_LIBRARY}) else() SET(ZED_LIBS ${ZED_STATIC_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_LIBRARY}) endif() -SET(TRT_LIBS nvinfer) +SET(TRT_LIBS nvinfer nvonnxparser nvinfer_plugin) target_link_libraries(${PROJECT_NAME} ${TRT_LIBS} diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/README.md b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/README.md new file mode 100644 index 00000000..60dc88c4 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/README.md @@ -0,0 +1,79 @@ +# Yolov8 with ZED Custom Box & Masks input + 
+This sample is designed to run a state-of-the-art instance segmentation model using the highly optimized TensorRT framework. The images are taken from the ZED SDK, and the 2D box detections and predicted masks are then ingested into the ZED SDK to extract 3D information (localization, 3D bounding boxes) and tracking. + +This sample uses a TensorRT-optimized ONNX model. It is compatible with YOLOv8-seg. It can be used with the default model trained on the COCO dataset (80 classes) provided by the framework maintainers. + +A custom detector can be trained with the same architecture. This tutorials walk you through the workflow of training a custom detector: https://github.com/roboflow/notebooks/blob/main/notebooks/train-yolov8-instance-segmentation-on-custom-dataset.ipynb + +## Getting Started + + - Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/) + - Check the [Documentation](https://www.stereolabs.com/docs/) + - [TensorRT Documentation](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html) + +## Workflow + +This sample is expecting a TensorRT engine, optimized from an ONNX model. The ONNX model can be exported from Pytorch using the original YOLO code. Please refer to the section corresponding to the needed version. 
+ +## YOLOv8 (recommended) + +### Installing yolov8 + +YOLOv8 can be installed directly from pip using the following command: + +``` +python -m pip install ultralytics +``` + +### ONNX file export + +In this documentation, we'll use the CLI for export https://docs.ultralytics.com/modes/export/ + +``` +yolo export model=yolov8s-seg.pt format=onnx simplify=True dynamic=False imgsz=640 +``` + +For a custom model model the weight file can be changed: + +``` +yolo export model=yolov8s-seg_custom_model.pt format=onnx simplify=True dynamic=False imgsz=640 +``` + +Please refer to the corresponding documentation for more details https://github.com/ultralytics/ultralytics + + +### ONNX to TensorRT Engine + +TensorRT applies heavy optimization by processing the network structure itself and benchmarking all the available implementations of each inference function to take the fastest. The result is the inference engine. This process can take a few minutes so we usually want to generate it the first time and then save it for later reload. This step should be done at each model or weight change only once. + +Please note that this sample requires a fixed size and that image should also be squared (e.g. 640x640). 
+ +You can either set the input size implicitely, with + +```sh +./yolov8_seg_onnx_zed -s yolov8s-seg.onnx yolov8s-seg.engine +``` + +Or explicitely with + +```sh +./yolov8_seg_onnx_zed -s yolov8s-seg.onnx yolov8s-seg.engine images:1x3x640x640 +``` + +### Build the sample + + - Build for [Windows](https://www.stereolabs.com/docs/app-development/cpp/windows/) + - Build for [Linux/Jetson](https://www.stereolabs.com/docs/app-development/cpp/linux/) + +### Running the sample with the generated engine + +```sh +./yolov8_seg_onnx_zed [.engine] [zed camera id / optional svo filepath] // deserialize and run inference + +# For example yolov8s-seg with a plugged zed camera +./yolov8_seg_onnx_zed yolov8s-seg.engine + +# For example yolov8s-seg with an SVO file +./yolov8_seg_onnx_zed yolov8s-seg.engine ./foo.svo +``` diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/GLViewer.hpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/GLViewer.hpp new file mode 100644 index 00000000..2f1b143c --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/GLViewer.hpp @@ -0,0 +1,323 @@ +#ifndef __GLVIEWER_HPP__ +#define __GLVIEWER_HPP__ + +#include +#include + +#include + +#include +#include + +#include +#include + +#include + +#include "utils.h" + + +#ifndef M_PI +#define M_PI 3.141592653f +#endif + +#define MOUSE_R_SENSITIVITY 0.03f +#define MOUSE_UZ_SENSITIVITY 0.5f +#define MOUSE_DZ_SENSITIVITY 1.25f +#define MOUSE_T_SENSITIVITY 0.05f +#define KEY_T_SENSITIVITY 0.1f + +// Utils +std::vector getBBoxOnFloorPlane(std::vector const& bbox, sl::Pose const& cam_pose); + +// 3D + +class CameraGL { +public: + + CameraGL() { + } + + enum DIRECTION { + UP, DOWN, LEFT, RIGHT, FORWARD, BACK + }; + CameraGL(sl::Translation const& position, sl::Translation const& direction, sl::Translation const& vertical = sl::Translation(0, 1, 0)); // vertical = Eigen::Vector3f(0, 1, 0) + ~CameraGL(); + + void update(); + void 
setProjection(float const horizontalFOV, float const verticalFOV, float const znear, float const zfar); + const sl::Transform& getViewProjectionMatrix() const; + + float getHorizontalFOV() const; + float getVerticalFOV() const; + + // Set an offset between the eye of the camera and its position + // Note: Useful to use the camera as a trackball camera with z>0 and x = 0, y = 0 + // Note: coordinates are in local space + void setOffsetFromPosition(sl::Translation const& offset); + const sl::Translation& getOffsetFromPosition() const; + + void setDirection(sl::Translation const& direction, sl::Translation const &vertical); + void translate(sl::Translation const& t); + void setPosition(sl::Translation const& p); + void rotate(sl::Orientation const& rot); + void rotate(sl::Rotation const& m); + void setRotation(sl::Orientation const& rot); + void setRotation(sl::Rotation const& m); + + const sl::Translation& getPosition() const; + const sl::Translation& getForward() const; + const sl::Translation& getRight() const; + const sl::Translation& getUp() const; + const sl::Translation& getVertical() const; + float getZNear() const; + float getZFar() const; + + static const sl::Translation ORIGINAL_FORWARD; + static const sl::Translation ORIGINAL_UP; + static const sl::Translation ORIGINAL_RIGHT; + + sl::Transform projection_; + bool usePerspective_; +private: + void updateVectors(); + void updateView(); + void updateVPMatrix(); + + sl::Translation offset_; + sl::Translation position_; + sl::Translation forward_; + sl::Translation up_; + sl::Translation right_; + sl::Translation vertical_; + + sl::Orientation rotation_; + + sl::Transform view_; + sl::Transform vpMatrix_; + float horizontalFieldOfView_; + float verticalFieldOfView_; + float znear_; + float zfar_; +}; + +class Shader { +public: + + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} + Shader(const GLchar* vs, const GLchar* fs); + ~Shader(); + + // Delete the move constructor and move assignment operator + 
Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); + GLuint getProgramId(); + + static const GLint ATTRIB_VERTICES_POS = 0; + static const GLint ATTRIB_COLOR_POS = 1; +private: + bool compile(GLuint &shaderId, GLenum const type, GLchar const* src); + GLuint verterxId_; + GLuint fragmentId_; + GLuint programId_; +}; + +struct ShaderData { + Shader it; + GLuint MVP_Mat; +}; + +class Simple3DObject { +public: + + Simple3DObject() { + } + Simple3DObject(sl::Translation const& position, bool const isStatic); + ~Simple3DObject(); + + void addPt(sl::float3 const& pt); + void addClr(sl::float4 const& clr); + + void addBBox(std::vector const& pts, sl::float4& clr); + void addPoint(sl::float3 const& pt, sl::float4 const& clr); + void addTriangle(sl::float3 const& p1, sl::float3 const& p2, sl::float3 const& p3, sl::float4 const& clr); + void addLine(sl::float3 const& p1, sl::float3 const& p2, sl::float4 const& clr); + + // New 3D rendering + void addFullEdges(std::vector const& pts, sl::float4& clr); + void addVerticalEdges(std::vector const& pts, sl::float4& clr); + void addTopFace(std::vector const& pts, sl::float4& clr); + void addVerticalFaces(std::vector const& pts, sl::float4& clr); + + void pushToGPU(); + void clear(); + + void setDrawingType(GLenum const type); + + void draw(); + + void translate(sl::Translation const& t); + void setPosition(sl::Translation const& p); + + void setRT(sl::Transform const& mRT); + + void rotate(sl::Orientation const& rot); + void rotate(sl::Rotation const& m); + void setRotation(sl::Orientation const& rot); + void setRotation(sl::Rotation const& m); + + const sl::Translation& getPosition() const; + + sl::Transform getModelMatrix() const; +private: + std::vector vertices_; + std::vector colors_; + std::vector indices_; + + bool 
isStatic_; + + GLenum drawingType_; + + GLuint vaoID_; + /* + Vertex buffer IDs: + - [0]: Vertices coordinates; + - [1]: Indices; + */ + GLuint vboID_[2]; + + sl::Translation position_; + sl::Orientation rotation_; +}; + +class PointCloud { +public: + PointCloud(); + ~PointCloud(); + + // Initialize Opengl and Cuda buffers + // Warning: must be called in the Opengl thread + void initialize(sl::Resolution const& res); + // Push a new point cloud + // Warning: can be called from any thread but the mutex "mutexData" must be locked + void pushNewPC(sl::Mat const& matXYZRGBA); + // Update the Opengl buffer + // Warning: must be called in the Opengl thread + void update(); + // Draw the point cloud + // Warning: must be called in the Opengl thread + void draw(sl::Transform const& vp); + // Close (disable update) + void close(); + +private: + sl::Mat matGPU_; + bool hasNewPCL_ = false; + ShaderData shader; + size_t numBytes_; + float* xyzrgbaMappedBuf_; + GLuint bufferGLID_; + cudaGraphicsResource* bufferCudaID_; +}; + +struct ObjectClassName { + sl::float3 position; + std::string name; + sl::float4 color; +}; + +// This class manages input events, window and Opengl rendering pipeline + +class GLViewer { +public: + GLViewer(); + ~GLViewer(); + bool isAvailable(); + bool isPlaying() const { return play; } + void setPlaying(const bool p) { play = p; } + + void init(int argc, char **argv, sl::CameraParameters const& param, bool const isTrackingON); + void updateData(sl::Mat const& matXYZRGBA, std::vector const& objs, sl::Transform const& cam_pose); + + int getKey() { + int const key{last_key}; + last_key = -1; + return key; + } + + void exit(); +private: + // Rendering loop method called each frame by glutDisplayFunc + void render(); + // Everything that needs to be updated before rendering must be done in this method + void update(); + // Once everything is updated, every renderable objects must be drawn in this method + void draw(); + // Clear and refresh inputs' data + 
void clearInputs(); + + void printText(); + void createBboxRendering(std::vector const&bbox, sl::float4& bbox_clr); + void createIDRendering(sl::float3 const& center, sl::float4 const& clr, unsigned int const id); + + // Glut functions callbacks + static void drawCallback(); + static void mouseButtonCallback(int button, int state, int x, int y); + static void mouseMotionCallback(int x, int y); + static void reshapeCallback(int width, int height); + static void keyPressedCallback(unsigned char c, int x, int y); + static void keyReleasedCallback(unsigned char c, int x, int y); + static void idle(); + + bool available; + + enum MOUSE_BUTTON { + LEFT = 0, + MIDDLE = 1, + RIGHT = 2, + WHEEL_UP = 3, + WHEEL_DOWN = 4 + }; + + enum KEY_STATE { + UP = 'u', + DOWN = 'd', + FREE = 'f' + }; + + bool isTrackingON_ = false; + + bool mouseButton_[3]; + int mouseWheelPosition_; + int mouseCurrentPosition_[2]; + int mouseMotion_[2]; + int previousMouseMotion_[2]; + KEY_STATE keyStates_[256]; + + std::mutex mtx; + + Simple3DObject frustum; + PointCloud pointCloud_; + CameraGL camera_; + ShaderData shaderLine; + ShaderData shader; + sl::float4 bckgrnd_clr; + sl::Transform cam_pose; + + std::vector objectsName; + Simple3DObject BBox_edges; + Simple3DObject BBox_faces; + Simple3DObject skeletons; + Simple3DObject floor_grid; + + bool play{true}; + int last_key{-1}; +}; + +#endif // __GLVIEWER_HPP__ diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/utils.h b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/utils.h new file mode 100644 index 00000000..06a3d3e5 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/utils.h @@ -0,0 +1,160 @@ +#ifndef TRTX_YOLOV8SEG_UTILS_H_ +#define TRTX_YOLOV8SEG_UTILS_H_ + +#include +#include + +#include +#include +#include + +std::vector> const CLASS_COLORS = { + {0, 114, 189}, {217, 83, 25}, {237, 177, 32}, {126, 47, 142}, {119, 172, 48}, {77, 190, 238}, + {162, 
20, 47}, {76, 76, 76}, {153, 153, 153}, {255, 0, 0}, {255, 128, 0}, {191, 191, 0}, + {0, 255, 0}, {0, 0, 255}, {170, 0, 255}, {85, 85, 0}, {85, 170, 0}, {85, 255, 0}, + {170, 85, 0}, {170, 170, 0}, {170, 255, 0}, {255, 85, 0}, {255, 170, 0}, {255, 255, 0}, + {0, 85, 128}, {0, 170, 128}, {0, 255, 128}, {85, 0, 128}, {85, 85, 128}, {85, 170, 128}, + {85, 255, 128}, {170, 0, 128}, {170, 85, 128}, {170, 170, 128}, {170, 255, 128}, {255, 0, 128}, + {255, 85, 128}, {255, 170, 128}, {255, 255, 128}, {0, 85, 255}, {0, 170, 255}, {0, 255, 255}, + {85, 0, 255}, {85, 85, 255}, {85, 170, 255}, {85, 255, 255}, {170, 0, 255}, {170, 85, 255}, + {170, 170, 255}, {170, 255, 255}, {255, 0, 255}, {255, 85, 255}, {255, 170, 255}, {85, 0, 0}, + {128, 0, 0}, {170, 0, 0}, {212, 0, 0}, {255, 0, 0}, {0, 43, 0}, {0, 85, 0}, + {0, 128, 0}, {0, 170, 0}, {0, 212, 0}, {0, 255, 0}, {0, 0, 43}, {0, 0, 85}, + {0, 0, 128}, {0, 0, 170}, {0, 0, 212}, {0, 0, 255}, {0, 0, 0}, {36, 36, 36}, + {73, 73, 73}, {109, 109, 109}, {146, 146, 146}, {182, 182, 182}, {219, 219, 219}, {0, 114, 189}, + {80, 183, 189}, {128, 128, 0}}; + +inline cv::Scalar generateColorID_u(int const idx) { + if (idx < 0) return cv::Scalar(236, 184, 36, 255); + int color_idx = idx % CLASS_COLORS.size(); + return cv::Scalar(CLASS_COLORS[color_idx][0U], CLASS_COLORS[color_idx][1U], CLASS_COLORS[color_idx][2U], 255); +} + +inline sl::float4 generateColorID_f(int const idx) { + cv::Scalar const clr_u{generateColorID_u(idx)}; + sl::float4 clr_f{static_cast(clr_u.val[0U]), static_cast(clr_u.val[1U]), static_cast(clr_u.val[2U]), 255.F}; + return clr_f / 255.F; +} + +inline sl::float4 getColorClass(int const idx) { + cv::Scalar const scalar{generateColorID_u(idx)}; + sl::float4 clr{static_cast(scalar[0U]), static_cast(scalar[1U]), static_cast(scalar[2U]), static_cast(scalar[3U])}; + return clr / 255.F; +} + +inline bool renderObject(const sl::ObjectData& i, const bool isTrackingON) { + if (isTrackingON) + return (i.tracking_state == 
sl::OBJECT_TRACKING_STATE::OK); + else + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK || i.tracking_state == sl::OBJECT_TRACKING_STATE::OFF); +} + +template +inline uchar _applyFading(T val, float current_alpha, double current_clr) { + return static_cast (current_alpha * current_clr + (1.0 - current_alpha) * val); +} + +inline cv::Vec4b applyFading(cv::Scalar val, float current_alpha, cv::Scalar current_clr) { + cv::Vec4b out; + out[0] = _applyFading(val.val[0], current_alpha, current_clr.val[0]); + out[1] = _applyFading(val.val[1], current_alpha, current_clr.val[1]); + out[2] = _applyFading(val.val[2], current_alpha, current_clr.val[2]); + out[3] = 255; + return out; +} + +inline void drawVerticalLine( + cv::Mat &left_display, + cv::Point2i start_pt, + cv::Point2i end_pt, + cv::Scalar clr, + int thickness) { + int n_steps = 7; + cv::Point2i pt1, pt4; + pt1.x = ((n_steps - 1) * start_pt.x + end_pt.x) / n_steps; + pt1.y = ((n_steps - 1) * start_pt.y + end_pt.y) / n_steps; + + pt4.x = (start_pt.x + (n_steps - 1) * end_pt.x) / n_steps; + pt4.y = (start_pt.y + (n_steps - 1) * end_pt.y) / n_steps; + + cv::line(left_display, start_pt, pt1, clr, thickness); + cv::line(left_display, pt4, end_pt, clr, thickness); +} + +inline cv::Mat slMat2cvMat(sl::Mat const& input) { + // Mapping between MAT_TYPE and CV_TYPE + int cv_type = -1; + switch (input.getDataType()) { + case sl::MAT_TYPE::F32_C1: cv_type = CV_32FC1; + break; + case sl::MAT_TYPE::F32_C2: cv_type = CV_32FC2; + break; + case sl::MAT_TYPE::F32_C3: cv_type = CV_32FC3; + break; + case sl::MAT_TYPE::F32_C4: cv_type = CV_32FC4; + break; + case sl::MAT_TYPE::U8_C1: cv_type = CV_8UC1; + break; + case sl::MAT_TYPE::U8_C2: cv_type = CV_8UC2; + break; + case sl::MAT_TYPE::U8_C3: cv_type = CV_8UC3; + break; + case sl::MAT_TYPE::U8_C4: cv_type = CV_8UC4; + break; + default: break; + } + + return cv::Mat(input.getHeight(), input.getWidth(), cv_type, input.getPtr(sl::MEM::CPU)); +} + +// Be careful about the memory 
owner, might want to use it like : +// cvMat2slMat(cv_mat).copyTo(sl_mat, sl::COPY_TYPE::CPU_CPU); +inline sl::Mat cvMat2slMat(cv::Mat const& input) { + sl::MAT_TYPE sl_type; + switch (input.type()) { + case CV_32FC1: sl_type = sl::MAT_TYPE::F32_C1; + break; + case CV_32FC2: sl_type = sl::MAT_TYPE::F32_C2; + break; + case CV_32FC3: sl_type = sl::MAT_TYPE::F32_C3; + break; + case CV_32FC4: sl_type = sl::MAT_TYPE::F32_C4; + break; + case CV_8UC1: sl_type = sl::MAT_TYPE::U8_C1; + break; + case CV_8UC2: sl_type = sl::MAT_TYPE::U8_C2; + break; + case CV_8UC3: sl_type = sl::MAT_TYPE::U8_C3; + break; + case CV_8UC4: sl_type = sl::MAT_TYPE::U8_C4; + break; + default: break; + } + return sl::Mat(input.cols, input.rows, sl_type, input.data, input.step, sl::MEM::CPU); +} + +inline bool readFile(std::string filename, std::vector &file_content) { + // open the file: + std::ifstream instream(filename, std::ios::in | std::ios::binary); + if (!instream.is_open()) return true; + file_content = std::vector((std::istreambuf_iterator(instream)), std::istreambuf_iterator()); + return false; +} + +inline std::vector split_str(std::string const& s, std::string const& delimiter) { + size_t pos_start{0U}, pos_end, delim_len{delimiter.length()}; + std::string token; + std::vector res; + + while ((pos_end = s.find(delimiter, pos_start)) != std::string::npos) { + token = s.substr(pos_start, pos_end - pos_start); + pos_start = pos_end + delim_len; + res.push_back(token); + } + + res.push_back(s.substr(pos_start)); + return res; +} + +#endif // TRTX_YOLOV8SEG_UTILS_H_ + diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg.hpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg.hpp new file mode 100644 index 00000000..1e1777a7 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg.hpp @@ -0,0 +1,402 @@ +// +// Created by ubuntu on 2/8/23. 
+// +// https://github.com/triple-Mu/YOLOv8-TensorRT/blob/main/csrc/segment/normal/include/yolov8-seg.hpp + +#ifndef SEGMENT_NORMAL_YOLOV8_SEG_HPP +#define SEGMENT_NORMAL_YOLOV8_SEG_HPP +#include "NvInferPlugin.h" +#include "yolov8-seg_common.hpp" +#include + + +#if NV_TENSORRT_MAJOR >= 10 +#define trt_name_engine_get_binding_name getIOTensorName +#define trt_name_engine_get_nb_binding getNbIOTensors +#else +#define trt_name_engine_get_binding_name getBindingName +#define trt_name_engine_get_nb_binding getNbBindings +#endif + + +using namespace seg; + +class YOLOv8_seg { +public: + explicit YOLOv8_seg(const std::string& engine_file_path); + ~YOLOv8_seg(); + + void make_pipe(bool warmup = true); + void copy_from_Mat(const cv::Mat& image); + void copy_from_Mat(const cv::Mat& image, const cv::Size& size); + void letterbox(const cv::Mat& image, cv::Mat& out, const cv::Size& size); + void infer(); + void postprocess(std::vector& objs, + float score_thres = 0.25f, + float iou_thres = 0.65f, + int topk = 100, + int seg_channels = 32); + + int num_bindings; + int num_inputs = 0; + int num_outputs = 0; + std::vector input_bindings; + std::vector output_bindings; + std::vector host_ptrs; + std::vector device_ptrs; + + PreParam pparam; + +private: + nvinfer1::ICudaEngine* engine = nullptr; + nvinfer1::IRuntime* runtime = nullptr; + nvinfer1::IExecutionContext* context = nullptr; + cudaStream_t stream = nullptr; + Logger gLogger{nvinfer1::ILogger::Severity::kERROR}; +}; + +YOLOv8_seg::YOLOv8_seg(const std::string& engine_file_path) +{ + std::ifstream file(engine_file_path, std::ios::binary); + assert(file.good()); + file.seekg(0, std::ios::end); + auto size = file.tellg(); + file.seekg(0, std::ios::beg); + char* trtModelStream = new char[size]; + assert(trtModelStream); + file.read(trtModelStream, size); + file.close(); + initLibNvInferPlugins(&this->gLogger, ""); + this->runtime = nvinfer1::createInferRuntime(this->gLogger); + assert(this->runtime != nullptr); + + 
this->engine = this->runtime->deserializeCudaEngine(trtModelStream, size); + assert(this->engine != nullptr); + delete[] trtModelStream; + this->context = this->engine->createExecutionContext(); + + assert(this->context != nullptr); + cudaStreamCreate(&this->stream); + this->num_bindings = this->engine->trt_name_engine_get_nb_binding(); + + for (int i = 0; i < this->num_bindings; ++i) { + Binding binding; + nvinfer1::Dims dims; +#if NV_TENSORRT_MAJOR > 8 || (NV_TENSORRT_MAJOR >= 8 && NV_TENSORRT_MINOR > 4) + nvinfer1::DataType dtype = this->engine->getTensorDataType(this->engine->getIOTensorName(i)); +#else + nvinfer1::DataType dtype = this->engine->getBindingDataType(i); +#endif + std::string name = this->engine->trt_name_engine_get_binding_name(i); + binding.name = name; + binding.dsize = type_to_size(dtype); + +#if NV_TENSORRT_MAJOR > 8 || (NV_TENSORRT_MAJOR >= 8 && NV_TENSORRT_MINOR > 4) + if (engine->getTensorIOMode(engine->getIOTensorName(i)) == nvinfer1::TensorIOMode::kINPUT) +#else + if (engine->bindingIsInput(i)) +#endif + { + this->num_inputs += 1; +#if NV_TENSORRT_MAJOR >= 10 + dims = this->engine->getProfileShape(name.c_str(), 0, nvinfer1::OptProfileSelector::kMAX); +#else + dims = this->engine->getProfileDimensions(i, 0, nvinfer1::OptProfileSelector::kMAX); +#endif + binding.size = get_size_by_dims(dims); + binding.dims = dims; + this->input_bindings.push_back(binding); + // set max opt shape +#if NV_TENSORRT_MAJOR >= 10 + this->context->setInputShape(name.c_str(), dims); +#else + this->context->setBindingDimensions(i, dims); +#endif + } + else { +#if NV_TENSORRT_MAJOR >= 10 + dims = this->engine->getTensorShape(this->engine->getIOTensorName(i)); +#else + dims = this->context->getBindingDimensions(i); +#endif + binding.size = get_size_by_dims(dims); + binding.dims = dims; + this->output_bindings.push_back(binding); + this->num_outputs += 1; + } + } +} + +YOLOv8_seg::~YOLOv8_seg() +{ +#if NV_TENSORRT_MAJOR >= 10 + delete this->context; + delete 
this->engine; + delete this->runtime; +#else + this->context->destroy(); + this->engine->destroy(); + this->runtime->destroy(); +#endif + cudaStreamDestroy(this->stream); + for (auto& ptr : this->device_ptrs) { + CHECK(cudaFree(ptr)); + } + + for (auto& ptr : this->host_ptrs) { + CHECK(cudaFreeHost(ptr)); + } +} + +void YOLOv8_seg::make_pipe(bool warmup) +{ + + for (auto& bindings : this->input_bindings) { + void* d_ptr; + CHECK(cudaMallocAsync(&d_ptr, bindings.size * bindings.dsize, this->stream)); + this->device_ptrs.push_back(d_ptr); + } + + for (auto& bindings : this->output_bindings) { + void * d_ptr, *h_ptr; + size_t size = bindings.size * bindings.dsize; + CHECK(cudaMallocAsync(&d_ptr, size, this->stream)); + CHECK(cudaHostAlloc(&h_ptr, size, 0)); + this->device_ptrs.push_back(d_ptr); + this->host_ptrs.push_back(h_ptr); + } + + if (warmup) { + for (int i = 0; i < 10; i++) { + for (auto& bindings : this->input_bindings) { + size_t size = bindings.size * bindings.dsize; + void* h_ptr = malloc(size); + memset(h_ptr, 0, size); + CHECK(cudaMemcpyAsync(this->device_ptrs[0], h_ptr, size, cudaMemcpyHostToDevice, this->stream)); + free(h_ptr); + } + this->infer(); + } + printf("model warmup 10 times\n"); + } +} + +void YOLOv8_seg::letterbox(const cv::Mat& image, cv::Mat& out, const cv::Size& size) +{ + const float inp_h = size.height; + const float inp_w = size.width; + float height = image.rows; + float width = image.cols; + + float r = std::min(inp_h / height, inp_w / width); + int padw = std::round(width * r); + int padh = std::round(height * r); + + cv::Mat tmp; + if ((int)width != padw || (int)height != padh) { + cv::resize(image, tmp, cv::Size(padw, padh)); + } + else { + tmp = image.clone(); + } + + float dw = inp_w - padw; + float dh = inp_h - padh; + + dw /= 2.0f; + dh /= 2.0f; + int top = int(std::round(dh - 0.1f)); + int bottom = int(std::round(dh + 0.1f)); + int left = int(std::round(dw - 0.1f)); + int right = int(std::round(dw + 0.1f)); + + 
cv::copyMakeBorder(tmp, tmp, top, bottom, left, right, cv::BORDER_CONSTANT, {114, 114, 114}); + // cv::imshow("PreProc", tmp); + + cv::dnn::blobFromImage(tmp, out, 1 / 255.f, cv::Size(), cv::Scalar(0, 0, 0), true, false, CV_32F); + this->pparam.ratio = 1 / r; + this->pparam.dw = dw; + this->pparam.dh = dh; + this->pparam.height = height; + this->pparam.width = width; +} + +void YOLOv8_seg::copy_from_Mat(const cv::Mat& image) +{ + cv::Mat nchw; + auto& in_binding = this->input_bindings[0]; + auto width = in_binding.dims.d[3]; + auto height = in_binding.dims.d[2]; + cv::Size size{width, height}; + this->letterbox(image, nchw, size); + +#if NV_TENSORRT_MAJOR >= 10 + this->context->setInputShape(in_binding.name.c_str(), nvinfer1::Dims{4, {1, 3, height, width}}); +#else + this->context->setBindingDimensions(0, nvinfer1::Dims{4, {1, 3, height, width}}); +#endif + + CHECK(cudaMemcpyAsync( + this->device_ptrs[0], nchw.ptr(), nchw.total() * nchw.elemSize(), cudaMemcpyHostToDevice, this->stream)); +} + +void YOLOv8_seg::copy_from_Mat(const cv::Mat& image, const cv::Size& size) +{ + cv::Mat nchw; + this->letterbox(image, nchw, size); +#if NV_TENSORRT_MAJOR >= 10 + auto& in_binding = this->input_bindings[0]; + auto width = in_binding.dims.d[3]; + auto height = in_binding.dims.d[2]; + this->context->setInputShape(in_binding.name.c_str(), nvinfer1::Dims{4, {1, 3, height, width}}); +#else + this->context->setBindingDimensions(0, nvinfer1::Dims{4, {1, 3, size.height, size.width}}); +#endif + CHECK(cudaMemcpyAsync( + this->device_ptrs[0], nchw.ptr(), nchw.total() * nchw.elemSize(), cudaMemcpyHostToDevice, this->stream)); +} + +void YOLOv8_seg::infer() +{ +#if (NV_TENSORRT_MAJOR < 8) || (NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR < 5) + this->context->enqueueV2(this->device_ptrs.data(), this->stream, nullptr); +#else + for (unsigned int i = 0; i < this->num_inputs; ++i) { + this->context->setTensorAddress(this->input_bindings[i].name.c_str(), this->device_ptrs[i]); + } + for 
(unsigned int i = 0; i < this->num_outputs; ++i) { + this->context->setTensorAddress(this->output_bindings[i].name.c_str(), this->device_ptrs[i + this->num_inputs]); + } + this->context->enqueueV3(this->stream); +#endif + for (int i = 0; i < this->num_outputs; ++i) { + size_t osize = this->output_bindings[i].size * this->output_bindings[i].dsize; + CHECK(cudaMemcpyAsync( + this->host_ptrs[i], this->device_ptrs[i + this->num_inputs], osize, cudaMemcpyDeviceToHost, this->stream)); + } + cudaStreamSynchronize(this->stream); +} + +void YOLOv8_seg::postprocess( + std::vector& objs, float score_thres, float iou_thres, int topk, int seg_channels) +{ + objs.clear(); + auto input_h = this->input_bindings[0].dims.d[2]; + auto input_w = this->input_bindings[0].dims.d[3]; + int seg_h = input_h / 4; + int seg_w = input_w / 4; + int num_channels, num_anchors, num_classes; + bool flag = false; + int bid; + int bcnt = -1; + for (auto& o : this->output_bindings) { + bcnt += 1; + if (o.dims.nbDims == 3) { + num_channels = o.dims.d[1]; + num_anchors = o.dims.d[2]; + flag = true; + bid = bcnt; + } + } + assert(flag); + num_classes = num_channels - seg_channels - 4; + + auto& dw = this->pparam.dw; + auto& dh = this->pparam.dh; + auto& width = this->pparam.width; + auto& height = this->pparam.height; + auto& ratio = this->pparam.ratio; + + cv::Mat output = cv::Mat(num_channels, num_anchors, CV_32F, static_cast(this->host_ptrs[bid])); + output = output.t(); + + cv::Mat protos = cv::Mat(seg_channels, seg_h * seg_w, CV_32F, static_cast(this->host_ptrs[1 - bid])); + + std::vector labels; + std::vector scores; + std::vector bboxes; + std::vector mask_confs; + std::vector indices; + + for (int i = 0; i < num_anchors; i++) { + auto row_ptr = output.row(i).ptr(); + auto bboxes_ptr = row_ptr; + auto scores_ptr = row_ptr + 4; + auto mask_confs_ptr = row_ptr + 4 + num_classes; + auto max_s_ptr = std::max_element(scores_ptr, scores_ptr + num_classes); + float score = *max_s_ptr; + if (score > 
score_thres) { + float x = *bboxes_ptr++ - dw; + float y = *bboxes_ptr++ - dh; + float w = *bboxes_ptr++; + float h = *bboxes_ptr; + + float x0 = clamp((x - 0.5f * w) * ratio, 0.f, width); + float y0 = clamp((y - 0.5f * h) * ratio, 0.f, height); + float x1 = clamp((x + 0.5f * w) * ratio, 0.f, width); + float y1 = clamp((y + 0.5f * h) * ratio, 0.f, height); + + int label = max_s_ptr - scores_ptr; + cv::Rect_ bbox; + bbox.x = x0; + bbox.y = y0; + bbox.width = x1 - x0; + bbox.height = y1 - y0; + + cv::Mat mask_conf = cv::Mat(1, seg_channels, CV_32F, mask_confs_ptr); + + bboxes.push_back(bbox); + labels.push_back(label); + scores.push_back(score); + mask_confs.push_back(mask_conf); + } + } + +#if defined(BATCHED_NMS) + cv::dnn::NMSBoxesBatched(bboxes, scores, labels, score_thres, iou_thres, indices); +#else + cv::dnn::NMSBoxes(bboxes, scores, score_thres, iou_thres, indices); +#endif + + cv::Mat masks; + int cnt = 0; + for (auto& i : indices) { + if (cnt >= topk) { + break; + } + cv::Rect tmp = bboxes[i]; + Object obj; + obj.label = labels[i]; + obj.rect = tmp; + obj.prob = scores[i]; + masks.push_back(mask_confs[i]); + objs.push_back(obj); + cnt += 1; + } + + if (masks.empty()) { + // masks is empty + } + else { + cv::Mat matmulRes = (masks * protos).t(); + cv::Mat maskMat = matmulRes.reshape(indices.size(), {seg_h, seg_w}); + + std::vector maskChannels; + cv::split(maskMat, maskChannels); + int scale_dw = dw / input_w * seg_w; + int scale_dh = dh / input_h * seg_h; + + cv::Rect roi(scale_dw, scale_dh, seg_w - 2 * scale_dw, seg_h - 2 * scale_dh); + + for (int i = 0; i < indices.size(); i++) { + cv::Mat dest, mask; + cv::exp(-maskChannels[i], dest); + dest = 1.0 / (1.0 + dest); + dest = dest(roi); + cv::resize(dest, mask, cv::Size((int)width, (int)height), cv::INTER_LINEAR); + objs[i].boxMask = mask(objs[i].rect) > 0.5f; + } + } +} + +#endif // SEGMENT_NORMAL_YOLOV8_SEG_HPP diff --git a/object detection/custom 
detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg_common.hpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg_common.hpp new file mode 100644 index 00000000..9cee67a6 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg_common.hpp @@ -0,0 +1,143 @@ +// +// Created by ubuntu on 2/8/23. +// +// https://github.com/triple-Mu/YOLOv8-TensorRT/blob/main/csrc/segment/normal/include/common.hpp + +#ifndef SEGMENT_NORMAL_COMMON_HPP +#define SEGMENT_NORMAL_COMMON_HPP +#include "NvInfer.h" +#include "opencv2/opencv.hpp" +#include +#include + +#define CHECK(call) \ + do { \ + const cudaError_t error_code = call; \ + if (error_code != cudaSuccess) { \ + printf("CUDA Error:\n"); \ + printf(" File: %s\n", __FILE__); \ + printf(" Line: %d\n", __LINE__); \ + printf(" Error code: %d\n", error_code); \ + printf(" Error text: %s\n", cudaGetErrorString(error_code)); \ + exit(1); \ + } \ + } while (0) + +class Logger: public nvinfer1::ILogger { +public: + nvinfer1::ILogger::Severity reportableSeverity; + + explicit Logger(nvinfer1::ILogger::Severity severity = nvinfer1::ILogger::Severity::kINFO): + reportableSeverity(severity) + { + } + + void log(nvinfer1::ILogger::Severity severity, const char* msg) noexcept override + { + if (severity > reportableSeverity) { + return; + } + switch (severity) { + case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: + std::cerr << "INTERNAL_ERROR: "; + break; + case nvinfer1::ILogger::Severity::kERROR: + std::cerr << "ERROR: "; + break; + case nvinfer1::ILogger::Severity::kWARNING: + std::cerr << "WARNING: "; + break; + case nvinfer1::ILogger::Severity::kINFO: + std::cerr << "INFO: "; + break; + default: + std::cerr << "VERBOSE: "; + break; + } + std::cerr << msg << std::endl; + } +}; + +inline int get_size_by_dims(const nvinfer1::Dims& dims) +{ + int size = 1; + for (int i = 0; i < dims.nbDims; i++) { + size *= dims.d[i]; + } + return size; +} + +inline int 
type_to_size(const nvinfer1::DataType& dataType) +{ + switch (dataType) { + case nvinfer1::DataType::kFLOAT: + return 4; + case nvinfer1::DataType::kHALF: + return 2; + case nvinfer1::DataType::kINT32: + return 4; + case nvinfer1::DataType::kINT8: + return 1; + case nvinfer1::DataType::kBOOL: + return 1; + default: + return 4; + } +} + +inline static float clamp(float val, float min, float max) +{ + return val > min ? (val < max ? val : max) : min; +} + +inline bool IsPathExist(const std::string& path) +{ + if (access(path.c_str(), 0) == F_OK) { + return true; + } + return false; +} + +inline bool IsFile(const std::string& path) +{ + if (!IsPathExist(path)) { + printf("%s:%d %s not exist\n", __FILE__, __LINE__, path.c_str()); + return false; + } + struct stat buffer; + return (stat(path.c_str(), &buffer) == 0 && S_ISREG(buffer.st_mode)); +} + +inline bool IsFolder(const std::string& path) +{ + if (!IsPathExist(path)) { + return false; + } + struct stat buffer; + return (stat(path.c_str(), &buffer) == 0 && S_ISDIR(buffer.st_mode)); +} + +namespace seg { +struct Binding { + size_t size = 1; + size_t dsize = 1; + nvinfer1::Dims dims; + std::string name; +}; + +struct Object { + cv::Rect_ rect; + int label = 0; + float prob = 0.0; + cv::Mat boxMask; +}; + +struct PreParam { + float ratio = 1.0f; + float dw = 0.0f; + float dh = 0.0f; + float height = 0; + float width = 0; +}; +} // namespace seg +#endif // SEGMENT_NORMAL_COMMON_HPP diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg_optim.hpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg_optim.hpp new file mode 100644 index 00000000..bb856cb1 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/include/yolov8-seg_optim.hpp @@ -0,0 +1,17 @@ +#ifndef __YOLOV8SEG_OPTIM_HPP__ +#define __YOLOV8SEG_OPTIM_HPP__ + +#include + +#include + +struct OptimDim { + nvinfer1::Dims4 size; + std::string tensor_name; + + int 
setFromString(std::string const& arg); +}; + +int build_engine(std::string const& onnx_path, std::string const& engine_path, OptimDim const& dyn_dim_profile); + +#endif // __YOLOV8SEG_OPTIM_HPP__ \ No newline at end of file diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/GLViewer.cpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/GLViewer.cpp similarity index 87% rename from object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/GLViewer.cpp rename to object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/GLViewer.cpp index c7eab2e6..19ddca19 100644 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/src/GLViewer.cpp +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/GLViewer.cpp @@ -1,6 +1,8 @@ -#include "GLViewer.hpp" + #include +#include "GLViewer.hpp" + #if defined(_DEBUG) && defined(_WIN32) //#error "This sample should not be built in Debug mode, use RelWithDebInfo if you want to do step by step." 
@@ -62,7 +64,7 @@ bool GLViewer::isAvailable() { return available; } -Simple3DObject createFrustum(sl::CameraParameters param) { +Simple3DObject createFrustum(sl::CameraParameters const& param) { // Create 3D axis Simple3DObject it(sl::Translation(0, 0, 0), true); @@ -105,7 +107,7 @@ void CloseFunc(void) { currentInstance_->exit(); } -void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isTrackingON) { +void GLViewer::init(int argc, char **argv, sl::CameraParameters const& param, bool const isTrackingON) { glutInit(&argc, argv); int wnd_w = glutGet(GLUT_SCREEN_WIDTH); int wnd_h = glutGet(GLUT_SCREEN_HEIGHT); @@ -133,10 +135,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m, bool isT isTrackingON_ = isTrackingON; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -191,7 +193,7 @@ void GLViewer::render() { } } -void GLViewer::updateData(sl::Mat &matXYZRGBA, std::vector &objs, sl::Transform& pose) { +void GLViewer::updateData(sl::Mat const& matXYZRGBA, std::vector const& objs, sl::Transform const& pose) { mtx.lock(); pointCloud_.pushNewPC(matXYZRGBA); BBox_edges.clear(); @@ -215,35 +217,6 @@ void GLViewer::updateData(sl::Mat &matXYZRGBA, std::vector &objs else createIDRendering(objs[i].position, clr_id, objs[i].id); - if (0) { // display centroid as a cross - sl::float3 centroid = objs[i].position; - const float size_cendtroid = 50; // mm - sl::float3 centroid1 = centroid; - centroid1.y += size_cendtroid; - sl::float3 centroid2 = objs[i].position; - centroid2.y -= size_cendtroid; - sl::float3 centroid3 = objs[i].position; - centroid3.x += 
size_cendtroid; - sl::float3 centroid4 = objs[i].position; - centroid4.x -= size_cendtroid; - - BBox_edges.addLine(centroid1, centroid2, sl::float4(1.f, 0.5f, 0.5f, 1.f)); - BBox_edges.addLine(centroid3, centroid4, sl::float4(1.f, 0.5f, 0.5f, 1.f)); - } - - //Display sekeleton if available - #if 0 - auto clr_bones = generateColorID_f(objs[i].id); - auto keypoints = objs[i].keypoint; - if (keypoints.size() > 0) { - for (auto &limb : sl::BODY_BONES) { - sl::float3 kp_1 = keypoints[(int) limb.first]; - sl::float3 kp_2 = keypoints[(int) limb.second]; - if (std::isfinite(kp_1.x) && std::isfinite(kp_2.x)) - skeletons.addLine(kp_1, kp_2, clr_bones); - } - } - #endif createBboxRendering(bb_, clr_id); } } @@ -251,7 +224,7 @@ void GLViewer::updateData(sl::Mat &matXYZRGBA, std::vector &objs mtx.unlock(); } -void GLViewer::createBboxRendering(std::vector &bbox, sl::float4 bbox_clr) { +void GLViewer::createBboxRendering(std::vector const& bbox, sl::float4& bbox_clr) { // First create top and bottom full edges BBox_edges.addFullEdges(bbox, bbox_clr); // Add faded vertical edges @@ -262,7 +235,7 @@ void GLViewer::createBboxRendering(std::vector &bbox, sl::float4 bbo BBox_faces.addTopFace(bbox, bbox_clr); } -void GLViewer::createIDRendering(sl::float3 & center, sl::float4 clr, unsigned int id) { +void GLViewer::createIDRendering(sl::float3 const& center, sl::float4 const& clr, unsigned int id) { ObjectClassName tmp; tmp.name = "ID: " + std::to_string(id); tmp.color = clr; @@ -289,6 +262,10 @@ void GLViewer::update() { camera_.setDirection(sl::Translation(0.0f, -1.0f, 0.0f), sl::Translation(0.0f, 1.0f, 0.0f)); } + if (keyStates_['p'] == KEY_STATE::UP || keyStates_['P'] == KEY_STATE::UP) { + play = !play; + } + // Rotate camera with mouse if (mouseButton_[MOUSE_BUTTON::LEFT]) { camera_.rotate(sl::Rotation((float) mouseMotion_[1] * MOUSE_R_SENSITIVITY, camera_.getRight())); @@ -379,9 +356,12 @@ void GLViewer::printText() { void GLViewer::clearInputs() { mouseMotion_[0] = 
mouseMotion_[1] = 0; mouseWheelPosition_ = 0; - for (unsigned int i = 0; i < 256; ++i) + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; if (keyStates_[i] != KEY_STATE::DOWN) keyStates_[i] = KEY_STATE::FREE; + } } void GLViewer::drawCallback() { @@ -428,7 +408,7 @@ void GLViewer::idle() { glutPostRedisplay(); } -Simple3DObject::Simple3DObject(sl::Translation position, bool isStatic) : isStatic_(isStatic) { +Simple3DObject::Simple3DObject(sl::Translation const& position, bool const isStatic) : isStatic_(isStatic) { vaoID_ = 0; drawingType_ = GL_TRIANGLES; position_ = position; @@ -442,7 +422,7 @@ Simple3DObject::~Simple3DObject() { } } -void Simple3DObject::addBBox(std::vector &pts, sl::float4 clr) { +void Simple3DObject::addBBox(std::vector const& pts, sl::float4& clr) { int start_id = vertices_.size() / 3; float transparency_top = 0.05f, transparency_bottom = 0.75f; @@ -460,37 +440,37 @@ void Simple3DObject::addBBox(std::vector &pts, sl::float4 clr) { } } -void Simple3DObject::addPt(sl::float3 pt) { +void Simple3DObject::addPt(sl::float3 const& pt) { vertices_.push_back(pt.x); vertices_.push_back(pt.y); vertices_.push_back(pt.z); } -void Simple3DObject::addClr(sl::float4 clr) { +void Simple3DObject::addClr(sl::float4 const& clr) { colors_.push_back(clr.b); colors_.push_back(clr.g); colors_.push_back(clr.r); colors_.push_back(clr.a); } -void Simple3DObject::addPoint(sl::float3 pt, sl::float4 clr) { +void Simple3DObject::addPoint(sl::float3 const& pt, sl::float4 const& clr) { addPt(pt); addClr(clr); indices_.push_back((int) indices_.size()); } -void Simple3DObject::addLine(sl::float3 p1, sl::float3 p2, sl::float4 clr) { +void Simple3DObject::addLine(sl::float3 const& p1, sl::float3 const& p2, sl::float4 const& clr) { addPoint(p1, clr); addPoint(p2, clr); } -void Simple3DObject::addTriangle(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float4 clr) { +void Simple3DObject::addTriangle(sl::float3 const& p1, sl::float3 
const& p2, sl::float3 const& p3, sl::float4 const& clr) { addPoint(p1, clr); addPoint(p2, clr); addPoint(p3, clr); } -void Simple3DObject::addFullEdges(std::vector &pts, sl::float4 clr) { +void Simple3DObject::addFullEdges(std::vector const& pts, sl::float4& clr) { clr.w = 0.4f; int start_id = vertices_.size() / 3; @@ -513,7 +493,7 @@ void Simple3DObject::addFullEdges(std::vector &pts, sl::float4 clr) } } -void Simple3DObject::addVerticalEdges(std::vector &pts, sl::float4 clr) { +void Simple3DObject::addVerticalEdges(std::vector const& pts, sl::float4& clr) { auto addSingleVerticalLine = [&](sl::float3 top_pt, sl::float3 bot_pt) { std::vector current_pts{ top_pt, @@ -543,13 +523,13 @@ void Simple3DObject::addVerticalEdges(std::vector &pts, sl::float4 c addSingleVerticalLine(pts[3], pts[7]); } -void Simple3DObject::addTopFace(std::vector &pts, sl::float4 clr) { +void Simple3DObject::addTopFace(std::vector const& pts, sl::float4& clr) { clr.a = 0.3f; for (auto it : pts) addPoint(it, clr); } -void Simple3DObject::addVerticalFaces(std::vector &pts, sl::float4 clr) { +void Simple3DObject::addVerticalFaces(std::vector const& pts, sl::float4& clr) { auto addQuad = [&](std::vector quad_pts, float alpha1, float alpha2) { // To use only with 4 points for (unsigned int i = 0; i < quad_pts.size(); ++i) { addPt(quad_pts[i]); @@ -682,7 +662,7 @@ void Simple3DObject::clear() { indices_.clear(); } -void Simple3DObject::setDrawingType(GLenum type) { +void Simple3DObject::setDrawingType(GLenum const type) { drawingType_ = type; } @@ -694,32 +674,32 @@ void Simple3DObject::draw() { } } -void Simple3DObject::translate(const sl::Translation& t) { +void Simple3DObject::translate(sl::Translation const& t) { position_ = position_ + t; } -void Simple3DObject::setPosition(const sl::Translation& p) { +void Simple3DObject::setPosition(sl::Translation const& p) { position_ = p; } -void Simple3DObject::setRT(const sl::Transform& mRT) { +void Simple3DObject::setRT(sl::Transform const& mRT) { 
position_ = mRT.getTranslation(); rotation_ = mRT.getOrientation(); } -void Simple3DObject::rotate(const sl::Orientation& rot) { +void Simple3DObject::rotate(sl::Orientation const& rot) { rotation_ = rot * rotation_; } -void Simple3DObject::rotate(const sl::Rotation& m) { +void Simple3DObject::rotate(sl::Rotation const& m) { this->rotate(sl::Orientation(m)); } -void Simple3DObject::setRotation(const sl::Orientation& rot) { +void Simple3DObject::setRotation(sl::Orientation const& rot) { rotation_ = rot; } -void Simple3DObject::setRotation(const sl::Rotation& m) { +void Simple3DObject::setRotation(sl::Rotation const& m) { this->setRotation(sl::Orientation(m)); } @@ -735,6 +715,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -770,19 +754,19 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { return programId_; } -bool Shader::compile(GLuint &shaderId, GLenum type, const GLchar* src) { +bool Shader::compile(GLuint &shaderId, GLenum const type, GLchar const* src) { shaderId = glCreateShader(type); if (shaderId == 0) { std::cout << "ERROR: shader type (" << type << ") does not exist" << std::endl; @@ -838,7 +822,7 @@ PointCloud::~PointCloud() { close(); } -void checkError(cudaError_t err) { +void checkError(cudaError_t const& err) { if (err != cudaSuccess) std::cerr << "Error: (" << err << "): " << 
cudaGetErrorString(err) << std::endl; } @@ -851,7 +835,7 @@ void PointCloud::close() { } } -void PointCloud::initialize(sl::Resolution res) { +void PointCloud::initialize(sl::Resolution const& res) { glGenBuffers(1, &bufferGLID_); glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); glBufferData(GL_ARRAY_BUFFER, res.area() * 4 * sizeof (float), 0, GL_STATIC_DRAW); @@ -859,7 +843,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); @@ -868,7 +852,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsResourceGetMappedPointer((void**) &xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_)); } -void PointCloud::pushNewPC(sl::Mat &matXYZRGBA) { +void PointCloud::pushNewPC(sl::Mat const& matXYZRGBA) { if (matGPU_.isInit()) { matGPU_.setFrom(matXYZRGBA, sl::COPY_TYPE::GPU_GPU); hasNewPCL_ = true; @@ -882,7 +866,7 @@ void PointCloud::update() { } } -void PointCloud::draw(const sl::Transform& vp) { +void PointCloud::draw(sl::Transform const& vp) { if (matGPU_.isInit()) { glUseProgram(shader.it.getProgramId()); glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, vp.m); @@ -901,7 +885,7 @@ const sl::Translation CameraGL::ORIGINAL_FORWARD = sl::Translation(0, 0, 1); const sl::Translation CameraGL::ORIGINAL_UP = sl::Translation(0, 1, 0); const sl::Translation CameraGL::ORIGINAL_RIGHT = sl::Translation(1, 0, 0); -CameraGL::CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical) { +CameraGL::CameraGL(sl::Translation const& position, sl::Translation const& direction, sl::Translation const& vertical) { this->position_ = position; setDirection(direction, 
vertical); @@ -922,7 +906,7 @@ void CameraGL::update() { updateVPMatrix(); } -void CameraGL::setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar) { +void CameraGL::setProjection(float const horizontalFOV, float const verticalFOV, float const znear, float const zfar) { horizontalFieldOfView_ = horizontalFOV; verticalFieldOfView_ = verticalFOV; znear_ = znear; @@ -952,7 +936,7 @@ float CameraGL::getVerticalFOV() const { return verticalFieldOfView_; } -void CameraGL::setOffsetFromPosition(const sl::Translation& o) { +void CameraGL::setOffsetFromPosition(sl::Translation const& o) { offset_ = o; } @@ -960,7 +944,7 @@ const sl::Translation& CameraGL::getOffsetFromPosition() const { return offset_; } -void CameraGL::setDirection(const sl::Translation& direction, const sl::Translation& vertical) { +void CameraGL::setDirection(sl::Translation const& direction, sl::Translation const& vertical) { sl::Translation dirNormalized = direction; dirNormalized.normalize(); this->rotation_ = sl::Orientation(ORIGINAL_FORWARD, dirNormalized * -1.f); @@ -970,29 +954,29 @@ void CameraGL::setDirection(const sl::Translation& direction, const sl::Translat rotate(sl::Rotation(M_PI, ORIGINAL_FORWARD)); } -void CameraGL::translate(const sl::Translation& t) { +void CameraGL::translate(sl::Translation const& t) { position_ = position_ + t; } -void CameraGL::setPosition(const sl::Translation& p) { +void CameraGL::setPosition(sl::Translation const& p) { position_ = p; } -void CameraGL::rotate(const sl::Orientation& rot) { +void CameraGL::rotate(sl::Orientation const& rot) { rotation_ = rot * rotation_; updateVectors(); } -void CameraGL::rotate(const sl::Rotation& m) { +void CameraGL::rotate(sl::Rotation const& m) { this->rotate(sl::Orientation(m)); } -void CameraGL::setRotation(const sl::Orientation& rot) { +void CameraGL::setRotation(sl::Orientation const& rot) { rotation_ = rot; updateVectors(); } -void CameraGL::setRotation(const sl::Rotation& m) { +void 
CameraGL::setRotation(sl::Rotation const& m) { this->setRotation(sl::Orientation(m)); } diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/main.cpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/main.cpp new file mode 100644 index 00000000..a90fc466 --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/main.cpp @@ -0,0 +1,243 @@ +#include +#include +#include + +#include +#include + +#include "GLViewer.hpp" +#include "utils.h" +#include "yolov8-seg.hpp" +#include "yolov8-seg_optim.hpp" + +using namespace nvinfer1; + +void print(std::string const& msg_prefix, sl::ERROR_CODE const err_code, std::string const& msg_suffix) { + std::cout << "[Sample] "; + if (err_code != sl::ERROR_CODE::SUCCESS) + std::cout << "[Error] "; + std::cout << msg_prefix << " "; + if (err_code != sl::ERROR_CODE::SUCCESS) { + std::cout << " | " << err_code << ": "; + std::cout << toVerbose(err_code); + } + if (!msg_suffix.empty()) + std::cout << " " << msg_suffix; + std::cout << std::endl; +} + +static void draw_objects(cv::Mat const& image, + cv::Mat &res, + sl::Objects const& objs, + std::vector> const& colors) +{ + res = image.clone(); + cv::Mat mask{image.clone()}; + for (sl::ObjectData const& obj : objs.object_list) { + size_t const idx_color{obj.id % colors.size()}; + cv::Scalar const color{cv::Scalar(colors[idx_color][0U], colors[idx_color][1U], colors[idx_color][2U])}; + + cv::Rect const rect{static_cast(obj.bounding_box_2d[0U].x), + static_cast(obj.bounding_box_2d[0U].y), + static_cast(obj.bounding_box_2d[1U].x - obj.bounding_box_2d[0U].x), + static_cast(obj.bounding_box_2d[2U].y - obj.bounding_box_2d[0U].y)}; + cv::rectangle(res, rect, color, 2); + + char text[256U]; + sprintf(text, "Class %d - %.1f%%", obj.raw_label, obj.confidence); + if (obj.mask.isInit() && obj.mask.getWidth() > 0U && obj.mask.getHeight() > 0U) { + const cv::Mat obj_mask = slMat2cvMat(obj.mask); + mask(rect).setTo(color, obj_mask); + } 
+ + int baseLine{0}; + cv::Size const label_size{cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine)}; + + int const x{rect.x}; + int const y{std::min(rect.y + 1, res.rows)}; + + cv::rectangle(res, cv::Rect(x, y, label_size.width, label_size.height + baseLine), {0, 0, 255}, -1); + cv::putText(res, text, cv::Point(x, y + label_size.height), cv::FONT_HERSHEY_SIMPLEX, 0.4, {255, 255, 255}, 1); + } + cv::addWeighted(res, 0.5, mask, 0.8, 1, res); +} + +static std::vector convertCvRect2SdkBbox(cv::Rect_ const& bbox_in) { + std::vector bbox_out; + bbox_out.push_back(sl::uint2(static_cast(bbox_in.x), + static_cast(bbox_in.y))); + bbox_out.push_back(sl::uint2(static_cast(bbox_in.x + bbox_in.width), + static_cast(bbox_in.y))); + bbox_out.push_back(sl::uint2(static_cast(bbox_in.x + bbox_in.width), + static_cast(bbox_in.y + bbox_in.height))); + bbox_out.push_back(sl::uint2(static_cast(bbox_in.x), + static_cast(bbox_in.y + bbox_in.height))); + return bbox_out; +} + +int main(int argc, char** argv) { + if (argc == 1) { + std::cout << "Usage:" << std::endl + << " 1. ./yolov8_seg_onnx_zed -s yolov8s-seg.onnx yolov8s-seg.engine" << std::endl + << " 2. ./yolov8_seg_onnx_zed -s yolov8s-seg.onnx yolov8s-seg.engine images:1x3x640x640" << std::endl + << " 3. ./yolov8_seg_onnx_zed yolov8s-seg.engine " << std::endl + << " 4. 
./yolov8_seg_onnx_zed yolov8s-seg.engine" << std::endl; + return 0; + } + + // Check Optim engine first + if (std::string(argv[1U]) == "-s" && (argc >= 4)) { + std::string const onnx_path{std::string(argv[2U])}; + std::string const engine_path{std::string(argv[3U])}; + + OptimDim dyn_dim_profile; + if (argc == 5) { + std::string const optim_profile{std::string(argv[4U])}; + if (dyn_dim_profile.setFromString(optim_profile) != 0) { + std::cerr << "Invalid dynamic dimension argument '" << optim_profile << "'," + << " expecting something like 'images:1x3x512x512'" << std::endl; + return -1; + } + } + + build_engine(onnx_path, engine_path, dyn_dim_profile); + return 0; + } + + /// Opening the ZED camera before the model deserialization to avoid cuda context issue + sl::InitParameters init_parameters; + init_parameters.sdk_verbose = true; + init_parameters.depth_mode = sl::DEPTH_MODE::NEURAL; + init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; // OpenGL's coordinate system is right_handed + + if (argc > 2) { + std::string const zed_opt{argv[2U]}; + if (zed_opt.find(".svo") != std::string::npos) + init_parameters.input.setFromSVOFile(zed_opt.c_str()); + } + + // Open the camera + sl::Camera zed; + sl::ERROR_CODE state{zed.open(init_parameters)}; + if (state != sl::ERROR_CODE::SUCCESS) { + std::cerr << "[ERROR] Opening camera: " << state << std::endl; + return EXIT_FAILURE; + } + + // Enable Positional Tracking + zed.enablePositionalTracking(); + + // Custom OD + sl::ObjectDetectionParameters detection_parameters; + detection_parameters.enable_tracking = true; + detection_parameters.enable_segmentation = true; + detection_parameters.detection_model = sl::OBJECT_DETECTION_MODEL::CUSTOM_BOX_OBJECTS; + state = zed.enableObjectDetection(detection_parameters); + if (state != sl::ERROR_CODE::SUCCESS) { + std::cerr << "[ERROR] Enabling Object Detection: " << state << std::endl; + zed.close(); + return EXIT_FAILURE; + } + + // Get camera configuration + 
sl::CameraConfiguration const camera_config{zed.getCameraInformation().camera_configuration}; + sl::Resolution const pc_resolution{std::min(camera_config.resolution.width, 720UL), + std::min(camera_config.resolution.height, 404UL)}; + sl::CameraConfiguration const camera_info{zed.getCameraInformation(pc_resolution).camera_configuration}; + + // Create OpenGL Viewer + GLViewer viewer; + viewer.init(argc, argv, camera_info.calibration_parameters.left_cam, true); + + // Creating the inference engine class + std::string const engine_name{argv[1U]}; + YOLOv8_seg detector{engine_name}; + detector.make_pipe(false); + + // Prepare detector input/output + cv::Mat left_cv, pred_raw; + int const topk{100}; + int const seg_channels{32}; + float const score_thres{0.5F}; + float const iou_thres{0.65F}; + std::vector objs; + + // Prepare SDK input/output + sl::Mat left_sl, point_cloud; + cv::Mat pred_sdk; + sl::Objects objects; + sl::Pose cam_w_pose; + cam_w_pose.pose_data.setIdentity(); + int key{0}; + + sl::CustomObjectDetectionRuntimeParameters cod_rt_param; + + while (key != 'q' && key != 27) { + if (zed.grab() == sl::ERROR_CODE::SUCCESS) { + // Get image for inference + zed.retrieveImage(left_sl, sl::VIEW::LEFT); + left_cv = slMat2cvMat(left_sl); + cv::cvtColor(left_cv, left_cv, cv::COLOR_BGRA2BGR); + + // Running inference + detector.copy_from_Mat(left_cv); + detector.infer(); + + // Post process output + objs.clear(); + detector.postprocess(objs, score_thres, iou_thres, topk, seg_channels); + + // Preparing for ZED SDK ingesting + std::vector objects_in; + objects_in.reserve(objs.size()); + for (seg::Object& obj : objs) { + objects_in.emplace_back(); + sl::CustomMaskObjectData &tmp{objects_in.back()}; + tmp.unique_object_id = sl::generate_unique_id(); + tmp.probability = obj.prob; + tmp.label = obj.label; + tmp.bounding_box_2d = convertCvRect2SdkBbox(obj.rect); + tmp.is_grounded = (obj.label == 0); // Only the first class (person) is grounded, that is moving on the floor 
plane + // others are tracked in full 3D space + cvMat2slMat(obj.boxMask).copyTo(tmp.box_mask, sl::COPY_TYPE::CPU_CPU); + } + + // Send the custom detected boxes with masks to the ZED + zed.ingestCustomMaskObjects(objects_in); + + // Retrieve the tracked objects, with 2D and 3D attributes + zed.retrieveObjects(objects, cod_rt_param); + + // Draw raw prediction + draw_objects(left_cv, pred_sdk, objects, CLASS_COLORS); + cv::imshow("ZED SDK Objects", pred_sdk); + + // GL Viewer + zed.retrieveMeasure(point_cloud, sl::MEASURE::XYZRGBA, sl::MEM::GPU, pc_resolution); + zed.getPosition(cam_w_pose, sl::REFERENCE_FRAME::WORLD); + viewer.updateData(point_cloud, objects.object_list, cam_w_pose.pose_data); + + int const cv_key{cv::waitKey(10)}; + int const gl_key{viewer.getKey()}; + key = (gl_key == -1) ? cv_key : gl_key; + if (key == 'p' || key == 32) { + viewer.setPlaying(!viewer.isPlaying()); + } + while ((key == -1) && !viewer.isPlaying() && viewer.isAvailable()) { + int const cv_key{cv::waitKey(10)}; + int const gl_key{viewer.getKey()}; + key = (gl_key == -1) ? 
cv_key : gl_key; + if (key == 'p' || key == 32) { + viewer.setPlaying(!viewer.isPlaying()); + } + } + } + if (!viewer.isAvailable()) + break; + } + + cv::destroyAllWindows(); + viewer.exit(); + + return 0; +} diff --git a/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/yolov8-seg_optim.cpp b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/yolov8-seg_optim.cpp new file mode 100644 index 00000000..70e31bfe --- /dev/null +++ b/object detection/custom detector/cpp/tensorrt_yolov8_seg_onnx/src/yolov8-seg_optim.cpp @@ -0,0 +1,161 @@ +#include "yolov8-seg_optim.hpp" + +#include +#include +#include + +#include +#include + +#include "utils.h" +#include "yolov8-seg_common.hpp" + +static Logger gLogger{nvinfer1::ILogger::Severity::kINFO}; + +int OptimDim::setFromString(std::string const& arg) { + // "images:1x3x512x512" + std::vector const v_{split_str(arg, ":")}; + if (v_.size() != 2U) + return -1; + + std::string const dims_str{v_.back()}; + std::vector const v{split_str(dims_str, "x")}; + + size.nbDims = 4; + // assuming batch is 1 and channel is 3 + size.d[0U] = 1; + size.d[1U] = 3; + + if (v.size() == 2U) { + size.d[2U] = stoi(v[0U]); + size.d[3U] = stoi(v[1U]); + } + else if (v.size() == 3U) { + size.d[2U] = stoi(v[1U]); + size.d[3U] = stoi(v[2U]); + } + else if (v.size() == 4U) { + size.d[2U] = stoi(v[2U]); + size.d[3U] = stoi(v[3U]); + } + else + return -1; + + if (size.d[2U] != size.d[3U]) + std::cerr << "Warning only squared input are currently supported" << std::endl; + + tensor_name = v_.front(); + return 0; +} + +int build_engine(std::string const& onnx_path, std::string const& engine_path, OptimDim const& dyn_dim_profile) { + std::vector onnx_file_content; + if (readFile(onnx_path, onnx_file_content) != 0) + return -1; + + if (onnx_file_content.empty()) + return -1; + + nvinfer1::ICudaEngine * engine; + // Create engine (onnx) + std::cout << "Creating engine from onnx model" << std::endl; + + auto builder = 
nvinfer1::createInferBuilder(gLogger); + if (!builder) { + std::cerr << "createInferBuilder failed" << std::endl; + return 1; + } + + auto explicitBatch = 1U << static_cast (nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); + auto network = builder->createNetworkV2(explicitBatch); + + if (!network) { + std::cerr << "createNetwork failed" << std::endl; + return -1; + } + + auto config = builder->createBuilderConfig(); + if (!config) { + std::cerr << "createBuilderConfig failed" << std::endl; + return -1; + } + + ////////// Dynamic dimensions handling : support only 1 size at a time + if (!dyn_dim_profile.tensor_name.empty()) { + + nvinfer1::IOptimizationProfile* profile = builder->createOptimizationProfile(); + + profile->setDimensions(dyn_dim_profile.tensor_name.c_str(), nvinfer1::OptProfileSelector::kMIN, dyn_dim_profile.size); + profile->setDimensions(dyn_dim_profile.tensor_name.c_str(), nvinfer1::OptProfileSelector::kOPT, dyn_dim_profile.size); + profile->setDimensions(dyn_dim_profile.tensor_name.c_str(), nvinfer1::OptProfileSelector::kMAX, dyn_dim_profile.size); + + config->addOptimizationProfile(profile); +#if NV_TENSORRT_MAJOR < 10 + builder->setMaxBatchSize(1); +#endif + } + + auto parser = nvonnxparser::createParser(*network, gLogger); + if (!parser) { + std::cerr << "nvonnxparser::createParser failed" << std::endl; + return -1; + } + + bool parsed = false; + unsigned char *onnx_model_buffer = onnx_file_content.data(); + size_t onnx_model_buffer_size = onnx_file_content.size() * sizeof (char); + parsed = parser->parse(onnx_model_buffer, onnx_model_buffer_size); + + if (!parsed) { + std::cerr << "onnx file parsing failed" << std::endl; + return -1; + } + + if (builder->platformHasFastFp16()) { + std::cout << "FP16 enabled!" 
<< std::endl; + config->setFlag(nvinfer1::BuilderFlag::kFP16); + } + + //////////////// Actual engine building + +#if NV_TENSORRT_MAJOR >= 10 + engine = nullptr; + if (builder->isNetworkSupported(*network, *config)) { + std::unique_ptr serializedEngine{builder->buildSerializedNetwork(*network, *config)}; + if (serializedEngine != nullptr && serializedEngine.get() && serializedEngine->size() > 0) { + nvinfer1::IRuntime* infer = nvinfer1::createInferRuntime(gLogger); + engine = infer->deserializeCudaEngine(serializedEngine->data(), serializedEngine->size()); + } + } +#else + engine = builder->buildEngineWithConfig(*network, *config); +#endif + + onnx_file_content.clear(); + + // write plan file if it is specified + if (engine == nullptr) return -1; + nvinfer1::IHostMemory* ptr = engine->serialize(); + assert(ptr); + if (ptr == nullptr) return -1; + + FILE *fp = fopen(engine_path.c_str(), "wb"); + fwrite(reinterpret_cast (ptr->data()), ptr->size() * sizeof (char), 1, fp); + fclose(fp); + +#if NV_TENSORRT_MAJOR >= 10 + delete parser; + delete network; + delete config; + delete builder; + delete engine; +#else + parser->destroy(); + network->destroy(); + config->destroy(); + builder->destroy(); + engine->destroy(); +#endif + + return 0; +} diff --git a/object detection/custom detector/python/pytorch_yolov8/detector.py b/object detection/custom detector/python/pytorch_yolov8/detector.py index b2bf540c..29a1327e 100644 --- a/object detection/custom detector/python/pytorch_yolov8/detector.py +++ b/object detection/custom detector/python/pytorch_yolov8/detector.py @@ -72,7 +72,7 @@ def torch_thread(weights, img_size, conf_thres=0.2, iou_thres=0.45): if run_signal: lock.acquire() - img = cv2.cvtColor(image_net, cv2.COLOR_BGRA2RGB) + img = cv2.cvtColor(image_net, cv2.COLOR_RGBA2RGB) # https://docs.ultralytics.com/modes/predict/#video-suffixes det = model.predict(img, save=False, imgsz=img_size, conf=conf_thres, iou=iou_thres)[0].cpu().numpy().boxes diff --git a/object 
detection/custom detector/python/pytorch_yolov8_seg/README.md b/object detection/custom detector/python/pytorch_yolov8_seg/README.md new file mode 100644 index 00000000..4d72fdbd --- /dev/null +++ b/object detection/custom detector/python/pytorch_yolov8_seg/README.md @@ -0,0 +1,40 @@ +# ZED SDK - Instance Detection + +This sample shows how to do instance segmentation using the official Pytorch implementation of YOLOv8-seg from a ZED camera and ingest them into the ZED SDK to extract 3D informations and tracking for each objects. + +## Getting Started + + - Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/) and [pyZED Package](https://www.stereolabs.com/docs/app-development/python/install/) + - Check the [Documentation](https://www.stereolabs.com/docs/object-detection/custom-od/) + +## Setting up + + - Install ultralytics using pip + +```sh +pip install ultralytics +``` + +## Run the program + +``` +# With a SVO file +python detector.py --weights yolov8s-seg.pt --img_size 640 --conf_thres 0.4 --svo path/to/file.svo + +# With a plugged ZED Camera +python detector.py --weights yolov8s-seg.pt --img_size 640 --conf_thres 0.4 +``` + +### Features + + - The camera point cloud is displayed in a 3D OpenGL view + - 3D bounding boxes around detected objects are drawn + - Objects classes and confidences can be changed + +## Training your own model + +This sample can use any model trained with YOLOv8-seg, including custom trained one. 
For a getting started on how to trained a model on a custom dataset with YOLOv8-seg, see here https://docs.ultralytics.com/tutorials/train-custom-datasets/ + +## Support + +If you need assistance go to our Community site at https://community.stereolabs.com/ diff --git a/object detection/custom detector/python/pytorch_yolov8_seg/cv_viewer/tracking_viewer.py b/object detection/custom detector/python/pytorch_yolov8_seg/cv_viewer/tracking_viewer.py new file mode 100644 index 00000000..af71bd90 --- /dev/null +++ b/object detection/custom detector/python/pytorch_yolov8_seg/cv_viewer/tracking_viewer.py @@ -0,0 +1,247 @@ +import cv2 +import numpy as np + +from cv_viewer.utils import * +import pyzed.sl as sl +import math +from collections import deque + + +# ---------------------------------------------------------------------- +# 2D LEFT VIEW +# ---------------------------------------------------------------------- + + +def cvt(pt, scale): + """ + Function that scales point coordinates + """ + out = [pt[0] * scale[0], pt[1] * scale[1]] + return out + + +def get_image_position(bounding_box_image, img_scale): + out_position = np.zeros(2) + out_position[0] = (bounding_box_image[0][0] + (bounding_box_image[2][0] - bounding_box_image[0][0]) * 0.5) * \ + img_scale[0] + out_position[1] = (bounding_box_image[0][1] + (bounding_box_image[2][1] - bounding_box_image[0][1]) * 0.5) * \ + img_scale[1] + return out_position + + +def render_2D(left_display, img_scale, objects, is_tracking_on): + overlay = left_display.copy() + + line_thickness = 2 + for obj in objects.object_list: + if render_object(obj, is_tracking_on): + base_color = generate_color_id_u(obj.id) + # Display image scaled 2D bounding box + top_left_corner = cvt(obj.bounding_box_2d[0], img_scale) + top_right_corner = cvt(obj.bounding_box_2d[1], img_scale) + bottom_right_corner = cvt(obj.bounding_box_2d[2], img_scale) + bottom_left_corner = cvt(obj.bounding_box_2d[3], img_scale) + + # Creation of the 2 horizontal lines + 
cv2.line(left_display, (int(top_left_corner[0]), int(top_left_corner[1])), + (int(top_right_corner[0]), int(top_right_corner[1])), base_color, line_thickness) + cv2.line(left_display, (int(bottom_left_corner[0]), int(bottom_left_corner[1])), + (int(bottom_right_corner[0]), int(bottom_right_corner[1])), base_color, line_thickness) + # Creation of 2 vertical lines + draw_vertical_line(left_display, bottom_left_corner, top_left_corner, base_color, line_thickness) + draw_vertical_line(left_display, bottom_right_corner, top_right_corner, base_color, line_thickness) + + # Scaled ROI + roi_height = int(top_right_corner[0] - top_left_corner[0]) + roi_width = int(bottom_left_corner[1] - top_left_corner[1]) + overlay_roi = overlay[int(top_left_corner[1]): int(top_left_corner[1] + roi_width), + int(top_left_corner[0]): int(top_left_corner[0] + roi_height)] + + if obj.mask.is_init(): + overlay_roi[obj.mask.numpy() != 0] = base_color + + # Display Object label as text + position_image = get_image_position(obj.bounding_box_2d, img_scale) + text_position = (int(position_image[0] - 20), int(position_image[1] - 12)) + text = "class " + str(obj.raw_label) + text_color = (255, 255, 255, 255) + cv2.putText(left_display, text, text_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, text_color, 1) + + # Diplay Object distance to camera as text + if np.isfinite(obj.position[2]): + text = str(round(abs(obj.position[2]), 1)) + "M" + text_position = (int(position_image[0] - 20), int(position_image[1])) + cv2.putText(left_display, text, text_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, text_color, 1) + + # Here, overlay is as the left image, but with opaque masks on each detected objects + cv2.addWeighted(left_display, 0.7, overlay, 0.3, 0.0, left_display) + +# ---------------------------------------------------------------------- +# 2D TRACKING VIEW +# ---------------------------------------------------------------------- + +class TrackingViewer: + def __init__(self, res, fps, D_max): + # 
Window size + self.window_width = res.width + self.window_height = res.height + + # Visualisation settings + self.has_background_ready = False + self.background = np.full((self.window_height, self.window_width, 4), [245, 239, 239, 255], np.uint8) + + # Invert Z due to Y axis of ocv window + # Show objects between [z_min, 0] (z_min < 0) + self.z_min = -D_max + # Show objects between [x_min, x_max] + self.x_min = self.z_min + self.x_max = -self.x_min + + # Conversion from world position to pixel coordinates + self.x_step = (self.x_max - self.x_min) / self.window_width + self.z_step = abs(self.z_min) / self.window_height + + self.camera_calibration = sl.CalibrationParameters() + + # List of alive tracks + self.tracklets = [] + + def set_camera_calibration(self, calib): + self.camera_calibration = calib + self.has_background_ready = False + + def generate_view(self, objects, current_camera_pose, tracking_view, tracking_enabled): + # To get position in WORLD reference + for obj in objects.object_list: + pos = obj.position + tmp_pos = sl.Translation() + tmp_pos.init_vector(pos[0], pos[1], pos[2]) + new_pos = ( + tmp_pos * current_camera_pose.get_orientation()).get() + current_camera_pose.get_translation().get() + obj.position = np.array([new_pos[0], new_pos[1], new_pos[2]]) + + # Initialize visualisation + if not self.has_background_ready: + self.generate_background() + + np.copyto(tracking_view, self.background, 'no') + + if tracking_enabled: + # First add new points and remove the ones that are too old + current_timestamp = objects.timestamp.get_seconds() + self.add_to_tracklets(objects, current_timestamp) + self.prune_old_points(current_timestamp) + + # Draw all tracklets + self.draw_tracklets(tracking_view, current_camera_pose) + else: + self.draw_points(objects.object_list, tracking_view, current_camera_pose) + + def add_to_tracklets(self, objects, current_timestamp): + for obj in objects.object_list: + if (obj.tracking_state != sl.OBJECT_TRACKING_STATE.OK) or (not 
np.isfinite(obj.position[0])) or ( + obj.id < 0): + continue + + new_object = True + for i in range(len(self.tracklets)): + if self.tracklets[i].id == obj.id: + new_object = False + self.tracklets[i].add_point(obj, current_timestamp) + + # In case this object does not belong to existing tracks + if (new_object): + self.tracklets.append(Tracklet(obj, obj.label, current_timestamp)) + + def prune_old_points(self, ts): + track_to_delete = [] + for it in self.tracklets: + if ((ts - it.last_timestamp) > (3)): + track_to_delete.append(it) + + for it in track_to_delete: + self.tracklets.remove(it) + + # ---------------------------------------------------------------------- + # Drawing functions + # ---------------------------------------------------------------------- + + def draw_points(self, objects, tracking_view, current_camera_pose): + for obj in objects: + if (not np.isfinite(obj.position[0])): + continue + clr = generate_color_id_u(obj.id) + pt = TrackPoint(obj.position) + cv_start_point = self.to_cv_point(pt.get_xyz(), current_camera_pose) + cv2.circle(tracking_view, (int(cv_start_point[0]), int(cv_start_point[1])), 6, clr, 2) + + def draw_tracklets(self, tracking_view, current_camera_pose): + for track in self.tracklets: + clr = generate_color_id_u(track.id) + cv_start_point = self.to_cv_point(track.positions[0].get_xyz(), current_camera_pose) + for point_index in range(1, len(track.positions)): + cv_end_point = self.to_cv_point(track.positions[point_index].get_xyz(), current_camera_pose) + cv2.line(tracking_view, (int(cv_start_point[0]), int(cv_start_point[1])), + (int(cv_end_point[0]), int(cv_end_point[1])), clr, 3) + cv_start_point = cv_end_point + cv2.circle(tracking_view, (int(cv_start_point[0]), int(cv_start_point[1])), 6, clr, -1) + + def generate_background(self): + camera_color = [255, 230, 204, 255] + + # Get FOV intersection with window borders + fov = 2.0 * math.atan( + self.camera_calibration.left_cam.image_size.width / (2.0 * 
self.camera_calibration.left_cam.fx)) + + z_at_x_max = self.x_max / math.tan(fov / 2.0) + left_intersection_pt = self.to_cv_point(self.x_min, -z_at_x_max) + right_intersection_pt = self.to_cv_point(self.x_max, -z_at_x_max) + + # Drawing camera + camera_pts = np.array([left_intersection_pt + , right_intersection_pt + , [int(self.window_width / 2), self.window_height]] + , dtype=np.int32) + cv2.fillConvexPoly(self.background, camera_pts, camera_color) + + def to_cv_point(self, x, z): + out = [] + if isinstance(x, float) and isinstance(z, float): + out = [int((x - self.x_min) / self.x_step), int((z - self.z_min) / self.z_step)] + elif isinstance(x, list) and isinstance(z, sl.Pose): + # Go to camera current pose + rotation = z.get_rotation_matrix() + rotation.inverse() + tmp = x - (z.get_translation() * rotation.get_orientation()).get() + new_position = sl.Translation() + new_position.init_vector(tmp[0], tmp[1], tmp[2]) + out = [int(((new_position.get()[0] - self.x_min) / self.x_step) + 0.5), + int(((new_position.get()[2] - self.z_min) / self.z_step) + 0.5)] + elif isinstance(x, TrackPoint) and isinstance(z, sl.Pose): + pos = x.get_xyz() + out = self.to_cv_point(pos, z) + else: + print("Unhandled argument type") + return out + + +class TrackPoint: + def __init__(self, pos_): + self.x = pos_[0] + self.y = pos_[1] + self.z = pos_[2] + + def get_xyz(self): + return [self.x, self.y, self.z] + + +class Tracklet: + def __init__(self, obj_, type_, timestamp_): + self.id = obj_.id + self.object_type = type_ + self.positions = deque() + self.add_point(obj_, timestamp_) + + def add_point(self, obj_, timestamp_): + self.positions.append(TrackPoint(obj_.position)) + self.last_timestamp = timestamp_ diff --git a/object detection/custom detector/python/pytorch_yolov8_seg/cv_viewer/utils.py b/object detection/custom detector/python/pytorch_yolov8_seg/cv_viewer/utils.py new file mode 100644 index 00000000..825bdbbd --- /dev/null +++ b/object detection/custom 
detector/python/pytorch_yolov8_seg/cv_viewer/utils.py @@ -0,0 +1,38 @@ +import cv2 +import numpy as np +import pyzed.sl as sl + +id_colors = [(232, 176, 59), + (175, 208, 25), + (102, 205, 105), + (185, 0, 255), + (99, 107, 252)] + + +def render_object(object_data, is_tracking_on): + if is_tracking_on: + return object_data.tracking_state == sl.OBJECT_TRACKING_STATE.OK + else: + return (object_data.tracking_state == sl.OBJECT_TRACKING_STATE.OK) or ( + object_data.tracking_state == sl.OBJECT_TRACKING_STATE.OFF) + + +def generate_color_id_u(idx): + arr = [] + if idx < 0: + arr = [236, 184, 36, 255] + else: + color_idx = idx % 5 + arr = [id_colors[color_idx][0], id_colors[color_idx][1], id_colors[color_idx][2], 255] + return arr + + +def draw_vertical_line(left_display, start_pt, end_pt, clr, thickness): + n_steps = 7 + pt1 = [((n_steps - 1) * start_pt[0] + end_pt[0]) / n_steps + , ((n_steps - 1) * start_pt[1] + end_pt[1]) / n_steps] + pt4 = [(start_pt[0] + (n_steps - 1) * end_pt[0]) / n_steps + , (start_pt[1] + (n_steps - 1) * end_pt[1]) / n_steps] + + cv2.line(left_display, (int(start_pt[0]), int(start_pt[1])), (int(pt1[0]), int(pt1[1])), clr, thickness) + cv2.line(left_display, (int(pt4[0]), int(pt4[1])), (int(end_pt[0]), int(end_pt[1])), clr, thickness) diff --git a/object detection/custom detector/python/pytorch_yolov8_seg/detector.py b/object detection/custom detector/python/pytorch_yolov8_seg/detector.py new file mode 100644 index 00000000..8bfaadfb --- /dev/null +++ b/object detection/custom detector/python/pytorch_yolov8_seg/detector.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 + +import numpy as np + +import argparse +import torch +import cv2 +import pyzed.sl as sl +from ultralytics import YOLO +from ultralytics.engine.results import Results + +from threading import Lock, Thread +from time import sleep +from typing import List + +import ogl_viewer.viewer as gl +import cv_viewer.tracking_viewer as cv_viewer + +# Globals +lock = Lock() +run_signal = False 
+exit_signal = False +image_net: np.ndarray = None +detections: List[sl.CustomMaskObjectData] = None +sl_mats: List[sl.Mat] = None # We need it to keep the ownership of the sl.Mat + + +def xywh2abcd_(xywh: np.ndarray) -> np.ndarray: + output = np.zeros((4, 2)) + + # Center / Width / Height -> BBox corners coordinates + x_min = (xywh[0] - 0.5 * xywh[2]) + x_max = (xywh[0] + 0.5 * xywh[2]) + y_min = (xywh[1] - 0.5 * xywh[3]) + y_max = (xywh[1] + 0.5 * xywh[3]) + + # A ------ B + # | Object | + # D ------ C + + output[0][0] = x_min + output[0][1] = y_min + + output[1][0] = x_max + output[1][1] = y_min + + output[2][0] = x_max + output[2][1] = y_max + + output[3][0] = x_min + output[3][1] = y_max + return output + + +def detections_to_custom_masks_(dets: Results) -> List[sl.CustomMaskObjectData]: + global sl_mats + output = [] + sl_mats = [] + for det in dets.cpu().numpy(): + # Creating ingestable objects for the ZED SDK + obj = sl.CustomMaskObjectData() + + # Bounding box + box = det.boxes + xywh = box.xywh[0] + abcd = xywh2abcd_(xywh) + + obj.bounding_box_2d = abcd + obj.label = box.cls + obj.probability = box.conf + obj.is_grounded = False + + # Mask + if det.masks is not None: + mask_bin = (det.masks.data[0] * 255).astype(np.uint8) + mask_bin = mask_bin[int(abcd[0][1]): int(abcd[2][1]), + int(abcd[0][0]): int(abcd[2][0])] + if not mask_bin.flags.c_contiguous: + mask_bin = np.ascontiguousarray(mask_bin) + + # Mask as a sl mat + sl_mat = sl.Mat(width=mask_bin.shape[1], + height=mask_bin.shape[0], + mat_type=sl.MAT_TYPE.U8_C1, + memory_type=sl.MEM.CPU) + p_sl_as_cv = sl_mat.get_data() + np.copyto(p_sl_as_cv, mask_bin) + sl_mats.append(sl_mat) + + obj.box_mask = sl_mat + else: + print("[Warning] No mask found in the prediction. 
Did you use a seg model?") + + output.append(obj) + + return output + + +def torch_thread_(weights: str, img_size: int, conf_thres: float = 0.2, iou_thres: float = 0.45) -> None: + global image_net, exit_signal, run_signal, detections + + print("Initializing Network...") + model = YOLO(weights) + print("Network Initialized...") + + while not exit_signal: + if run_signal: + lock.acquire() + + # Run inference + img = cv2.cvtColor(image_net, cv2.COLOR_BGRA2RGB) + det = model.predict(img, save=False, retina_masks=True, imgsz=img_size, conf=conf_thres, iou=iou_thres, verbose=False)[0] + + # ZED CustomMasks format + detections = detections_to_custom_masks_(det) + lock.release() + run_signal = False + + sleep(0.01) + + +def main_(args: argparse.Namespace): + global image_net, exit_signal, run_signal, detections + + capture_thread = Thread(target=torch_thread_, + kwargs={'weights': args.weights, 'img_size': args.img_size, "conf_thres": args.conf_thres}) + capture_thread.start() + + # Create a InitParameters object and set configuration parameters + input_type = sl.InputType() + if args.svo is not None: + input_type.set_from_svo_file(args.svo) + init_params = sl.InitParameters(input_t=input_type, svo_real_time_mode=True) + init_params.coordinate_units = sl.UNIT.METER + init_params.depth_mode = sl.DEPTH_MODE.ULTRA # QUALITY + init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP + init_params.depth_maximum_distance = 50 + + # Initialize the camera + print("Initializing Camera...") + zed = sl.Camera() + status = zed.open(init_params) + if status != sl.ERROR_CODE.SUCCESS: + print(repr(status)) + exit() + print("Camera Initialized") + + # Enable Positional Tracking + positional_tracking_parameters = sl.PositionalTrackingParameters() + # If the camera is static, uncomment the following line to have better performances and boxes sticked to the ground. 
+ # positional_tracking_parameters.set_as_static = True + zed.enable_positional_tracking(positional_tracking_parameters) + + # Enable Object Detection + obj_param = sl.ObjectDetectionParameters() + obj_param.detection_model = sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS + obj_param.enable_tracking = True + obj_param.enable_segmentation = True + zed.enable_object_detection(obj_param) + + # Display + camera_infos = zed.get_camera_information() + camera_res = camera_infos.camera_configuration.resolution + + # Create OpenGL viewer + viewer = gl.GLViewer() + point_cloud_res = sl.Resolution(min(camera_res.width, 720), min(camera_res.height, 404)) + point_cloud_render = sl.Mat() + viewer.init(camera_infos.camera_model, point_cloud_res, obj_param.enable_tracking) + point_cloud = sl.Mat(point_cloud_res.width, point_cloud_res.height, sl.MAT_TYPE.F32_C4, sl.MEM.CPU) + image_left = sl.Mat() + + # Utilities for 2D display + display_resolution = sl.Resolution(min(camera_res.width, 1280), min(camera_res.height, 720)) + image_scale = [display_resolution.width / camera_res.width, display_resolution.height / camera_res.height] + image_left_ocv = np.full((display_resolution.height, display_resolution.width, 4), [245, 239, 239, 255], np.uint8) + + # Utilities for tracks view + camera_config = camera_infos.camera_configuration + tracks_resolution = sl.Resolution(400, display_resolution.height) + track_view_generator = cv_viewer.TrackingViewer(tracks_resolution, camera_config.fps, init_params.depth_maximum_distance) + track_view_generator.set_camera_calibration(camera_config.calibration_parameters) + image_track_ocv = np.zeros((tracks_resolution.height, tracks_resolution.width, 4), np.uint8) + + # Prepare runtime retrieval + runtime_params = sl.RuntimeParameters() + obj_runtime_param = sl.ObjectDetectionRuntimeParameters() + cam_w_pose = sl.Pose() + image_left_tmp = sl.Mat() + objects = sl.Objects() + + while viewer.is_available() and not exit_signal: + if zed.grab(runtime_params) == 
sl.ERROR_CODE.SUCCESS: + # -- Get the image + lock.acquire() + zed.retrieve_image(image_left_tmp, sl.VIEW.LEFT) + image_net = image_left_tmp.get_data() + lock.release() + run_signal = True + + # -- Detection running on the other thread + while run_signal: + sleep(0.001) + + # Wait for detections + lock.acquire() + # -- Ingest detections + zed.ingest_custom_mask_objects(detections) + lock.release() + zed.retrieve_objects(objects, obj_runtime_param) + + # -- Display + # Retrieve display data + zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU, point_cloud_res) + point_cloud.copy_to(point_cloud_render) + zed.retrieve_image(image_left, sl.VIEW.LEFT, sl.MEM.CPU, display_resolution) + zed.get_position(cam_w_pose, sl.REFERENCE_FRAME.WORLD) + + # 3D rendering + viewer.updateData(point_cloud_render, objects) + # 2D rendering + np.copyto(image_left_ocv, image_left.get_data()) + cv_viewer.render_2D(image_left_ocv, image_scale, objects, obj_param.enable_tracking) + global_image = cv2.hconcat([image_left_ocv, image_track_ocv]) + # Tracking view + track_view_generator.generate_view(objects, cam_w_pose, image_track_ocv, objects.is_tracked) + + cv2.imshow("ZED | 2D View and Birds View", global_image) + key = cv2.waitKey(10) + if key in (27, ord('q'), ord('Q')): + exit_signal = True + else: + exit_signal = True + + viewer.exit() + exit_signal = True + zed.close() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='yolov8m.pt', help='model.pt path(s)') + parser.add_argument('--svo', type=str, default=None, help='optional svo file') + parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf_thres', type=float, default=0.4, help='object confidence threshold') + args = parser.parse_args() + + with torch.no_grad(): + main_(args) diff --git a/object detection/custom detector/python/pytorch_yolov8_seg/ogl_viewer/viewer.py b/object 
detection/custom detector/python/pytorch_yolov8_seg/ogl_viewer/viewer.py new file mode 100644 index 00000000..102a4d15 --- /dev/null +++ b/object detection/custom detector/python/pytorch_yolov8_seg/ogl_viewer/viewer.py @@ -0,0 +1,769 @@ +from OpenGL.GL import * +from OpenGL.GLUT import * +from OpenGL.GLU import * + +import ctypes +import sys +import math +from threading import Lock +import numpy as np +import array +from enum import IntEnum + +from cv_viewer.utils import * +import ogl_viewer.zed_model as zm +import pyzed.sl as sl + +VERTEX_SHADER = """ +# version 330 core +layout(location = 0) in vec3 in_Vertex; +layout(location = 1) in vec4 in_Color; +uniform mat4 u_mvpMatrix; +out vec4 b_color; +void main() { + b_color = in_Color; + gl_Position = u_mvpMatrix * vec4(in_Vertex, 1); +} +""" + +FRAGMENT_SHADER = """ +# version 330 core +in vec4 b_color; +layout(location = 0) out vec4 out_Color; +void main() { + out_Color = b_color; +} +""" + +POINTCLOUD_VERTEX_SHADER = """ +#version 330 core +layout(location = 0) in vec4 in_VertexRGBA; +uniform mat4 u_mvpMatrix; +out vec4 b_color; +void main() { + uint vertexColor = floatBitsToUint(in_VertexRGBA.w); + vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16); + b_color = vec4(clr_int.r / 255.0f, clr_int.g / 255.0f, clr_int.b / 255.0f, 1.f); + gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1); +} +""" + +POINTCLOUD_FRAGMENT_SHADER = """ +#version 330 core +in vec4 b_color; +layout(location = 0) out vec4 out_Color; +void main() { + out_Color = b_color; +} +""" + +M_PI = 3.1415926 + +GRID_SIZE = 9.0 + + +def generate_color_id(_idx): + clr = np.divide(generate_color_id_u(_idx), 255.0) + clr[0], clr[2] = clr[2], clr[0] + return clr + + +class Shader: + def __init__(self, _vs, _fs): + + self.program_id = glCreateProgram() + vertex_id = self.compile(GL_VERTEX_SHADER, _vs) + fragment_id = self.compile(GL_FRAGMENT_SHADER, _fs) + + 
glAttachShader(self.program_id, vertex_id) + glAttachShader(self.program_id, fragment_id) + glBindAttribLocation(self.program_id, 0, "in_vertex") + glBindAttribLocation(self.program_id, 1, "in_texCoord") + glLinkProgram(self.program_id) + + if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE: + info = glGetProgramInfoLog(self.program_id) + glDeleteProgram(self.program_id) + glDeleteShader(vertex_id) + glDeleteShader(fragment_id) + raise RuntimeError('Error linking program: %s' % (info)) + glDeleteShader(vertex_id) + glDeleteShader(fragment_id) + + def compile(self, _type, _src): + try: + shader_id = glCreateShader(_type) + if shader_id == 0: + print("ERROR: shader type {0} does not exist".format(_type)) + exit() + + glShaderSource(shader_id, _src) + glCompileShader(shader_id) + if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE: + info = glGetShaderInfoLog(shader_id) + glDeleteShader(shader_id) + raise RuntimeError('Shader compilation failed: %s' % (info)) + return shader_id + except: + glDeleteShader(shader_id) + raise + + def get_program_id(self): + return self.program_id + + +class Simple3DObject: + def __init__(self, _is_static, pts_size=3, clr_size=3): + self.is_init = False + self.drawing_type = GL_TRIANGLES + self.is_static = _is_static + self.clear() + self.pt_type = pts_size + self.clr_type = clr_size + + def add_pt(self, _pts): # _pts [x,y,z] + for pt in _pts: + self.vertices.append(pt) + + def add_clr(self, _clrs): # _clr [r,g,b] + for clr in _clrs: + self.colors.append(clr) + + def add_point_clr(self, _pt, _clr): + self.add_pt(_pt) + self.add_clr(_clr) + self.indices.append(len(self.indices)) + + def add_line(self, _p1, _p2, _clr): + self.add_point_clr(_p1, _clr) + self.add_point_clr(_p2, _clr) + + def addFace(self, p1, p2, p3, clr): + self.add_point_clr(p1, clr) + self.add_point_clr(p2, clr) + self.add_point_clr(p3, clr) + + def add_full_edges(self, _pts, _clr): + start_id = int(len(self.vertices) / 3) + + for i in range(len(_pts)): + 
self.add_pt(_pts[i]) + self.add_clr(_clr) + + box_links_top = np.array([0, 1, 1, 2, 2, 3, 3, 0]) + i = 0 + while i < box_links_top.size: + self.indices.append(start_id + box_links_top[i]) + self.indices.append(start_id + box_links_top[i + 1]) + i = i + 2 + + box_links_bottom = np.array([4, 5, 5, 6, 6, 7, 7, 4]) + i = 0 + while i < box_links_bottom.size: + self.indices.append(start_id + box_links_bottom[i]) + self.indices.append(start_id + box_links_bottom[i + 1]) + i = i + 2 + + def __add_single_vertical_line(self, _top_pt, _bottom_pt, _clr): + current_pts = np.array( + [_top_pt, + ((GRID_SIZE - 1) * np.array(_top_pt) + np.array(_bottom_pt)) / GRID_SIZE, + ((GRID_SIZE - 2) * np.array(_top_pt) + np.array(_bottom_pt) * 2) / GRID_SIZE, + (2 * np.array(_top_pt) + np.array(_bottom_pt) * (GRID_SIZE - 2)) / GRID_SIZE, + (np.array(_top_pt) + np.array(_bottom_pt) * (GRID_SIZE - 1)) / GRID_SIZE, + _bottom_pt + ], np.float32) + start_id = int(len(self.vertices) / 3) + for i in range(len(current_pts)): + self.add_pt(current_pts[i]) + if (i == 2 or i == 3): + _clr[3] = 0 + else: + _clr[3] = 0.75 + self.add_clr(_clr) + + box_links = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5]) + i = 0 + while i < box_links.size: + self.indices.append(start_id + box_links[i]) + self.indices.append(start_id + box_links[i + 1]) + i = i + 2 + + def add_vertical_edges(self, _pts, _clr): + self.__add_single_vertical_line(_pts[0], _pts[4], _clr) + self.__add_single_vertical_line(_pts[1], _pts[5], _clr) + self.__add_single_vertical_line(_pts[2], _pts[6], _clr) + self.__add_single_vertical_line(_pts[3], _pts[7], _clr) + + def add_top_face(self, _pts, _clr): + _clr[3] = 0.5 + for pt in _pts: + self.add_point_clr(pt, _clr) + + def __add_quad(self, _quad_pts, _alpha1, _alpha2, _clr): + for i in range(len(_quad_pts)): + self.add_pt(_quad_pts[i]) + if i < 2: + _clr[3] = _alpha1 + else: + _clr[3] = _alpha2 + self.add_clr(_clr) + + self.indices.append(len(self.indices)) + self.indices.append(len(self.indices)) + 
self.indices.append(len(self.indices)) + self.indices.append(len(self.indices)) + + def add_vertical_faces(self, _pts, _clr): + # For each face, we need to add 4 quads (the first 2 indexes are always the top points of the quad) + quads = [[0, 3, 7, 4] # Front face + , [3, 2, 6, 7] # Right face + , [2, 1, 5, 6] # Back face + , [1, 0, 4, 5]] # Left face + + alpha = .5 + + # Create gradually fading quads + for quad in quads: + quad_pts_1 = [ + _pts[quad[0]], + _pts[quad[1]], + ((GRID_SIZE - 0.5) * np.array(_pts[quad[1]]) + 0.5 * np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 0.5) * np.array(_pts[quad[0]]) + 0.5 * np.array(_pts[quad[3]])) / GRID_SIZE + ] + self.__add_quad(quad_pts_1, alpha, alpha, _clr) + + quad_pts_2 = [ + ((GRID_SIZE - 0.5) * np.array(_pts[quad[0]]) + 0.5 * np.array(_pts[quad[3]])) / GRID_SIZE, + ((GRID_SIZE - 0.5) * np.array(_pts[quad[1]]) + 0.5 * np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 1.0) * np.array(_pts[quad[1]]) + np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 1.0) * np.array(_pts[quad[0]]) + np.array(_pts[quad[3]])) / GRID_SIZE + ] + self.__add_quad(quad_pts_2, alpha, 2 * alpha / 3, _clr) + + quad_pts_3 = [ + ((GRID_SIZE - 1.0) * np.array(_pts[quad[0]]) + np.array(_pts[quad[3]])) / GRID_SIZE, + ((GRID_SIZE - 1.0) * np.array(_pts[quad[1]]) + np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 1.5) * np.array(_pts[quad[1]]) + 1.5 * np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 1.5) * np.array(_pts[quad[0]]) + 1.5 * np.array(_pts[quad[3]])) / GRID_SIZE + ] + self.__add_quad(quad_pts_3, 2 * alpha / 3, alpha / 3, _clr) + + quad_pts_4 = [ + ((GRID_SIZE - 1.5) * np.array(_pts[quad[0]]) + 1.5 * np.array(_pts[quad[3]])) / GRID_SIZE, + ((GRID_SIZE - 1.5) * np.array(_pts[quad[1]]) + 1.5 * np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 2.0) * np.array(_pts[quad[1]]) + 2.0 * np.array(_pts[quad[2]])) / GRID_SIZE, + ((GRID_SIZE - 2.0) * np.array(_pts[quad[0]]) + 2.0 * np.array(_pts[quad[3]])) / GRID_SIZE + ] + 
self.__add_quad(quad_pts_4, alpha / 3, 0.0, _clr) + + quad_pts_5 = [ + (np.array(_pts[quad[1]]) * 2.0 + (GRID_SIZE - 2.0) * np.array(_pts[quad[2]])) / GRID_SIZE, + (np.array(_pts[quad[0]]) * 2.0 + (GRID_SIZE - 2.0) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[0]]) * 1.5 + (GRID_SIZE - 1.5) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[1]]) * 1.5 + (GRID_SIZE - 1.5) * np.array(_pts[quad[2]])) / GRID_SIZE + ] + self.__add_quad(quad_pts_5, 0.0, alpha / 3, _clr) + + quad_pts_6 = [ + (np.array(_pts[quad[1]]) * 1.5 + (GRID_SIZE - 1.5) * np.array(_pts[quad[2]])) / GRID_SIZE, + (np.array(_pts[quad[0]]) * 1.5 + (GRID_SIZE - 1.5) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[0]]) + (GRID_SIZE - 1.0) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[1]]) + (GRID_SIZE - 1.0) * np.array(_pts[quad[2]])) / GRID_SIZE + ] + self.__add_quad(quad_pts_6, alpha / 3, 2 * alpha / 3, _clr) + + quad_pts_7 = [ + (np.array(_pts[quad[1]]) + (GRID_SIZE - 1.0) * np.array(_pts[quad[2]])) / GRID_SIZE, + (np.array(_pts[quad[0]]) + (GRID_SIZE - 1.0) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[0]]) * 0.5 + (GRID_SIZE - 0.5) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[1]]) * 0.5 + (GRID_SIZE - 0.5) * np.array(_pts[quad[2]])) / GRID_SIZE + ] + self.__add_quad(quad_pts_7, 2 * alpha / 3, alpha, _clr) + + quad_pts_8 = [ + (np.array(_pts[quad[0]]) * 0.5 + (GRID_SIZE - 0.5) * np.array(_pts[quad[3]])) / GRID_SIZE, + (np.array(_pts[quad[1]]) * 0.5 + (GRID_SIZE - 0.5) * np.array(_pts[quad[2]])) / GRID_SIZE, + np.array(_pts[quad[2]]), np.array(_pts[quad[3]]) + ] + self.__add_quad(quad_pts_8, alpha, alpha, _clr) + + def push_to_GPU(self): + if (self.is_init == False): + self.vboID = glGenBuffers(3) + self.is_init = True + + if (self.is_static): + type_draw = GL_STATIC_DRAW + else: + type_draw = GL_DYNAMIC_DRAW + + if len(self.vertices): + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glBufferData(GL_ARRAY_BUFFER, 
len(self.vertices) * self.vertices.itemsize, + (GLfloat * len(self.vertices))(*self.vertices), type_draw) + + if len(self.colors): + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[1]) + glBufferData(GL_ARRAY_BUFFER, len(self.colors) * self.colors.itemsize, + (GLfloat * len(self.colors))(*self.colors), type_draw) + + if len(self.indices): + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboID[2]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(self.indices) * self.indices.itemsize, + (GLuint * len(self.indices))(*self.indices), type_draw) + + self.elementbufferSize = len(self.indices) + + def init(self, res): + if (self.is_init == False): + self.vboID = glGenBuffers(3) + self.is_init = True + + if (self.is_static): + type_draw = GL_STATIC_DRAW + else: + type_draw = GL_DYNAMIC_DRAW + + self.elementbufferSize = res.width * res.height + + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glBufferData(GL_ARRAY_BUFFER, self.elementbufferSize * self.pt_type * self.vertices.itemsize, None, type_draw) + + if (self.clr_type): + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[1]) + glBufferData(GL_ARRAY_BUFFER, self.elementbufferSize * self.clr_type * self.colors.itemsize, None, + type_draw) + + for i in range(0, self.elementbufferSize): + self.indices.append(i + 1) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboID[2]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(self.indices) * self.indices.itemsize, + (GLuint * len(self.indices))(*self.indices), type_draw) + + def setPoints(self, pc): + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glBufferSubData(GL_ARRAY_BUFFER, 0, self.elementbufferSize * self.pt_type * self.vertices.itemsize, + ctypes.c_void_p(pc.get_pointer())) + glBindBuffer(GL_ARRAY_BUFFER, 0) + + def clear(self): + self.vertices = array.array('f') + self.colors = array.array('f') + self.indices = array.array('I') + self.elementbufferSize = 0 + + def set_drawing_type(self, _type): + self.drawing_type = _type + + def draw(self): + if (self.elementbufferSize): + 
glEnableVertexAttribArray(0) + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glVertexAttribPointer(0, self.pt_type, GL_FLOAT, GL_FALSE, 0, None) + + if (self.clr_type): + glEnableVertexAttribArray(1) + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[1]) + glVertexAttribPointer(1, self.clr_type, GL_FLOAT, GL_FALSE, 0, None) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboID[2]) + glDrawElements(self.drawing_type, self.elementbufferSize, GL_UNSIGNED_INT, None) + + glDisableVertexAttribArray(0) + glDisableVertexAttribArray(1) + + +class GLViewer: + def __init__(self): + self.available = False + self.mutex = Lock() + self.camera = CameraGL() + self.wheelPosition = 0. + self.mouse_button = [False, False] + self.mouseCurrentPosition = [0., 0.] + self.previousMouseMotion = [0., 0.] + self.mouseMotion = [0., 0.] + self.zedModel = Simple3DObject(True) + self.BBox_faces = Simple3DObject(False, 3, 4) + self.BBox_edges = Simple3DObject(False, 3, 4) + self.point_cloud = Simple3DObject(False, 4) + self.is_tracking_on = False # Show tracked objects only + + def init(self, camera_model, res, is_tracking_on): + glutInit(sys.argv) + wnd_w = int(glutGet(GLUT_SCREEN_WIDTH) * 0.9) + wnd_h = int(glutGet(GLUT_SCREEN_HEIGHT) * 0.9) + glutInitWindowSize(wnd_w, wnd_h) + glutInitWindowPosition(int(wnd_w * 0.05), int(wnd_h * 0.05)) + + glutInitDisplayMode(GLUT_DOUBLE | GLUT_SRGB | GLUT_DEPTH) + glutCreateWindow("ZED Object Detection") + glViewport(0, 0, wnd_w, wnd_h) + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, + GLUT_ACTION_CONTINUE_EXECUTION) + + glEnable(GL_DEPTH_TEST) + + glEnable(GL_BLEND) + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) + + glEnable(GL_LINE_SMOOTH) + glHint(GL_LINE_SMOOTH_HINT, GL_NICEST) + + self.is_tracking_on = is_tracking_on + + # Compile and create the shader for 3D objects + self.shader_image = Shader(VERTEX_SHADER, FRAGMENT_SHADER) + self.shader_image_MVP = glGetUniformLocation(self.shader_image.get_program_id(), "u_mvpMatrix") + + self.shader_pc = 
Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER) + self.shader_pc_MVP = glGetUniformLocation(self.shader_pc.get_program_id(), "u_mvpMatrix") + + self.bckgrnd_clr = np.array([223 / 255., 230 / 255., 233 / 255.]) + + if camera_model == sl.MODEL.ZED: + for i in range(0, zm.NB_ALLUMINIUM_TRIANGLES * 3, 3): + for j in range(3): + index = int(zm.alluminium_triangles[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices[index * 3], zm.vertices[index * 3 + 1], zm.vertices[index * 3 + 2]], + [zm.ALLUMINIUM_COLOR.r, zm.ALLUMINIUM_COLOR.g, zm.ALLUMINIUM_COLOR.b]) + + for i in range(0, zm.NB_DARK_TRIANGLES * 3, 3): + for j in range(3): + index = int(zm.dark_triangles[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices[index * 3], zm.vertices[index * 3 + 1], zm.vertices[index * 3 + 2]], + [zm.DARK_COLOR.r, zm.DARK_COLOR.g, zm.DARK_COLOR.b]) + elif camera_model == sl.MODEL.ZED_M: + for i in range(0, zm.NB_AL_ZEDM_TRI * 3, 3): + for j in range(3): + index = int(zm.al_triangles_m[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices_m[index * 3], zm.vertices_m[index * 3 + 1], zm.vertices_m[index * 3 + 2]], + [zm.ALLUMINIUM_COLOR.r, zm.ALLUMINIUM_COLOR.g, zm.ALLUMINIUM_COLOR.b]) + + for i in range(0, zm.NB_DARK_ZEDM_TRI * 3, 3): + for j in range(3): + index = int(zm.dark_triangles_m[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices_m[index * 3], zm.vertices_m[index * 3 + 1], zm.vertices_m[index * 3 + 2]], + [zm.DARK_COLOR.r, zm.DARK_COLOR.g, zm.DARK_COLOR.b]) + + for i in range(0, zm.NB_GRAY_ZEDM_TRI * 3, 3): + for j in range(3): + index = int(zm.gray_triangles_m[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices_m[index * 3], zm.vertices_m[index * 3 + 1], zm.vertices_m[index * 3 + 2]], + [zm.GRAY_COLOR.r, zm.GRAY_COLOR.g, zm.GRAY_COLOR.b]) + + for i in range(0, zm.NB_YELLOW_ZEDM_TRI * 3, 3): + for j in range(3): + index = int(zm.yellow_triangles_m[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices_m[index * 3], zm.vertices_m[index * 
3 + 1], zm.vertices_m[index * 3 + 2]], + [zm.YELLOW_COLOR.r, zm.YELLOW_COLOR.g, zm.YELLOW_COLOR.b]) + + elif camera_model == sl.MODEL.ZED2: + for i in range(0, zm.NB_ALLUMINIUM_TRIANGLES * 3, 3): + for j in range(3): + index = int(zm.alluminium_triangles[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices[index * 3], zm.vertices[index * 3 + 1], zm.vertices[index * 3 + 2]], + [zm.DARK_COLOR.r, zm.DARK_COLOR.g, zm.DARK_COLOR.b]) + + for i in range(0, zm.NB_DARK_TRIANGLES * 3, 3): + for j in range(3): + index = int(zm.dark_triangles[i + j] - 1) + self.zedModel.add_point_clr( + [zm.vertices[index * 3], zm.vertices[index * 3 + 1], zm.vertices[index * 3 + 2]], + [zm.GRAY_COLOR.r, zm.GRAY_COLOR.g, zm.GRAY_COLOR.b]) + self.zedModel.set_drawing_type(GL_TRIANGLES) + self.zedModel.push_to_GPU() + + self.point_cloud.init(res) + self.point_cloud.set_drawing_type(GL_POINTS) + + self.BBox_edges.set_drawing_type(GL_LINES) + self.BBox_faces.set_drawing_type(GL_QUADS) + + # Register GLUT callback functions + glutDisplayFunc(self.draw_callback) + glutIdleFunc(self.idle) + glutKeyboardFunc(self.keyPressedCallback) + glutCloseFunc(self.close_func) + glutMouseFunc(self.on_mouse) + glutMotionFunc(self.on_mousemove) + glutReshapeFunc(self.on_resize) + + self.available = True + + def is_available(self): + if self.available: + glutMainLoopEvent() + return self.available + + def render_object(self, _object_data): # _object_data of type sl.ObjectData + if self.is_tracking_on: + return (_object_data.tracking_state == sl.OBJECT_TRACKING_STATE.OK) + else: + return ( + _object_data.tracking_state == sl.OBJECT_TRACKING_STATE.OK or _object_data.tracking_state == sl.OBJECT_TRACKING_STATE.OFF) + + def updateData(self, pc, _objs): + self.mutex.acquire() + self.point_cloud.setPoints(pc) + + # Clear frame objects + self.BBox_edges.clear() + self.BBox_faces.clear() + + for i in range(len(_objs.object_list)): + if self.render_object(_objs.object_list[i]): + bounding_box = 
np.array(_objs.object_list[i].bounding_box) + if bounding_box.any(): + color_id = generate_color_id(_objs.object_list[i].id) + self.create_bbox_rendering(bounding_box, color_id) + + self.mutex.release() + + def create_bbox_rendering(self, _bbox, _bbox_clr): + # First create top and bottom full edges + self.BBox_edges.add_full_edges(_bbox, _bbox_clr) + # Add faded vertical edges + self.BBox_edges.add_vertical_edges(_bbox, _bbox_clr) + # Add faces + self.BBox_faces.add_vertical_faces(_bbox, _bbox_clr) + # Add top face + self.BBox_faces.add_top_face(_bbox, _bbox_clr) + + def idle(self): + if self.available: + glutPostRedisplay() + + def exit(self): + if self.available: + self.available = False + + def close_func(self): + if self.available: + self.available = False + + def keyPressedCallback(self, key, x, y): + if ord(key) == 27: # 'Esc' key + self.close_func() + + def on_mouse(self, *args, **kwargs): + (key, Up, x, y) = args + if key == 0: + self.mouse_button[0] = (Up == 0) + elif key == 2: + self.mouse_button[1] = (Up == 0) + elif (key == 3): + self.wheelPosition = self.wheelPosition + 1 + elif (key == 4): + self.wheelPosition = self.wheelPosition - 1 + + self.mouseCurrentPosition = [x, y] + self.previousMouseMotion = [x, y] + + def on_mousemove(self, *args, **kwargs): + (x, y) = args + self.mouseMotion[0] = x - self.previousMouseMotion[0] + self.mouseMotion[1] = y - self.previousMouseMotion[1] + self.previousMouseMotion = [x, y] + glutPostRedisplay() + + def on_resize(self, Width, Height): + glViewport(0, 0, Width, Height) + self.camera.setProjection(Height / Width) + + def draw_callback(self): + if self.available: + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + glClearColor(self.bckgrnd_clr[0], self.bckgrnd_clr[1], self.bckgrnd_clr[2], 1.) 
+ + self.mutex.acquire() + self.update() + self.draw() + self.mutex.release() + + glutSwapBuffers() + glutPostRedisplay() + + def update(self): + if (self.mouse_button[0]): + r = sl.Rotation() + vert = self.camera.vertical_ + tmp = vert.get() + vert.init_vector(tmp[0] * 1., tmp[1] * 1., tmp[2] * 1.) + r.init_angle_translation(self.mouseMotion[0] * 0.002, vert) + self.camera.rotate(r) + + r.init_angle_translation(self.mouseMotion[1] * 0.002, self.camera.right_) + self.camera.rotate(r) + + if (self.mouse_button[1]): + t = sl.Translation() + tmp = self.camera.right_.get() + scale = self.mouseMotion[0] * -0.01 + t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale) + self.camera.translate(t) + + tmp = self.camera.up_.get() + scale = self.mouseMotion[1] * 0.01 + t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale) + self.camera.translate(t) + + if (self.wheelPosition != 0): + t = sl.Translation() + tmp = self.camera.forward_.get() + scale = self.wheelPosition * -0.065 + t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale) + self.camera.translate(t) + + self.BBox_edges.push_to_GPU() + self.BBox_faces.push_to_GPU() + + self.camera.update() + + self.mouseMotion = [0., 0.] + self.wheelPosition = 0 + + def draw(self): + vpMatrix = self.camera.getViewProjectionMatrix() + + glUseProgram(self.shader_pc.get_program_id()) + glUniformMatrix4fv(self.shader_pc_MVP, 1, GL_TRUE, (GLfloat * len(vpMatrix))(*vpMatrix)) + glPointSize(1.2) + self.point_cloud.draw() + glUseProgram(0) + + glUseProgram(self.shader_image.get_program_id()) + glUniformMatrix4fv(self.shader_image_MVP, 1, GL_TRUE, (GLfloat * len(vpMatrix))(*vpMatrix)) + glLineWidth(4.) + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) + self.zedModel.draw() + self.BBox_faces.draw() + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) + glLineWidth(2.) 
+ self.BBox_edges.draw() + glUseProgram(0) + + +class CameraGL: + def __init__(self): + self.ORIGINAL_FORWARD = sl.Translation() + self.ORIGINAL_FORWARD.init_vector(0, 0, 1) + self.ORIGINAL_UP = sl.Translation() + self.ORIGINAL_UP.init_vector(0, 1, 0) + self.ORIGINAL_RIGHT = sl.Translation() + self.ORIGINAL_RIGHT.init_vector(1, 0, 0) + self.znear = 0.5 + self.zfar = 100. + self.horizontalFOV = 70. + self.orientation_ = sl.Orientation() + self.position_ = sl.Translation() + self.forward_ = sl.Translation() + self.up_ = sl.Translation() + self.right_ = sl.Translation() + self.vertical_ = sl.Translation() + self.vpMatrix_ = sl.Matrix4f() + self.offset_ = sl.Translation() + self.offset_.init_vector(0, 0, 5) + self.projection_ = sl.Matrix4f() + self.projection_.set_identity() + self.setProjection(1.78) + + self.position_.init_vector(0., 0., 0.) + tmp = sl.Translation() + tmp.init_vector(0, 0, -.1) + tmp2 = sl.Translation() + tmp2.init_vector(0, 1, 0) + self.setDirection(tmp, tmp2) + + def update(self): + dot_ = sl.Translation.dot_translation(self.vertical_, self.up_) + if (dot_ < 0.): + tmp = self.vertical_.get() + self.vertical_.init_vector(tmp[0] * -1., tmp[1] * -1., tmp[2] * -1.) + transformation = sl.Transform() + + tmp_position = self.position_.get() + tmp = (self.offset_ * self.orientation_).get() + new_position = sl.Translation() + new_position.init_vector(tmp_position[0] + tmp[0], tmp_position[1] + tmp[1], tmp_position[2] + tmp[2]) + transformation.init_orientation_translation(self.orientation_, new_position) + transformation.inverse() + self.vpMatrix_ = self.projection_ * transformation + + def setProjection(self, im_ratio): + fov_x = self.horizontalFOV * 3.1416 / 180. + fov_y = self.horizontalFOV * im_ratio * 3.1416 / 180. + + self.projection_[(0, 0)] = 1. / math.tan(fov_x * .5) + self.projection_[(1, 1)] = 1. / math.tan(fov_y * .5) + self.projection_[(2, 2)] = -(self.zfar + self.znear) / (self.zfar - self.znear) + self.projection_[(3, 2)] = -1. 
+ self.projection_[(2, 3)] = -(2. * self.zfar * self.znear) / (self.zfar - self.znear) + self.projection_[(3, 3)] = 0. + + def getViewProjectionMatrix(self): + tmp = self.vpMatrix_.m + vpMat = array.array('f') + for row in tmp: + for v in row: + vpMat.append(v) + return vpMat + + def getViewProjectionMatrixRT(self, tr): + tmp = self.vpMatrix_ + tmp.transpose() + tr.transpose() + tmp = (tr * tmp).m + vpMat = array.array('f') + for row in tmp: + for v in row: + vpMat.append(v) + return vpMat + + def setDirection(self, dir, vert): + dir.normalize() + tmp = dir.get() + dir.init_vector(tmp[0] * -1., tmp[1] * -1., tmp[2] * -1.) + self.orientation_.init_translation(self.ORIGINAL_FORWARD, dir) + self.updateVectors() + self.vertical_ = vert + if (sl.Translation.dot_translation(self.vertical_, self.up_) < 0.): + tmp = sl.Rotation() + tmp.init_angle_translation(3.14, self.ORIGINAL_FORWARD) + self.rotate(tmp) + + def translate(self, t): + ref = self.position_.get() + tmp = t.get() + self.position_.init_vector(ref[0] + tmp[0], ref[1] + tmp[1], ref[2] + tmp[2]) + + def setPosition(self, p): + self.position_ = p + + def rotate(self, r): + tmp = sl.Orientation() + tmp.init_rotation(r) + self.orientation_ = tmp * self.orientation_ + self.updateVectors() + + def setRotation(self, r): + self.orientation_.init_rotation(r) + self.updateVectors() + + def updateVectors(self): + self.forward_ = self.ORIGINAL_FORWARD * self.orientation_ + self.up_ = self.ORIGINAL_UP * self.orientation_ + right = self.ORIGINAL_RIGHT + tmp = right.get() + right.init_vector(tmp[0] * -1., tmp[1] * -1., tmp[2] * -1.) 
+ self.right_ = right * self.orientation_ diff --git a/object detection/custom detector/python/pytorch_yolov8_seg/ogl_viewer/zed_model.py b/object detection/custom detector/python/pytorch_yolov8_seg/ogl_viewer/zed_model.py new file mode 100644 index 00000000..f0a23ff4 --- /dev/null +++ b/object detection/custom detector/python/pytorch_yolov8_seg/ogl_viewer/zed_model.py @@ -0,0 +1,2736 @@ +import numpy as np + + +class Color: + def __init__(self, pr, pg, pb): + self.r = pr + self.g = pg + self.b = pb + + +NB_ALLUMINIUM_TRIANGLES = 54 +NB_DARK_TRIANGLES = 54 +ALLUMINIUM_COLOR = Color(0.79, 0.82, 0.93) +DARK_COLOR = Color(0.07, 0.07, 0.07) + +vertices = np.array([ + -0.068456, -0.016299, 0.016299 + , -0.068456, 0.016299, 0.016299 + , -0.068456, 0.016299, -0.016299 + , -0.068456, -0.016299, -0.016299 + , -0.076606, 0.014115, 0.016299 + , -0.082572, 0.008150, 0.016299 + , -0.084755, -0.000000, 0.016299 + , -0.082572, -0.008150, 0.016299 + , -0.076606, -0.014115, 0.016299 + , -0.076606, -0.014115, -0.016299 + , -0.082572, -0.008150, -0.016299 + , -0.084755, -0.000000, -0.016299 + , -0.082572, 0.008150, -0.016299 + , -0.076606, 0.014115, -0.016299 + , -0.053494, -0.009779, -0.016299 + , -0.048604, -0.008469, -0.016299 + , -0.045024, -0.004890, -0.016299 + , -0.043714, 0.000000, -0.016299 + , -0.045024, 0.004890, -0.016299 + , -0.048604, 0.008469, -0.016299 + , -0.053494, 0.009779, -0.016299 + , -0.058383, 0.008469, -0.016299 + , -0.061963, 0.004890, -0.016299 + , -0.063273, 0.000000, -0.016299 + , -0.061963, -0.004890, -0.016299 + , -0.058383, -0.008469, -0.016299 + , 0.000000, -0.016299, -0.016299 + , 0.068456, -0.016299, 0.016299 + , 0.000000, 0.016299, -0.016299 + , 0.068456, 0.016299, 0.016299 + , 0.068456, 0.016299, -0.016299 + , 0.068456, -0.016299, -0.016299 + , 0.076606, 0.014115, 0.016299 + , 0.082572, 0.008150, 0.016299 + , 0.084755, -0.000000, 0.016299 + , 0.082572, -0.008150, 0.016299 + , 0.076606, -0.014115, 0.016299 + , 0.076606, -0.014115, -0.016299 + , 
0.082572, -0.008150, -0.016299 + , 0.084755, -0.000000, -0.016299 + , 0.082572, 0.008150, -0.016299 + , 0.076606, 0.014115, -0.016299 + , 0.053494, -0.009779, -0.016299 + , 0.048604, -0.008469, -0.016299 + , 0.045024, -0.004890, -0.016299 + , 0.043714, 0.000000, -0.016299 + , 0.045024, 0.004890, -0.016299 + , 0.048604, 0.008469, -0.016299 + , 0.053494, 0.009779, -0.016299 + , 0.058383, 0.008469, -0.016299 + , 0.061963, 0.004890, -0.016299 + , 0.063273, 0.000000, -0.016299 + , 0.061963, -0.004890, -0.016299 + , 0.058383, -0.008469, -0.016299 + , 0.053494, 0.000000, -0.016299 + , -0.053494, 0.000000, -0.016299 +]) + +alluminium_triangles = np.array([ + 1, 10, 4 + , 6, 14, 13 + , 7, 13, 12 + , 8, 12, 11 + , 9, 11, 10 + , 5, 3, 14 + , 44, 45, 55 + , 47, 48, 55 + , 43, 44, 55 + , 46, 47, 55 + , 52, 53, 55 + , 48, 49, 55 + , 54, 43, 55 + , 50, 51, 55 + , 53, 54, 55 + , 49, 50, 55 + , 45, 46, 55 + , 51, 52, 55 + , 27, 32, 28 + , 38, 28, 32 + , 42, 34, 41 + , 41, 35, 40 + , 40, 36, 39 + , 39, 37, 38 + , 31, 33, 42 + , 27, 1, 4 + , 20, 19, 56 + , 22, 21, 56 + , 23, 22, 56 + , 24, 23, 56 + , 19, 18, 56 + , 21, 20, 56 + , 17, 16, 56 + , 26, 25, 56 + , 15, 26, 56 + , 18, 17, 56 + , 16, 15, 56 + , 25, 24, 56 + , 2, 29, 3 + , 31, 29, 30 + , 1, 9, 10 + , 6, 5, 14 + , 7, 6, 13 + , 8, 7, 12 + , 9, 8, 11 + , 5, 2, 3 + , 38, 37, 28 + , 42, 33, 34 + , 41, 34, 35 + , 40, 35, 36 + , 39, 36, 37 + , 31, 30, 33 + , 27, 28, 1 + , 2, 30, 29 +]) + +dark_triangles = np.array([ + 23, 3, 22 + , 13, 10, 11 + , 4, 14, 3 + , 11, 12, 13 + , 9, 6, 8 + , 1, 5, 9 + , 8, 6, 7 + , 1, 30, 2 + , 21, 22, 3 + , 23, 24, 3 + , 24, 25, 4 + , 3, 24, 4 + , 25, 26, 4 + , 26, 15, 4 + , 16, 17, 27 + , 17, 18, 27 + , 18, 19, 29 + , 27, 18, 29 + , 19, 20, 29 + , 20, 21, 29 + , 3, 29, 21 + , 16, 27, 15 + , 27, 4, 15 + , 51, 50, 31 + , 38, 41, 39 + , 32, 42, 38 + , 39, 41, 40 + , 34, 37, 36 + , 28, 33, 30 + , 36, 35, 34 + , 49, 31, 50 + , 51, 31, 52 + , 52, 32, 53 + , 31, 32, 52 + , 53, 32, 54 + , 54, 32, 43 + , 44, 27, 
45 + , 45, 27, 46 + , 46, 29, 47 + , 27, 29, 46 + , 47, 29, 48 + , 48, 29, 49 + , 31, 49, 29 + , 44, 43, 27 + , 27, 43, 32 + , 13, 14, 10 + , 4, 10, 14 + , 9, 5, 6 + , 1, 2, 5 + , 1, 28, 30 + , 38, 42, 41 + , 32, 31, 42 + , 34, 33, 37 + , 28, 37, 33 +]) + +vertices_m = np.array([ + 0.030800, 0.013300, 0.000001 + , 0.058785, 0.013300, -0.002250 + , 0.058785, 0.013300, 0.002251 + , 0.059839, 0.013300, -0.001999 + , 0.060770, 0.013300, -0.001351 + , 0.059839, 0.013300, 0.002000 + , 0.060770, 0.013300, 0.001352 + , 0.002815, 0.013300, -0.002250 + , 0.002815, 0.013300, 0.002251 + , 0.001761, 0.013300, -0.001999 + , 0.000830, 0.013300, -0.001351 + , 0.001761, 0.013300, 0.002000 + , 0.000830, 0.013300, 0.001352 + , 0.061449, 0.013300, -0.000563 + , 0.061449, 0.013300, 0.000564 + , 0.000152, 0.013300, 0.000564 + , 0.000152, 0.013300, -0.000563 + , 0.030800, -0.013333, 0.000001 + , 0.058785, -0.013333, -0.002250 + , 0.058785, -0.013333, 0.002251 + , 0.059839, -0.013333, -0.001999 + , 0.060770, -0.013333, -0.001351 + , 0.059839, -0.013333, 0.002000 + , 0.060770, -0.013333, 0.001352 + , 0.002815, -0.013333, -0.002250 + , 0.002815, -0.013333, 0.002251 + , 0.001761, -0.013333, -0.001999 + , 0.000830, -0.013333, -0.001351 + , 0.001761, -0.013333, 0.002000 + , 0.000830, -0.013333, 0.001352 + , 0.061449, -0.013333, 0.000564 + , 0.061449, -0.013333, -0.000563 + , 0.000152, -0.013333, -0.000563 + , 0.000152, -0.013333, 0.000564 + , -0.031684, 0.009412, 0.000501 + , -0.031809, 0.008300, 0.000501 + , -0.028977, 0.012805, 0.000501 + , -0.029926, 0.012209, 0.000501 + , -0.026809, 0.013300, 0.000501 + , -0.027920, 0.013175, 0.000501 + , -0.030718, 0.011417, 0.000501 + , -0.031314, 0.010469, 0.000501 + , -0.031809, -0.008310, 0.000501 + , -0.031684, -0.009431, 0.000501 + , -0.028977, -0.012824, 0.000501 + , -0.029926, -0.012228, 0.000501 + , -0.026847, -0.013300, 0.000500 + , -0.027920, -0.013194, 0.000501 + , -0.030718, -0.011437, 0.000501 + , -0.031314, -0.010488, 0.000501 + , 
-0.031684, 0.009412, -0.000500 + , -0.031809, 0.008300, -0.000500 + , -0.028977, 0.012805, -0.000500 + , -0.029926, 0.012209, -0.000500 + , -0.026809, 0.013300, -0.000500 + , -0.027920, 0.013175, -0.000500 + , -0.030718, 0.011417, -0.000500 + , -0.031314, 0.010469, -0.000500 + , -0.031809, -0.008310, -0.000500 + , -0.031684, -0.009431, -0.000500 + , -0.029926, -0.012228, -0.000500 + , -0.028977, -0.012824, -0.000500 + , -0.027920, -0.013194, -0.000500 + , -0.026847, -0.013300, -0.000500 + , -0.031314, -0.010488, -0.000500 + , -0.030718, -0.011437, -0.000500 + , -0.031809, 0.006354, -0.000500 + , -0.031809, 0.006354, 0.000501 + , -0.031809, -0.006364, -0.000500 + , -0.031809, -0.006364, 0.000501 + , -0.031809, 0.005707, -0.000700 + , -0.031809, 0.005707, 0.000701 + , -0.031809, -0.005716, -0.000700 + , -0.031809, -0.005716, 0.000701 + , -0.031809, 0.005128, -0.001423 + , -0.031809, 0.005128, 0.001424 + , -0.031809, -0.005138, -0.001423 + , -0.031809, -0.005138, 0.001424 + , -0.031809, 0.004146, -0.002297 + , -0.031809, 0.004146, 0.002299 + , -0.031809, -0.004156, -0.002297 + , -0.031809, -0.004156, 0.002299 + , -0.031809, 0.003313, -0.002495 + , -0.031809, 0.003313, 0.002497 + , -0.031809, -0.003322, -0.002495 + , -0.031809, -0.003322, 0.002497 + , -0.031809, -0.000005, 0.000001 + , -0.026800, 0.013300, -0.000500 + , -0.026800, 0.013300, 0.000501 + , 0.088376, 0.013300, -0.000500 + , 0.088376, 0.013300, 0.000501 + , 0.093228, 0.009412, 0.000501 + , 0.093353, 0.008300, 0.000501 + , 0.090522, 0.012805, 0.000501 + , 0.091470, 0.012209, 0.000501 + , 0.089464, 0.013175, 0.000501 + , 0.092262, 0.011417, 0.000501 + , 0.092858, 0.010469, 0.000501 + , 0.093353, -0.008310, 0.000501 + , 0.093228, -0.009431, 0.000501 + , 0.090522, -0.012824, 0.000501 + , 0.091470, -0.012228, 0.000501 + , 0.088376, -0.013321, 0.000501 + , 0.089464, -0.013194, 0.000501 + , 0.092262, -0.011437, 0.000501 + , 0.092858, -0.010488, 0.000501 + , 0.093228, 0.009412, -0.000500 + , 0.093353, 0.008300, 
-0.000500 + , 0.090522, 0.012805, -0.000500 + , 0.091470, 0.012209, -0.000500 + , 0.089464, 0.013175, -0.000500 + , 0.092262, 0.011417, -0.000500 + , 0.092858, 0.010469, -0.000500 + , 0.093353, -0.008310, -0.000500 + , 0.093228, -0.009431, -0.000500 + , 0.091470, -0.012228, -0.000500 + , 0.090522, -0.012824, -0.000500 + , 0.089464, -0.013194, -0.000500 + , 0.088376, -0.013321, -0.000500 + , 0.092858, -0.010488, -0.000500 + , 0.092262, -0.011437, -0.000500 + , -0.001600, 0.000000, -0.018000 + , 0.017592, 0.000000, -0.018000 + , 0.007996, -0.009596, -0.018000 + , 0.007996, -0.009763, -0.008431 + , 0.007996, 0.009763, -0.008431 + , 0.007996, 0.009596, -0.018000 + , -0.001767, 0.000000, -0.008431 + , 0.002258, -0.007899, -0.008431 + , 0.002356, -0.007764, -0.018000 + , 0.005033, -0.009127, -0.018000 + , 0.004982, -0.009286, -0.008431 + , 0.000097, -0.005738, -0.008431 + , 0.000232, -0.005640, -0.018000 + , -0.001131, -0.002963, -0.018000 + , -0.001290, -0.003014, -0.008431 + , 0.000097, 0.005738, -0.008431 + , 0.000232, 0.005640, -0.018000 + , -0.001131, 0.002963, -0.018000 + , -0.001290, 0.003014, -0.008431 + , 0.002258, 0.007899, -0.008431 + , 0.002356, 0.007764, -0.018000 + , 0.005033, 0.009127, -0.018000 + , 0.004982, 0.009286, -0.008431 + , 0.017759, 0.000000, -0.008431 + , 0.013734, 0.007899, -0.008431 + , 0.013636, 0.007764, -0.018000 + , 0.010959, 0.009127, -0.018000 + , 0.011010, 0.009286, -0.008431 + , 0.015895, 0.005738, -0.008431 + , 0.015760, 0.005640, -0.018000 + , 0.017123, 0.002963, -0.018000 + , 0.017282, 0.003014, -0.008431 + , 0.015895, -0.005738, -0.008431 + , 0.015760, -0.005640, -0.018000 + , 0.017123, -0.002963, -0.018000 + , 0.017282, -0.003014, -0.008431 + , 0.013734, -0.007899, -0.008431 + , 0.013636, -0.007764, -0.018000 + , 0.010959, -0.009127, -0.018000 + , 0.011010, -0.009286, -0.008431 + , 0.004827, 0.009763, -0.007940 + , 0.007996, 0.010264, -0.007940 + , -0.001767, -0.003169, -0.007940 + , -0.002269, 0.000000, -0.007940 + , 0.004827, 
-0.009763, -0.007940 + , 0.001963, -0.008304, -0.007940 + , 0.007996, -0.010264, -0.007940 + , -0.000308, -0.006033, -0.007940 + , -0.001767, 0.003169, -0.007940 + , -0.000308, 0.006033, -0.007940 + , 0.001963, 0.008304, -0.007940 + , 0.011165, -0.009763, -0.007940 + , 0.017759, 0.003169, -0.007940 + , 0.018260, -0.000000, -0.007940 + , 0.011165, 0.009763, -0.007940 + , 0.014029, 0.008304, -0.007940 + , 0.016300, 0.006033, -0.007940 + , 0.017759, -0.003169, -0.007940 + , 0.016300, -0.006033, -0.007940 + , 0.014029, -0.008304, -0.007940 + , 0.002356, -0.007764, -0.019500 + , 0.005033, -0.009127, -0.019500 + , 0.007996, -0.009596, -0.019500 + , 0.000232, -0.005640, -0.019500 + , -0.001600, 0.000000, -0.019500 + , -0.001131, -0.002963, -0.019500 + , 0.000232, 0.005640, -0.019500 + , -0.001131, 0.002963, -0.019500 + , 0.002356, 0.007764, -0.019500 + , 0.007996, 0.009596, -0.019500 + , 0.005033, 0.009127, -0.019500 + , 0.013636, 0.007764, -0.019500 + , 0.010959, 0.009127, -0.019500 + , 0.015760, 0.005640, -0.019500 + , 0.017592, 0.000000, -0.019500 + , 0.017123, 0.002963, -0.019500 + , 0.015760, -0.005640, -0.019500 + , 0.017123, -0.002963, -0.019500 + , 0.013636, -0.007764, -0.019500 + , 0.010959, -0.009127, -0.019500 + , 0.002356, -0.007764, -0.022997 + , 0.005033, -0.009127, -0.022997 + , 0.007996, -0.009596, -0.022997 + , 0.000232, -0.005640, -0.022997 + , -0.001600, 0.000000, -0.022997 + , -0.001131, -0.002963, -0.022997 + , 0.000232, 0.005640, -0.022997 + , -0.001131, 0.002963, -0.022997 + , 0.002356, 0.007764, -0.022997 + , 0.007996, 0.009596, -0.022997 + , 0.005033, 0.009127, -0.022997 + , 0.013636, 0.007764, -0.022997 + , 0.010959, 0.009127, -0.022997 + , 0.015760, 0.005640, -0.022997 + , 0.017592, 0.000000, -0.022997 + , 0.017123, 0.002963, -0.022997 + , 0.015760, -0.005640, -0.022997 + , 0.017123, -0.002963, -0.022997 + , 0.013636, -0.007764, -0.022997 + , 0.010959, -0.009127, -0.022997 + , 0.002745, -0.007227, -0.022997 + , 0.005238, -0.008497, -0.022997 + , 
0.007996, -0.008933, -0.022997 + , 0.000769, -0.005250, -0.022997 + , -0.000937, 0.000000, -0.022997 + , -0.000501, -0.002758, -0.022997 + , 0.000769, 0.005250, -0.022997 + , -0.000501, 0.002758, -0.022997 + , 0.002745, 0.007227, -0.022997 + , 0.007996, 0.008933, -0.022997 + , 0.005238, 0.008497, -0.022997 + , 0.013246, 0.007227, -0.022997 + , 0.010754, 0.008497, -0.022997 + , 0.015223, 0.005250, -0.022997 + , 0.016929, 0.000000, -0.022997 + , 0.016493, 0.002758, -0.022997 + , 0.015223, -0.005250, -0.022997 + , 0.016493, -0.002758, -0.022997 + , 0.013246, -0.007227, -0.022997 + , 0.010754, -0.008497, -0.022997 + , 0.004095, -0.005369, -0.022203 + , 0.005947, -0.006313, -0.022203 + , 0.007996, -0.006637, -0.022203 + , 0.002626, -0.003901, -0.022203 + , 0.001359, 0.000000, -0.022203 + , 0.001683, -0.002049, -0.022203 + , 0.002626, 0.003901, -0.022203 + , 0.001683, 0.002049, -0.022203 + , 0.004095, 0.005369, -0.022203 + , 0.007996, 0.006637, -0.022203 + , 0.005947, 0.006313, -0.022203 + , 0.011897, 0.005369, -0.022203 + , 0.010045, 0.006313, -0.022203 + , 0.013365, 0.003901, -0.022203 + , 0.014633, 0.000000, -0.022203 + , 0.014308, 0.002049, -0.022203 + , 0.013365, -0.003901, -0.022203 + , 0.014308, -0.002049, -0.022203 + , 0.011897, -0.005369, -0.022203 + , 0.010045, -0.006313, -0.022203 + , 0.004446, -0.004886, -0.021500 + , 0.006131, -0.005744, -0.021500 + , 0.007996, -0.006039, -0.021500 + , 0.003110, -0.003549, -0.021500 + , 0.001957, 0.000000, -0.021500 + , 0.002252, -0.001865, -0.021500 + , 0.003110, 0.003549, -0.021500 + , 0.002252, 0.001865, -0.021500 + , 0.004446, 0.004886, -0.021500 + , 0.007996, 0.006039, -0.021500 + , 0.006131, 0.005744, -0.021500 + , 0.011545, 0.004886, -0.021500 + , 0.009861, 0.005744, -0.021500 + , 0.012882, 0.003549, -0.021500 + , 0.014035, 0.000000, -0.021500 + , 0.013740, 0.001865, -0.021500 + , 0.012882, -0.003549, -0.021500 + , 0.013740, -0.001865, -0.021500 + , 0.011545, -0.004886, -0.021500 + , 0.009861, -0.005744, -0.021500 + , 
0.004446, -0.004886, -0.020078 + , 0.006131, -0.005744, -0.020078 + , 0.007996, -0.006039, -0.020078 + , 0.003110, -0.003549, -0.020078 + , 0.001957, 0.000000, -0.020078 + , 0.002252, -0.001865, -0.020078 + , 0.003110, 0.003549, -0.020078 + , 0.002252, 0.001865, -0.020078 + , 0.004446, 0.004886, -0.020078 + , 0.007996, 0.006039, -0.020078 + , 0.006131, 0.005744, -0.020078 + , 0.011545, 0.004886, -0.020078 + , 0.009861, 0.005744, -0.020078 + , 0.012882, 0.003549, -0.020078 + , 0.014035, 0.000000, -0.020078 + , 0.013740, 0.001865, -0.020078 + , 0.012882, -0.003549, -0.020078 + , 0.013740, -0.001865, -0.020078 + , 0.011545, -0.004886, -0.020078 + , 0.009861, -0.005744, -0.020078 + , -0.026847, -0.013300, -0.006500 + , -0.031847, -0.008300, -0.006500 + , -0.029965, -0.012209, -0.006500 + , -0.027959, -0.013175, -0.006500 + , -0.029016, -0.012805, -0.006500 + , -0.031352, -0.010469, -0.006500 + , -0.030756, -0.011417, -0.006500 + , -0.031722, -0.009412, -0.006500 + , 0.088353, -0.013310, -0.006500 + , -0.031847, 0.008300, -0.006500 + , -0.026847, 0.013300, -0.006500 + , -0.030756, 0.011417, -0.006500 + , -0.031722, 0.009412, -0.006500 + , -0.031352, 0.010469, -0.006500 + , -0.029016, 0.012805, -0.006500 + , -0.029965, 0.012209, -0.006500 + , -0.027959, 0.013175, -0.006500 + , 0.088353, 0.013300, -0.006500 + , 0.093353, 0.008300, -0.006500 + , 0.091470, 0.012209, -0.006500 + , 0.089464, 0.013175, -0.006500 + , 0.090522, 0.012805, -0.006500 + , 0.092858, 0.010469, -0.006500 + , 0.092262, 0.011417, -0.006500 + , 0.093228, 0.009412, -0.006500 + , 0.093353, -0.008310, -0.006500 + , 0.091470, -0.012228, -0.006500 + , 0.089464, -0.013194, -0.006500 + , 0.090522, -0.012824, -0.006500 + , 0.092858, -0.010488, -0.006500 + , 0.092262, -0.011437, -0.006500 + , 0.093228, -0.009431, -0.006500 + , -0.031722, -0.009412, -0.002250 + , -0.031809, -0.004156, -0.002297 + , -0.029016, -0.012805, -0.002250 + , -0.029965, -0.012209, -0.002250 + , -0.026847, -0.013300, -0.002250 + , -0.027959, 
-0.013175, -0.002250 + , -0.030756, -0.011417, -0.002250 + , -0.031352, -0.010469, -0.002250 + , 0.088353, -0.013310, -0.002250 + , -0.031809, 0.004146, -0.002297 + , -0.027959, 0.013175, -0.002250 + , -0.026847, 0.013300, -0.002250 + , -0.031352, 0.010469, -0.002250 + , -0.030756, 0.011417, -0.002250 + , -0.031722, 0.009412, -0.002250 + , -0.029965, 0.012209, -0.002250 + , -0.029016, 0.012805, -0.002250 + , 0.088353, 0.013300, -0.002250 + , 0.093228, 0.009412, -0.002250 + , 0.093353, 0.008300, -0.002250 + , 0.090522, 0.012805, -0.002250 + , 0.091470, 0.012209, -0.002250 + , 0.089464, 0.013175, -0.002250 + , 0.092262, 0.011417, -0.002250 + , 0.092858, 0.010469, -0.002250 + , 0.093353, -0.008310, -0.002250 + , 0.093228, -0.009431, -0.002250 + , 0.090522, -0.012824, -0.002250 + , 0.091470, -0.012228, -0.002250 + , 0.089464, -0.013194, -0.002250 + , 0.092262, -0.011437, -0.002250 + , 0.092858, -0.010488, -0.002250 + , 0.002815, -0.013333, -0.002250 + , 0.001761, 0.013300, -0.001999 + , 0.058785, -0.013333, -0.002250 + , 0.059839, 0.013300, -0.001999 + , -0.031722, -0.009412, -0.000500 + , -0.031847, -0.006340, -0.000500 + , -0.029016, -0.012805, -0.000500 + , -0.029965, -0.012209, -0.000500 + , -0.026847, -0.013300, -0.000500 + , -0.027959, -0.013175, -0.000500 + , -0.030756, -0.011417, -0.000500 + , -0.031352, -0.010469, -0.000500 + , 0.000152, -0.013333, -0.000563 + , -0.031847, 0.006354, -0.000500 + , -0.027959, 0.013175, -0.000500 + , -0.026847, 0.013300, -0.000500 + , -0.031352, 0.010469, -0.000500 + , -0.030756, 0.011417, -0.000500 + , -0.031722, 0.009412, -0.000500 + , -0.029965, 0.012209, -0.000500 + , -0.029016, 0.012805, -0.000500 + , 0.088353, 0.013300, -0.000500 + , 0.061448, 0.013300, -0.000563 + , 0.093228, 0.009412, -0.000500 + , 0.093353, 0.008300, -0.000500 + , 0.090522, 0.012805, -0.000500 + , 0.091470, 0.012209, -0.000500 + , 0.089464, 0.013175, -0.000500 + , 0.092262, 0.011417, -0.000500 + , 0.092858, 0.010469, -0.000500 + , 0.093353, -0.008310, 
-0.000500 + , 0.093228, -0.009431, -0.000500 + , 0.090522, -0.012824, -0.000500 + , 0.091470, -0.012228, -0.000500 + , 0.088353, -0.013310, -0.000500 + , 0.089464, -0.013194, -0.000500 + , 0.092262, -0.011437, -0.000500 + , 0.092858, -0.010488, -0.000500 + , 0.000151, 0.013300, -0.000563 + , 0.061448, -0.013333, -0.000563 + , 0.058800, 0.013300, -0.002250 + , 0.002815, 0.013300, -0.002250 + , 0.000830, 0.013300, -0.001351 + , 0.060770, 0.013300, -0.001351 + , 0.060770, -0.013333, -0.001351 + , 0.059839, -0.013333, -0.001999 + , 0.000830, -0.013333, -0.001351 + , 0.001761, -0.013333, -0.001999 + , -0.026844, -0.011518, -0.007940 + , -0.026847, -0.011634, -0.007898 + , -0.027589, -0.011551, -0.007898 + , -0.027563, -0.011437, -0.007940 + , -0.028294, -0.011304, -0.007898 + , -0.028243, -0.011199, -0.007940 + , -0.028926, -0.010907, -0.007898 + , -0.028854, -0.010816, -0.007940 + , -0.029454, -0.010379, -0.007898 + , -0.029363, -0.010306, -0.007940 + , -0.029852, -0.009746, -0.007898 + , -0.029747, -0.009696, -0.007940 + , -0.030098, -0.009041, -0.007898 + , -0.029985, -0.009016, -0.007940 + , -0.030181, -0.008300, -0.007898 + , -0.030066, -0.008297, -0.007940 + , -0.030181, 0.008300, -0.007898 + , -0.030065, 0.008296, -0.007940 + , -0.030098, 0.009041, -0.007898 + , -0.029985, 0.009015, -0.007940 + , -0.029852, 0.009746, -0.007898 + , -0.029747, 0.009696, -0.007940 + , -0.029454, 0.010379, -0.007898 + , -0.029363, 0.010306, -0.007940 + , -0.028926, 0.010907, -0.007898 + , -0.028854, 0.010816, -0.007940 + , -0.028294, 0.011304, -0.007898 + , -0.028243, 0.011199, -0.007940 + , -0.027589, 0.011551, -0.007898 + , -0.027563, 0.011437, -0.007940 + , -0.026847, 0.011634, -0.007898 + , -0.026844, 0.011518, -0.007940 + , 0.088349, 0.011518, -0.007940 + , 0.088353, 0.011634, -0.007898 + , 0.089094, 0.011551, -0.007898 + , 0.089068, 0.011437, -0.007940 + , 0.089799, 0.011304, -0.007898 + , 0.089749, 0.011199, -0.007940 + , 0.090432, 0.010907, -0.007898 + , 0.090359, 0.010816, 
-0.007940 + , 0.090960, 0.010379, -0.007898 + , 0.090869, 0.010306, -0.007940 + , 0.091357, 0.009746, -0.007898 + , 0.091252, 0.009696, -0.007940 + , 0.091604, 0.009041, -0.007898 + , 0.091490, 0.009016, -0.007940 + , 0.091637, 0.008187, -0.007940 + , 0.091724, 0.008300, -0.007867 + , 0.088353, -0.011644, -0.007898 + , 0.088349, -0.011528, -0.007940 + , 0.089094, -0.011570, -0.007898 + , 0.089069, -0.011456, -0.007940 + , 0.089799, -0.011323, -0.007898 + , 0.089749, -0.011219, -0.007940 + , 0.090432, -0.010926, -0.007898 + , 0.090359, -0.010835, -0.007940 + , 0.090960, -0.010398, -0.007898 + , 0.090869, -0.010325, -0.007940 + , 0.091357, -0.009766, -0.007898 + , 0.091252, -0.009715, -0.007940 + , 0.091604, -0.009061, -0.007898 + , 0.091490, -0.009035, -0.007940 + , 0.091724, -0.008310, -0.007867 + , 0.091637, -0.008196, -0.007940 + , -0.031809, -0.003322, -0.002495 + , -0.031809, 0.003313, -0.002495 + , -0.031809, 0.005707, -0.000700 + , -0.031809, 0.005128, -0.001423 + , -0.031809, -0.005716, -0.000700 + , -0.031809, -0.005138, -0.001423 + , 0.061397, 0.000000, -0.018000 + , 0.080589, 0.000000, -0.018000 + , 0.070993, -0.009596, -0.018000 + , 0.070993, -0.009763, -0.008431 + , 0.070993, 0.009763, -0.008431 + , 0.070993, 0.009596, -0.018000 + , 0.061230, 0.000000, -0.008431 + , 0.065255, -0.007899, -0.008431 + , 0.065353, -0.007764, -0.018000 + , 0.068030, -0.009127, -0.018000 + , 0.067979, -0.009286, -0.008431 + , 0.063094, -0.005738, -0.008431 + , 0.063229, -0.005640, -0.018000 + , 0.061866, -0.002963, -0.018000 + , 0.061707, -0.003014, -0.008431 + , 0.063094, 0.005738, -0.008431 + , 0.063229, 0.005640, -0.018000 + , 0.061866, 0.002963, -0.018000 + , 0.061707, 0.003014, -0.008431 + , 0.065255, 0.007899, -0.008431 + , 0.065353, 0.007764, -0.018000 + , 0.068030, 0.009127, -0.018000 + , 0.067979, 0.009286, -0.008431 + , 0.080756, 0.000000, -0.008431 + , 0.076731, 0.007899, -0.008431 + , 0.076633, 0.007764, -0.018000 + , 0.073956, 0.009127, -0.018000 + , 0.074007, 
0.009286, -0.008431 + , 0.078892, 0.005738, -0.008431 + , 0.078757, 0.005640, -0.018000 + , 0.080120, 0.002963, -0.018000 + , 0.080279, 0.003014, -0.008431 + , 0.078892, -0.005738, -0.008431 + , 0.078757, -0.005640, -0.018000 + , 0.080120, -0.002963, -0.018000 + , 0.080279, -0.003014, -0.008431 + , 0.076731, -0.007899, -0.008431 + , 0.076633, -0.007764, -0.018000 + , 0.073956, -0.009127, -0.018000 + , 0.074007, -0.009286, -0.008431 + , 0.067824, 0.009763, -0.007940 + , 0.070993, 0.010264, -0.007940 + , 0.061230, -0.003169, -0.007940 + , 0.060728, 0.000000, -0.007940 + , 0.067824, -0.009763, -0.007940 + , 0.064960, -0.008304, -0.007940 + , 0.070993, -0.010264, -0.007940 + , 0.062688, -0.006033, -0.007940 + , 0.061230, 0.003169, -0.007940 + , 0.062688, 0.006033, -0.007940 + , 0.064960, 0.008304, -0.007940 + , 0.074162, -0.009763, -0.007940 + , 0.080756, 0.003169, -0.007940 + , 0.081257, -0.000000, -0.007940 + , 0.074162, 0.009763, -0.007940 + , 0.077026, 0.008304, -0.007940 + , 0.079297, 0.006033, -0.007940 + , 0.080756, -0.003169, -0.007940 + , 0.079297, -0.006033, -0.007940 + , 0.077026, -0.008304, -0.007940 + , 0.065353, -0.007764, -0.019500 + , 0.068030, -0.009127, -0.019500 + , 0.070993, -0.009596, -0.019500 + , 0.063229, -0.005640, -0.019500 + , 0.061397, 0.000000, -0.019500 + , 0.061866, -0.002963, -0.019500 + , 0.063229, 0.005640, -0.019500 + , 0.061866, 0.002963, -0.019500 + , 0.065353, 0.007764, -0.019500 + , 0.070993, 0.009596, -0.019500 + , 0.068030, 0.009127, -0.019500 + , 0.076633, 0.007764, -0.019500 + , 0.073956, 0.009127, -0.019500 + , 0.078757, 0.005640, -0.019500 + , 0.080589, 0.000000, -0.019500 + , 0.080120, 0.002963, -0.019500 + , 0.078757, -0.005640, -0.019500 + , 0.080120, -0.002963, -0.019500 + , 0.076633, -0.007764, -0.019500 + , 0.073956, -0.009127, -0.019500 + , 0.065353, -0.007764, -0.022997 + , 0.068030, -0.009127, -0.022997 + , 0.070993, -0.009596, -0.022997 + , 0.063229, -0.005640, -0.022997 + , 0.061397, 0.000000, -0.022997 + , 
0.061866, -0.002963, -0.022997 + , 0.063229, 0.005640, -0.022997 + , 0.061866, 0.002963, -0.022997 + , 0.065353, 0.007764, -0.022997 + , 0.070993, 0.009596, -0.022997 + , 0.068030, 0.009127, -0.022997 + , 0.076633, 0.007764, -0.022997 + , 0.073956, 0.009127, -0.022997 + , 0.078757, 0.005640, -0.022997 + , 0.080589, 0.000000, -0.022997 + , 0.080120, 0.002963, -0.022997 + , 0.078757, -0.005640, -0.022997 + , 0.080120, -0.002963, -0.022997 + , 0.076633, -0.007764, -0.022997 + , 0.073956, -0.009127, -0.022997 + , 0.065742, -0.007227, -0.022997 + , 0.068235, -0.008497, -0.022997 + , 0.070993, -0.008933, -0.022997 + , 0.063766, -0.005250, -0.022997 + , 0.062060, 0.000000, -0.022997 + , 0.062496, -0.002758, -0.022997 + , 0.063766, 0.005250, -0.022997 + , 0.062496, 0.002758, -0.022997 + , 0.065742, 0.007227, -0.022997 + , 0.070993, 0.008933, -0.022997 + , 0.068235, 0.008497, -0.022997 + , 0.076243, 0.007227, -0.022997 + , 0.073751, 0.008497, -0.022997 + , 0.078220, 0.005250, -0.022997 + , 0.079926, 0.000000, -0.022997 + , 0.079490, 0.002758, -0.022997 + , 0.078220, -0.005250, -0.022997 + , 0.079490, -0.002758, -0.022997 + , 0.076243, -0.007227, -0.022997 + , 0.073751, -0.008497, -0.022997 + , 0.067092, -0.005369, -0.022203 + , 0.068944, -0.006313, -0.022203 + , 0.070993, -0.006637, -0.022203 + , 0.065623, -0.003901, -0.022203 + , 0.064356, 0.000000, -0.022203 + , 0.064680, -0.002049, -0.022203 + , 0.065623, 0.003901, -0.022203 + , 0.064680, 0.002049, -0.022203 + , 0.067092, 0.005369, -0.022203 + , 0.070993, 0.006637, -0.022203 + , 0.068944, 0.006313, -0.022203 + , 0.074894, 0.005369, -0.022203 + , 0.073042, 0.006313, -0.022203 + , 0.076362, 0.003901, -0.022203 + , 0.077630, 0.000000, -0.022203 + , 0.077305, 0.002049, -0.022203 + , 0.076362, -0.003901, -0.022203 + , 0.077305, -0.002049, -0.022203 + , 0.074894, -0.005369, -0.022203 + , 0.073042, -0.006313, -0.022203 + , 0.067443, -0.004886, -0.021500 + , 0.069128, -0.005744, -0.021500 + , 0.070993, -0.006039, -0.021500 + , 
0.066107, -0.003549, -0.021500 + , 0.064954, 0.000000, -0.021500 + , 0.065249, -0.001865, -0.021500 + , 0.066107, 0.003549, -0.021500 + , 0.065249, 0.001865, -0.021500 + , 0.067443, 0.004886, -0.021500 + , 0.070993, 0.006039, -0.021500 + , 0.069128, 0.005744, -0.021500 + , 0.074542, 0.004886, -0.021500 + , 0.072858, 0.005744, -0.021500 + , 0.075879, 0.003549, -0.021500 + , 0.077032, 0.000000, -0.021500 + , 0.076737, 0.001865, -0.021500 + , 0.075879, -0.003549, -0.021500 + , 0.076737, -0.001865, -0.021500 + , 0.074542, -0.004886, -0.021500 + , 0.072858, -0.005744, -0.021500 + , 0.067443, -0.004886, -0.020078 + , 0.069128, -0.005744, -0.020078 + , 0.070993, -0.006039, -0.020078 + , 0.066107, -0.003549, -0.020078 + , 0.064954, 0.000000, -0.020078 + , 0.065249, -0.001865, -0.020078 + , 0.066107, 0.003549, -0.020078 + , 0.065249, 0.001865, -0.020078 + , 0.067443, 0.004886, -0.020078 + , 0.070993, 0.006039, -0.020078 + , 0.069128, 0.005744, -0.020078 + , 0.074542, 0.004886, -0.020078 + , 0.072858, 0.005744, -0.020078 + , 0.075879, 0.003549, -0.020078 + , 0.077032, 0.000000, -0.020078 + , 0.076737, 0.001865, -0.020078 + , 0.075879, -0.003549, -0.020078 + , 0.076737, -0.001865, -0.020078 + , 0.074542, -0.004886, -0.020078 + , 0.072858, -0.005744, -0.020078 + , -0.026847, -0.013300, 0.006300 + , 0.088353, -0.013310, 0.006300 + , 0.002815, -0.013333, 0.002250 + , -0.026847, 0.013300, 0.006300 + , 0.002815, 0.013300, 0.002250 + , 0.058800, 0.013300, 0.002250 + , 0.088353, 0.013300, 0.006300 + , -0.026847, -0.013300, 0.002250 + , -0.026847, -0.013300, 0.000500 + , -0.027959, -0.013175, 0.006300 + , -0.029016, -0.012805, 0.006300 + , -0.026847, 0.013300, 0.000500 + , 0.000151, 0.013300, 0.000563 + , 0.000830, 0.013300, 0.001351 + , -0.029965, -0.012209, 0.006300 + , 0.001761, 0.013300, 0.001999 + , -0.030756, -0.011417, 0.006300 + , -0.031352, -0.010469, 0.006300 + , -0.031722, -0.009412, 0.006300 + , -0.031847, -0.008300, 0.006300 + , -0.026847, 0.013300, 0.002250 + , 
-0.031847, 0.008300, 0.006300 + , -0.027959, 0.013175, 0.006300 + , -0.031722, 0.009412, 0.006300 + , -0.031352, 0.010469, 0.006300 + , -0.029016, 0.012805, 0.006300 + , -0.030756, 0.011417, 0.006300 + , -0.029965, 0.012209, 0.006300 + , -0.027959, -0.013175, 0.000500 + , -0.027959, -0.013175, 0.002250 + , -0.029016, -0.012805, 0.000500 + , -0.029016, -0.012805, 0.002250 + , -0.029965, -0.012209, 0.000500 + , -0.029965, -0.012209, 0.002250 + , -0.030756, -0.011417, 0.000500 + , -0.030756, -0.011417, 0.002250 + , -0.031352, -0.010469, 0.000500 + , -0.031352, -0.010469, 0.002250 + , -0.031722, -0.009412, 0.000500 + , -0.031722, -0.009412, 0.002250 + , -0.031847, -0.006364, 0.000500 + , -0.031809, -0.004156, 0.002299 + , -0.031847, 0.006354, 0.000500 + , -0.031809, 0.004146, 0.002299 + , -0.031722, 0.009412, 0.000500 + , -0.031722, 0.009412, 0.002250 + , -0.027959, 0.013175, 0.002250 + , -0.027959, 0.013175, 0.000500 + , -0.031352, 0.010469, 0.000500 + , -0.031352, 0.010469, 0.002250 + , -0.029016, 0.012805, 0.002250 + , -0.029016, 0.012805, 0.000500 + , -0.030756, 0.011417, 0.000500 + , -0.030756, 0.011417, 0.002250 + , -0.029965, 0.012209, 0.002250 + , -0.029965, 0.012209, 0.000500 + , 0.059839, 0.013300, 0.001999 + , 0.060770, 0.013300, 0.001351 + , 0.088353, 0.013300, 0.002250 + , 0.058785, -0.013333, 0.002250 + , 0.089464, 0.013175, 0.006300 + , 0.090522, 0.012805, 0.006300 + , 0.091470, 0.012209, 0.006300 + , 0.092262, 0.011417, 0.006300 + , 0.092858, 0.010469, 0.006300 + , 0.093228, 0.009412, 0.006300 + , 0.093353, 0.008300, 0.006300 + , 0.088353, -0.013310, 0.002250 + , 0.059839, -0.013333, 0.001999 + , 0.060770, -0.013333, 0.001351 + , 0.061448, 0.013300, 0.000563 + , 0.088353, 0.013300, 0.000500 + , 0.088353, -0.013310, 0.000500 + , 0.061448, -0.013333, 0.000563 + , 0.093353, -0.008310, 0.006300 + , 0.089464, -0.013194, 0.006300 + , 0.093228, -0.009431, 0.006300 + , 0.092858, -0.010488, 0.006300 + , 0.090522, -0.012824, 0.006300 + , 0.092262, -0.011437, 
0.006300 + , 0.091470, -0.012228, 0.006300 + , 0.089464, 0.013175, 0.002250 + , 0.090522, 0.012805, 0.002250 + , 0.091470, 0.012209, 0.002250 + , 0.089464, 0.013175, 0.000500 + , 0.090522, 0.012805, 0.000500 + , 0.092262, 0.011417, 0.002250 + , 0.091470, 0.012209, 0.000500 + , 0.092858, 0.010469, 0.002250 + , 0.092262, 0.011417, 0.000500 + , 0.093228, 0.009412, 0.002250 + , 0.093353, 0.008300, 0.002250 + , 0.092858, 0.010469, 0.000500 + , 0.093228, 0.009412, 0.000500 + , 0.093353, 0.008300, 0.000500 + , 0.093353, -0.008310, 0.002250 + , 0.093353, -0.008310, 0.000500 + , 0.093228, -0.009431, 0.002250 + , 0.089464, -0.013194, 0.002250 + , 0.092858, -0.010488, 0.002250 + , 0.090522, -0.012824, 0.002250 + , 0.092262, -0.011437, 0.002250 + , 0.091470, -0.012228, 0.002250 + , 0.093228, -0.009431, 0.000500 + , 0.089464, -0.013194, 0.000500 + , 0.092858, -0.010488, 0.000500 + , 0.090522, -0.012824, 0.000500 + , 0.092262, -0.011437, 0.000500 + , 0.091470, -0.012228, 0.000500 + , 0.000152, -0.013333, 0.000564 + , 0.000830, -0.013333, 0.001352 + , 0.001761, -0.013333, 0.002000 + , -0.026591, -0.013201, 0.006500 + , 0.088097, -0.013211, 0.006500 + , -0.026591, 0.013201, 0.006500 + , 0.088097, 0.013201, 0.006500 + , 0.088477, 0.011982, 0.006500 + , -0.026591, 0.011982, 0.006500 + , -0.027698, -0.013077, 0.006500 + , -0.028751, -0.012710, 0.006500 + , -0.029695, -0.012119, 0.006500 + , -0.030483, -0.011333, 0.006500 + , -0.031076, -0.010391, 0.006500 + , -0.031445, -0.009342, 0.006500 + , -0.031569, -0.008238, 0.006500 + , -0.027698, 0.013077, 0.006500 + , -0.031445, 0.009342, 0.006500 + , -0.031569, 0.008238, 0.006500 + , -0.028751, 0.012710, 0.006500 + , -0.031076, 0.010391, 0.006500 + , -0.029695, 0.012118, 0.006500 + , -0.030483, 0.011332, 0.006500 + , -0.027703, -0.011897, 0.006500 + , -0.026192, -0.011982, 0.006500 + , -0.028399, -0.011645, 0.006500 + , -0.029024, -0.011241, 0.006500 + , -0.029545, -0.010703, 0.006500 + , -0.029937, -0.010058, 0.006500 + , -0.030180, 
-0.009340, 0.006500 + , -0.030263, -0.008585, 0.006500 + , -0.030263, 0.008585, 0.006500 + , -0.030180, 0.009340, 0.006500 + , -0.027703, 0.011897, 0.006500 + , -0.029937, 0.010059, 0.006500 + , -0.028399, 0.011646, 0.006500 + , -0.029545, 0.010703, 0.006500 + , -0.029024, 0.011241, 0.006500 + , 0.088477, -0.011992, 0.006500 + , 0.089204, 0.013077, 0.006500 + , 0.090256, 0.012710, 0.006500 + , 0.091200, 0.012118, 0.006500 + , 0.091989, 0.011332, 0.006500 + , 0.092582, 0.010391, 0.006500 + , 0.092950, 0.009342, 0.006500 + , 0.093075, 0.008238, 0.006500 + , 0.089204, -0.013096, 0.006500 + , 0.092950, -0.009361, 0.006500 + , 0.093075, -0.008248, 0.006500 + , 0.090256, -0.012729, 0.006500 + , 0.092582, -0.010410, 0.006500 + , 0.091200, -0.012138, 0.006500 + , 0.091989, -0.011352, 0.006500 + , 0.089209, 0.011897, 0.006500 + , 0.089905, 0.011646, 0.006500 + , 0.090529, 0.011241, 0.006500 + , 0.091050, 0.010703, 0.006500 + , 0.091443, 0.010059, 0.006500 + , 0.091686, 0.009340, 0.006500 + , 0.091768, 0.008585, 0.006500 + , 0.091768, -0.008595, 0.006500 + , 0.091686, -0.009360, 0.006500 + , 0.089209, -0.011916, 0.006500 + , 0.091443, -0.010078, 0.006500 + , 0.089905, -0.011665, 0.006500 + , 0.091050, -0.010723, 0.006500 + , 0.090529, -0.011260, 0.006500 + , -0.031809, -0.003322, 0.002497 + , -0.031809, 0.003313, 0.002497 + , -0.031809, 0.005707, 0.000701 + , -0.031809, 0.005128, 0.001424 + , -0.031809, -0.005138, 0.001424 + , -0.031809, -0.005716, 0.000701 + , 0.070993, -0.000000, -0.021500 + , 0.076551, 0.004038, -0.021500 + , 0.075030, 0.005558, -0.021500 + , 0.073114, 0.006534, -0.021500 + , 0.077863, 0.000000, -0.021500 + , 0.077527, 0.002121, -0.021500 + , 0.065435, -0.004038, -0.021500 + , 0.066955, -0.005558, -0.021500 + , 0.068872, -0.006534, -0.021500 + , 0.076551, -0.004038, -0.021500 + , 0.077527, -0.002121, -0.021500 + , 0.065435, 0.004038, -0.021500 + , 0.064459, 0.002121, -0.021500 + , 0.064123, 0.000000, -0.021500 + , 0.068872, 0.006534, -0.021500 + , 
0.066955, 0.005558, -0.021500 + , 0.070993, 0.006870, -0.021500 + , 0.073114, -0.006534, -0.021500 + , 0.075030, -0.005558, -0.021500 + , 0.070993, -0.006870, -0.021500 + , 0.064459, -0.002121, -0.021500 + , 0.065353, -0.007764, -0.018000 + , 0.068030, -0.009127, -0.018000 + , 0.068030, -0.009127, -0.019500 + , 0.065353, -0.007764, -0.019500 + , 0.070993, -0.009596, -0.018000 + , 0.070993, -0.009596, -0.019500 + , 0.063229, -0.005640, -0.018000 + , 0.063229, -0.005640, -0.019500 + , 0.061397, 0.000000, -0.018000 + , 0.061866, -0.002963, -0.018000 + , 0.061866, -0.002963, -0.019500 + , 0.061397, 0.000000, -0.019500 + , 0.063229, 0.005640, -0.018000 + , 0.061866, 0.002963, -0.018000 + , 0.061866, 0.002963, -0.019500 + , 0.063229, 0.005640, -0.019500 + , 0.065353, 0.007764, -0.018000 + , 0.065353, 0.007764, -0.019500 + , 0.070993, 0.009596, -0.018000 + , 0.068030, 0.009127, -0.018000 + , 0.068030, 0.009127, -0.019500 + , 0.070993, 0.009596, -0.019500 + , 0.076633, 0.007764, -0.018000 + , 0.073956, 0.009127, -0.018000 + , 0.073956, 0.009127, -0.019500 + , 0.076633, 0.007764, -0.019500 + , 0.078757, 0.005640, -0.018000 + , 0.078757, 0.005640, -0.019500 + , 0.080589, 0.000000, -0.018000 + , 0.080120, 0.002963, -0.018000 + , 0.080120, 0.002963, -0.019500 + , 0.080589, 0.000000, -0.019500 + , 0.078757, -0.005640, -0.018000 + , 0.080120, -0.002963, -0.018000 + , 0.080120, -0.002963, -0.019500 + , 0.078757, -0.005640, -0.019500 + , 0.076633, -0.007764, -0.018000 + , 0.076633, -0.007764, -0.019500 + , 0.073956, -0.009127, -0.018000 + , 0.073956, -0.009127, -0.019500 + , 0.002356, -0.007764, -0.018000 + , 0.005033, -0.009127, -0.018000 + , 0.005033, -0.009127, -0.019500 + , 0.002356, -0.007764, -0.019500 + , 0.007996, -0.009596, -0.018000 + , 0.007996, -0.009596, -0.019500 + , 0.000232, -0.005640, -0.018000 + , 0.000232, -0.005640, -0.019500 + , -0.001600, 0.000000, -0.018000 + , -0.001131, -0.002963, -0.018000 + , -0.001131, -0.002963, -0.019500 + , -0.001600, 0.000000, 
-0.019500 + , 0.000232, 0.005640, -0.018000 + , -0.001131, 0.002963, -0.018000 + , -0.001131, 0.002963, -0.019500 + , 0.000232, 0.005640, -0.019500 + , 0.002356, 0.007764, -0.018000 + , 0.002356, 0.007764, -0.019500 + , 0.007996, 0.009596, -0.018000 + , 0.005033, 0.009127, -0.018000 + , 0.005033, 0.009127, -0.019500 + , 0.007996, 0.009596, -0.019500 + , 0.013636, 0.007764, -0.018000 + , 0.010959, 0.009127, -0.018000 + , 0.010959, 0.009127, -0.019500 + , 0.013636, 0.007764, -0.019500 + , 0.015760, 0.005640, -0.018000 + , 0.015760, 0.005640, -0.019500 + , 0.017592, 0.000000, -0.018000 + , 0.017123, 0.002963, -0.018000 + , 0.017123, 0.002963, -0.019500 + , 0.017592, 0.000000, -0.019500 + , 0.015760, -0.005640, -0.018000 + , 0.017123, -0.002963, -0.018000 + , 0.017123, -0.002963, -0.019500 + , 0.015760, -0.005640, -0.019500 + , 0.013636, -0.007764, -0.018000 + , 0.013636, -0.007764, -0.019500 + , 0.010959, -0.009127, -0.018000 + , 0.010959, -0.009127, -0.019500 + , 0.007996, -0.000000, -0.021500 + , 0.013554, 0.004038, -0.021500 + , 0.012033, 0.005558, -0.021500 + , 0.010117, 0.006534, -0.021500 + , 0.014866, 0.000000, -0.021500 + , 0.014530, 0.002121, -0.021500 + , 0.002438, -0.004038, -0.021500 + , 0.003958, -0.005558, -0.021500 + , 0.005875, -0.006534, -0.021500 + , 0.013554, -0.004038, -0.021500 + , 0.014530, -0.002121, -0.021500 + , 0.002438, 0.004038, -0.021500 + , 0.001462, 0.002121, -0.021500 + , 0.001126, 0.000000, -0.021500 + , 0.005875, 0.006534, -0.021500 + , 0.003958, 0.005558, -0.021500 + , 0.007996, 0.006870, -0.021500 + , 0.010117, -0.006534, -0.021500 + , 0.012033, -0.005558, -0.021500 + , 0.007996, -0.006870, -0.021500 + , 0.001462, -0.002121, -0.021500 + , 0.090529, -0.008928, 0.006500 + , 0.089905, -0.009249, 0.006500 + , 0.089209, -0.009448, 0.006500 + , 0.088477, -0.009506, 0.006500 + , -0.026234, -0.009497, 0.006500 + , -0.027703, -0.009430, 0.006500 + , -0.028399, -0.009231, 0.006500 + , -0.029024, -0.008910, 0.006500 + , 0.090529, -0.009792, 
0.006500 + , 0.089905, -0.010144, 0.006500 + , 0.089209, -0.010362, 0.006500 + , 0.088477, -0.010427, 0.006500 + , -0.026219, -0.010417, 0.006500 + , -0.027703, -0.010344, 0.006500 + , -0.028399, -0.010125, 0.006500 + , -0.029024, -0.009773, 0.006500 + , 0.090529, 0.008867, 0.006500 + , 0.089905, 0.009187, 0.006500 + , 0.089209, 0.009385, 0.006500 + , 0.088477, 0.009453, 0.006500 + , -0.026549, 0.009454, 0.006500 + , -0.027703, 0.009387, 0.006500 + , -0.028399, 0.009189, 0.006500 + , -0.029024, 0.008869, 0.006500 + , 0.090529, 0.009705, 0.006500 + , 0.089905, 0.010055, 0.006500 + , 0.089209, 0.010272, 0.006500 + , 0.088477, 0.010346, 0.006500 + , -0.026564, 0.010347, 0.006500 + , -0.027703, 0.010273, 0.006500 + , -0.028399, 0.010056, 0.006500 + , -0.029024, 0.009707, 0.006500 + , 0.090529, -0.010555, 0.006500 + , 0.089905, -0.010935, 0.006500 + , 0.089209, -0.011170, 0.006500 + , 0.088477, -0.011240, 0.006500 + , -0.026205, -0.011231, 0.006500 + , -0.027703, -0.011151, 0.006500 + , -0.028399, -0.010915, 0.006500 + , -0.029024, -0.010536, 0.006500 + , 0.090529, 0.010411, 0.006500 + , 0.089905, 0.010786, 0.006500 + , 0.089209, 0.011019, 0.006500 + , 0.088477, 0.011098, 0.006500 + , -0.026577, 0.011099, 0.006500 + , -0.027703, 0.011020, 0.006500 + , -0.028399, 0.010787, 0.006500 + , -0.029024, 0.010412, 0.006500 +]) + +NB_AL_ZEDM_TRI = 125 +al_triangles_m = np.array([ + 2, 1, 8 + , 1, 2, 4 + , 1, 4, 5 + , 1, 5, 14 + , 1, 14, 15 + , 1, 6, 3 + , 1, 7, 6 + , 1, 15, 7 + , 1, 10, 8 + , 1, 11, 10 + , 1, 17, 11 + , 1, 16, 17 + , 1, 9, 12 + , 1, 12, 13 + , 1, 13, 16 + , 3, 9, 1 + , 3, 9, 1 + , 18, 21, 19 + , 18, 22, 21 + , 18, 32, 22 + , 18, 31, 32 + , 18, 31, 32 + , 18, 20, 23 + , 18, 23, 24 + , 18, 24, 31 + , 19, 25, 18 + , 20, 18, 26 + , 18, 25, 27 + , 18, 27, 28 + , 18, 28, 33 + , 18, 33, 34 + , 18, 33, 34 + , 18, 29, 26 + , 18, 30, 29 + , 18, 34, 30 + , 51, 35, 52 + , 52, 35, 36 + , 53, 37, 54 + , 54, 37, 38 + , 55, 39, 56 + , 56, 39, 40 + , 56, 40, 53 + , 53, 40, 37 + , 
57, 41, 58 + , 58, 41, 42 + , 54, 38, 57 + , 57, 38, 41 + , 58, 42, 51 + , 51, 42, 35 + , 59, 43, 60 + , 60, 43, 44 + , 61, 46, 62 + , 62, 46, 45 + , 48, 47, 63 + , 63, 47, 64 + , 62, 45, 63 + , 63, 45, 48 + , 65, 50, 66 + , 66, 50, 49 + , 66, 49, 61 + , 61, 49, 46 + , 60, 44, 65 + , 65, 44, 50 + , 52, 36, 67 + , 67, 36, 68 + , 43, 59, 70 + , 70, 59, 69 + , 67, 68, 71 + , 71, 68, 72 + , 70, 69, 74 + , 74, 69, 73 + , 71, 72, 87 + , 74, 73, 87 + , 71, 87, 75 + , 87, 72, 76 + , 74, 87, 78 + , 87, 73, 77 + , 75, 87, 79 + , 87, 76, 80 + , 87, 77, 81 + , 78, 87, 82 + , 79, 87, 83 + , 87, 80, 84 + , 87, 81, 85 + , 82, 87, 86 + , 83, 87, 85 + , 87, 84, 86 + , 88, 17, 89 + , 89, 17, 16 + , 90, 91, 14 + , 14, 91, 15 + , 107, 108, 92 + , 92, 108, 93 + , 109, 110, 94 + , 94, 110, 95 + , 90, 111, 91 + , 91, 111, 96 + , 111, 109, 96 + , 96, 109, 94 + , 112, 113, 97 + , 97, 113, 98 + , 110, 112, 95 + , 95, 112, 97 + , 113, 107, 98 + , 98, 107, 92 + , 108, 114, 93 + , 93, 114, 99 + , 114, 115, 99 + , 99, 115, 100 + , 116, 117, 102 + , 102, 117, 101 + , 118, 119, 104 + , 104, 119, 103 + , 117, 118, 101 + , 101, 118, 104 + , 120, 121, 106 + , 106, 121, 105 + , 121, 116, 105 + , 105, 116, 102 + , 115, 120, 100 + , 100, 120, 106 + , 64, 47, 33 + , 33, 47, 34 + , 119, 32, 103 + , 103, 32, 31 +]) + +NB_DARK_ZEDM_TRI = 1268 +dark_triangles_m = np.array([ + 126, 144, 127 + , 127, 144, 143 + , 128, 136, 122 + , 122, 136, 135 + , 129, 132, 130 + , 130, 132, 131 + , 132, 125, 131 + , 131, 125, 124 + , 133, 129, 134 + , 134, 129, 130 + , 136, 133, 135 + , 135, 133, 134 + , 137, 140, 138 + , 138, 140, 139 + , 140, 128, 139 + , 139, 128, 122 + , 141, 137, 142 + , 142, 137, 138 + , 144, 141, 143 + , 143, 141, 142 + , 124, 125, 160 + , 160, 125, 161 + , 145, 153, 123 + , 123, 153, 152 + , 146, 149, 147 + , 147, 149, 148 + , 149, 126, 148 + , 148, 126, 127 + , 150, 146, 151 + , 151, 146, 147 + , 153, 150, 152 + , 152, 150, 151 + , 154, 157, 155 + , 155, 157, 156 + , 157, 145, 156 + , 156, 145, 123 
+ , 158, 154, 159 + , 159, 154, 155 + , 161, 158, 160 + , 160, 158, 159 + , 126, 163, 144 + , 144, 163, 162 + , 128, 165, 136 + , 136, 165, 164 + , 129, 167, 132 + , 132, 167, 166 + , 132, 166, 125 + , 125, 166, 168 + , 133, 169, 129 + , 129, 169, 167 + , 136, 164, 133 + , 133, 164, 169 + , 137, 171, 140 + , 140, 171, 170 + , 140, 170, 128 + , 128, 170, 165 + , 141, 172, 137 + , 137, 172, 171 + , 144, 162, 141 + , 141, 162, 172 + , 125, 168, 161 + , 161, 168, 173 + , 145, 175, 153 + , 153, 175, 174 + , 146, 177, 149 + , 149, 177, 176 + , 149, 176, 126 + , 126, 176, 163 + , 150, 178, 146 + , 146, 178, 177 + , 153, 174, 150 + , 150, 174, 178 + , 154, 180, 157 + , 157, 180, 179 + , 157, 179, 145 + , 145, 179, 175 + , 158, 181, 154 + , 154, 181, 180 + , 161, 173, 158 + , 158, 173, 181 + , 182, 183, 202 + , 202, 183, 203 + , 183, 184, 203 + , 203, 184, 204 + , 185, 182, 205 + , 205, 182, 202 + , 186, 187, 206 + , 206, 187, 207 + , 187, 185, 207 + , 207, 185, 205 + , 188, 189, 208 + , 208, 189, 209 + , 189, 186, 209 + , 209, 186, 206 + , 190, 188, 210 + , 210, 188, 208 + , 191, 192, 211 + , 211, 192, 212 + , 192, 190, 212 + , 212, 190, 210 + , 193, 194, 213 + , 213, 194, 214 + , 194, 191, 214 + , 214, 191, 211 + , 195, 193, 215 + , 215, 193, 213 + , 196, 197, 216 + , 216, 197, 217 + , 197, 195, 217 + , 217, 195, 215 + , 198, 199, 218 + , 218, 199, 219 + , 199, 196, 219 + , 219, 196, 216 + , 200, 198, 220 + , 220, 198, 218 + , 184, 201, 204 + , 204, 201, 221 + , 201, 200, 221 + , 221, 200, 220 + , 202, 203, 222 + , 222, 203, 223 + , 203, 204, 223 + , 223, 204, 224 + , 205, 202, 225 + , 225, 202, 222 + , 206, 207, 226 + , 226, 207, 227 + , 207, 205, 227 + , 227, 205, 225 + , 208, 209, 228 + , 228, 209, 229 + , 209, 206, 229 + , 229, 206, 226 + , 210, 208, 230 + , 230, 208, 228 + , 211, 212, 231 + , 231, 212, 232 + , 212, 210, 232 + , 232, 210, 230 + , 213, 214, 233 + , 233, 214, 234 + , 214, 211, 234 + , 234, 211, 231 + , 215, 213, 235 + , 235, 213, 233 + , 216, 217, 236 + 
, 236, 217, 237 + , 217, 215, 237 + , 237, 215, 235 + , 218, 219, 238 + , 238, 219, 239 + , 219, 216, 239 + , 239, 216, 236 + , 220, 218, 240 + , 240, 218, 238 + , 204, 221, 224 + , 224, 221, 241 + , 221, 220, 241 + , 241, 220, 240 + , 223, 243, 222 + , 222, 243, 242 + , 224, 244, 223 + , 223, 244, 243 + , 222, 242, 225 + , 225, 242, 245 + , 227, 247, 226 + , 226, 247, 246 + , 225, 245, 227 + , 227, 245, 247 + , 229, 249, 228 + , 228, 249, 248 + , 226, 246, 229 + , 229, 246, 249 + , 228, 248, 230 + , 230, 248, 250 + , 232, 252, 231 + , 231, 252, 251 + , 230, 250, 232 + , 232, 250, 252 + , 234, 254, 233 + , 233, 254, 253 + , 231, 251, 234 + , 234, 251, 254 + , 233, 253, 235 + , 235, 253, 255 + , 237, 257, 236 + , 236, 257, 256 + , 235, 255, 237 + , 237, 255, 257 + , 239, 259, 238 + , 238, 259, 258 + , 236, 256, 239 + , 239, 256, 259 + , 238, 258, 240 + , 240, 258, 260 + , 241, 261, 224 + , 224, 261, 244 + , 240, 260, 241 + , 241, 260, 261 + , 263, 262, 243 + , 243, 262, 242 + , 264, 263, 244 + , 244, 263, 243 + , 242, 262, 245 + , 245, 262, 265 + , 267, 266, 247 + , 247, 266, 246 + , 265, 267, 245 + , 245, 267, 247 + , 269, 268, 249 + , 249, 268, 248 + , 266, 269, 246 + , 246, 269, 249 + , 268, 270, 248 + , 248, 270, 250 + , 272, 271, 252 + , 252, 271, 251 + , 270, 272, 250 + , 250, 272, 252 + , 274, 273, 254 + , 254, 273, 253 + , 271, 274, 251 + , 251, 274, 254 + , 273, 275, 253 + , 253, 275, 255 + , 277, 276, 257 + , 257, 276, 256 + , 275, 277, 255 + , 255, 277, 257 + , 279, 278, 259 + , 259, 278, 258 + , 276, 279, 256 + , 256, 279, 259 + , 278, 280, 258 + , 258, 280, 260 + , 281, 264, 261 + , 261, 264, 244 + , 280, 281, 260 + , 260, 281, 261 + , 262, 263, 282 + , 282, 263, 283 + , 263, 264, 283 + , 283, 264, 284 + , 265, 262, 285 + , 285, 262, 282 + , 266, 267, 286 + , 286, 267, 287 + , 267, 265, 287 + , 287, 265, 285 + , 268, 269, 288 + , 288, 269, 289 + , 269, 266, 289 + , 289, 266, 286 + , 270, 268, 290 + , 290, 268, 288 + , 271, 272, 291 + , 291, 272, 292 + , 
272, 270, 292 + , 292, 270, 290 + , 273, 274, 293 + , 293, 274, 294 + , 274, 271, 294 + , 294, 271, 291 + , 275, 273, 295 + , 295, 273, 293 + , 276, 277, 296 + , 296, 277, 297 + , 277, 275, 297 + , 297, 275, 295 + , 278, 279, 298 + , 298, 279, 299 + , 279, 276, 299 + , 299, 276, 296 + , 280, 278, 300 + , 300, 278, 298 + , 264, 281, 284 + , 284, 281, 301 + , 281, 280, 301 + , 301, 280, 300 + , 320, 461, 327 + , 461, 460, 327 + , 460, 477, 327 + , 477, 476, 327 + , 309, 303, 334 + , 334, 303, 335 + , 306, 304, 336 + , 336, 304, 337 + , 302, 305, 338 + , 338, 305, 339 + , 305, 306, 339 + , 339, 306, 336 + , 308, 307, 340 + , 340, 307, 341 + , 304, 308, 337 + , 337, 308, 340 + , 307, 309, 341 + , 341, 309, 334 + , 366, 302, 338 + , 303, 478, 335 + , 318, 312, 344 + , 344, 312, 345 + , 315, 313, 346 + , 346, 313, 347 + , 314, 348, 311 + , 311, 348, 343 + , 314, 315, 348 + , 348, 315, 346 + , 317, 316, 349 + , 349, 316, 350 + , 313, 317, 347 + , 347, 317, 349 + , 316, 318, 350 + , 350, 318, 344 + , 312, 367, 345 + , 326, 320, 352 + , 352, 320, 353 + , 323, 321, 354 + , 354, 321, 355 + , 319, 322, 351 + , 351, 322, 356 + , 322, 323, 356 + , 356, 323, 354 + , 325, 324, 357 + , 357, 324, 358 + , 321, 325, 355 + , 355, 325, 357 + , 324, 326, 358 + , 358, 326, 352 + , 320, 327, 353 + , 353, 327, 359 + , 327, 333, 359 + , 359, 333, 360 + , 328, 330, 362 + , 362, 330, 361 + , 329, 310, 363 + , 363, 310, 342 + , 330, 329, 361 + , 361, 329, 363 + , 331, 332, 365 + , 365, 332, 364 + , 332, 328, 364 + , 364, 328, 362 + , 333, 331, 360 + , 360, 331, 365 + , 368, 310, 366 + , 366, 310, 302 + , 367, 312, 407 + , 310, 368, 342 + , 369, 319, 351 + , 370, 482, 371 + , 336, 337, 372 + , 372, 337, 373 + , 338, 339, 374 + , 374, 339, 375 + , 339, 336, 375 + , 375, 336, 372 + , 340, 341, 376 + , 376, 341, 377 + , 337, 340, 373 + , 373, 340, 376 + , 341, 334, 377 + , 377, 334, 370 + , 366, 338, 413 + , 413, 338, 412 + , 412, 338, 378 + , 338, 374, 378 + , 344, 345, 380 + , 380, 345, 381 + , 
346, 347, 382 + , 382, 347, 383 + , 481, 348, 480 + , 480, 348, 384 + , 348, 346, 384 + , 384, 346, 382 + , 349, 350, 385 + , 385, 350, 386 + , 347, 349, 383 + , 383, 349, 385 + , 350, 344, 386 + , 386, 344, 380 + , 409, 351, 388 + , 388, 351, 387 + , 352, 353, 389 + , 389, 353, 390 + , 354, 355, 391 + , 391, 355, 392 + , 351, 356, 387 + , 387, 356, 393 + , 356, 354, 393 + , 393, 354, 391 + , 357, 358, 394 + , 394, 358, 395 + , 355, 357, 392 + , 392, 357, 394 + , 358, 352, 395 + , 395, 352, 389 + , 353, 359, 390 + , 390, 359, 396 + , 359, 360, 396 + , 396, 360, 397 + , 362, 361, 399 + , 399, 361, 398 + , 363, 342, 401 + , 401, 342, 400 + , 361, 363, 398 + , 398, 363, 401 + , 365, 364, 403 + , 403, 364, 402 + , 364, 362, 402 + , 402, 362, 399 + , 360, 365, 397 + , 397, 365, 403 + , 367, 408, 345 + , 345, 408, 381 + , 342, 368, 411 + , 406, 319, 369 + , 312, 319, 407 + , 407, 319, 406 + , 369, 351, 409 + , 381, 408, 404 + , 400, 410, 405 + , 411, 410, 342 + , 342, 410, 400 + , 414, 415, 463 + , 463, 415, 462 + , 414, 417, 415 + , 415, 417, 416 + , 416, 417, 418 + , 418, 417, 419 + , 418, 419, 420 + , 420, 419, 421 + , 420, 421, 422 + , 422, 421, 423 + , 422, 423, 424 + , 424, 423, 425 + , 424, 425, 426 + , 426, 425, 427 + , 426, 427, 428 + , 428, 427, 429 + , 428, 429, 430 + , 430, 429, 431 + , 431, 433, 430 + , 430, 433, 432 + , 432, 433, 434 + , 434, 433, 435 + , 434, 435, 436 + , 436, 435, 437 + , 436, 437, 438 + , 438, 437, 439 + , 438, 439, 440 + , 440, 439, 441 + , 440, 441, 442 + , 442, 441, 443 + , 442, 443, 444 + , 444, 443, 445 + , 444, 445, 447 + , 447, 445, 446 + , 446, 449, 447 + , 447, 449, 448 + , 448, 449, 450 + , 450, 449, 451 + , 450, 451, 452 + , 452, 451, 453 + , 452, 453, 454 + , 454, 453, 455 + , 454, 455, 456 + , 456, 455, 457 + , 456, 457, 458 + , 458, 457, 459 + , 458, 459, 461 + , 461, 459, 460 + , 463, 462, 465 + , 465, 462, 464 + , 464, 466, 465 + , 465, 466, 467 + , 467, 466, 469 + , 469, 466, 468 + , 469, 468, 471 + , 471, 468, 470 + , 
471, 470, 473 + , 473, 470, 472 + , 473, 472, 475 + , 475, 472, 474 + , 474, 476, 475 + , 475, 476, 477 + , 303, 309, 428 + , 428, 309, 426 + , 304, 306, 420 + , 420, 306, 418 + , 305, 302, 416 + , 416, 302, 415 + , 306, 305, 418 + , 418, 305, 416 + , 307, 308, 424 + , 424, 308, 422 + , 308, 304, 422 + , 422, 304, 420 + , 309, 307, 426 + , 426, 307, 424 + , 428, 430, 303 + , 303, 430, 311 + , 312, 318, 444 + , 444, 318, 442 + , 313, 315, 436 + , 436, 315, 434 + , 314, 311, 432 + , 432, 311, 430 + , 315, 314, 434 + , 434, 314, 432 + , 316, 317, 440 + , 440, 317, 438 + , 317, 313, 438 + , 438, 313, 436 + , 318, 316, 442 + , 442, 316, 440 + , 320, 326, 461 + , 461, 326, 458 + , 321, 323, 452 + , 452, 323, 450 + , 322, 319, 448 + , 448, 319, 447 + , 323, 322, 450 + , 450, 322, 448 + , 324, 325, 456 + , 456, 325, 454 + , 325, 321, 454 + , 454, 321, 452 + , 326, 324, 458 + , 458, 324, 456 + , 462, 310, 464 + , 464, 310, 329 + , 466, 464, 330 + , 330, 464, 329 + , 468, 466, 328 + , 328, 466, 330 + , 470, 468, 332 + , 332, 468, 328 + , 472, 470, 331 + , 331, 470, 332 + , 474, 472, 333 + , 333, 472, 331 + , 327, 476, 333 + , 333, 476, 474 + , 427, 425, 433 + , 433, 425, 435 + , 425, 423, 435 + , 435, 423, 437 + , 423, 421, 437 + , 437, 421, 439 + , 421, 419, 439 + , 439, 419, 441 + , 419, 417, 441 + , 441, 417, 443 + , 417, 414, 443 + , 443, 414, 445 + , 463, 465, 446 + , 446, 465, 449 + , 465, 467, 449 + , 449, 467, 451 + , 467, 469, 451 + , 451, 469, 453 + , 469, 471, 453 + , 453, 471, 455 + , 471, 473, 455 + , 455, 473, 457 + , 473, 475, 457 + , 457, 475, 459 + , 475, 477, 459 + , 459, 477, 460 + , 431, 429, 433 + , 433, 429, 427 + , 415, 302, 462 + , 462, 302, 310 + , 463, 446, 414 + , 414, 446, 445 + , 312, 444, 319 + , 319, 444, 447 + , 311, 479, 303 + , 303, 479, 478 + , 479, 311, 343 + , 480, 384, 379 + , 343, 348, 481 + , 483, 482, 334 + , 334, 482, 370 + , 334, 335, 483 + , 488, 506, 489 + , 489, 506, 505 + , 490, 498, 484 + , 484, 498, 497 + , 491, 494, 492 + , 
492, 494, 493 + , 494, 487, 493 + , 493, 487, 486 + , 495, 491, 496 + , 496, 491, 492 + , 498, 495, 497 + , 497, 495, 496 + , 499, 502, 500 + , 500, 502, 501 + , 502, 490, 501 + , 501, 490, 484 + , 503, 499, 504 + , 504, 499, 500 + , 506, 503, 505 + , 505, 503, 504 + , 486, 487, 522 + , 522, 487, 523 + , 507, 515, 485 + , 485, 515, 514 + , 508, 511, 509 + , 509, 511, 510 + , 511, 488, 510 + , 510, 488, 489 + , 512, 508, 513 + , 513, 508, 509 + , 515, 512, 514 + , 514, 512, 513 + , 516, 519, 517 + , 517, 519, 518 + , 519, 507, 518 + , 518, 507, 485 + , 520, 516, 521 + , 521, 516, 517 + , 523, 520, 522 + , 522, 520, 521 + , 488, 525, 506 + , 506, 525, 524 + , 490, 527, 498 + , 498, 527, 526 + , 491, 529, 494 + , 494, 529, 528 + , 494, 528, 487 + , 487, 528, 530 + , 495, 531, 491 + , 491, 531, 529 + , 498, 526, 495 + , 495, 526, 531 + , 499, 533, 502 + , 502, 533, 532 + , 502, 532, 490 + , 490, 532, 527 + , 503, 534, 499 + , 499, 534, 533 + , 506, 524, 503 + , 503, 524, 534 + , 487, 530, 523 + , 523, 530, 535 + , 507, 537, 515 + , 515, 537, 536 + , 508, 539, 511 + , 511, 539, 538 + , 511, 538, 488 + , 488, 538, 525 + , 512, 540, 508 + , 508, 540, 539 + , 515, 536, 512 + , 512, 536, 540 + , 516, 542, 519 + , 519, 542, 541 + , 519, 541, 507 + , 507, 541, 537 + , 520, 543, 516 + , 516, 543, 542 + , 523, 535, 520 + , 520, 535, 543 + , 544, 545, 564 + , 564, 545, 565 + , 545, 546, 565 + , 565, 546, 566 + , 547, 544, 567 + , 567, 544, 564 + , 548, 549, 568 + , 568, 549, 569 + , 549, 547, 569 + , 569, 547, 567 + , 550, 551, 570 + , 570, 551, 571 + , 551, 548, 571 + , 571, 548, 568 + , 552, 550, 572 + , 572, 550, 570 + , 553, 554, 573 + , 573, 554, 574 + , 554, 552, 574 + , 574, 552, 572 + , 555, 556, 575 + , 575, 556, 576 + , 556, 553, 576 + , 576, 553, 573 + , 557, 555, 577 + , 577, 555, 575 + , 558, 559, 578 + , 578, 559, 579 + , 559, 557, 579 + , 579, 557, 577 + , 560, 561, 580 + , 580, 561, 581 + , 561, 558, 581 + , 581, 558, 578 + , 562, 560, 582 + , 582, 560, 580 + , 
546, 563, 566 + , 566, 563, 583 + , 563, 562, 583 + , 583, 562, 582 + , 564, 565, 584 + , 584, 565, 585 + , 565, 566, 585 + , 585, 566, 586 + , 567, 564, 587 + , 587, 564, 584 + , 568, 569, 588 + , 588, 569, 589 + , 569, 567, 589 + , 589, 567, 587 + , 570, 571, 590 + , 590, 571, 591 + , 571, 568, 591 + , 591, 568, 588 + , 572, 570, 592 + , 592, 570, 590 + , 573, 574, 593 + , 593, 574, 594 + , 574, 572, 594 + , 594, 572, 592 + , 575, 576, 595 + , 595, 576, 596 + , 576, 573, 596 + , 596, 573, 593 + , 577, 575, 597 + , 597, 575, 595 + , 578, 579, 598 + , 598, 579, 599 + , 579, 577, 599 + , 599, 577, 597 + , 580, 581, 600 + , 600, 581, 601 + , 581, 578, 601 + , 601, 578, 598 + , 582, 580, 602 + , 602, 580, 600 + , 566, 583, 586 + , 586, 583, 603 + , 583, 582, 603 + , 603, 582, 602 + , 585, 605, 584 + , 584, 605, 604 + , 586, 606, 585 + , 585, 606, 605 + , 584, 604, 587 + , 587, 604, 607 + , 589, 609, 588 + , 588, 609, 608 + , 587, 607, 589 + , 589, 607, 609 + , 591, 611, 590 + , 590, 611, 610 + , 588, 608, 591 + , 591, 608, 611 + , 590, 610, 592 + , 592, 610, 612 + , 594, 614, 593 + , 593, 614, 613 + , 592, 612, 594 + , 594, 612, 614 + , 596, 616, 595 + , 595, 616, 615 + , 593, 613, 596 + , 596, 613, 616 + , 595, 615, 597 + , 597, 615, 617 + , 599, 619, 598 + , 598, 619, 618 + , 597, 617, 599 + , 599, 617, 619 + , 601, 621, 600 + , 600, 621, 620 + , 598, 618, 601 + , 601, 618, 621 + , 600, 620, 602 + , 602, 620, 622 + , 603, 623, 586 + , 586, 623, 606 + , 602, 622, 603 + , 603, 622, 623 + , 625, 624, 605 + , 605, 624, 604 + , 626, 625, 606 + , 606, 625, 605 + , 604, 624, 607 + , 607, 624, 627 + , 629, 628, 609 + , 609, 628, 608 + , 627, 629, 607 + , 607, 629, 609 + , 631, 630, 611 + , 611, 630, 610 + , 628, 631, 608 + , 608, 631, 611 + , 630, 632, 610 + , 610, 632, 612 + , 634, 633, 614 + , 614, 633, 613 + , 632, 634, 612 + , 612, 634, 614 + , 636, 635, 616 + , 616, 635, 615 + , 633, 636, 613 + , 613, 636, 616 + , 635, 637, 615 + , 615, 637, 617 + , 639, 638, 619 + , 
619, 638, 618 + , 637, 639, 617 + , 617, 639, 619 + , 641, 640, 621 + , 621, 640, 620 + , 638, 641, 618 + , 618, 641, 621 + , 640, 642, 620 + , 620, 642, 622 + , 643, 626, 623 + , 623, 626, 606 + , 642, 643, 622 + , 622, 643, 623 + , 624, 625, 644 + , 644, 625, 645 + , 625, 626, 645 + , 645, 626, 646 + , 627, 624, 647 + , 647, 624, 644 + , 628, 629, 648 + , 648, 629, 649 + , 629, 627, 649 + , 649, 627, 647 + , 630, 631, 650 + , 650, 631, 651 + , 631, 628, 651 + , 651, 628, 648 + , 632, 630, 652 + , 652, 630, 650 + , 633, 634, 653 + , 653, 634, 654 + , 634, 632, 654 + , 654, 632, 652 + , 635, 636, 655 + , 655, 636, 656 + , 636, 633, 656 + , 656, 633, 653 + , 637, 635, 657 + , 657, 635, 655 + , 638, 639, 658 + , 658, 639, 659 + , 639, 637, 659 + , 659, 637, 657 + , 640, 641, 660 + , 660, 641, 661 + , 641, 638, 661 + , 661, 638, 658 + , 642, 640, 662 + , 662, 640, 660 + , 626, 643, 646 + , 646, 643, 663 + , 643, 642, 663 + , 663, 642, 662 + , 664, 665, 666 + , 666, 665, 723 + , 665, 664, 777 + , 777, 664, 776 + , 667, 668, 670 + , 670, 668, 669 + , 778, 779, 781 + , 781, 779, 780 + , 671, 666, 775 + , 667, 670, 778 + , 778, 670, 779 + , 666, 671, 664 + , 776, 664, 782 + , 782, 664, 673 + , 782, 673, 783 + , 783, 673, 674 + , 675, 676, 677 + , 783, 674, 784 + , 784, 674, 678 + , 679, 668, 667 + , 784, 678, 785 + , 785, 678, 680 + , 785, 680, 786 + , 786, 680, 681 + , 786, 681, 787 + , 787, 681, 682 + , 682, 683, 787 + , 787, 683, 788 + , 667, 684, 679 + , 778, 789, 667 + , 667, 789, 686 + , 791, 685, 790 + , 790, 685, 687 + , 789, 792, 686 + , 686, 792, 689 + , 687, 688, 790 + , 790, 688, 793 + , 792, 794, 689 + , 689, 794, 691 + , 688, 690, 793 + , 793, 690, 795 + , 794, 795, 691 + , 691, 795, 690 + , 671, 672, 693 + , 693, 672, 692 + , 693, 692, 695 + , 695, 692, 694 + , 695, 694, 697 + , 697, 694, 696 + , 664, 671, 673 + , 673, 671, 693 + , 673, 693, 674 + , 674, 693, 695 + , 697, 696, 699 + , 699, 696, 698 + , 782, 796, 776 + , 776, 796, 797 + , 674, 695, 678 + , 
678, 695, 697 + , 699, 698, 701 + , 701, 698, 700 + , 798, 796, 783 + , 783, 796, 782 + , 678, 697, 680 + , 680, 697, 699 + , 701, 700, 703 + , 703, 700, 702 + , 799, 798, 784 + , 784, 798, 783 + , 703, 844, 705 + , 680, 699, 681 + , 681, 699, 701 + , 785, 800, 784 + , 784, 800, 799 + , 681, 701, 682 + , 682, 701, 703 + , 786, 801, 785 + , 785, 801, 800 + , 682, 703, 683 + , 683, 703, 705 + , 787, 802, 786 + , 786, 802, 801 + , 675, 677, 684 + , 684, 677, 679 + , 788, 803, 787 + , 787, 803, 802 + , 685, 841, 707 + , 803, 788, 804 + , 804, 788, 791 + , 683, 685, 788 + , 788, 685, 791 + , 708, 842, 706 + , 710, 711, 684 + , 684, 711, 675 + , 709, 708, 713 + , 713, 708, 712 + , 714, 715, 710 + , 710, 715, 711 + , 713, 712, 717 + , 717, 712, 716 + , 718, 719, 714 + , 714, 719, 715 + , 717, 716, 718 + , 718, 716, 719 + , 707, 709, 685 + , 685, 709, 687 + , 686, 710, 667 + , 667, 710, 684 + , 687, 709, 688 + , 688, 709, 713 + , 689, 714, 686 + , 686, 714, 710 + , 805, 804, 790 + , 790, 804, 791 + , 688, 713, 690 + , 690, 713, 717 + , 691, 718, 689 + , 689, 718, 714 + , 778, 781, 789 + , 789, 781, 806 + , 690, 717, 691 + , 691, 717, 718 + , 807, 805, 793 + , 793, 805, 790 + , 789, 806, 792 + , 792, 806, 808 + , 809, 807, 795 + , 795, 807, 793 + , 792, 808, 794 + , 794, 808, 810 + , 810, 809, 794 + , 794, 809, 795 + , 811, 777, 797 + , 797, 777, 776 + , 669, 720, 670 + , 720, 721, 722 + , 720, 722, 670 + , 779, 670, 812 + , 812, 670, 724 + , 812, 724, 813 + , 813, 724, 725 + , 813, 725, 814 + , 814, 725, 726 + , 814, 726, 815 + , 815, 726, 727 + , 815, 727, 816 + , 816, 727, 728 + , 816, 728, 817 + , 817, 728, 729 + , 729, 730, 817 + , 817, 730, 818 + , 731, 732, 723 + , 732, 731, 733 + , 733, 731, 736 + , 721, 734, 722 + , 722, 734, 735 + , 736, 737, 733 + , 665, 731, 723 + , 777, 819, 665 + , 665, 819, 739 + , 821, 738, 820 + , 820, 738, 740 + , 819, 822, 739 + , 739, 822, 742 + , 740, 741, 820 + , 820, 741, 823 + , 822, 824, 742 + , 742, 824, 744 + , 825, 823, 743 + , 
743, 823, 741 + , 824, 825, 744 + , 744, 825, 743 + , 826, 780, 812 + , 812, 780, 779 + , 827, 826, 813 + , 813, 826, 812 + , 670, 722, 724 + , 724, 722, 745 + , 828, 827, 814 + , 814, 827, 813 + , 724, 745, 725 + , 725, 745, 746 + , 815, 829, 814 + , 814, 829, 828 + , 725, 746, 726 + , 726, 746, 747 + , 722, 735, 745 + , 745, 735, 748 + , 816, 830, 815 + , 815, 830, 829 + , 745, 748, 746 + , 746, 748, 749 + , 726, 747, 727 + , 727, 747, 750 + , 817, 831, 816 + , 816, 831, 830 + , 818, 832, 817 + , 817, 832, 831 + , 746, 749, 747 + , 747, 749, 751 + , 727, 750, 728 + , 728, 750, 752 + , 747, 751, 750 + , 750, 751, 753 + , 728, 752, 729 + , 729, 752, 754 + , 729, 754, 730 + , 730, 754, 755 + , 750, 753, 752 + , 752, 753, 756 + , 752, 756, 754 + , 754, 756, 757 + , 754, 757, 755 + , 755, 757, 758 + , 818, 821, 832 + , 832, 821, 833 + , 730, 738, 818 + , 818, 738, 821 + , 730, 755, 738 + , 738, 755, 759 + , 755, 758, 759 + , 759, 758, 760 + , 821, 820, 833 + , 833, 820, 834 + , 835, 819, 811 + , 811, 819, 777 + , 820, 823, 834 + , 834, 823, 836 + , 837, 822, 835 + , 835, 822, 819 + , 823, 825, 836 + , 836, 825, 838 + , 839, 824, 837 + , 837, 824, 822 + , 825, 824, 838 + , 838, 824, 839 + , 738, 759, 740 + , 740, 759, 761 + , 739, 762, 665 + , 665, 762, 731 + , 740, 761, 741 + , 741, 761, 763 + , 742, 764, 739 + , 739, 764, 762 + , 741, 763, 743 + , 743, 763, 765 + , 744, 766, 742 + , 742, 766, 764 + , 743, 765, 744 + , 744, 765, 766 + , 759, 760, 761 + , 761, 760, 767 + , 762, 768, 731 + , 731, 768, 736 + , 761, 767, 763 + , 763, 767, 769 + , 764, 770, 762 + , 762, 770, 768 + , 763, 769, 765 + , 765, 769, 771 + , 766, 772, 764 + , 764, 772, 770 + , 765, 771, 766 + , 766, 771, 772 + , 775, 774, 671 + , 671, 774, 672 + , 672, 774, 773 + , 683, 705, 840 + , 840, 841, 683 + , 683, 841, 685 + , 843, 842, 709 + , 709, 842, 708 + , 707, 843, 709 + , 702, 845, 703 + , 703, 845, 844 + , 845, 702, 704 + , 808, 1014, 810 + , 810, 1014, 1015 + , 806, 1013, 808 + , 808, 1013, 1014 
+ , 781, 1012, 806 + , 806, 1012, 1013 + , 781, 780, 1012 + , 1012, 780, 1011 + , 780, 826, 1011 + , 1011, 826, 1010 + , 826, 827, 1010 + , 1010, 827, 1009 + , 827, 828, 1009 + , 1009, 828, 1008 + , 968, 976, 969 + , 969, 976, 977 + , 969, 977, 970 + , 970, 977, 978 + , 970, 978, 971 + , 971, 978, 979 + , 972, 971, 980 + , 980, 971, 979 + , 973, 972, 981 + , 981, 972, 980 + , 974, 973, 982 + , 982, 973, 981 + , 975, 974, 983 + , 983, 974, 982 + , 976, 1000, 977 + , 977, 1000, 1001 + , 977, 1001, 978 + , 978, 1001, 1002 + , 978, 1002, 979 + , 979, 1002, 1003 + , 980, 979, 1004 + , 1004, 979, 1003 + , 981, 980, 1005 + , 1005, 980, 1004 + , 982, 981, 1006 + , 1006, 981, 1005 + , 983, 982, 1007 + , 1007, 982, 1006 + , 985, 984, 969 + , 969, 984, 968 + , 986, 985, 970 + , 970, 985, 969 + , 987, 986, 971 + , 971, 986, 970 + , 988, 987, 972 + , 972, 987, 971 + , 989, 988, 973 + , 973, 988, 972 + , 990, 989, 974 + , 974, 989, 973 + , 991, 990, 975 + , 975, 990, 974 + , 993, 992, 985 + , 985, 992, 984 + , 994, 993, 986 + , 986, 993, 985 + , 995, 994, 987 + , 987, 994, 986 + , 996, 995, 988 + , 988, 995, 987 + , 996, 988, 997 + , 997, 988, 989 + , 997, 989, 998 + , 998, 989, 990 + , 998, 990, 999 + , 999, 990, 991 + , 1000, 839, 1001 + , 1001, 839, 837 + , 1001, 837, 1002 + , 1002, 837, 835 + , 1002, 835, 1003 + , 1003, 835, 811 + , 1004, 1003, 797 + , 797, 1003, 811 + , 1005, 1004, 796 + , 796, 1004, 797 + , 1006, 1005, 798 + , 798, 1005, 796 + , 1007, 1006, 799 + , 799, 1006, 798 + , 1009, 1008, 993 + , 993, 1008, 992 + , 1010, 1009, 994 + , 994, 1009, 993 + , 1011, 1010, 995 + , 995, 1010, 994 + , 1012, 1011, 996 + , 996, 1011, 995 + , 1012, 996, 1013 + , 1013, 996, 997 + , 1013, 997, 1014 + , 1014, 997, 998 + , 1014, 998, 1015 + , 1015, 998, 999 + , 831, 992, 830 + , 830, 992, 1008 + , 832, 984, 831 + , 831, 984, 992 + , 832, 833, 984 + , 984, 833, 968 + , 833, 834, 968 + , 968, 834, 976 + , 834, 836, 976 + , 976, 836, 1000 + , 802, 983, 801 + , 801, 983, 1007 + , 803, 
975, 802 + , 802, 975, 983 + , 804, 991, 803 + , 803, 991, 975 + , 804, 805, 991 + , 991, 805, 999 + , 805, 807, 999 + , 999, 807, 1015 + , 801, 1007, 800 + , 800, 1007, 799 + , 807, 809, 1015 + , 1015, 809, 810 + , 830, 1008, 829 + , 829, 1008, 828 + , 839, 1000, 838 + , 838, 1000, 836 +]) + +NB_GRAY_ZEDM_TRI = 40 +GRAY_COLOR = Color(0.22, 0.22, 0.22) +gray_triangles_m = np.array([ + 849, 846, 848 + , 846, 847, 848 + , 847, 846, 851 + , 846, 850, 851 + , 852, 853, 846 + , 846, 853, 854 + , 850, 846, 856 + , 846, 855, 856 + , 859, 846, 858 + , 846, 857, 858 + , 860, 861, 846 + , 846, 861, 857 + , 860, 846, 862 + , 862, 846, 849 + , 863, 864, 846 + , 846, 864, 855 + , 854, 865, 846 + , 846, 865, 863 + , 859, 866, 846 + , 846, 866, 852 + , 950, 947, 949 + , 947, 948, 949 + , 948, 947, 952 + , 947, 951, 952 + , 953, 954, 947 + , 947, 954, 955 + , 951, 947, 957 + , 947, 956, 957 + , 960, 947, 959 + , 947, 958, 959 + , 961, 962, 947 + , 947, 962, 958 + , 961, 947, 963 + , 963, 947, 950 + , 964, 965, 947 + , 947, 965, 956 + , 955, 966, 947 + , 947, 966, 964 + , 960, 967, 947 + , 947, 967, 953 +]) + +NB_YELLOW_ZEDM_TRI = 80 +YELLOW_COLOR = Color(1., 1., 0.) 
+yellow_triangles_m = np.array([ + 867, 868, 870 + , 870, 868, 869 + , 868, 871, 869 + , 869, 871, 872 + , 873, 867, 874 + , 874, 867, 870 + , 875, 876, 878 + , 878, 876, 877 + , 876, 873, 877 + , 877, 873, 874 + , 879, 880, 882 + , 882, 880, 881 + , 880, 875, 881 + , 881, 875, 878 + , 883, 879, 884 + , 884, 879, 882 + , 885, 886, 888 + , 888, 886, 887 + , 886, 883, 887 + , 887, 883, 884 + , 889, 890, 892 + , 892, 890, 891 + , 890, 885, 891 + , 891, 885, 888 + , 893, 889, 894 + , 894, 889, 892 + , 895, 896, 898 + , 898, 896, 897 + , 896, 893, 897 + , 897, 893, 894 + , 899, 900, 902 + , 902, 900, 901 + , 900, 895, 901 + , 901, 895, 898 + , 903, 899, 904 + , 904, 899, 902 + , 871, 905, 872 + , 872, 905, 906 + , 905, 903, 906 + , 906, 903, 904 + , 907, 908, 910 + , 910, 908, 909 + , 908, 911, 909 + , 909, 911, 912 + , 913, 907, 914 + , 914, 907, 910 + , 915, 916, 918 + , 918, 916, 917 + , 916, 913, 917 + , 917, 913, 914 + , 919, 920, 922 + , 922, 920, 921 + , 920, 915, 921 + , 921, 915, 918 + , 923, 919, 924 + , 924, 919, 922 + , 925, 926, 928 + , 928, 926, 927 + , 926, 923, 927 + , 927, 923, 924 + , 929, 930, 932 + , 932, 930, 931 + , 930, 925, 931 + , 931, 925, 928 + , 933, 929, 934 + , 934, 929, 932 + , 935, 936, 938 + , 938, 936, 937 + , 936, 933, 937 + , 937, 933, 934 + , 939, 940, 942 + , 942, 940, 941 + , 940, 935, 941 + , 941, 935, 938 + , 943, 939, 944 + , 944, 939, 942 + , 911, 945, 912 + , 912, 945, 946 + , 945, 943, 946 + , 946, 943, 944 +]) diff --git a/object detection/image viewer/cpp/include/GLViewer.hpp b/object detection/image viewer/cpp/include/GLViewer.hpp index 29c0af70..aa1688cb 100644 --- a/object detection/image viewer/cpp/include/GLViewer.hpp +++ b/object detection/image viewer/cpp/include/GLViewer.hpp @@ -24,9 +24,19 @@ class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + 
Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; @@ -48,9 +58,10 @@ class Simple3DObject { public: Simple3DObject(); - Simple3DObject(sl::Translation position, bool isStatic); ~Simple3DObject(); + void init(const sl::Translation& position, const bool isStatic); + bool isInit(); void addPt(sl::float3 pt); void addClr(sl::float4 clr); diff --git a/object detection/image viewer/cpp/src/GLViewer.cpp b/object detection/image viewer/cpp/src/GLViewer.cpp index 1d544e5e..b4dbb19a 100644 --- a/object detection/image viewer/cpp/src/GLViewer.cpp +++ b/object detection/image viewer/cpp/src/GLViewer.cpp @@ -149,17 +149,17 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters param, bool isTr glEnable(GL_FRAMEBUFFER_SRGB); // Compile and create the shader for 3D objects - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); // Create the rendering camera setRenderCameraProjection(param,0.5f,20); // Create the bounding box object - BBox_edges = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_edges.init(sl::Translation(0, 0, 0), false); BBox_edges.setDrawingType(GL_LINES); - BBox_faces = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_faces.init(sl::Translation(0, 0, 0), false); BBox_faces.setDrawingType(GL_QUADS); // Set background color (black) @@ -371,19 +371,10 @@ void GLViewer::idle() { glutPostRedisplay(); } -Simple3DObject::Simple3DObject() { - is_init=false; -} - -Simple3DObject::Simple3DObject(sl::Translation position, bool isStatic) : isStatic_(isStatic) { - vaoID_ = 0; - drawingType_ = GL_TRIANGLES; - position_ = position; - 
rotation_.setIdentity(); -} +Simple3DObject::Simple3DObject() : vaoID_(0), is_init(false) {} Simple3DObject::~Simple3DObject() { - if (vaoID_ != 0) { + if (is_init) { glDeleteBuffers(4, vboID_); glDeleteVertexArrays(1, &vaoID_); vaoID_=0; @@ -391,6 +382,14 @@ Simple3DObject::~Simple3DObject() { } } +void Simple3DObject::init(const sl::Translation& position, const bool isStatic) { + vaoID_ = 0; + drawingType_ = GL_TRIANGLES; + position_ = position; + isStatic_ = isStatic; + rotation_.setIdentity(); +} + bool Simple3DObject::isInit() { return is_init; @@ -871,6 +870,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -906,12 +909,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -971,18 +974,19 @@ const GLchar* IMAGE_VERTEX_SHADER = "}\n"; -ImageHandler::ImageHandler() {} +ImageHandler::ImageHandler() : imageTex(0) {} ImageHandler::~ImageHandler() { close(); } void ImageHandler::close() { - glDeleteTextures(1, &imageTex); + if (imageTex != 0) + glDeleteTextures(1, &imageTex); } bool ImageHandler::initialize(sl::Resolution res) { - shaderImage.it = Shader(IMAGE_VERTEX_SHADER,IMAGE_FRAGMENT_SHADER); + shaderImage.it.set(IMAGE_VERTEX_SHADER,IMAGE_FRAGMENT_SHADER); texID = glGetUniformLocation(shaderImage.it.getProgramId(), "texImage"); static const GLfloat g_quad_vertex_buffer_data[] = { -1.0f, -1.0f, 0.0f, diff 
--git a/object detection/multi-camera/cpp/CMakeLists.txt b/object detection/multi-camera/cpp/CMakeLists.txt index 2a8e1d7a..cf117b6f 100644 --- a/object detection/multi-camera/cpp/CMakeLists.txt +++ b/object detection/multi-camera/cpp/CMakeLists.txt @@ -11,12 +11,14 @@ if (NOT LINK_SHARED_ZED AND MSVC) endif() find_package(ZED 4 REQUIRED) +find_package(OpenCV REQUIRED) find_package(CUDA REQUIRED) find_package(GLUT REQUIRED) find_package(GLEW REQUIRED) SET(OpenGL_GL_PREFERENCE GLVND) find_package(OpenGL REQUIRED) +include_directories(${OpenCV_INCLUDE_DIRS}) include_directories(${CUDA_INCLUDE_DIRS}) include_directories(${ZED_INCLUDE_DIRS}) include_directories(${GLEW_INCLUDE_DIRS}) @@ -28,6 +30,7 @@ link_directories(${CUDA_LIBRARY_DIRS}) link_directories(${GLEW_LIBRARY_DIRS}) link_directories(${GLUT_LIBRARY_DIRS}) link_directories(${OpenGL_LIBRARY_DIRS}) +link_directories(${OpenCV_LIBRARY_DIRS}) IF(NOT WIN32) SET(SPECIAL_OS_LIBS "pthread") @@ -48,7 +51,7 @@ else() SET(ZED_LIBS ${ZED_STATIC_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_LIBRARY}) endif() -TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${UTILS_LIB} ${SPECIAL_OS_LIBS} ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES} ${GLEW_LIBRARIES}) +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${UTILS_LIB} ${SPECIAL_OS_LIBS} ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OpenCV_LIBRARIES}) if(INSTALL_SAMPLES) LIST(APPEND SAMPLE_LIST ${PROJECT_NAME}) diff --git a/object detection/multi-camera/cpp/include/ClientPublisher.hpp b/object detection/multi-camera/cpp/include/ClientPublisher.hpp index 30183576..768d55d5 100644 --- a/object detection/multi-camera/cpp/include/ClientPublisher.hpp +++ b/object detection/multi-camera/cpp/include/ClientPublisher.hpp @@ -5,6 +5,31 @@ #include #include +#include + +struct Trigger{ + + void notifyZED() { + cv.notify_all(); + if (running) { + bool wait_for_zed = true; + size_t const nb_zed{states.size()}; + while (wait_for_zed) { + int count_r = 0; + for (auto const& it : states) + 
count_r += it.second; + wait_for_zed = count_r != nb_zed; + sl::sleep_ms(1); + } + for (auto &it : states) + it.second = false; + } + } + + std::condition_variable cv; + bool running = true; + std::map states; +}; class ClientPublisher{ @@ -12,24 +37,18 @@ class ClientPublisher{ ClientPublisher(); ~ClientPublisher(); - bool open(sl::InputType); + bool open(const sl::InputType& input, Trigger* ref); void start(); void stop(); - void setStartSVOPosition(unsigned pos); - sl::Objects getObjects(); - - bool isRunning() { - return running; - } + void setStartSVOPosition(const unsigned pos); private: sl::Camera zed; void work(); std::thread runner; - bool running; int serial; - sl::Objects objects; - sl::Bodies bodies; + std::mutex mtx; + Trigger *p_trigger; }; #endif // ! __SENDER_RUNNER_HDR__ diff --git a/object detection/multi-camera/cpp/include/GLViewer.hpp b/object detection/multi-camera/cpp/include/GLViewer.hpp index 0c89ab00..2a403a29 100644 --- a/object detection/multi-camera/cpp/include/GLViewer.hpp +++ b/object detection/multi-camera/cpp/include/GLViewer.hpp @@ -1,330 +1,380 @@ -#ifndef __VIEWER_INCLUDE__ -#define __VIEWER_INCLUDE__ - -#include - -#include -#include - -#include -#include - -#include -#include - -#ifndef M_PI -#define M_PI 3.141592653f -#endif - -#define MOUSE_R_SENSITIVITY 0.03f -#define MOUSE_UZ_SENSITIVITY 0.75f -#define MOUSE_DZ_SENSITIVITY 1.25f -#define MOUSE_T_SENSITIVITY 0.05f -#define KEY_T_SENSITIVITY 0.1f - - -/////////////////////////////////////////////////////////////////////////////////////////////// - -class Shader { -public: - - Shader() { - } - Shader(const GLchar* vs, const GLchar* fs); - ~Shader(); - GLuint getProgramId(); - - static const GLint ATTRIB_VERTICES_POS = 0; - static const GLint ATTRIB_COLOR_POS = 1; - static const GLint ATTRIB_NORMAL = 2; -private: - bool compile(GLuint &shaderId, GLenum type, const GLchar* src); - GLuint verterxId_; - GLuint fragmentId_; - GLuint programId_; -}; - -struct ShaderData { - Shader 
it; - GLuint MVP_Mat; -}; - -class Simple3DObject { -public: - - Simple3DObject(); - - ~Simple3DObject(); - - void addPoint(sl::float3 pt, sl::float3 clr); - void addLine(sl::float3 pt1, sl::float3 pt2, sl::float3 clr); - void addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float3 clr); - void addBBox(std::vector &pts, sl::float3 clr); - void addPt(sl::float3 pt); - void addClr(sl::float4 clr); - - void pushToGPU(); - void clear(); - - void setStatic(bool _static) { - isStatic_ = _static; - } - - void setDrawingType(GLenum type); - - void draw(); - -private: - std::vector vertices_; - std::vector colors_; - std::vector indices_; - - bool isStatic_; - bool need_update; - GLenum drawingType_; - GLuint vaoID_; - GLuint vboID_[3]; -}; - -class CameraGL { -public: - - CameraGL() { - } - - enum DIRECTION { - UP, DOWN, LEFT, RIGHT, FORWARD, BACK - }; - CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical = sl::Translation(0, 1, 0)); // vertical = Eigen::Vector3f(0, 1, 0) - ~CameraGL(); - - void update(); - void setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar); - const sl::Transform& getViewProjectionMatrix() const; - - float getHorizontalFOV() const; - float getVerticalFOV() const; - - // Set an offset between the eye of the camera and its position - // Note: Useful to use the camera as a trackball camera with z>0 and x = 0, y = 0 - // Note: coordinates are in local space - void setOffsetFromPosition(const sl::Translation& offset); - const sl::Translation& getOffsetFromPosition() const; - - void setDirection(const sl::Translation& direction, const sl::Translation &vertical); - void translate(const sl::Translation& t); - void setPosition(const sl::Translation& p); - void rotate(const sl::Orientation& rot); - void rotate(const sl::Rotation& m); - void setRotation(const sl::Orientation& rot); - void setRotation(const sl::Rotation& m); - - const sl::Translation& getPosition() const; - const 
sl::Translation& getForward() const; - const sl::Translation& getRight() const; - const sl::Translation& getUp() const; - const sl::Translation& getVertical() const; - float getZNear() const; - float getZFar() const; - - static const sl::Translation ORIGINAL_FORWARD; - static const sl::Translation ORIGINAL_UP; - static const sl::Translation ORIGINAL_RIGHT; - - sl::Transform projection_; - bool usePerspective_; -private: - void updateVectors(); - void updateView(); - void updateVPMatrix(); - - sl::Translation offset_; - sl::Translation position_; - sl::Translation forward_; - sl::Translation up_; - sl::Translation right_; - sl::Translation vertical_; - - sl::Orientation rotation_; - - sl::Transform view_; - sl::Transform vpMatrix_; - float horizontalFieldOfView_; - float verticalFieldOfView_; - float znear_; - float zfar_; -}; - - -class PointCloud { -public: - PointCloud(); - ~PointCloud(); - - // Initialize Opengl and Cuda buffers - // Warning: must be called in the Opengl thread - void initialize(sl::Mat&, sl::float3 clr); - // Push a new point cloud - // Warning: can be called from any thread but the mutex "mutexData" must be locked - void pushNewPC(); - // Draw the point cloud - // Warning: must be called in the Opengl thread - void draw(const sl::Transform& vp, bool draw_clr); - // Close (disable update) - void close(); - -private: - sl::Mat refMat; - sl::float3 clr; - - Shader shader_; - GLuint shMVPMatrixLoc_; - GLuint shDrawColor; - GLuint shColor; - - size_t numBytes_; - float* xyzrgbaMappedBuf_; - GLuint bufferGLID_; - cudaGraphicsResource* bufferCudaID_; -}; - -class CameraViewer { -public: - CameraViewer(); - ~CameraViewer(); - - // Initialize Opengl and Cuda buffers - bool initialize(sl::Mat& image, sl::float3 clr); - // Push a new Image + Z buffer and transform into a point cloud - void pushNewImage(); - // Draw the Image - void draw(sl::Transform vpMatrix); - // Close (disable update) - void close(); - - Simple3DObject frustum; -private: - sl::Mat 
ref; - cudaArray_t ArrIm; - cudaGraphicsResource* cuda_gl_ressource;//cuda GL resource - Shader shader; - GLuint shMVPMatrixLocTex_; - - GLuint texture; - GLuint vaoID_; - GLuint vboID_[3]; - - std::vector faces; - std::vector vert; - std::vector uv; -}; - -struct ObjectClassName { - sl::float3 position; - std::string name_lineA; - std::string name_lineB; - sl::float3 color; -}; - -// This class manages input events, window and Opengl rendering pipeline - -class GLViewer { -public: - GLViewer(); - ~GLViewer(); - bool isAvailable(); - void init(int argc, char **argv); - - void updateCamera(int, sl::Mat &, sl::Mat &); - - void updateObjects(sl::Objects &objs,std::map& singldata, sl::FusionMetrics& metrics); - - void setCameraPose(int, sl::Transform); - - unsigned char getKey() { - auto ret_v = lastPressedKey; - lastPressedKey = ' '; - return ret_v; - } - - void exit(); -private: - void render(); - void update(); - void draw(); - void clearInputs(); - void setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar); - - void printText(); - - // Glut functions callbacks - static void drawCallback(); - static void mouseButtonCallback(int button, int state, int x, int y); - static void mouseMotionCallback(int x, int y); - static void reshapeCallback(int width, int height); - static void keyPressedCallback(unsigned char c, int x, int y); - static void keyReleasedCallback(unsigned char c, int x, int y); - static void idle(); - - // void addSKeleton(sl::BodyData &, Simple3DObject &, sl::float3 clr_id, bool raw, sl::BODY_FORMAT format); - // void addSKeleton(sl::BodyData &, Simple3DObject &, sl::float3 clr_id, bool raw); - void addObject(sl::ObjectData &obj, Simple3DObject &simpleObj, sl::float3 clr_id, bool raw); - - - bool available; - bool drawBbox = false; - - enum MOUSE_BUTTON { - LEFT = 0, - MIDDLE = 1, - RIGHT = 2, - WHEEL_UP = 3, - WHEEL_DOWN = 4 - }; - - enum KEY_STATE { - UP = 'u', - DOWN = 'd', - FREE = 'f' - }; - - unsigned char 
lastPressedKey; - - bool mouseButton_[3]; - int mouseWheelPosition_; - int mouseCurrentPosition_[2]; - int mouseMotion_[2]; - int previousMouseMotion_[2]; - KEY_STATE keyStates_[256]; - - std::mutex mtx; - - ShaderData shader; - - sl::Transform projection_; - sl::float3 bckgrnd_clr; - - std::map point_clouds; - std::map viewers; - std::map poses; - - std::map skeletons_raw; - std::map colors; - std::map colors_sk; - - std::vector fusionStats; - - CameraGL camera_; - Simple3DObject skeletons; - Simple3DObject floor_grid; - - bool show_pc = true; - bool show_raw = false; - bool draw_flat_color = false; - - std::uniform_int_distribution uint_dist360; - std::mt19937 rng; - -}; - -#endif /* __VIEWER_INCLUDE__ */ +#ifndef __VIEWER_INCLUDE__ +#define __VIEWER_INCLUDE__ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include "utils.hpp" + + +#ifndef M_PI +#define M_PI 3.141592653f +#endif + +#define MOUSE_R_SENSITIVITY 0.03f +#define MOUSE_UZ_SENSITIVITY 0.75f +#define MOUSE_DZ_SENSITIVITY 1.25f +#define MOUSE_T_SENSITIVITY 0.05f +#define KEY_T_SENSITIVITY 0.1f + +// Utils +std::vector getBBoxOnFloorPlane(std::vector &bbox, sl::Pose cam_pose); + +// 3D + +class CameraGL { +public: + + CameraGL() { + } + + enum DIRECTION { + UP, DOWN, LEFT, RIGHT, FORWARD, BACK + }; + CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical = sl::Translation(0, 1, 0)); // vertical = Eigen::Vector3f(0, 1, 0) + ~CameraGL(); + + void update(); + void setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar); + const sl::Transform& getViewProjectionMatrix() const; + + float getHorizontalFOV() const; + float getVerticalFOV() const; + + // Set an offset between the eye of the camera and its position + // Note: Useful to use the camera as a trackball camera with z>0 and x = 0, y = 0 + // Note: coordinates are in local space + void setOffsetFromPosition(const sl::Translation& offset); 
+ const sl::Translation& getOffsetFromPosition() const; + + void setDirection(const sl::Translation& direction, const sl::Translation& vertical); + void translate(const sl::Translation& t); + void setPosition(const sl::Translation& p); + void rotate(const sl::Orientation& rot); + void rotate(const sl::Rotation& m); + void setRotation(const sl::Orientation& rot); + void setRotation(const sl::Rotation& m); + + const sl::Translation& getPosition() const; + const sl::Translation& getForward() const; + const sl::Translation& getRight() const; + const sl::Translation& getUp() const; + const sl::Translation& getVertical() const; + float getZNear() const; + float getZFar() const; + + static const sl::Translation ORIGINAL_FORWARD; + static const sl::Translation ORIGINAL_UP; + static const sl::Translation ORIGINAL_RIGHT; + + sl::Transform projection_; + bool usePerspective_; +private: + void updateVectors(); + void updateView(); + void updateVPMatrix(); + + sl::Translation offset_; + sl::Translation position_; + sl::Translation forward_; + sl::Translation up_; + sl::Translation right_; + sl::Translation vertical_; + + sl::Orientation rotation_; + + sl::Transform view_; + sl::Transform vpMatrix_; + float horizontalFieldOfView_; + float verticalFieldOfView_; + float znear_; + float zfar_; +}; + +class Shader { +public: + + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} + Shader(const GLchar* vs, const GLchar* fs); + ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); + GLuint getProgramId(); + + static const GLint ATTRIB_VERTICES_POS = 0; + static const GLint ATTRIB_COLOR_POS = 1; + static const GLint ATTRIB_NORMAL = 2; + +private: + bool compile(GLuint &shaderId, GLenum type, const 
GLchar* src); + GLuint verterxId_; + GLuint fragmentId_; + GLuint programId_; +}; + +struct ShaderData { + Shader it; + GLuint MVP_Mat; +}; + +class Simple3DObject { +public: + + Simple3DObject() { + } + Simple3DObject(sl::Translation position, bool isStatic); + ~Simple3DObject(); + + void addPt(sl::float3 pt); + void addClr(sl::float4 clr); + + void addBBox(std::vector &pts, sl::float4 clr); + void addPoint(sl::float3 pt, sl::float4 clr); + void addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float4 clr); + void addLine(sl::float3 p1, sl::float3 p2, sl::float4 clr); + + // New 3D rendering + void addFullEdges(std::vector &pts, sl::float4 clr); + void addVerticalEdges(std::vector &pts, sl::float4 clr); + void addTopFace(std::vector &pts, sl::float4 clr); + void addVerticalFaces(std::vector &pts, sl::float4 clr); + + void pushToGPU(); + void clear(); + + void setDrawingType(GLenum type); + + void draw(); + + void translate(const sl::Translation& t); + void setPosition(const sl::Translation& p); + + void setRT(const sl::Transform& mRT); + + void rotate(const sl::Orientation& rot); + void rotate(const sl::Rotation& m); + void setRotation(const sl::Orientation& rot); + void setRotation(const sl::Rotation& m); + + const sl::Translation& getPosition() const; + + sl::Transform getModelMatrix() const; + + std::vector vertices_; + std::vector colors_; + std::vector indices_; + + bool isStatic_; + GLenum drawingType_; + + GLuint vaoID_; +private: + /* + Vertex buffer IDs: + - [0]: Vertices coordinates + - [1]: colors; + - [2]: Indices; + */ + GLuint vboID_[3]; + + sl::Translation position_; + sl::Orientation rotation_; +}; + +class PointCloud { +public: + PointCloud(); + ~PointCloud(); + + // Initialize Opengl and Cuda buffers + // Warning: must be called in the Opengl thread + void initialize(sl::Mat&, sl::float3 clr); + // Push a new point cloud + // Warning: can be called from any thread but the mutex "mutexData" must be locked + void pushNewPC(); + // Draw the 
point cloud + // Warning: must be called in the Opengl thread + void draw(const sl::Transform& vp, bool draw_clr); + // Close (disable update) + void close(); + +private: + sl::Mat refMat; + sl::float3 clr; + + Shader shader_; + GLuint shMVPMatrixLoc_; + GLuint shDrawColor; + GLuint shColor; + + size_t numBytes_; + float* xyzrgbaMappedBuf_; + GLuint bufferGLID_; + cudaGraphicsResource* bufferCudaID_; +}; + +struct ObjectClassName { + sl::float3 position; + std::string name_lineA; + std::string name_lineB; + sl::float3 color; +}; + +class CameraViewer { +public: + CameraViewer(); + ~CameraViewer(); + + // Initialize Opengl and Cuda buffers + bool initialize(sl::Mat& image, sl::float3 clr); + // Push a new Image + Z buffer and transform into a point cloud + void pushNewImage(); + // Draw the Image + void draw(sl::Transform vpMatrix); + // Close (disable update) + void close(); + + Simple3DObject frustum; +private: + sl::Mat ref; + cudaArray_t ArrIm; + cudaGraphicsResource* cuda_gl_ressource;//cuda GL resource + Shader shader; + GLuint shMVPMatrixLocTex_; + + GLuint texture; + GLuint vaoID_; + GLuint vboID_[3]; + + std::vector faces; + std::vector vert; + std::vector uv; +}; + +// This class manages input events, window and Opengl rendering pipeline + +class GLViewer { +public: + GLViewer(); + ~GLViewer(); + bool isAvailable(); + bool isPlaying() const { return play; } + + void init(int argc, char **argv, const std::vector& cameras_id); + void updateObjects(const std::unordered_map& objs, + const std::unordered_map>& singledata, + const sl::FusionMetrics& metrics); + + void updateCamera(int, sl::Mat&, sl::Mat&); + void updateCamera(sl::Mat&); + void setCameraPose(int, sl::Transform); + + int getKey() { + const int key = last_key; + last_key = -1; + return key; + } + + void exit(); +private: + // Rendering loop method called each frame by glutDisplayFunc + void render(); + // Everything that needs to be updated before rendering must be done in this method + void 
update(); + // Once everything is updated, every renderable objects must be drawn in this method + void draw(); + // Clear and refresh inputs' data + void clearInputs(); + + void setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar); + void printText(); + + void createBboxRendering(std::vector &bbox, sl::float4 bbox_clr); + sl::float3 getColor(int, bool); + + // Glut functions callbacks + static void drawCallback(); + static void mouseButtonCallback(int button, int state, int x, int y); + static void mouseMotionCallback(int x, int y); + static void reshapeCallback(int width, int height); + static void keyPressedCallback(unsigned char c, int x, int y); + static void keyReleasedCallback(unsigned char c, int x, int y); + static void idle(); + + bool available; + + enum MOUSE_BUTTON { + LEFT = 0, + MIDDLE = 1, + RIGHT = 2, + WHEEL_UP = 3, + WHEEL_DOWN = 4 + }; + + enum KEY_STATE { + UP = 'u', + DOWN = 'd', + FREE = 'f' + }; + + bool mouseButton_[3]; + int mouseWheelPosition_; + int mouseCurrentPosition_[2]; + int mouseMotion_[2]; + int previousMouseMotion_[2]; + KEY_STATE keyStates_[256]; + + std::mutex mtx; + std::mutex mtx_clr; + + std::map point_clouds; + std::map viewers; + std::map poses; + CameraGL camera_; + ShaderData shaderLine; + ShaderData shader; + sl::float4 bckgrnd_clr; + + sl::Transform projection_; + + std::map BBox_edges_per_cam; + std::map BBox_faces_per_cam; + std::map colors; + std::map colors_bbox; + std::vector fusionStats; + Simple3DObject BBox_edges; + Simple3DObject BBox_faces; + Simple3DObject floor_grid; + + bool show_pc = true; + bool show_raw = false; + bool show_fused = true; + bool draw_flat_color = false; + + std::uniform_int_distribution uint_dist360; + std::mt19937 rng; + + bool play = true; + int last_key = -1; +}; + +#endif /* __VIEWER_INCLUDE__ */ diff --git a/object detection/multi-camera/cpp/include/utils.hpp b/object detection/multi-camera/cpp/include/utils.hpp index 776cf1f6..2cd9e006 100644 --- 
a/object detection/multi-camera/cpp/include/utils.hpp +++ b/object detection/multi-camera/cpp/include/utils.hpp @@ -2,13 +2,54 @@ #include +inline bool renderObject(const sl::ObjectData& i, const bool isTrackingON) { + if (isTrackingON) + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK); + else + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK || i.tracking_state == sl::OBJECT_TRACKING_STATE::OFF); +} + +float const id_colors[5][3] = { + { 232.0f, 176.0f ,59.0f }, + { 175.0f, 208.0f ,25.0f }, + { 102.0f, 205.0f ,105.0f}, + { 185.0f, 0.0f ,255.0f}, + { 99.0f, 107.0f ,252.0f} +}; + +inline sl::float4 generateColorID_u(int idx) { + if (idx < 0) return sl::float4(236, 184, 36, 255); + int color_idx = idx % 5; + return sl::float4(id_colors[color_idx][0], id_colors[color_idx][1], id_colors[color_idx][2], 255); +} + +inline sl::float4 generateColorID_f(int idx) { + auto clr_u = generateColorID_u(idx); + return sl::float4(static_cast(clr_u[0]) / 255.f, static_cast(clr_u[1]) / 255.f, static_cast(clr_u[2]) / 255.f, 1.f); +} + +float const class_colors[6][3] = { + { 44.0f, 117.0f, 255.0f}, // PEOPLE + { 255.0f, 0.0f, 255.0f}, // VEHICLE + { 0.0f, 0.0f, 255.0f}, + { 0.0f, 255.0f, 255.0f}, + { 0.0f, 255.0f, 0.0f}, + { 255.0f, 255.0f, 255.0f} +}; + +inline sl::float4 getColorClass(int idx) { + idx = std::min(5, idx); + sl::float4 clr(class_colors[idx][0], class_colors[idx][1], class_colors[idx][2], 1.f); + return clr / 255.f; +} + /** * @brief Compute the start frame of each SVO for playback to be synced * * @param svo_files Map camera index to SVO file path * @return Map camera index to starting SVO frame for synced playback */ -std::map syncDATA(std::map svo_files) { +static std::map syncDATA(std::map svo_files) { std::map output; // map of camera index and frame index of the starting point for each // Open all SVO @@ -53,4 +94,4 @@ std::map syncDATA(std::map svo_files) { for (auto &it : p_zeds) it.second->close(); return output; -} +} \ No newline at end 
of file diff --git a/object detection/multi-camera/cpp/src/ClientPublisher.cpp b/object detection/multi-camera/cpp/src/ClientPublisher.cpp index 7b13436b..ceb7dc90 100644 --- a/object detection/multi-camera/cpp/src/ClientPublisher.cpp +++ b/object detection/multi-camera/cpp/src/ClientPublisher.cpp @@ -1,6 +1,6 @@ #include "ClientPublisher.hpp" -ClientPublisher::ClientPublisher() : running(false) +ClientPublisher::ClientPublisher() { } @@ -9,44 +9,42 @@ ClientPublisher::~ClientPublisher() zed.close(); } -bool ClientPublisher::open(sl::InputType input) { +bool ClientPublisher::open(const sl::InputType& input, Trigger* ref) { // already running if (runner.joinable()) return false; + p_trigger = ref; + sl::InitParameters init_parameters; init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA; init_parameters.input = input; - if (input.getType() == sl::InputType::INPUT_TYPE::SVO_FILE) - init_parameters.svo_real_time_mode = true; init_parameters.coordinate_units = sl::UNIT::METER; - init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; + init_parameters.depth_stabilization = 5; auto state = zed.open(init_parameters); if (state != sl::ERROR_CODE::SUCCESS) { std::cout << "Error: " << state << std::endl; return false; - } + } + serial = zed.getCameraInformation().serial_number; + p_trigger->states[serial] = false; - // define the body tracking parameters, as the fusion can does the tracking and fitting you don't need to enable them here, unless you need it for your app - sl::BodyTrackingParameters body_tracking_parameters; - body_tracking_parameters.detection_model = sl::BODY_TRACKING_MODEL::HUMAN_BODY_MEDIUM; - body_tracking_parameters.body_format = sl::BODY_FORMAT::BODY_18; - body_tracking_parameters.enable_body_fitting = false; - body_tracking_parameters.enable_tracking = false; - state = zed.enableBodyTracking(body_tracking_parameters); + // in most cases in object detection setup, the cameras are static + sl::PositionalTrackingParameters 
positional_tracking_parameters; + positional_tracking_parameters.set_as_static = true; + state = zed.enablePositionalTracking(positional_tracking_parameters); if (state != sl::ERROR_CODE::SUCCESS) { std::cout << "Error: " << state << std::endl; return false; - } + } - // define the body tracking parameters, as the fusion can does the tracking and fitting you don't need to enable them here, unless you need it for your app sl::ObjectDetectionParameters object_detection_parameters; - object_detection_parameters.detection_model = sl::OBJECT_DETECTION_MODEL::MULTI_CLASS_BOX_ACCURATE; - object_detection_parameters.enable_tracking = false; - object_detection_parameters.instance_module_id = 20; + object_detection_parameters.detection_model = sl::OBJECT_DETECTION_MODEL::MULTI_CLASS_BOX_FAST; + object_detection_parameters.enable_tracking = true; + object_detection_parameters.fused_objects_group_name = "MULTI_CLASS_BOX"; state = zed.enableObjectDetection(object_detection_parameters); if (state != sl::ERROR_CODE::SUCCESS) { @@ -54,26 +52,12 @@ bool ClientPublisher::open(sl::InputType input) { return false; } - - // in most cases in body tracking setup, the cameras are static - sl::PositionalTrackingParameters positional_tracking_parameters; - // in most cases for body detection application the camera is static: - positional_tracking_parameters.set_as_static = true; - state = zed.enablePositionalTracking(positional_tracking_parameters); - if (state != sl::ERROR_CODE::SUCCESS) - { - std::cout << "Error: " << state << std::endl; - return false; - } - - return true; } void ClientPublisher::start() { if (zed.isOpened()) { - running = true; // the camera should stream its data so the fusion can subscibe to it to gather the detected body and others metadata needed for the process. 
zed.startPublishing(); // the thread can start to process the camera grab in background @@ -83,7 +67,6 @@ void ClientPublisher::start() void ClientPublisher::stop() { - running = false; if (runner.joinable()) runner.join(); zed.close(); @@ -91,29 +74,29 @@ void ClientPublisher::stop() void ClientPublisher::work() { + sl::RuntimeParameters rt; + rt.confidence_threshold = 50; + + sl::Objects objects; + // in this sample we use a dummy thread to process the ZED data. // you can replace it by your own application and use the ZED like you use to, retrieve its images, depth, sensors data and so on. // as long as you call the grab function and the retrieveObjects (which runs the detection) the camera will be able to seamlessly transmit the data to the fusion module. - while (running) { - if (zed.grab() == sl::ERROR_CODE::SUCCESS) { - /* - Your App - - */ - zed.retrieveObjects(objects, sl::ObjectDetectionRuntimeParameters(), 20); - zed.retrieveBodies(bodies); + while (p_trigger->running) { + std::unique_lock lk(mtx); + p_trigger->cv.wait(lk); + if (p_trigger->running) { + if (zed.grab(rt) == sl::ERROR_CODE::SUCCESS) { + /* + Your App + */ + zed.retrieveObjects(objects, sl::ObjectDetectionRuntimeParameters()); + } } - std::this_thread::sleep_for(std::chrono::microseconds(100)); - + p_trigger->states[serial] = true; } } void ClientPublisher::setStartSVOPosition(unsigned pos) { zed.setSVOPosition(pos); - zed.grab(); } - -sl::Objects ClientPublisher::getObjects(){ - return objects; -} - diff --git a/object detection/multi-camera/cpp/src/GLViewer.cpp b/object detection/multi-camera/cpp/src/GLViewer.cpp index e98fd374..fbb87413 100644 --- a/object detection/multi-camera/cpp/src/GLViewer.cpp +++ b/object detection/multi-camera/cpp/src/GLViewer.cpp @@ -1,1142 +1,1457 @@ -#include "GLViewer.hpp" - -const GLchar* VERTEX_SHADER = - "#version 330 core\n" - "layout(location = 0) in vec3 in_Vertex;\n" - "layout(location = 1) in vec3 in_Color;\n" - "uniform mat4 u_mvpMatrix;\n" - "out 
vec3 b_color;\n" - "void main() {\n" - " b_color = in_Color.bgr;\n" - " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" - "}"; - -const GLchar* FRAGMENT_SHADER = - "#version 330 core\n" - "in vec3 b_color;\n" - "layout(location = 0) out vec4 color;\n" - "void main() {\n" - " color = vec4(b_color, 0.95);\n" - "}"; - - -const GLchar* POINTCLOUD_VERTEX_SHADER = - "#version 330 core\n" - "layout(location = 0) in vec4 in_VertexRGBA;\n" - "out vec4 b_color;\n" - "uniform mat4 u_mvpMatrix;\n" - "uniform vec3 u_color;\n" - "uniform bool u_drawFlat;\n" - "void main() {\n" - // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) - " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n" - " if(u_drawFlat)\n" - " b_color = vec4(u_color.bgr, .85f);\n" - "else{" - " vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16);\n" - " b_color = vec4(clr_int.b / 255.0f, clr_int.g / 255.0f, clr_int.r / 255.0f, .85f);\n" - " }" - " gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1);\n" - "}"; - -const GLchar* POINTCLOUD_FRAGMENT_SHADER = - "#version 330 core\n" - "in vec4 b_color;\n" - "layout(location = 0) out vec4 out_Color;\n" - "void main() {\n" - " out_Color = b_color;\n" - "}"; - -const GLchar* VERTEX_SHADER_TEXTURE = - "#version 330 core\n" - "layout(location = 0) in vec3 in_Vertex;\n" - "layout(location = 1) in vec2 in_UVs;\n" - "uniform mat4 u_mvpMatrix;\n" - "out vec2 UV;\n" - "void main() {\n" - " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" - " UV = in_UVs;\n" - "}\n"; - -const GLchar* FRAGMENT_SHADER_TEXTURE = - "#version 330 core\n" - "in vec2 UV;\n" - "uniform sampler2D texture_sampler;\n" - "void main() {\n" - " gl_FragColor = vec4(texture(texture_sampler, UV).bgr, 1.0);\n" - "}\n"; - - -GLViewer* currentInstance_ = nullptr; - -GLViewer::GLViewer() : available(false) { - currentInstance_ = this; - mouseButton_[0] = mouseButton_[1] = 
mouseButton_[2] = false; - clearInputs(); - previousMouseMotion_[0] = previousMouseMotion_[1] = 0; -} - -GLViewer::~GLViewer() { -} - -void GLViewer::exit() { - if (currentInstance_) { - available = false; - } -} - -bool GLViewer::isAvailable() { - if (currentInstance_ && available) { - glutMainLoopEvent(); - } - return available; -} - -void CloseFunc(void) { - if (currentInstance_) currentInstance_->exit(); -} - -void addVert(Simple3DObject &obj, float i_f, float limit, float height, sl::float4 &clr) { - auto p1 = sl::float3(i_f, height, -limit); - auto p2 = sl::float3(i_f, height, limit); - auto p3 = sl::float3(-limit, height, i_f); - auto p4 = sl::float3(limit, height, i_f); - - obj.addLine(p1, p2, clr); - obj.addLine(p3, p4, clr); -} - -void GLViewer::init(int argc, char **argv) { - - glutInit(&argc, argv); - int wnd_w = glutGet(GLUT_SCREEN_WIDTH); - int wnd_h = glutGet(GLUT_SCREEN_HEIGHT); - - glutInitWindowSize(1200, 700); - glutInitWindowPosition(wnd_w * 0.05, wnd_h * 0.05); - glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH); - - glutCreateWindow("ZED| 3D View"); - - GLenum err = glewInit(); - if (GLEW_OK != err) - std::cout << "ERROR: glewInit failed: " << glewGetErrorString(err) << "\n"; - - glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); - - glEnable(GL_DEPTH_TEST); - glEnable(GL_TEXTURE_2D); - glEnable(GL_BLEND); - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); - -#ifndef JETSON_STYLE - glEnable(GL_POINT_SMOOTH); -#endif - - // Compile and create the shader for 3D objects - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); - shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - - // Create the camera - camera_ = CameraGL(sl::Translation(0, 2, 10), sl::Translation(0, 0, -1)); - - // Create the skeletons objects - skeletons.setDrawingType(GL_LINES); - floor_grid.setDrawingType(GL_LINES); - - // Set background color (black) - bckgrnd_clr = sl::float4(0.2f, 0.19f, 0.2f, 1.0f); - - - float 
limit = 20.0f; - sl::float4 clr_grid(80, 80, 80, 255); - clr_grid /= 255.f; - - float grid_height = -0; - for (int i = (int) (-limit); i <= (int) (limit); i++) - addVert(floor_grid, i, limit, grid_height, clr_grid); - - floor_grid.pushToGPU(); - - std::random_device dev; - rng = std::mt19937(dev()); - uint_dist360 = std::uniform_int_distribution(0, 360); - - // Map glut function on this class methods - glutDisplayFunc(GLViewer::drawCallback); - glutMouseFunc(GLViewer::mouseButtonCallback); - glutMotionFunc(GLViewer::mouseMotionCallback); - glutReshapeFunc(GLViewer::reshapeCallback); - glutKeyboardFunc(GLViewer::keyPressedCallback); - glutKeyboardUpFunc(GLViewer::keyReleasedCallback); - glutCloseFunc(CloseFunc); - - available = true; -} - -sl::float3 newColor(float hh) { - float s = 1.; - float v = 1.; - - sl::float3 clr; - int i = (int)hh; - float ff = hh - i; - float p = v * (1.0 - s); - float q = v * (1.0 - (s * ff)); - float t = v * (1.0 - (s * (1.0 - ff))); - switch (i) { - case 0: - clr.r = v; - clr.g = t; - clr.b = p; - break; - case 1: - clr.r = q; - clr.g = v; - clr.b = p; - break; - case 2: - clr.r = p; - clr.g = v; - clr.b = t; - break; - - case 3: - clr.r = p; - clr.g = q; - clr.b = v; - break; - case 4: - clr.r = t; - clr.g = p; - clr.b = v; - break; - case 5: - default: - clr.r = v; - clr.g = p; - clr.b = q; - break; - } - return clr; -} - -void GLViewer::updateCamera(int id, sl::Mat &view, sl::Mat &pc){ - mtx.lock(); - if (colors.find(id) == colors.end()) { - float hh = uint_dist360(rng) / 60.f; - colors[id] = newColor(hh); - } - - if(view.isInit() && viewers.find(id) == viewers.end()) - viewers[id].initialize(view, colors[id]); - - if(pc.isInit() && point_clouds.find(id) == point_clouds.end()) - point_clouds[id].initialize(pc, colors[id]); - - mtx.unlock(); -} - -void GLViewer::setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar) { - // Just slightly up the ZED camera FOV to make a small black border - float fov_y = 
(params.v_fov + 0.5f) * M_PI / 180.f; - float fov_x = (params.h_fov + 0.5f) * M_PI / 180.f; - - projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); - projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); - projection_(2, 2) = -(zfar + znear) / (zfar - znear); - projection_(3, 2) = -1; - projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); - projection_(3, 3) = 0; - - projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); //Horizontal FoV. - projection_(0, 1) = 0; - projection_(0, 2) = 2.0f * ((params.image_size.width - 1.0f * params.cx) / params.image_size.width) - 1.0f; //Horizontal offset. - projection_(0, 3) = 0; - - projection_(1, 0) = 0; - projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); //Vertical FoV. - projection_(1, 2) = -(2.0f * ((params.image_size.height - 1.0f * params.cy) / params.image_size.height) - 1.0f); //Vertical offset. - projection_(1, 3) = 0; - - projection_(2, 0) = 0; - projection_(2, 1) = 0; - projection_(2, 2) = -(zfar + znear) / (zfar - znear); //Near and far planes. - projection_(2, 3) = -(2.0f * zfar * znear) / (zfar - znear); //Near and far planes. 
- - projection_(3, 0) = 0; - projection_(3, 1) = 0; - projection_(3, 2) = -1; - projection_(3, 3) = 0.0f; -} - -void GLViewer::render() { - if (available) { - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); - glClearColor(bckgrnd_clr.r, bckgrnd_clr.g, bckgrnd_clr.b, 1.f); - update(); - draw(); - printText(); - glutSwapBuffers(); - glutPostRedisplay(); - } -} - -void GLViewer::setCameraPose(int id, sl::Transform pose) { - mtx.lock(); - poses[id] = pose; - if (colors.find(id) == colors.end()) { - float hh = uint_dist360(rng) / 60.f; - colors[id] = newColor(hh); - } - mtx.unlock(); -} - -inline bool renderObject(const sl::ObjectData& i, const bool isTrackingON) { - if (isTrackingON) - return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK); - else - return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK || i.tracking_state == sl::OBJECT_TRACKING_STATE::OFF); -} - -template -void createSKPrimitive(sl::BodyData& body, const std::vector>& map, Simple3DObject& skp, sl::float3 clr_id, bool raw) { - const float cylinder_thickness = raw ? 
0.01f : 0.025f; - - for (auto& limb : map) { - sl::float3 kp_1 = body.keypoint[getIdx(limb.first)]; - sl::float3 kp_2 = body.keypoint[getIdx(limb.second)]; - if (std::isfinite(kp_1.norm()) && std::isfinite(kp_2.norm())) - skp.addLine(kp_1, kp_2, clr_id); - } -} - -void GLViewer::addObject(sl::ObjectData &obj, Simple3DObject &simpleObj, sl::float3 clr_id, bool raw) { - simpleObj.addBBox(obj.bounding_box, clr_id); -} - -// void GLViewer::addSKeleton(sl::BodyData& obj, Simple3DObject& simpleObj, sl::float3 clr_id, bool raw) { -// switch (obj.keypoint.size()) { -// case 18: -// addSKeleton(obj, simpleObj, clr_id, raw, sl::BODY_FORMAT::BODY_18); -// break; -// case 34: -// addSKeleton(obj, simpleObj, clr_id, raw, sl::BODY_FORMAT::BODY_34); -// break; -// case 38: -// addSKeleton(obj, simpleObj, clr_id, raw, sl::BODY_FORMAT::BODY_38); -// break; -// } -// } - -void GLViewer::updateObjects(sl::Objects &objects, std::map& singldata, sl::FusionMetrics& metrics) { - mtx.lock(); - - if (objects.is_new) { - skeletons.clear(); - for(auto &it:objects.object_list) { - - if (colors_sk.find(it.id) == colors_sk.end()) { - float hh = uint_dist360(rng) / 60.f; - colors_sk[it.id] = newColor(hh); - } - - if (renderObject(it, objects.is_tracked)) - { - addObject(it, skeletons, colors_sk[it.id], false); - } - } - - } - - fusionStats.clear(); - int id = 0; - - ObjectClassName obj_str; - obj_str.name_lineA = "Publishers :" + std::to_string(metrics.mean_camera_fused); - obj_str.name_lineB = "Sync :" + std::to_string(metrics.mean_stdev_between_camera * 1000.f); - obj_str.color = sl::float4(0.9,0.9,0.9,1); - obj_str.position = sl::float3(10, (id * 30), 0); - fusionStats.push_back(obj_str); - - for (auto &it : singldata) { - auto clr = colors[it.first.sn]; - id++; - if (it.second.is_new) - { - auto& sk_r = skeletons_raw[it.first.sn]; - sk_r.clear(); - sk_r.setDrawingType(GL_LINES); - - for (auto& obj : it.second.object_list) { - if(renderObject(obj, it.second.is_tracked)) - { - addObject(obj, 
sk_r, clr, true); - } - } - } - - ObjectClassName obj_str; - obj_str.name_lineA = "CAM: " + std::to_string(it.first.sn) + " FPS: " + std::to_string(metrics.camera_individual_stats[it.first].received_fps); - obj_str.name_lineB = "Ratio Detection :" + std::to_string(metrics.camera_individual_stats[it.first].ratio_detection) + " Delta " + std::to_string(metrics.camera_individual_stats[it.first].delta_ts * 1000.f); - obj_str.color = clr; - obj_str.position = sl::float3(10, (id * 30), 0); - fusionStats.push_back(obj_str); - } - mtx.unlock(); -} - -void GLViewer::update() { - - if (keyStates_['q'] == KEY_STATE::UP || keyStates_['Q'] == KEY_STATE::UP || keyStates_[27] == KEY_STATE::UP) { - currentInstance_->exit(); - return; - } - - if (keyStates_['r'] == KEY_STATE::UP) - currentInstance_->show_raw = !currentInstance_->show_raw; - - if (keyStates_['c'] == KEY_STATE::UP) - currentInstance_->draw_flat_color = !currentInstance_->draw_flat_color; - - if (keyStates_['p'] == KEY_STATE::UP) - currentInstance_->show_pc = !currentInstance_->show_pc; - - // Rotate camera with mouse - if (mouseButton_[MOUSE_BUTTON::LEFT]) { - camera_.rotate(sl::Rotation((float) mouseMotion_[1] * MOUSE_R_SENSITIVITY, camera_.getRight())); - camera_.rotate(sl::Rotation((float) mouseMotion_[0] * MOUSE_R_SENSITIVITY, camera_.getVertical() * -1.f)); - } - - // Translate camera with mouse - if (mouseButton_[MOUSE_BUTTON::RIGHT]) { - camera_.translate(camera_.getUp() * (float) mouseMotion_[1] * MOUSE_T_SENSITIVITY); - camera_.translate(camera_.getRight() * (float) mouseMotion_[0] * MOUSE_T_SENSITIVITY); - } - - // Zoom in with mouse wheel - if (mouseWheelPosition_ != 0) { - //float distance = sl::Translation(camera_.getOffsetFromPosition()).norm(); - if (mouseWheelPosition_ > 0 /* && distance > camera_.getZNear()*/) { // zoom - camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f * -1); - } else if (/*distance < camera_.getZFar()*/ mouseWheelPosition_ < 0) {// unzoom - 
//camera_.setOffsetFromPosition(camera_.getOffsetFromPosition() * MOUSE_DZ_SENSITIVITY); - camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f); - } - } - - camera_.update(); - mtx.lock(); - // Update point cloud buffers - skeletons.pushToGPU(); - for(auto &it: skeletons_raw) - it.second.pushToGPU(); - - for(auto &it: point_clouds) - it.second.pushNewPC(); - - for(auto &it: viewers) - it.second.pushNewImage(); - - mtx.unlock(); - clearInputs(); -} - - -void GLViewer::draw() { - - glPolygonMode(GL_FRONT, GL_LINE); - glPolygonMode(GL_BACK, GL_LINE); - glLineWidth(2.f); - glPointSize(1.f); - - sl::Transform vpMatrix = camera_.getViewProjectionMatrix(); - glUseProgram(shader.it.getProgramId()); - glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, vpMatrix.m); - - floor_grid.draw(); - skeletons.draw(); - - if (show_raw) - for (auto& it : skeletons_raw) - it.second.draw(); - - for (auto& it : viewers) { - sl::Transform pose_ = vpMatrix * poses[it.first]; - glUniformMatrix4fv(shader.MVP_Mat, 1, GL_FALSE, sl::Transform::transpose(pose_).m); - viewers[it.first].frustum.draw(); - } - - glUseProgram(0); - - for (auto& it : poses) { - sl::Transform vpMatrix_world = vpMatrix * it.second; - - if(show_pc) - if(point_clouds.find(it.first) != point_clouds.end()) - point_clouds[it.first].draw(vpMatrix_world, draw_flat_color); - - if (viewers.find(it.first) != viewers.end()) - viewers[it.first].draw(vpMatrix_world); - } -} - -sl::float2 compute3Dprojection(sl::float3 &pt, const sl::Transform &cam, sl::Resolution wnd_size) { - sl::float4 pt4d(pt.x, pt.y, pt.z, 1.); - auto proj3D_cam = pt4d * cam; - sl::float2 proj2D; - proj2D.x = ((proj3D_cam.x / pt4d.w) * wnd_size.width) / (2.f * proj3D_cam.w) + wnd_size.width / 2.f; - proj2D.y = ((proj3D_cam.y / pt4d.w) * wnd_size.height) / (2.f * proj3D_cam.w) + wnd_size.height / 2.f; - return proj2D; -} - -void GLViewer::printText() { - - sl::Resolution wnd_size(glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT)); - for (auto &it 
: fusionStats) { -#if 0 - auto pt2d = compute3Dprojection(it.position, projection_, wnd_size); -#else - sl::float2 pt2d(it.position.x, it.position.y); -#endif - glColor4f(it.color.b, it.color.g, it.color.r, .85f); - const auto *string = it.name_lineA.c_str(); - glWindowPos2f(pt2d.x, pt2d.y + 15); - int len = (int) strlen(string); - for (int i = 0; i < len; i++) - glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); - - string = it.name_lineB.c_str(); - glWindowPos2f(pt2d.x, pt2d.y); - len = (int) strlen(string); - for (int i = 0; i < len; i++) - glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); - } -} - -void GLViewer::clearInputs() { - mouseMotion_[0] = mouseMotion_[1] = 0; - mouseWheelPosition_ = 0; - for (unsigned int i = 0; i < 256; ++i) - if (keyStates_[i] != KEY_STATE::DOWN) - keyStates_[i] = KEY_STATE::FREE; -} - -void GLViewer::drawCallback() { - currentInstance_->render(); -} - -void GLViewer::mouseButtonCallback(int button, int state, int x, int y) { - if (button < 5) { - if (button < 3) { - currentInstance_->mouseButton_[button] = state == GLUT_DOWN; - } else { - currentInstance_->mouseWheelPosition_ += button == MOUSE_BUTTON::WHEEL_UP ? 
1 : -1; - } - currentInstance_->mouseCurrentPosition_[0] = x; - currentInstance_->mouseCurrentPosition_[1] = y; - currentInstance_->previousMouseMotion_[0] = x; - currentInstance_->previousMouseMotion_[1] = y; - } -} - -void GLViewer::mouseMotionCallback(int x, int y) { - currentInstance_->mouseMotion_[0] = x - currentInstance_->previousMouseMotion_[0]; - currentInstance_->mouseMotion_[1] = y - currentInstance_->previousMouseMotion_[1]; - currentInstance_->previousMouseMotion_[0] = x; - currentInstance_->previousMouseMotion_[1] = y; -} - -void GLViewer::reshapeCallback(int width, int height) { - glViewport(0, 0, width, height); - float hfov = (180.0f / M_PI) * (2.0f * atan(width / (2.0f * 500))); - float vfov = (180.0f / M_PI) * (2.0f * atan(height / (2.0f * 500))); - currentInstance_->camera_.setProjection(hfov, vfov, currentInstance_->camera_.getZNear(), currentInstance_->camera_.getZFar()); -} - -void GLViewer::keyPressedCallback(unsigned char c, int x, int y) { - currentInstance_->keyStates_[c] = KEY_STATE::DOWN; - currentInstance_->lastPressedKey = c; - //glutPostRedisplay(); -} - -void GLViewer::keyReleasedCallback(unsigned char c, int x, int y) { - currentInstance_->keyStates_[c] = KEY_STATE::UP; -} - -void GLViewer::idle() { - glutPostRedisplay(); -} - -Simple3DObject::Simple3DObject() { - vaoID_ = 0; - drawingType_ = GL_TRIANGLES; - isStatic_ = need_update = false; -} - -Simple3DObject::~Simple3DObject() { - clear(); - if (vaoID_ != 0) { - glDeleteBuffers(3, vboID_); - glDeleteVertexArrays(1, &vaoID_); - } -} - -void Simple3DObject::addPoint(sl::float3 pt, sl::float3 clr){ - vertices_.push_back(pt); - colors_.push_back(clr); - indices_.push_back((int) indices_.size()); - need_update = true; -} - -void Simple3DObject::addLine(sl::float3 pt1, sl::float3 pt2, sl::float3 clr){ - addPoint(pt1, clr); - addPoint(pt2, clr); -} - -void Simple3DObject::addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float3 clr){ - addPoint(p1, clr); - addPoint(p2, clr); - 
addPoint(p3, clr); -} -void Simple3DObject::addBBox(std::vector &pts, sl::float3 clr) { - int start_id = vertices_.size() / 3; - - float transparency_top = 0.05f, transparency_bottom = 0.75f; - for (unsigned int i = 0; i < pts.size(); i++) { - // clr.a = (i < 4 ? transparency_top : transparency_bottom); - addPoint(pts[i], clr); - } - - const std::vector boxLinks = { 4, 5, 5, 6, 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7 }; - - for (unsigned int i = 0; i < boxLinks.size(); i += 2) { - indices_.push_back(start_id + boxLinks[i]); - indices_.push_back(start_id + boxLinks[i + 1]); - } -} - -void Simple3DObject::addPt(sl::float3 pt) { - // vertices_.push_back(pt.x); - // vertices_.push_back(pt.y); - // vertices_.push_back(pt.z); -} - -void Simple3DObject::addClr(sl::float4 clr) { - // colors_.push_back(clr.r); - // colors_.push_back(clr.g); - // colors_.push_back(clr.b); - // colors_.push_back(clr.a); -} - - -void Simple3DObject::pushToGPU() { - if(!need_update) return; - - if (!isStatic_ || vaoID_ == 0) { - if (vaoID_ == 0) { - glGenVertexArrays(1, &vaoID_); - glGenBuffers(3, vboID_); - } - glBindVertexArray(vaoID_); - glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); - glBufferData(GL_ARRAY_BUFFER, vertices_.size() * sizeof(sl::float3), &vertices_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); - glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); - glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); - - glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); - glBufferData(GL_ARRAY_BUFFER, colors_.size() * sizeof(sl::float3), &colors_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); - glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); - glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); - - glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); - glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_.size() * sizeof (unsigned int), &indices_[0], isStatic_ ? 
GL_STATIC_DRAW : GL_DYNAMIC_DRAW); - - glBindVertexArray(0); - glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); - glBindBuffer(GL_ARRAY_BUFFER, 0); - need_update = false; - } -} - -void Simple3DObject::clear() { - vertices_.clear(); - colors_.clear(); - indices_.clear(); -} - -void Simple3DObject::setDrawingType(GLenum type) { - drawingType_ = type; -} - -void Simple3DObject::draw() { - glBindVertexArray(vaoID_); - glDrawElements(drawingType_, (GLsizei) indices_.size(), GL_UNSIGNED_INT, 0); - glBindVertexArray(0); -} - -Shader::Shader(const GLchar* vs, const GLchar* fs) { - if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { - std::cout << "ERROR: while compiling vertex shader" << std::endl; - } - if (!compile(fragmentId_, GL_FRAGMENT_SHADER, fs)) { - std::cout << "ERROR: while compiling fragment shader" << std::endl; - } - - programId_ = glCreateProgram(); - - glAttachShader(programId_, verterxId_); - glAttachShader(programId_, fragmentId_); - - glBindAttribLocation(programId_, ATTRIB_VERTICES_POS, "in_vertex"); - glBindAttribLocation(programId_, ATTRIB_COLOR_POS, "in_texCoord"); - - glLinkProgram(programId_); - - GLint errorlk(0); - glGetProgramiv(programId_, GL_LINK_STATUS, &errorlk); - if (errorlk != GL_TRUE) { - std::cout << "ERROR: while linking Shader :" << std::endl; - GLint errorSize(0); - glGetProgramiv(programId_, GL_INFO_LOG_LENGTH, &errorSize); - - char *error = new char[errorSize + 1]; - glGetShaderInfoLog(programId_, errorSize, &errorSize, error); - error[errorSize] = '\0'; - std::cout << error << std::endl; - - delete[] error; - glDeleteProgram(programId_); - } -} - -Shader::~Shader() { - if (verterxId_ != 0) - glDeleteShader(verterxId_); - if (fragmentId_ != 0) - glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); -} - -GLuint Shader::getProgramId() { - return programId_; -} - -bool Shader::compile(GLuint &shaderId, GLenum type, const GLchar* src) { - shaderId = glCreateShader(type); - if (shaderId == 0) { - std::cout << 
"ERROR: shader type (" << type << ") does not exist" << std::endl; - return false; - } - glShaderSource(shaderId, 1, (const char**) &src, 0); - glCompileShader(shaderId); - - GLint errorCp(0); - glGetShaderiv(shaderId, GL_COMPILE_STATUS, &errorCp); - if (errorCp != GL_TRUE) { - std::cout << "ERROR: while compiling Shader :" << std::endl; - GLint errorSize(0); - glGetShaderiv(shaderId, GL_INFO_LOG_LENGTH, &errorSize); - - char *error = new char[errorSize + 1]; - glGetShaderInfoLog(shaderId, errorSize, &errorSize, error); - error[errorSize] = '\0'; - std::cout << error << std::endl; - - delete[] error; - glDeleteShader(shaderId); - return false; - } - return true; -} - -const GLchar* IMAGE_FRAGMENT_SHADER = - "#version 330 core\n" - " in vec2 UV;\n" - " out vec4 color;\n" - " uniform sampler2D texImage;\n" - " void main() {\n" - " vec2 scaler =vec2(UV.x,1.f - UV.y);\n" - " vec3 rgbcolor = vec3(texture(texImage, scaler).zyx);\n" - " vec3 color_rgb = pow(rgbcolor, vec3(1.65f));\n" - " color = vec4(color_rgb,1);\n" - "}"; - -const GLchar* IMAGE_VERTEX_SHADER = - "#version 330\n" - "layout(location = 0) in vec3 vert;\n" - "out vec2 UV;" - "void main() {\n" - " UV = (vert.xy+vec2(1,1))* .5f;\n" - " gl_Position = vec4(vert, 1);\n" - "}\n"; - - -PointCloud::PointCloud() { - -} - -PointCloud::~PointCloud() { - close(); -} - -void PointCloud::close() { - if (refMat.isInit()) { - auto err = cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); - if (err != cudaSuccess) - std::cerr << "Error: CUDA UnmapResources (" << err << ")" << std::endl; - glDeleteBuffers(1, &bufferGLID_); - refMat.free(); - } -} - -void PointCloud::initialize(sl::Mat &ref, sl::float3 clr_) { - - refMat = ref; - clr = clr_; - - glGenBuffers(1, &bufferGLID_); - glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); - glBufferData(GL_ARRAY_BUFFER, refMat.getResolution().area() * 4 * sizeof (float), 0, GL_STATIC_DRAW); - glBindBuffer(GL_ARRAY_BUFFER, 0); - - cudaError_t err = cudaGraphicsGLRegisterBuffer(&bufferCudaID_, 
bufferGLID_, cudaGraphicsRegisterFlagsNone); - if (err != cudaSuccess) - std::cerr << "Error: CUDA - OpenGL Interop failed (" << err << ")" << std::endl; - - err = cudaGraphicsMapResources(1, &bufferCudaID_, 0); - if (err != cudaSuccess) - std::cerr << "Error: CUDA MapResources (" << err << ")" << std::endl; - - err = cudaGraphicsResourceGetMappedPointer((void**) &xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_); - if (err != cudaSuccess) - std::cerr << "Error: CUDA GetMappedPointer (" << err << ")" << std::endl; - - shader_ = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); - shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); - shColor = glGetUniformLocation(shader_.getProgramId(), "u_color"); - shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat"); -} - -void PointCloud::pushNewPC() { - if (refMat.isInit()) - cudaMemcpy(xyzrgbaMappedBuf_, refMat.getPtr(sl::MEM::CPU), numBytes_, cudaMemcpyHostToDevice); -} - -void PointCloud::draw(const sl::Transform& vp, bool draw_flat) { - if (refMat.isInit()) { -#ifndef JETSON_STYLE - glDisable(GL_BLEND); -#endif - - glUseProgram(shader_.getProgramId()); - glUniformMatrix4fv(shMVPMatrixLoc_, 1, GL_TRUE, vp.m); - - glUniform3fv(shColor, 1, clr.v); - glUniform1i(shDrawColor, draw_flat); - - glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); - glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 4, GL_FLOAT, GL_FALSE, 0, 0); - glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); - - glDrawArrays(GL_POINTS, 0, refMat.getResolution().area()); - glBindBuffer(GL_ARRAY_BUFFER, 0); - glUseProgram(0); - -#ifndef JETSON_STYLE - glEnable(GL_BLEND); -#endif - } -} - - -CameraViewer::CameraViewer() { - -} - -CameraViewer::~CameraViewer() { - close(); -} - -void CameraViewer::close() { - if (ref.isInit()) { - - auto err = cudaGraphicsUnmapResources(1, &cuda_gl_ressource, 0); - if (err) std::cout << "err 3 " << err << " " << cudaGetErrorString(err) << "\n"; - - glDeleteTextures(1, &texture); - 
glDeleteBuffers(3, vboID_); - glDeleteVertexArrays(1, &vaoID_); - ref.free(); - } -} - -bool CameraViewer::initialize(sl::Mat &im, sl::float3 clr) { - - // Create 3D axis - float fx,fy,cx,cy; - fx = fy = 1400; - float width, height; - width = 2208; - height = 1242; - cx = width /2; - cy = height /2; - - float Z_ = .5f; - sl::float3 toOGL(1,-1,-1); - sl::float3 cam_0(0, 0, 0); - sl::float3 cam_1, cam_2, cam_3, cam_4; - - float fx_ = 1.f / fx; - float fy_ = 1.f / fy; - - cam_1.z = Z_; - cam_1.x = (0 - cx) * Z_ *fx_; - cam_1.y = (0 - cy) * Z_ *fy_ ; - cam_1 *= toOGL; - - cam_2.z = Z_; - cam_2.x = (width - cx) * Z_ *fx_; - cam_2.y = (0 - cy) * Z_ *fy_; - cam_2 *= toOGL; - - cam_3.z = Z_; - cam_3.x = (width - cx) * Z_ *fx_; - cam_3.y = (height - cy) * Z_ *fy_; - cam_3 *= toOGL; - - cam_4.z = Z_; - cam_4.x = (0 - cx) * Z_ *fx_; - cam_4.y = (height - cy) * Z_ *fy_; - cam_4 *= toOGL; - - - frustum.addFace(cam_0, cam_1, cam_2, clr); - frustum.addFace(cam_0, cam_2, cam_3, clr); - frustum.addFace(cam_0, cam_3, cam_4, clr); - frustum.addFace(cam_0, cam_4, cam_1, clr); - frustum.setDrawingType(GL_TRIANGLES); - frustum.pushToGPU(); - - vert.push_back(cam_1); - vert.push_back(cam_2); - vert.push_back(cam_3); - vert.push_back(cam_4); - - uv.push_back(sl::float2(0,0)); - uv.push_back(sl::float2(1,0)); - uv.push_back(sl::float2(1,1)); - uv.push_back(sl::float2(0,1)); - - faces.push_back(sl::uint3(0,1,2)); - faces.push_back(sl::uint3(0,2,3)); - - ref = im; - shader = Shader(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); - shMVPMatrixLocTex_ = glGetUniformLocation(shader.getProgramId(), "u_mvpMatrix"); - - glGenVertexArrays(1, &vaoID_); - glGenBuffers(3, vboID_); - - glBindVertexArray(vaoID_); - glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); - glBufferData(GL_ARRAY_BUFFER, vert.size() * sizeof(sl::float3), &vert[0], GL_STATIC_DRAW); - glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); - glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); - - 
glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); - glBufferData(GL_ARRAY_BUFFER, uv.size() * sizeof(sl::float2), &uv[0], GL_STATIC_DRAW); - glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); - glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); - - glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); - glBufferData(GL_ELEMENT_ARRAY_BUFFER, faces.size() * sizeof(sl::uint3), &faces[0], GL_STATIC_DRAW); - - glBindVertexArray(0); - glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); - glBindBuffer(GL_ARRAY_BUFFER, 0); - - auto res = ref.getResolution(); - glGenTextures(1, &texture); - glBindTexture(GL_TEXTURE_2D, texture); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, res.width, res.height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL); - glBindTexture(GL_TEXTURE_2D, 0); - cudaError_t err = cudaGraphicsGLRegisterImage(&cuda_gl_ressource, texture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard); - if (err) std::cout << "err alloc " << err << " " << cudaGetErrorString(err) << "\n"; - glDisable(GL_TEXTURE_2D); - - err = cudaGraphicsMapResources(1, &cuda_gl_ressource, 0); - if (err) std::cout << "err 0 " << err << " " << cudaGetErrorString(err) << "\n"; - err = cudaGraphicsSubResourceGetMappedArray(&ArrIm, cuda_gl_ressource, 0, 0); - if (err) std::cout << "err 1 " << err << " " << cudaGetErrorString(err) << "\n"; - - return (err == cudaSuccess); -} - -void CameraViewer::pushNewImage() { - if (!ref.isInit()) return; - auto err = cudaMemcpy2DToArray(ArrIm, 0, 0, ref.getPtr(sl::MEM::CPU), ref.getStepBytes(sl::MEM::CPU), ref.getPixelBytes() * ref.getWidth(), ref.getHeight(), cudaMemcpyHostToDevice); - if (err) std::cout << "err 2 " << err << " " << cudaGetErrorString(err) << "\n"; -} - -void CameraViewer::draw(sl::Transform vpMatrix) { - if (!ref.isInit()) return; - - glUseProgram(shader.getProgramId()); - glPolygonMode(GL_FRONT_AND_BACK, 
GL_FILL); - - glUniformMatrix4fv(shMVPMatrixLocTex_, 1, GL_FALSE, sl::Transform::transpose(vpMatrix).m); - glBindTexture(GL_TEXTURE_2D, texture); - - glBindVertexArray(vaoID_); - glDrawElements(GL_TRIANGLES, (GLsizei)faces.size()*3, GL_UNSIGNED_INT, 0); - glBindVertexArray(0); - - glUseProgram(0); -} - - -const sl::Translation CameraGL::ORIGINAL_FORWARD = sl::Translation(0, 0, 1); -const sl::Translation CameraGL::ORIGINAL_UP = sl::Translation(0, 1, 0); -const sl::Translation CameraGL::ORIGINAL_RIGHT = sl::Translation(1, 0, 0); - -CameraGL::CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical) { - this->position_ = position; - setDirection(direction, vertical); - - offset_ = sl::Translation(0, 0, 0); - view_.setIdentity(); - updateView(); - setProjection(70, 70, 0.200f, 50.f); - updateVPMatrix(); -} - -CameraGL::~CameraGL() { -} - -void CameraGL::update() { - if (sl::Translation::dot(vertical_, up_) < 0) - vertical_ = vertical_ * -1.f; - updateView(); - updateVPMatrix(); -} - -void CameraGL::setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar) { - horizontalFieldOfView_ = horizontalFOV; - verticalFieldOfView_ = verticalFOV; - znear_ = znear; - zfar_ = zfar; - - float fov_y = verticalFOV * M_PI / 180.f; - float fov_x = horizontalFOV * M_PI / 180.f; - - projection_.setIdentity(); - projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); - projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); - projection_(2, 2) = -(zfar + znear) / (zfar - znear); - projection_(3, 2) = -1; - projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); - projection_(3, 3) = 0; -} - -const sl::Transform& CameraGL::getViewProjectionMatrix() const { - return vpMatrix_; -} - -float CameraGL::getHorizontalFOV() const { - return horizontalFieldOfView_; -} - -float CameraGL::getVerticalFOV() const { - return verticalFieldOfView_; -} - -void CameraGL::setOffsetFromPosition(const sl::Translation& o) { - offset_ = o; -} - -const sl::Translation& 
CameraGL::getOffsetFromPosition() const { - return offset_; -} - -void CameraGL::setDirection(const sl::Translation& direction, const sl::Translation& vertical) { - sl::Translation dirNormalized = direction; - dirNormalized.normalize(); - this->rotation_ = sl::Orientation(ORIGINAL_FORWARD, dirNormalized * -1.f); - updateVectors(); - this->vertical_ = vertical; - if (sl::Translation::dot(vertical_, up_) < 0) - rotate(sl::Rotation(M_PI, ORIGINAL_FORWARD)); -} - -void CameraGL::translate(const sl::Translation& t) { - position_ = position_ + t; -} - -void CameraGL::setPosition(const sl::Translation& p) { - position_ = p; -} - -void CameraGL::rotate(const sl::Orientation& rot) { - rotation_ = rot * rotation_; - updateVectors(); -} - -void CameraGL::rotate(const sl::Rotation& m) { - this->rotate(sl::Orientation(m)); -} - -void CameraGL::setRotation(const sl::Orientation& rot) { - rotation_ = rot; - updateVectors(); -} - -void CameraGL::setRotation(const sl::Rotation& m) { - this->setRotation(sl::Orientation(m)); -} - -const sl::Translation& CameraGL::getPosition() const { - return position_; -} - -const sl::Translation& CameraGL::getForward() const { - return forward_; -} - -const sl::Translation& CameraGL::getRight() const { - return right_; -} - -const sl::Translation& CameraGL::getUp() const { - return up_; -} - -const sl::Translation& CameraGL::getVertical() const { - return vertical_; -} - -float CameraGL::getZNear() const { - return znear_; -} - -float CameraGL::getZFar() const { - return zfar_; -} - -void CameraGL::updateVectors() { - forward_ = ORIGINAL_FORWARD * rotation_; - up_ = ORIGINAL_UP * rotation_; - right_ = sl::Translation(ORIGINAL_RIGHT * -1.f) * rotation_; -} - -void CameraGL::updateView() { - sl::Transform transformation(rotation_, (offset_ * rotation_) + position_); - view_ = sl::Transform::inverse(transformation); -} - -void CameraGL::updateVPMatrix() { - vpMatrix_ = projection_ * view_; -} +#include "GLViewer.hpp" +#include + +#if defined(_DEBUG) 
&& defined(_WIN32) +//#error "This sample should not be built in Debug mode, use RelWithDebInfo if you want to do step by step." +#endif + +#define FADED_RENDERING +const float grid_size = 10.0f; + +const GLchar* VERTEX_SHADER = + "#version 330 core\n" + "layout(location = 0) in vec3 in_Vertex;\n" + "layout(location = 1) in vec4 in_Color;\n" + "uniform mat4 u_mvpMatrix;\n" + "out vec4 b_color;\n" + "void main() {\n" + " b_color = in_Color;\n" + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" + "}"; + +const GLchar* FRAGMENT_SHADER = + "#version 330 core\n" + "in vec4 b_color;\n" + "layout(location = 0) out vec4 out_Color;\n" + "void main() {\n" + " out_Color = b_color;\n" + "}"; + +const GLchar* VERTEX_SHADER_TEXTURE = + "#version 330 core\n" + "layout(location = 0) in vec3 in_Vertex;\n" + "layout(location = 1) in vec2 in_UVs;\n" + "uniform mat4 u_mvpMatrix;\n" + "out vec2 UV;\n" + "void main() {\n" + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" + " UV = in_UVs;\n" + "}\n"; + +const GLchar* FRAGMENT_SHADER_TEXTURE = + "#version 330 core\n" + "in vec2 UV;\n" + "uniform sampler2D texture_sampler;\n" + "void main() {\n" + " gl_FragColor = vec4(texture(texture_sampler, UV).bgr, 1.0);\n" + "}\n"; + +void addVert(Simple3DObject &obj, float i_f, float limit, float height, sl::float4 &clr) { + auto p1 = sl::float3(i_f, height, -limit); + auto p2 = sl::float3(i_f, height, limit); + auto p3 = sl::float3(-limit, height, i_f); + auto p4 = sl::float3(limit, height, i_f); + + obj.addLine(p1, p2, clr); + obj.addLine(p3, p4, clr); +} + +GLViewer* currentInstance_ = nullptr; + +GLViewer::GLViewer() : available(false) { + currentInstance_ = this; + mouseButton_[0] = mouseButton_[1] = mouseButton_[2] = false; + clearInputs(); + previousMouseMotion_[0] = previousMouseMotion_[1] = 0; +} + +GLViewer::~GLViewer() { +} + +void GLViewer::exit() { + if (currentInstance_ && available) { + available = false; + for (auto& pc : point_clouds) + { + pc.second.close(); + } + } +} + 
+bool GLViewer::isAvailable() { + if (currentInstance_ && available) { + glutMainLoopEvent(); + } return available; +} + +Simple3DObject createFrustum(sl::CameraParameters param) { + // Create 3D axis + Simple3DObject it(sl::Translation(0, 0, 0), true); + + float Z_ = -150; + sl::float3 cam_0(0, 0, 0); + sl::float3 cam_1, cam_2, cam_3, cam_4; + + float fx_ = 1.f / param.fx; + float fy_ = 1.f / param.fy; + + cam_1.z = Z_; + cam_1.x = (0 - param.cx) * Z_ *fx_; + cam_1.y = (0 - param.cy) * Z_ *fy_; + + cam_2.z = Z_; + cam_2.x = (param.image_size.width - param.cx) * Z_ *fx_; + cam_2.y = (0 - param.cy) * Z_ *fy_; + + cam_3.z = Z_; + cam_3.x = (param.image_size.width - param.cx) * Z_ *fx_; + cam_3.y = (param.image_size.height - param.cy) * Z_ *fy_; + + cam_4.z = Z_; + cam_4.x = (0 - param.cx) * Z_ *fx_; + cam_4.y = (param.image_size.height - param.cy) * Z_ *fy_; + + sl::float4 clr(0.8f, 0.5f, 0.2f, 1.0f); + it.addFace(cam_0, cam_1, cam_2, clr); + it.addFace(cam_0, cam_2, cam_3, clr); + it.addFace(cam_0, cam_3, cam_4, clr); + it.addFace(cam_0, cam_4, cam_1, clr); + + it.setDrawingType(GL_TRIANGLES); + return it; +} + +void CloseFunc(void) { + if (currentInstance_) + currentInstance_->exit(); +} + +void GLViewer::init(int argc, char **argv, const std::vector& cameras_id) { + glutInit(&argc, argv); + int wnd_w = glutGet(GLUT_SCREEN_WIDTH); + int wnd_h = glutGet(GLUT_SCREEN_HEIGHT); + + glutInitWindowSize(wnd_w * 0.8, wnd_h * 0.8); + glutInitWindowPosition(wnd_w * 0.1, wnd_h * 0.1); + glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH); + glutCreateWindow("ZED Object Detection"); + + GLenum err = glewInit(); + if (GLEW_OK != err) + std::cout << "ERROR: glewInit failed: " << glewGetErrorString(err) << "\n"; + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); + + // Compile and create the shader + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); + shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); + + 
shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); + + // Create the camera + camera_ = CameraGL(sl::Translation(0, 2, 10), sl::Translation(0, 0, -1)); + + BBox_edges = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_edges.setDrawingType(GL_LINES); + + BBox_faces = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_faces.setDrawingType(GL_QUADS); + + for (const sl::CameraIdentifier& cam_id : cameras_id) { + BBox_edges_per_cam[cam_id.sn] = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_edges_per_cam[cam_id.sn].setDrawingType(GL_LINES); + + BBox_faces_per_cam[cam_id.sn] = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_faces_per_cam[cam_id.sn].setDrawingType(GL_QUADS); + } + + bckgrnd_clr = sl::float4(0.2f, 0.19f, 0.2f, 1.0f); + + floor_grid = Simple3DObject(sl::Translation(0, 0, 0), true); + floor_grid.setDrawingType(GL_LINES); + + float limit = 20.f; + sl::float4 clr_grid(80, 80, 80, 255); + clr_grid /= 255.f; + float height = -3; + for (int i = (int) (-limit); i <= (int) (limit); i++) + addVert(floor_grid, i , limit , height , clr_grid); + + floor_grid.pushToGPU(); + + std::random_device dev; + rng = std::mt19937(dev()); + rng.seed(42); + uint_dist360 = std::uniform_int_distribution(0, 360); + + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glEnable(GL_LINE_SMOOTH); + glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); + +#ifndef JETSON_STYLE + glEnable(GL_POINT_SMOOTH); +#endif + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); + + // Map glut function on this class methods + glutDisplayFunc(GLViewer::drawCallback); + glutMouseFunc(GLViewer::mouseButtonCallback); + glutMotionFunc(GLViewer::mouseMotionCallback); + glutReshapeFunc(GLViewer::reshapeCallback); + glutKeyboardFunc(GLViewer::keyPressedCallback); + glutKeyboardUpFunc(GLViewer::keyReleasedCallback); + glutCloseFunc(CloseFunc); + + available 
= true; +} + +void GLViewer::render() { + if (available) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + glClearColor(bckgrnd_clr.b, bckgrnd_clr.g, bckgrnd_clr.r, bckgrnd_clr.a); + update(); + draw(); + printText(); + glutSwapBuffers(); + glutPostRedisplay(); + } +} + +sl::float3 newColor(float hh) { + float s = 0.75f; + float v = 0.9f; + + sl::float3 clr; + int i = (int)hh; + float ff = hh - i; + float p = v * (1.f - s); + float q = v * (1.f - (s * ff)); + float t = v * (1.f - (s * (1.f - ff))); + switch (i) { + case 0: + clr.r = v; + clr.g = t; + clr.b = p; + break; + case 1: + clr.r = q; + clr.g = v; + clr.b = p; + break; + case 2: + clr.r = p; + clr.g = v; + clr.b = t; + break; + + case 3: + clr.r = p; + clr.g = q; + clr.b = v; + break; + case 4: + clr.r = t; + clr.g = p; + clr.b = v; + break; + case 5: + default: + clr.r = v; + clr.g = p; + clr.b = q; + break; + } + return clr; +} + +sl::float3 GLViewer::getColor(int id, bool for_bbox) { + const std::lock_guard lock(mtx_clr); + if (for_bbox) { + if (colors_bbox.find(id) == colors_bbox.end()) { + float hh = uint_dist360(rng) / 60.f; + colors_bbox[id] = newColor(hh); + } + return colors_bbox[id]; + } + else { + if (colors.find(id) == colors.end()) { + int h_ = uint_dist360(rng); + float hh = h_ / 60.f; + colors[id] = newColor(hh); + } + return colors[id]; + } +} + +void GLViewer::setCameraPose(int id, sl::Transform pose) { + const std::lock_guard lock(mtx); + getColor(id, false); + poses[id] = pose; +} + +void GLViewer::updateCamera(int id, sl::Mat& view, sl::Mat& pc) { + mtx.lock(); + auto clr = getColor(id, false); + if (view.isInit() && viewers.find(id) == viewers.end()) + viewers[id].initialize(view, clr); + + if (pc.isInit() && point_clouds.find(id) == point_clouds.end()) + point_clouds[id].initialize(pc, clr); + + mtx.unlock(); +} + +void GLViewer::updateCamera(sl::Mat& pc) { + const std::lock_guard lock(mtx); + int id = 0; + auto clr = getColor(id, false); + + // we need to release old pc and 
initialize new one because fused point cloud don't have the same number of points for each process + // I used close but it crashed in draw. Not yet investigated + point_clouds[id].initialize(pc, clr); +} + +void GLViewer::setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar) { + // Just slightly up the ZED camera FOV to make a small black border + float fov_y = (params.v_fov + 0.5f) * M_PI / 180.f; + float fov_x = (params.h_fov + 0.5f) * M_PI / 180.f; + + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); + projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); + projection_(2, 2) = -(zfar + znear) / (zfar - znear); + projection_(3, 2) = -1; + projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); + projection_(3, 3) = 0; + + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); //Horizontal FoV. + projection_(0, 1) = 0; + projection_(0, 2) = 2.0f * ((params.image_size.width - 1.0f * params.cx) / params.image_size.width) - 1.0f; //Horizontal offset. + projection_(0, 3) = 0; + + projection_(1, 0) = 0; + projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); //Vertical FoV. + projection_(1, 2) = -(2.0f * ((params.image_size.height - 1.0f * params.cy) / params.image_size.height) - 1.0f); //Vertical offset. + projection_(1, 3) = 0; + + projection_(2, 0) = 0; + projection_(2, 1) = 0; + projection_(2, 2) = -(zfar + znear) / (zfar - znear); //Near and far planes. + projection_(2, 3) = -(2.0f * zfar * znear) / (zfar - znear); //Near and far planes. 
+ + projection_(3, 0) = 0; + projection_(3, 1) = 0; + projection_(3, 2) = -1; + projection_(3, 3) = 0.0f; +} + +static void createBboxRendering(std::vector& bbox, + sl::float4& bbox_clr, + Simple3DObject& BBox_edges, + Simple3DObject& BBox_faces) +{ + // First create top and bottom full edges + BBox_edges.addFullEdges(bbox, bbox_clr); + // Add faded vertical edges + BBox_edges.addVerticalEdges(bbox, bbox_clr); + // Add faces + BBox_faces.addVerticalFaces(bbox, bbox_clr); + // Add top face + BBox_faces.addTopFace(bbox, bbox_clr); +} + +static void addObjects(const sl::Objects objs, + Simple3DObject& BBox_edges, + Simple3DObject& BBox_faces, + const sl::float4 clr_id_ = sl::float4(-1.f, -1.f, -1.f, 1.f), + const bool draw_cross = false) +{ + for (unsigned int i = 0; i < objs.object_list.size(); i++) { + if (renderObject(objs.object_list[i], objs.is_tracked)) { + auto bb_ = objs.object_list[i].bounding_box; + if (!bb_.empty()) { + sl::float4 clr_id{clr_id_}; + if (clr_id.x == -1.f) { + if (objs.object_list[i].tracking_state != sl::OBJECT_TRACKING_STATE::OK) { + clr_id = getColorClass((int)objs.object_list[i].label); + } + else { + clr_id = generateColorID_f(objs.object_list[i].id); + } + } + + if (draw_cross) { // display centroid as a cross + sl::float3 centroid = objs.object_list[i].position; + const float size_cendtroid = 0.005f; // m + sl::float3 centroid1 = centroid; + centroid1.y += size_cendtroid; + sl::float3 centroid2 = objs.object_list[i].position; + centroid2.y -= size_cendtroid; + sl::float3 centroid3 = objs.object_list[i].position; + centroid3.x += size_cendtroid; + sl::float3 centroid4 = objs.object_list[i].position; + centroid4.x -= size_cendtroid; + + BBox_edges.addLine(centroid1, centroid2, sl::float4(1.f, 0.5f, 0.5f, 1.f)); + BBox_edges.addLine(centroid3, centroid4, sl::float4(1.f, 0.5f, 0.5f, 1.f)); + } + + createBboxRendering(bb_, clr_id, BBox_edges, BBox_faces); + } + } + } +} + +static void addObjects(const std::unordered_map& objs, + 
Simple3DObject& BBox_edges, + Simple3DObject& BBox_faces, + const sl::float4 clr_id_ = sl::float4(-1.f, -1.f, -1.f, 1.f), + const bool draw_cross = false) +{ + for (const std::pair& it : objs) { + if (it.second.is_new) + { + addObjects(it.second, BBox_edges, BBox_faces, clr_id_, draw_cross); + } + } +} + +void GLViewer::updateObjects(const std::unordered_map& objs, + const std::unordered_map>& singledata, + const sl::FusionMetrics& metrics) { + mtx.lock(); + + BBox_edges.clear(); + BBox_faces.clear(); + addObjects(objs, BBox_edges, BBox_faces); + + fusionStats.clear(); + float id = 0.5F; + + ObjectClassName obj_str; + obj_str.name_lineA = "Publishers :" + std::to_string(metrics.mean_camera_fused); + obj_str.name_lineB = "Sync :" + std::to_string(metrics.mean_stdev_between_camera * 1000.f); + obj_str.color = sl::float4(0.9, 0.9, 0.9, 1); + obj_str.position = sl::float3(10, (id * 30), 0); + fusionStats.push_back(obj_str); + + for (auto& it : BBox_edges_per_cam) + it.second.clear(); + for (auto& it : BBox_faces_per_cam) + it.second.clear(); + + for (const std::pair>& it : singledata) { + auto clr = getColor(it.first.sn, false); + ++id; + + ObjectClassName obj_str; + obj_str.name_lineA = "CAM: " + std::to_string(it.first.sn); + obj_str.name_lineA += " FPS: " + std::to_string(metrics.camera_individual_stats.at(it.first).received_fps); + obj_str.name_lineB = "Ratio Detection :" + std::to_string(metrics.camera_individual_stats.at(it.first).ratio_detection); + obj_str.name_lineB += " Delta " + std::to_string(metrics.camera_individual_stats.at(it.first).delta_ts * 1000.f); + obj_str.color = clr; + obj_str.position = sl::float3(10, (id * 30), 0); + fusionStats.push_back(obj_str); + + std::swap(clr.r, clr.b); + for (const std::pair& it2 : it.second) { + addObjects(it2.second, BBox_edges_per_cam[it.first.sn], BBox_faces_per_cam[it.first.sn], clr); + } + } + + mtx.unlock(); +} + +void GLViewer::update() { + if (keyStates_['q'] == KEY_STATE::UP || keyStates_['Q'] == 
KEY_STATE::UP || keyStates_[27] == KEY_STATE::UP) { + currentInstance_->exit(); + return; + } + + if (keyStates_['r'] == KEY_STATE::UP || keyStates_['R'] == KEY_STATE::UP) + currentInstance_->show_raw = !currentInstance_->show_raw; + + if (keyStates_['f'] == KEY_STATE::UP || keyStates_['F'] == KEY_STATE::UP) + currentInstance_->show_fused = !currentInstance_->show_fused; + + if (keyStates_['c'] == KEY_STATE::UP || keyStates_['C'] == KEY_STATE::UP) + currentInstance_->draw_flat_color = !currentInstance_->draw_flat_color; + + if (keyStates_['s'] == KEY_STATE::UP || keyStates_['s'] == KEY_STATE::UP) + currentInstance_->show_pc = !currentInstance_->show_pc; + + if (keyStates_['p'] == KEY_STATE::UP || keyStates_['P'] == KEY_STATE::UP || keyStates_[32] == KEY_STATE::UP) + play = !play; + + // Rotate camera with mouse + if (mouseButton_[MOUSE_BUTTON::LEFT]) { + camera_.rotate(sl::Rotation((float)mouseMotion_[1] * MOUSE_R_SENSITIVITY, camera_.getRight())); + camera_.rotate(sl::Rotation((float)mouseMotion_[0] * MOUSE_R_SENSITIVITY, camera_.getVertical() * -1.f)); + } + + // Translate camera with mouse + if (mouseButton_[MOUSE_BUTTON::RIGHT]) { + camera_.translate(camera_.getUp() * (float)mouseMotion_[1] * MOUSE_T_SENSITIVITY); + camera_.translate(camera_.getRight() * (float)mouseMotion_[0] * MOUSE_T_SENSITIVITY); + } + + // Zoom in with mouse wheel + if (mouseWheelPosition_ != 0) { + //float distance = sl::Translation(camera_.getOffsetFromPosition()).norm(); + if (mouseWheelPosition_ > 0 /* && distance > camera_.getZNear()*/) { // zoom + camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f * -1); + } + else if (/*distance < camera_.getZFar()*/ mouseWheelPosition_ < 0) {// unzoom + //camera_.setOffsetFromPosition(camera_.getOffsetFromPosition() * MOUSE_DZ_SENSITIVITY); + camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f); + } + } + + camera_.update(); + mtx.lock(); + // Update point cloud buffers + BBox_edges.pushToGPU(); + 
BBox_faces.pushToGPU(); + for(auto &it: BBox_edges_per_cam) + it.second.pushToGPU(); + for(auto &it: BBox_faces_per_cam) + it.second.pushToGPU(); + + // Update point cloud buffers + for (auto& it : point_clouds) + it.second.pushNewPC(); + + for (auto& it : viewers) + { + it.second.pushNewImage(); + } + + + mtx.unlock(); + clearInputs(); +} + +void GLViewer::draw() { + glEnable(GL_DEPTH_TEST); + sl::Transform vpMatrix = camera_.getViewProjectionMatrix(); + + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); + glUseProgram(shader.it.getProgramId()); + glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, vpMatrix.m); + glLineWidth(1.f); + floor_grid.draw(); + + glLineWidth(1.5f); + if (show_fused) + BBox_edges.draw(); + if (show_raw) + for (auto& it : BBox_edges_per_cam) + it.second.draw(); + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); + + if (show_fused) + BBox_faces.draw(); + if (show_raw) + for (auto& it : BBox_faces_per_cam) + it.second.draw(); + glLineWidth(2.f); + glPointSize(1.f); + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); + for (auto& it : viewers) { + sl::Transform pose_ = vpMatrix * poses[it.first]; + glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, pose_.m); + it.second.frustum.draw(); + } + glUseProgram(0); + + for (auto& it : poses) { + sl::Transform vpMatrix_world = vpMatrix * it.second; + + if (show_pc) + if (point_clouds.find(it.first) != point_clouds.end()) + point_clouds[it.first].draw(vpMatrix_world, draw_flat_color); + + if (viewers.find(it.first) != viewers.end()) + viewers[it.first].draw(vpMatrix_world); + } +} + +sl::float2 compute3Dprojection(sl::float3& pt, const sl::Transform& cam, sl::Resolution wnd_size) { + sl::float4 pt4d(pt.x, pt.y, pt.z, 1.); + auto proj3D_cam = pt4d * cam; + sl::float2 proj2D; + proj2D.x = ((proj3D_cam.x / pt4d.w) * wnd_size.width) / (2.f * proj3D_cam.w) + wnd_size.width / 2.f; + proj2D.y = ((proj3D_cam.y / pt4d.w) * wnd_size.height) / (2.f * proj3D_cam.w) + wnd_size.height / 2.f; + return proj2D; +} + +void GLViewer::printText() { 
+ + sl::Resolution wnd_size(glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT)); + for (auto& it : fusionStats) { + + sl::float2 pt2d(it.position.x, it.position.y); + glColor4f(it.color.b, it.color.g, it.color.r, .85f); + const auto* string = it.name_lineA.c_str(); + glWindowPos2f(pt2d.x, pt2d.y + 15); + int len = (int)strlen(string); + for (int i = 0; i < len; i++) + glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); + + string = it.name_lineB.c_str(); + glWindowPos2f(pt2d.x, pt2d.y); + len = (int)strlen(string); + for (int i = 0; i < len; i++) + glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); + } +} + +void GLViewer::clearInputs() { + mouseMotion_[0] = mouseMotion_[1] = 0; + mouseWheelPosition_ = 0; + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; + if (keyStates_[i] != KEY_STATE::DOWN) + keyStates_[i] = KEY_STATE::FREE; + } +} + +void GLViewer::drawCallback() { + currentInstance_->render(); +} + +void GLViewer::mouseButtonCallback(int button, int state, int x, int y) { + if (button < 5) { + if (button < 3) { + currentInstance_->mouseButton_[button] = state == GLUT_DOWN; + } else { + currentInstance_->mouseWheelPosition_ += button == MOUSE_BUTTON::WHEEL_UP ? 
1 : -1; + } + currentInstance_->mouseCurrentPosition_[0] = x; + currentInstance_->mouseCurrentPosition_[1] = y; + currentInstance_->previousMouseMotion_[0] = x; + currentInstance_->previousMouseMotion_[1] = y; + } +} + +void GLViewer::mouseMotionCallback(int x, int y) { + currentInstance_->mouseMotion_[0] = x - currentInstance_->previousMouseMotion_[0]; + currentInstance_->mouseMotion_[1] = y - currentInstance_->previousMouseMotion_[1]; + currentInstance_->previousMouseMotion_[0] = x; + currentInstance_->previousMouseMotion_[1] = y; +} + +void GLViewer::reshapeCallback(int width, int height) { + glViewport(0, 0, width, height); + float hfov = (180.0f / M_PI) * (2.0f * atan(width / (2.0f * 500))); + float vfov = (180.0f / M_PI) * (2.0f * atan(height / (2.0f * 500))); + currentInstance_->camera_.setProjection(hfov, vfov, currentInstance_->camera_.getZNear(), currentInstance_->camera_.getZFar()); +} + +void GLViewer::keyPressedCallback(unsigned char c, int x, int y) { + currentInstance_->keyStates_[c] = KEY_STATE::DOWN; +} + +void GLViewer::keyReleasedCallback(unsigned char c, int x, int y) { + currentInstance_->keyStates_[c] = KEY_STATE::UP; +} + +void GLViewer::idle() { + glutPostRedisplay(); +} + + +Simple3DObject::Simple3DObject(sl::Translation position, bool isStatic) : isStatic_(isStatic) { + vaoID_ = 0; + drawingType_ = GL_TRIANGLES; + isStatic_ = false; + position_ = position; + rotation_.setIdentity(); +} + +Simple3DObject::~Simple3DObject() { + clear(); + if (vaoID_ != 0) { + glDeleteBuffers(3, vboID_); + glDeleteVertexArrays(1, &vaoID_); + } +} + +void Simple3DObject::addBBox(std::vector &pts, sl::float4 clr) { + int start_id = vertices_.size() / 3; + + float transparency_top = 0.05f, transparency_bottom = 0.75f; + for (unsigned int i = 0; i < pts.size(); i++) { + addPt(pts[i]); + clr.a = (i < 4 ? 
transparency_top : transparency_bottom); + addClr(clr); + } + + const std::vector boxLinks = {4, 5, 5, 6, 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7}; + + for (unsigned int i = 0; i < boxLinks.size(); i += 2) { + indices_.push_back(start_id + boxLinks[i]); + indices_.push_back(start_id + boxLinks[i + 1]); + } +} + +void Simple3DObject::addPt(sl::float3 pt) { + vertices_.push_back(pt); +} + +void Simple3DObject::addClr(sl::float4 clr) { + colors_.push_back(clr); +} + +void Simple3DObject::addPoint(sl::float3 pt, sl::float4 clr) { + vertices_.push_back(pt); + colors_.push_back(clr); + indices_.push_back((int)indices_.size()); +} + +void Simple3DObject::addLine(sl::float3 p1, sl::float3 p2, sl::float4 clr) { + addPoint(p1, clr); + addPoint(p2, clr); +} + +void Simple3DObject::addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float4 clr) { + addPoint(p1, clr); + addPoint(p2, clr); + addPoint(p3, clr); +} + +void Simple3DObject::addFullEdges(std::vector &pts, sl::float4 clr) { + clr.w = 0.4f; + int start_id = vertices_.size(); + + for (unsigned int i = 0; i < pts.size(); i++) { + addPt(pts[i]); + addClr(clr); + } + + const std::vector boxLinksTop = {0, 1, 1, 2, 2, 3, 3, 0}; + for (unsigned int i = 0; i < boxLinksTop.size(); i += 2) { + indices_.push_back(start_id + boxLinksTop[i]); + indices_.push_back(start_id + boxLinksTop[i + 1]); + } + + const std::vector boxLinksBottom = {4, 5, 5, 6, 6, 7, 7, 4}; + for (unsigned int i = 0; i < boxLinksBottom.size(); i += 2) { + indices_.push_back(start_id + boxLinksBottom[i]); + indices_.push_back(start_id + boxLinksBottom[i + 1]); + } +} + +void Simple3DObject::addVerticalEdges(std::vector &pts, sl::float4 clr) { + auto addSingleVerticalLine = [&](sl::float3 top_pt, sl::float3 bot_pt) { + std::vector current_pts{ + top_pt, + ((grid_size - 1.0f) * top_pt + bot_pt) / grid_size, + ((grid_size - 2.0f) * top_pt + bot_pt * 2.0f) / grid_size, + (2.0f * top_pt + bot_pt * (grid_size - 2.0f)) / grid_size, + (top_pt + bot_pt * (grid_size - 
1.0f)) / grid_size, + bot_pt}; + + int start_id = vertices_.size(); + for (unsigned int i = 0; i < current_pts.size(); i++) { + addPt(current_pts[i]); + clr.a = (i == 2 || i == 3) ? 0.0f : 0.4f; + addClr(clr); + } + + const std::vector boxLinks = {0, 1, 1, 2, 2, 3, 3, 4, 4, 5}; + for (unsigned int i = 0; i < boxLinks.size(); i += 2) { + indices_.push_back(start_id + boxLinks[i]); + indices_.push_back(start_id + boxLinks[i + 1]); + } + }; + + addSingleVerticalLine(pts[0], pts[4]); + addSingleVerticalLine(pts[1], pts[5]); + addSingleVerticalLine(pts[2], pts[6]); + addSingleVerticalLine(pts[3], pts[7]); +} + +void Simple3DObject::addTopFace(std::vector &pts, sl::float4 clr) { + clr.a = 0.3f; + for (auto it : pts) + addPoint(it, clr); +} + +void Simple3DObject::addVerticalFaces(std::vector &pts, sl::float4 clr) { + auto addQuad = [&](std::vector quad_pts, float alpha1, float alpha2) { // To use only with 4 points + for (unsigned int i = 0; i < quad_pts.size(); ++i) { + addPt(quad_pts[i]); + clr.a = (i < 2 ? 
alpha1 : alpha2); + addClr(clr); + } + + indices_.push_back((int) indices_.size()); + indices_.push_back((int) indices_.size()); + indices_.push_back((int) indices_.size()); + indices_.push_back((int) indices_.size()); + }; + + // For each face, we need to add 4 quads (the first 2 indexes are always the top points of the quad) + std::vector> quads + { + { + 0, 3, 7, 4 + }, // front face + { + 3, 2, 6, 7 + }, // right face + { + 2, 1, 5, 6 + }, // back face + { + 1, 0, 4, 5 + } // left face + }; + float alpha = 0.5f; + + for (const auto quad : quads) { + + // Top quads + std::vector quad_pts_1{ + pts[quad[0]], + pts[quad[1]], + ((grid_size - 0.5f) * pts[quad[1]] + 0.5f * pts[quad[2]]) / grid_size, + ((grid_size - 0.5f) * pts[quad[0]] + 0.5f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_1, alpha, alpha); + + std::vector quad_pts_2{ + ((grid_size - 0.5f) * pts[quad[0]] + 0.5f * pts[quad[3]]) / grid_size, + ((grid_size - 0.5f) * pts[quad[1]] + 0.5f * pts[quad[2]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[1]] + pts[quad[2]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[0]] + pts[quad[3]]) / grid_size}; + addQuad(quad_pts_2, alpha, 2 * alpha / 3); + + std::vector quad_pts_3{ + ((grid_size - 1.0f) * pts[quad[0]] + pts[quad[3]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[1]] + pts[quad[2]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[1]] + 1.5f * pts[quad[2]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[0]] + 1.5f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_3, 2 * alpha / 3, alpha / 3); + + std::vector quad_pts_4{ + ((grid_size - 1.5f) * pts[quad[0]] + 1.5f * pts[quad[3]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[1]] + 1.5f * pts[quad[2]]) / grid_size, + ((grid_size - 2.0f) * pts[quad[1]] + 2.0f * pts[quad[2]]) / grid_size, + ((grid_size - 2.0f) * pts[quad[0]] + 2.0f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_4, alpha / 3, 0.0f); + + // Bottom quads + std::vector quad_pts_5{ + (pts[quad[1]] * 2.0f + (grid_size - 2.0f) * pts[quad[2]]) / 
grid_size, + (pts[quad[0]] * 2.0f + (grid_size - 2.0f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] * 1.5f + (grid_size - 1.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 1.5f + (grid_size - 1.5f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_5, 0.0f, alpha / 3); + + std::vector quad_pts_6{ + (pts[quad[1]] * 1.5f + (grid_size - 1.5f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] * 1.5f + (grid_size - 1.5f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] + (grid_size - 1.0f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] + (grid_size - 1.0f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_6, alpha / 3, 2 * alpha / 3); + + std::vector quad_pts_7{ + (pts[quad[1]] + (grid_size - 1.0f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] + (grid_size - 1.0f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] * 0.5f + (grid_size - 0.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 0.5f + (grid_size - 0.5f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_7, 2 * alpha / 3, alpha); + + std::vector quad_pts_8{ + (pts[quad[0]] * 0.5f + (grid_size - 0.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 0.5f + (grid_size - 0.5f) * pts[quad[2]]) / grid_size, + pts[quad[2]], + pts[quad[3]]}; + addQuad(quad_pts_8, alpha, alpha); + } +} + +void Simple3DObject::pushToGPU() { + if (!isStatic_ || vaoID_ == 0) { + if (vaoID_ == 0) { + glGenVertexArrays(1, &vaoID_); + glGenBuffers(3, vboID_); + } + + if (vertices_.size() > 0) { + glBindVertexArray(vaoID_); + glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); + glBufferData(GL_ARRAY_BUFFER, vertices_.size() * sizeof (sl::float3), &vertices_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + } + + if (colors_.size() > 0) { + glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); + glBufferData(GL_ARRAY_BUFFER, colors_.size() * sizeof (sl::float4), &colors_[0], isStatic_ ? 
GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 4, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); + } + + if (indices_.size() > 0) { + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_.size() * sizeof (unsigned int), &indices_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + } + + glBindVertexArray(0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + glBindBuffer(GL_ARRAY_BUFFER, 0); + } +} + +void Simple3DObject::clear() { + vertices_.clear(); + colors_.clear(); + indices_.clear(); +} + +void Simple3DObject::setDrawingType(GLenum type) { + drawingType_ = type; +} + +void Simple3DObject::draw() { + if (indices_.size() && vaoID_) { + glBindVertexArray(vaoID_); + glDrawElements(drawingType_, (GLsizei) indices_.size(), GL_UNSIGNED_INT, 0); + glBindVertexArray(0); + } +} + +void Simple3DObject::translate(const sl::Translation& t) { + position_ = position_ + t; +} + +void Simple3DObject::setPosition(const sl::Translation& p) { + position_ = p; +} + +void Simple3DObject::setRT(const sl::Transform& mRT) { + position_ = mRT.getTranslation(); + rotation_ = mRT.getOrientation(); +} + +void Simple3DObject::rotate(const sl::Orientation& rot) { + rotation_ = rot * rotation_; +} + +void Simple3DObject::rotate(const sl::Rotation& m) { + this->rotate(sl::Orientation(m)); +} + +void Simple3DObject::setRotation(const sl::Orientation& rot) { + rotation_ = rot; +} + +void Simple3DObject::setRotation(const sl::Rotation& m) { + this->setRotation(sl::Orientation(m)); +} + +const sl::Translation& Simple3DObject::getPosition() const { + return position_; +} + +sl::Transform Simple3DObject::getModelMatrix() const { + sl::Transform tmp; + tmp.setOrientation(rotation_); + tmp.setTranslation(position_); + return tmp; +} + +Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { + if 
(!compile(verterxId_, GL_VERTEX_SHADER, vs)) { + std::cout << "ERROR: while compiling vertex shader" << std::endl; + } + if (!compile(fragmentId_, GL_FRAGMENT_SHADER, fs)) { + std::cout << "ERROR: while compiling fragment shader" << std::endl; + } + + programId_ = glCreateProgram(); + + glAttachShader(programId_, verterxId_); + glAttachShader(programId_, fragmentId_); + + glBindAttribLocation(programId_, ATTRIB_VERTICES_POS, "in_vertex"); + glBindAttribLocation(programId_, ATTRIB_COLOR_POS, "in_texCoord"); + + glLinkProgram(programId_); + + GLint errorlk(0); + glGetProgramiv(programId_, GL_LINK_STATUS, &errorlk); + if (errorlk != GL_TRUE) { + std::cout << "ERROR: while linking Shader :" << std::endl; + GLint errorSize(0); + glGetProgramiv(programId_, GL_INFO_LOG_LENGTH, &errorSize); + + char *error = new char[errorSize + 1]; + glGetShaderInfoLog(programId_, errorSize, &errorSize, error); + error[errorSize] = '\0'; + std::cout << error << std::endl; + + delete[] error; + glDeleteProgram(programId_); + } +} + +Shader::~Shader() { + if (verterxId_ != 0 && glIsShader(verterxId_)) + glDeleteShader(verterxId_); + if (fragmentId_ != 0 && glIsShader(fragmentId_)) + glDeleteShader(fragmentId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); +} + +GLuint Shader::getProgramId() { + return programId_; +} + +bool Shader::compile(GLuint &shaderId, GLenum type, const GLchar* src) { + shaderId = glCreateShader(type); + if (shaderId == 0) { + std::cout << "ERROR: shader type (" << type << ") does not exist" << std::endl; + return false; + } + glShaderSource(shaderId, 1, (const char**) &src, 0); + glCompileShader(shaderId); + + GLint errorCp(0); + glGetShaderiv(shaderId, GL_COMPILE_STATUS, &errorCp); + if (errorCp != GL_TRUE) { + std::cout << "ERROR: while compiling Shader :" << std::endl; + GLint errorSize(0); + glGetShaderiv(shaderId, GL_INFO_LOG_LENGTH, &errorSize); + + char *error = new char[errorSize + 1]; + glGetShaderInfoLog(shaderId, 
errorSize, &errorSize, error); + error[errorSize] = '\0'; + std::cout << error << std::endl; + + delete[] error; + glDeleteShader(shaderId); + return false; + } + return true; +} + +const GLchar* POINTCLOUD_VERTEX_SHADER = + "#version 330 core\n" + "layout(location = 0) in vec4 in_VertexRGBA;\n" + "out vec4 b_color;\n" + "uniform mat4 u_mvpMatrix;\n" + "uniform vec3 u_color;\n" + "uniform bool u_drawFlat;\n" + "void main() {\n" + " if(u_drawFlat)\n" + " b_color = vec4(u_color.bgr, .85f);\n" + "else{" + // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) + " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n" + " vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16);\n" + " b_color = vec4(clr_int.b / 255.0f, clr_int.g / 255.0f, clr_int.r / 255.0f, .85f);\n" + " }" + " gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1);\n" + "}"; + +const GLchar* POINTCLOUD_FRAGMENT_SHADER = + "#version 330 core\n" + "in vec4 b_color;\n" + "layout(location = 0) out vec4 out_Color;\n" + "void main() {\n" + " out_Color = b_color;\n" + "}"; + +PointCloud::PointCloud() : numBytes_(0), xyzrgbaMappedBuf_(nullptr) { + +} + +PointCloud::~PointCloud() { + close(); +} + +void PointCloud::close() { + if (refMat.isInit()) { + auto err = cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + glDeleteBuffers(1, &bufferGLID_); + refMat.free(); + } +} + +void PointCloud::initialize(sl::Mat& ref, sl::float3 clr_) +{ + refMat = ref; + clr = clr_; +} + +void PointCloud::pushNewPC() { + if (refMat.isInit()) { + + int sizebytes = refMat.getResolution().area() * sizeof(sl::float4); + if (numBytes_ != sizebytes) { + + if (numBytes_ == 0) { + glGenBuffers(1, 
&bufferGLID_); + + shader_.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); + shColor = glGetUniformLocation(shader_.getProgramId(), "u_color"); + shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat"); + } + else { + cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + cudaGraphicsUnregisterResource(bufferCudaID_); + } + + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glBufferData(GL_ARRAY_BUFFER, sizebytes, 0, GL_STATIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + cudaError_t err = cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsNone); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsMapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsResourceGetMappedPointer((void**)&xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + } + + cudaMemcpy(xyzrgbaMappedBuf_, refMat.getPtr(sl::MEM::CPU), numBytes_, cudaMemcpyHostToDevice); + } +} + +void PointCloud::draw(const sl::Transform& vp, bool draw_flat) { + if (refMat.isInit()) { +#ifndef JETSON_STYLE + glDisable(GL_BLEND); +#endif + + glUseProgram(shader_.getProgramId()); + glUniformMatrix4fv(shMVPMatrixLoc_, 1, GL_TRUE, vp.m); + + glUniform3fv(shColor, 1, clr.v); + glUniform1i(shDrawColor, draw_flat); + + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 4, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glDrawArrays(GL_POINTS, 0, refMat.getResolution().area()); + glBindBuffer(GL_ARRAY_BUFFER, 0); + glUseProgram(0); + +#ifndef JETSON_STYLE + glEnable(GL_BLEND); +#endif + } +} + + +CameraViewer::CameraViewer() { + +} + +CameraViewer::~CameraViewer() { + close(); +} + +void 
CameraViewer::close() { + if (ref.isInit()) { + + auto err = cudaGraphicsUnmapResources(1, &cuda_gl_ressource, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(cuda_gl_ressource); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + glDeleteTextures(1, &texture); + glDeleteBuffers(3, vboID_); + glDeleteVertexArrays(1, &vaoID_); + ref.free(); + } +} + +bool CameraViewer::initialize(sl::Mat& im, sl::float3 clr) { + + frustum = Simple3DObject(sl::Translation(0, 0, 0), false); + + // Create 3D axis + float fx, fy, cx, cy; + fx = fy = 1400; + float width, height; + width = 2208; + height = 1242; + cx = width / 2; + cy = height / 2; + + float Z_ = .5f; + sl::float3 toOGL(1, -1, -1); + sl::float3 cam_0(0, 0, 0); + sl::float3 cam_1, cam_2, cam_3, cam_4; + + float fx_ = 1.f / fx; + float fy_ = 1.f / fy; + + cam_1.z = Z_; + cam_1.x = (0 - cx) * Z_ * fx_; + cam_1.y = (0 - cy) * Z_ * fy_; + cam_1 *= toOGL; + + cam_2.z = Z_; + cam_2.x = (width - cx) * Z_ * fx_; + cam_2.y = (0 - cy) * Z_ * fy_; + cam_2 *= toOGL; + + cam_3.z = Z_; + cam_3.x = (width - cx) * Z_ * fx_; + cam_3.y = (height - cy) * Z_ * fy_; + cam_3 *= toOGL; + + cam_4.z = Z_; + cam_4.x = (0 - cx) * Z_ * fx_; + cam_4.y = (height - cy) * Z_ * fy_; + cam_4 *= toOGL; + + frustum.addFace(cam_0, cam_1, cam_2, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.addFace(cam_0, cam_2, cam_3, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.addFace(cam_0, cam_3, cam_4, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.addFace(cam_0, cam_4, cam_1, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.setDrawingType(GL_TRIANGLES); + + frustum.pushToGPU(); + + vert.push_back(cam_1); + vert.push_back(cam_2); + vert.push_back(cam_3); + vert.push_back(cam_4); + + uv.push_back(sl::float2(0, 0)); + uv.push_back(sl::float2(1, 0)); + uv.push_back(sl::float2(1, 1)); + uv.push_back(sl::float2(0, 1)); + + faces.push_back(sl::uint3(0, 1, 2)); + 
faces.push_back(sl::uint3(0, 2, 3)); + + ref = im; + shader.set(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); + shMVPMatrixLocTex_ = glGetUniformLocation(shader.getProgramId(), "u_mvpMatrix"); + + glGenVertexArrays(1, &vaoID_); + glGenBuffers(3, vboID_); + + glBindVertexArray(vaoID_); + glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); + glBufferData(GL_ARRAY_BUFFER, vert.size() * sizeof(sl::float3), &vert[0], GL_STATIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); + glBufferData(GL_ARRAY_BUFFER, uv.size() * sizeof(sl::float2), &uv[0], GL_STATIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, faces.size() * sizeof(sl::uint3), &faces[0], GL_STATIC_DRAW); + + glBindVertexArray(0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + auto res = ref.getResolution(); + glGenTextures(1, &texture); + glBindTexture(GL_TEXTURE_2D, texture); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, res.width, res.height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL); + glBindTexture(GL_TEXTURE_2D, 0); + cudaError_t err = cudaGraphicsGLRegisterImage(&cuda_gl_ressource, texture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard); + if (err) std::cout << "err alloc " << err << " " << cudaGetErrorString(err) << "\n"; + glDisable(GL_TEXTURE_2D); + + err = cudaGraphicsMapResources(1, &cuda_gl_ressource, 0); + if (err) std::cout << "err 0 " << err << " " << cudaGetErrorString(err) << "\n"; + err = cudaGraphicsSubResourceGetMappedArray(&ArrIm, cuda_gl_ressource, 0, 0); + if (err) std::cout << "err 1 " << err << " 
" << cudaGetErrorString(err) << "\n"; + + return (err == cudaSuccess); +} + +void CameraViewer::pushNewImage() { + if (!ref.isInit()) return; + auto err = cudaMemcpy2DToArray(ArrIm, 0, 0, ref.getPtr(sl::MEM::CPU), ref.getStepBytes(sl::MEM::CPU), ref.getPixelBytes() * ref.getWidth(), ref.getHeight(), cudaMemcpyHostToDevice); + if (err) std::cout << "err 2 " << err << " " << cudaGetErrorString(err) << "\n"; +} + +void CameraViewer::draw(sl::Transform vpMatrix) { + + if (!ref.isInit()) return; + + glUseProgram(shader.getProgramId()); + glUniformMatrix4fv(shMVPMatrixLocTex_, 1, GL_FALSE, sl::Transform::transpose(vpMatrix).m); + glBindTexture(GL_TEXTURE_2D, texture); + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); + + glBindVertexArray(vaoID_); + glDrawElements(GL_TRIANGLES, (GLsizei)faces.size() * 3, GL_UNSIGNED_INT, 0); + glBindVertexArray(0); + + glUseProgram(0); +} + +const sl::Translation CameraGL::ORIGINAL_FORWARD = sl::Translation(0, 0, 1); +const sl::Translation CameraGL::ORIGINAL_UP = sl::Translation(0, 1, 0); +const sl::Translation CameraGL::ORIGINAL_RIGHT = sl::Translation(1, 0, 0); + +CameraGL::CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical) { + this->position_ = position; + setDirection(direction, vertical); + + offset_ = sl::Translation(0, 0, 0); + view_.setIdentity(); + updateView(); + setProjection(70, 70, 0.200f, 50.f); + updateVPMatrix(); +} + +CameraGL::~CameraGL() { +} + +void CameraGL::update() { + if (sl::Translation::dot(vertical_, up_) < 0) + vertical_ = vertical_ * -1.f; + updateView(); + updateVPMatrix(); +} + +void CameraGL::setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar) { + horizontalFieldOfView_ = horizontalFOV; + verticalFieldOfView_ = verticalFOV; + znear_ = znear; + zfar_ = zfar; + + float fov_y = verticalFOV * M_PI / 180.f; + float fov_x = horizontalFOV * M_PI / 180.f; + + projection_.setIdentity(); + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); + projection_(1, 1) = 
1.0f / tanf(fov_y * 0.5f); + projection_(2, 2) = -(zfar + znear) / (zfar - znear); + projection_(3, 2) = -1; + projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); + projection_(3, 3) = 0; +} + +const sl::Transform& CameraGL::getViewProjectionMatrix() const { + return vpMatrix_; +} + +float CameraGL::getHorizontalFOV() const { + return horizontalFieldOfView_; +} + +float CameraGL::getVerticalFOV() const { + return verticalFieldOfView_; +} + +void CameraGL::setOffsetFromPosition(const sl::Translation& o) { + offset_ = o; +} + +const sl::Translation& CameraGL::getOffsetFromPosition() const { + return offset_; +} + +void CameraGL::setDirection(const sl::Translation& direction, const sl::Translation& vertical) { + sl::Translation dirNormalized = direction; + dirNormalized.normalize(); + this->rotation_ = sl::Orientation(ORIGINAL_FORWARD, dirNormalized * -1.f); + updateVectors(); + this->vertical_ = vertical; + if (sl::Translation::dot(vertical_, up_) < 0) + rotate(sl::Rotation(M_PI, ORIGINAL_FORWARD)); +} + +void CameraGL::translate(const sl::Translation& t) { + position_ = position_ + t; +} + +void CameraGL::setPosition(const sl::Translation& p) { + position_ = p; +} + +void CameraGL::rotate(const sl::Orientation& rot) { + rotation_ = rot * rotation_; + updateVectors(); +} + +void CameraGL::rotate(const sl::Rotation& m) { + this->rotate(sl::Orientation(m)); +} + +void CameraGL::setRotation(const sl::Orientation& rot) { + rotation_ = rot; + updateVectors(); +} + +void CameraGL::setRotation(const sl::Rotation& m) { + this->setRotation(sl::Orientation(m)); +} + +const sl::Translation& CameraGL::getPosition() const { + return position_; +} + +const sl::Translation& CameraGL::getForward() const { + return forward_; +} + +const sl::Translation& CameraGL::getRight() const { + return right_; +} + +const sl::Translation& CameraGL::getUp() const { + return up_; +} + +const sl::Translation& CameraGL::getVertical() const { + return vertical_; +} + +float 
CameraGL::getZNear() const { + return znear_; +} + +float CameraGL::getZFar() const { + return zfar_; +} + +void CameraGL::updateVectors() { + forward_ = ORIGINAL_FORWARD * rotation_; + up_ = ORIGINAL_UP * rotation_; + right_ = sl::Translation(ORIGINAL_RIGHT * -1.f) * rotation_; +} + +void CameraGL::updateView() { + sl::Transform transformation(rotation_, (offset_ * rotation_) + position_); + view_ = sl::Transform::inverse(transformation); +} + +void CameraGL::updateVPMatrix() { + vpMatrix_ = projection_ * view_; +} \ No newline at end of file diff --git a/object detection/multi-camera/cpp/src/main.cpp b/object detection/multi-camera/cpp/src/main.cpp index ff3f90cb..85202067 100644 --- a/object detection/multi-camera/cpp/src/main.cpp +++ b/object detection/multi-camera/cpp/src/main.cpp @@ -45,6 +45,8 @@ int main(int argc, char **argv) return EXIT_FAILURE; } + Trigger trigger; + // Check if the ZED camera should run within the same process or if they are running on the edge. std::vector clients(configurations.size()); int id_ = 0; @@ -55,7 +57,7 @@ int main(int argc, char **argv) if (conf.communication_parameters.getType() == sl::CommunicationParameters::COMM_TYPE::INTRA_PROCESS) { std::cout << "Try to open ZED " << conf.serial_number << ".." << std::flush; - auto state = clients[id_].open(conf.input_type); + auto state = clients[id_].open(conf.input_type, &trigger); if (!state) { std::cerr << "Could not open ZED: " << conf.input_type.getConfiguration() << ". Skipping..." << std::endl; @@ -105,7 +107,7 @@ int main(int argc, char **argv) { sl::CameraIdentifier uuid(it.serial_number); // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. 
- auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose); + auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose, it.override_gravity); if (state != sl::FUSION_ERROR_CODE::SUCCESS) std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . " << state << std::endl; else @@ -119,73 +121,88 @@ int main(int argc, char **argv) return EXIT_FAILURE; } - // as this sample shows how to fuse body detection from the multi camera setup - // we enable the Body Tracking module with its options + fusion.enablePositionalTracking(); + + // As this sample shows how to do object detection with a multi camera setup, we enable the same model accross cameras + // and only create one group in fusion + + // Enable object detection sl::ObjectDetectionFusionParameters od_fusion_params; - od_fusion_params.enable_tracking = true; + od_fusion_params.enable_tracking = false; // We do tracking per camera then fuse only in fusion. + // This takes more resources than tracking in Fusion only but yields more accurate tracking. 
std::cout << "Enabling Fused Object detection" << std::endl; - fusion.enableObjectDetection(od_fusion_params); - std::cout << "OK" << std::endl; + const sl::FUSION_ERROR_CODE err = fusion.enableObjectDetection(od_fusion_params); + if (err != sl::FUSION_ERROR_CODE::SUCCESS) + { + std::cout << "Error: " << err << std::endl; + return EXIT_FAILURE; + } // creation of a 3D viewer GLViewer viewer; - viewer.init(argc, argv); + viewer.init(argc, argv, cameras); std::cout << "Viewer Shortcuts\n" - << "\t- 'r': swicth on/off for raw skeleton display\n" - << "\t- 'p': swicth on/off for live point cloud display\n" - << "\t- 'c': swicth on/off point cloud display with flat color\n" + << "\t- 'q': quit the application\n" + << "\t- 'p': play/pause the GLViewer\n" + << "\t- 'f': swicth on/off for fused bbox display\n" + << "\t- 'r': swicth on/off for raw bbox display\n" + << "\t- 's': swicth on/off for live point cloud display\n" + << "\t- 'c': swicth on/off point cloud display with raw color\n" << std::endl; // fusion outputs - sl::Objects fused_objects; - std::map camera_raw_data; + std::unordered_map fused_objects; + std::unordered_map> camera_raw_data; sl::FusionMetrics metrics; std::map views; std::map pointClouds; sl::Resolution low_res(512, 360); - bool new_data = false; - sl::Timestamp ts_new_data = sl::Timestamp(0); - // run the fusion as long as the viewer is available. 
while (viewer.isAvailable()) { + trigger.notifyZED(); + // run the fusion process (which gather data from all camera, sync them and process them) if (fusion.process() == sl::FUSION_ERROR_CODE::SUCCESS) { - // Retrieve fused body + // Retrieve all the fused objects fusion.retrieveObjects(fused_objects); - // for debug, you can retrieve the data send by each camera - for (auto &id : cameras) + // for debug, you can retrieve the data sent by each camera + for (const sl::CameraIdentifier &id : cameras) { - // fusion.retrieveObjects(camera_raw_data[id], id); + // Retrieve all the raw objects of a given camera + fusion.retrieveObjects(camera_raw_data[id], id); sl::Pose pose; - if (fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id, sl::POSITION_TYPE::RAW) == sl::POSITIONAL_TRACKING_STATE::OK) + if (fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id) == sl::POSITIONAL_TRACKING_STATE::OK) viewer.setCameraPose(id.sn, pose.pose_data); auto state_view = fusion.retrieveImage(views[id], id, low_res); auto state_pc = fusion.retrieveMeasure(pointClouds[id], id, sl::MEASURE::XYZBGRA, low_res); + if (state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS) { - // viewer.updateObjectsRaw(camera_raw_data[id]); + viewer.updateCamera(id.sn, views[id], pointClouds[id]); } - // viewer.updateObjects(fused_objects, camera_raw_data, metrics); } // get metrics about the fusion process for monitoring purposes fusion.getProcessMetrics(metrics); } - + + // Update the viewer with the fused objects and the raw objects viewer.updateObjects(fused_objects, camera_raw_data, metrics); - if(fused_objects.object_list.size()) - std::cout << "Objects detected from fusion " << " : " << fused_objects.object_list.size() << std::endl; - std::this_thread::sleep_for(std::chrono::microseconds(10)); + while (!viewer.isPlaying() && viewer.isAvailable()) + sl::sleep_ms(10); } viewer.exit(); + trigger.running = false; + trigger.notifyZED(); + for (auto &it : clients) 
it.stop(); diff --git a/object detection/multi-camera_multi-model/cpp/CMakeLists.txt b/object detection/multi-camera_multi-model/cpp/CMakeLists.txt new file mode 100755 index 00000000..8f67d8ab --- /dev/null +++ b/object detection/multi-camera_multi-model/cpp/CMakeLists.txt @@ -0,0 +1,59 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +PROJECT(ZED_ObjectDetectionFusionMultiModel) + +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +option(LINK_SHARED_ZED "Link with the ZED SDK shared executable" ON) + +if (NOT LINK_SHARED_ZED AND MSVC) + message(FATAL_ERROR "LINK_SHARED_ZED OFF : ZED SDK static libraries not available on Windows") +endif() + +find_package(ZED 4 REQUIRED) +find_package(OpenCV REQUIRED) +find_package(CUDA REQUIRED) +find_package(GLUT REQUIRED) +find_package(GLEW REQUIRED) +SET(OpenGL_GL_PREFERENCE GLVND) +find_package(OpenGL REQUIRED) + +include_directories(${OpenCV_INCLUDE_DIRS}) +include_directories(${CUDA_INCLUDE_DIRS}) +include_directories(${ZED_INCLUDE_DIRS}) +include_directories(${GLEW_INCLUDE_DIRS}) +include_directories(${GLUT_INCLUDE_DIR}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) +link_directories(${GLEW_LIBRARY_DIRS}) +link_directories(${GLUT_LIBRARY_DIRS}) +link_directories(${OpenGL_LIBRARY_DIRS}) +link_directories(${OpenCV_LIBRARY_DIRS}) + +IF(NOT WIN32) + SET(SPECIAL_OS_LIBS "pthread") + + IF (CMAKE_SYSTEM_PROCESSOR MATCHES aarch64) + add_definitions(-DJETSON_STYLE) + ENDIF() +ENDIF() + +FILE(GLOB_RECURSE SRC_FILES src/*.c*) +FILE(GLOB_RECURSE HDR_FILES include/*.h*) + +add_executable(${PROJECT_NAME} ${HDR_FILES} ${SRC_FILES}) + +if (LINK_SHARED_ZED) + SET(ZED_LIBS ${ZED_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_CUDART_LIBRARY}) +else() + SET(ZED_LIBS ${ZED_STATIC_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_LIBRARY}) +endif() + +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${UTILS_LIB} ${SPECIAL_OS_LIBS} ${OPENGL_LIBRARIES} 
${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OpenCV_LIBRARIES}) + +if(INSTALL_SAMPLES) + LIST(APPEND SAMPLE_LIST ${PROJECT_NAME}) + SET(SAMPLE_LIST "${SAMPLE_LIST}" PARENT_SCOPE) +endif() diff --git a/object detection/multi-camera_multi-model/cpp/include/ClientPublisher.hpp b/object detection/multi-camera_multi-model/cpp/include/ClientPublisher.hpp new file mode 100755 index 00000000..a89c44d1 --- /dev/null +++ b/object detection/multi-camera_multi-model/cpp/include/ClientPublisher.hpp @@ -0,0 +1,63 @@ +#ifndef __SENDER_RUNNER_HDR__ +#define __SENDER_RUNNER_HDR__ + +#include +#include + +#include +#include +#include + +struct Trigger{ + + void notifyZED() { + cv.notify_all(); + if (running) { + bool wait_for_zed = true; + size_t const nb_zed{states.size()}; + while (wait_for_zed) { + int count_r = 0; + for (auto const& it : states) + count_r += it.second; + wait_for_zed = count_r != nb_zed; + sl::sleep_ms(1); + } + for (auto &it : states) + it.second = false; + } + } + + std::condition_variable cv; + bool running = true; + std::map states; +}; + +class ClientPublisher{ + +public: + ClientPublisher(); + ~ClientPublisher(); + + bool open(const sl::InputType& input, Trigger* ref); + void start(); + void stop(); + void setStartSVOPosition(const unsigned pos); + sl::Objects getObjects() const; + + std::string optional_custom_onnx_yolo_model; + +private: + sl::Camera zed; + void work(); + std::thread runner; + int serial; + std::mutex mtx; + Trigger *p_trigger; + sl::Objects objects; + + std::unordered_map object_detection_parameters; + std::unordered_map object_detection_runtime_parameters; + std::unordered_map custom_object_detection_runtime_parameters; +}; + +#endif // ! 
__SENDER_RUNNER_HDR__ diff --git a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/GLViewer.hpp b/object detection/multi-camera_multi-model/cpp/include/GLViewer.hpp old mode 100644 new mode 100755 similarity index 65% rename from object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/GLViewer.hpp rename to object detection/multi-camera_multi-model/cpp/include/GLViewer.hpp index 414be22c..2a403a29 --- a/object detection/custom detector/cpp/tensorrt_yolov5_v6.0/include/GLViewer.hpp +++ b/object detection/multi-camera_multi-model/cpp/include/GLViewer.hpp @@ -1,303 +1,380 @@ -#ifndef __VIEWER_INCLUDE__ -#define __VIEWER_INCLUDE__ - -#include -#include - -#include - -#include -#include - -#include -#include - -#include - -#include "utils.hpp" - - -#ifndef M_PI -#define M_PI 3.141592653f -#endif - -#define MOUSE_R_SENSITIVITY 0.03f -#define MOUSE_UZ_SENSITIVITY 0.5f -#define MOUSE_DZ_SENSITIVITY 1.25f -#define MOUSE_T_SENSITIVITY 0.05f -#define KEY_T_SENSITIVITY 0.1f - -// Utils -std::vector getBBoxOnFloorPlane(std::vector &bbox, sl::Pose cam_pose); - -// 3D - -class CameraGL { -public: - - CameraGL() { - } - - enum DIRECTION { - UP, DOWN, LEFT, RIGHT, FORWARD, BACK - }; - CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical = sl::Translation(0, 1, 0)); // vertical = Eigen::Vector3f(0, 1, 0) - ~CameraGL(); - - void update(); - void setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar); - const sl::Transform& getViewProjectionMatrix() const; - - float getHorizontalFOV() const; - float getVerticalFOV() const; - - // Set an offset between the eye of the camera and its position - // Note: Useful to use the camera as a trackball camera with z>0 and x = 0, y = 0 - // Note: coordinates are in local space - void setOffsetFromPosition(const sl::Translation& offset); - const sl::Translation& getOffsetFromPosition() const; - - void setDirection(const sl::Translation& direction, const 
sl::Translation &vertical); - void translate(const sl::Translation& t); - void setPosition(const sl::Translation& p); - void rotate(const sl::Orientation& rot); - void rotate(const sl::Rotation& m); - void setRotation(const sl::Orientation& rot); - void setRotation(const sl::Rotation& m); - - const sl::Translation& getPosition() const; - const sl::Translation& getForward() const; - const sl::Translation& getRight() const; - const sl::Translation& getUp() const; - const sl::Translation& getVertical() const; - float getZNear() const; - float getZFar() const; - - static const sl::Translation ORIGINAL_FORWARD; - static const sl::Translation ORIGINAL_UP; - static const sl::Translation ORIGINAL_RIGHT; - - sl::Transform projection_; - bool usePerspective_; -private: - void updateVectors(); - void updateView(); - void updateVPMatrix(); - - sl::Translation offset_; - sl::Translation position_; - sl::Translation forward_; - sl::Translation up_; - sl::Translation right_; - sl::Translation vertical_; - - sl::Orientation rotation_; - - sl::Transform view_; - sl::Transform vpMatrix_; - float horizontalFieldOfView_; - float verticalFieldOfView_; - float znear_; - float zfar_; -}; - -class Shader { -public: - - Shader() { - } - Shader(const GLchar* vs, const GLchar* fs); - ~Shader(); - GLuint getProgramId(); - - static const GLint ATTRIB_VERTICES_POS = 0; - static const GLint ATTRIB_COLOR_POS = 1; -private: - bool compile(GLuint &shaderId, GLenum type, const GLchar* src); - GLuint verterxId_; - GLuint fragmentId_; - GLuint programId_; -}; - -struct ShaderData { - Shader it; - GLuint MVP_Mat; -}; - -class Simple3DObject { -public: - - Simple3DObject() { - } - Simple3DObject(sl::Translation position, bool isStatic); - ~Simple3DObject(); - - void addPt(sl::float3 pt); - void addClr(sl::float4 clr); - - void addBBox(std::vector &pts, sl::float4 clr); - void addPoint(sl::float3 pt, sl::float4 clr); - void addTriangle(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float4 clr); - void 
addLine(sl::float3 p1, sl::float3 p2, sl::float4 clr); - - // New 3D rendering - void addFullEdges(std::vector &pts, sl::float4 clr); - void addVerticalEdges(std::vector &pts, sl::float4 clr); - void addTopFace(std::vector &pts, sl::float4 clr); - void addVerticalFaces(std::vector &pts, sl::float4 clr); - - void pushToGPU(); - void clear(); - - void setDrawingType(GLenum type); - - void draw(); - - void translate(const sl::Translation& t); - void setPosition(const sl::Translation& p); - - void setRT(const sl::Transform& mRT); - - void rotate(const sl::Orientation& rot); - void rotate(const sl::Rotation& m); - void setRotation(const sl::Orientation& rot); - void setRotation(const sl::Rotation& m); - - const sl::Translation& getPosition() const; - - sl::Transform getModelMatrix() const; -private: - std::vector vertices_; - std::vector colors_; - std::vector indices_; - - bool isStatic_; - - GLenum drawingType_; - - GLuint vaoID_; - /* - Vertex buffer IDs: - - [0]: Vertices coordinates; - - [1]: Indices; - */ - GLuint vboID_[2]; - - sl::Translation position_; - sl::Orientation rotation_; -}; - -class PointCloud { -public: - PointCloud(); - ~PointCloud(); - - // Initialize Opengl and Cuda buffers - // Warning: must be called in the Opengl thread - void initialize(sl::Resolution res); - // Push a new point cloud - // Warning: can be called from any thread but the mutex "mutexData" must be locked - void pushNewPC(sl::Mat &matXYZRGBA); - // Update the Opengl buffer - // Warning: must be called in the Opengl thread - void update(); - // Draw the point cloud - // Warning: must be called in the Opengl thread - void draw(const sl::Transform& vp); - // Close (disable update) - void close(); - -private: - sl::Mat matGPU_; - bool hasNewPCL_ = false; - ShaderData shader; - size_t numBytes_; - float* xyzrgbaMappedBuf_; - GLuint bufferGLID_; - cudaGraphicsResource* bufferCudaID_; -}; - -struct ObjectClassName { - sl::float3 position; - std::string name; - sl::float4 color; -}; - 
-// This class manages input events, window and Opengl rendering pipeline - -class GLViewer { -public: - GLViewer(); - ~GLViewer(); - bool isAvailable(); - - void init(int argc, char **argv, sl::CameraParameters& param, bool isTrackingON); - void updateData(sl::Mat& matXYZRGBA, std::vector& objs, sl::Transform &cam_pose); - - void exit(); -private: - // Rendering loop method called each frame by glutDisplayFunc - void render(); - // Everything that needs to be updated before rendering must be done in this method - void update(); - // Once everything is updated, every renderable objects must be drawn in this method - void draw(); - // Clear and refresh inputs' data - void clearInputs(); - - void printText(); - void createBboxRendering(std::vector &bbox, sl::float4 bbox_clr); - void createIDRendering(sl::float3 ¢er, sl::float4 clr, unsigned int id); - - // Glut functions callbacks - static void drawCallback(); - static void mouseButtonCallback(int button, int state, int x, int y); - static void mouseMotionCallback(int x, int y); - static void reshapeCallback(int width, int height); - static void keyPressedCallback(unsigned char c, int x, int y); - static void keyReleasedCallback(unsigned char c, int x, int y); - static void idle(); - - bool available; - - enum MOUSE_BUTTON { - LEFT = 0, - MIDDLE = 1, - RIGHT = 2, - WHEEL_UP = 3, - WHEEL_DOWN = 4 - }; - - enum KEY_STATE { - UP = 'u', - DOWN = 'd', - FREE = 'f' - }; - - bool isTrackingON_ = false; - - bool mouseButton_[3]; - int mouseWheelPosition_; - int mouseCurrentPosition_[2]; - int mouseMotion_[2]; - int previousMouseMotion_[2]; - KEY_STATE keyStates_[256]; - - std::mutex mtx; - - Simple3DObject frustum; - PointCloud pointCloud_; - CameraGL camera_; - ShaderData shaderLine; - ShaderData shader; - sl::float4 bckgrnd_clr; - sl::Transform cam_pose; - - std::vector objectsName; - Simple3DObject BBox_edges; - Simple3DObject BBox_faces; - Simple3DObject skeletons; - Simple3DObject floor_grid; -}; - -#endif /* 
__VIEWER_INCLUDE__ */ +#ifndef __VIEWER_INCLUDE__ +#define __VIEWER_INCLUDE__ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include "utils.hpp" + + +#ifndef M_PI +#define M_PI 3.141592653f +#endif + +#define MOUSE_R_SENSITIVITY 0.03f +#define MOUSE_UZ_SENSITIVITY 0.75f +#define MOUSE_DZ_SENSITIVITY 1.25f +#define MOUSE_T_SENSITIVITY 0.05f +#define KEY_T_SENSITIVITY 0.1f + +// Utils +std::vector getBBoxOnFloorPlane(std::vector &bbox, sl::Pose cam_pose); + +// 3D + +class CameraGL { +public: + + CameraGL() { + } + + enum DIRECTION { + UP, DOWN, LEFT, RIGHT, FORWARD, BACK + }; + CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical = sl::Translation(0, 1, 0)); // vertical = Eigen::Vector3f(0, 1, 0) + ~CameraGL(); + + void update(); + void setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar); + const sl::Transform& getViewProjectionMatrix() const; + + float getHorizontalFOV() const; + float getVerticalFOV() const; + + // Set an offset between the eye of the camera and its position + // Note: Useful to use the camera as a trackball camera with z>0 and x = 0, y = 0 + // Note: coordinates are in local space + void setOffsetFromPosition(const sl::Translation& offset); + const sl::Translation& getOffsetFromPosition() const; + + void setDirection(const sl::Translation& direction, const sl::Translation& vertical); + void translate(const sl::Translation& t); + void setPosition(const sl::Translation& p); + void rotate(const sl::Orientation& rot); + void rotate(const sl::Rotation& m); + void setRotation(const sl::Orientation& rot); + void setRotation(const sl::Rotation& m); + + const sl::Translation& getPosition() const; + const sl::Translation& getForward() const; + const sl::Translation& getRight() const; + const sl::Translation& getUp() const; + const sl::Translation& getVertical() const; + float getZNear() const; + float getZFar() const; + + static 
const sl::Translation ORIGINAL_FORWARD; + static const sl::Translation ORIGINAL_UP; + static const sl::Translation ORIGINAL_RIGHT; + + sl::Transform projection_; + bool usePerspective_; +private: + void updateVectors(); + void updateView(); + void updateVPMatrix(); + + sl::Translation offset_; + sl::Translation position_; + sl::Translation forward_; + sl::Translation up_; + sl::Translation right_; + sl::Translation vertical_; + + sl::Orientation rotation_; + + sl::Transform view_; + sl::Transform vpMatrix_; + float horizontalFieldOfView_; + float verticalFieldOfView_; + float znear_; + float zfar_; +}; + +class Shader { +public: + + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} + Shader(const GLchar* vs, const GLchar* fs); + ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); + GLuint getProgramId(); + + static const GLint ATTRIB_VERTICES_POS = 0; + static const GLint ATTRIB_COLOR_POS = 1; + static const GLint ATTRIB_NORMAL = 2; + +private: + bool compile(GLuint &shaderId, GLenum type, const GLchar* src); + GLuint verterxId_; + GLuint fragmentId_; + GLuint programId_; +}; + +struct ShaderData { + Shader it; + GLuint MVP_Mat; +}; + +class Simple3DObject { +public: + + Simple3DObject() { + } + Simple3DObject(sl::Translation position, bool isStatic); + ~Simple3DObject(); + + void addPt(sl::float3 pt); + void addClr(sl::float4 clr); + + void addBBox(std::vector &pts, sl::float4 clr); + void addPoint(sl::float3 pt, sl::float4 clr); + void addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float4 clr); + void addLine(sl::float3 p1, sl::float3 p2, sl::float4 clr); + + // New 3D rendering + void addFullEdges(std::vector &pts, sl::float4 clr); + void 
addVerticalEdges(std::vector &pts, sl::float4 clr); + void addTopFace(std::vector &pts, sl::float4 clr); + void addVerticalFaces(std::vector &pts, sl::float4 clr); + + void pushToGPU(); + void clear(); + + void setDrawingType(GLenum type); + + void draw(); + + void translate(const sl::Translation& t); + void setPosition(const sl::Translation& p); + + void setRT(const sl::Transform& mRT); + + void rotate(const sl::Orientation& rot); + void rotate(const sl::Rotation& m); + void setRotation(const sl::Orientation& rot); + void setRotation(const sl::Rotation& m); + + const sl::Translation& getPosition() const; + + sl::Transform getModelMatrix() const; + + std::vector vertices_; + std::vector colors_; + std::vector indices_; + + bool isStatic_; + GLenum drawingType_; + + GLuint vaoID_; +private: + /* + Vertex buffer IDs: + - [0]: Vertices coordinates + - [1]: colors; + - [2]: Indices; + */ + GLuint vboID_[3]; + + sl::Translation position_; + sl::Orientation rotation_; +}; + +class PointCloud { +public: + PointCloud(); + ~PointCloud(); + + // Initialize Opengl and Cuda buffers + // Warning: must be called in the Opengl thread + void initialize(sl::Mat&, sl::float3 clr); + // Push a new point cloud + // Warning: can be called from any thread but the mutex "mutexData" must be locked + void pushNewPC(); + // Draw the point cloud + // Warning: must be called in the Opengl thread + void draw(const sl::Transform& vp, bool draw_clr); + // Close (disable update) + void close(); + +private: + sl::Mat refMat; + sl::float3 clr; + + Shader shader_; + GLuint shMVPMatrixLoc_; + GLuint shDrawColor; + GLuint shColor; + + size_t numBytes_; + float* xyzrgbaMappedBuf_; + GLuint bufferGLID_; + cudaGraphicsResource* bufferCudaID_; +}; + +struct ObjectClassName { + sl::float3 position; + std::string name_lineA; + std::string name_lineB; + sl::float3 color; +}; + +class CameraViewer { +public: + CameraViewer(); + ~CameraViewer(); + + // Initialize Opengl and Cuda buffers + bool 
initialize(sl::Mat& image, sl::float3 clr); + // Push a new Image + Z buffer and transform into a point cloud + void pushNewImage(); + // Draw the Image + void draw(sl::Transform vpMatrix); + // Close (disable update) + void close(); + + Simple3DObject frustum; +private: + sl::Mat ref; + cudaArray_t ArrIm; + cudaGraphicsResource* cuda_gl_ressource;//cuda GL resource + Shader shader; + GLuint shMVPMatrixLocTex_; + + GLuint texture; + GLuint vaoID_; + GLuint vboID_[3]; + + std::vector faces; + std::vector vert; + std::vector uv; +}; + +// This class manages input events, window and Opengl rendering pipeline + +class GLViewer { +public: + GLViewer(); + ~GLViewer(); + bool isAvailable(); + bool isPlaying() const { return play; } + + void init(int argc, char **argv, const std::vector& cameras_id); + void updateObjects(const std::unordered_map& objs, + const std::unordered_map>& singledata, + const sl::FusionMetrics& metrics); + + void updateCamera(int, sl::Mat&, sl::Mat&); + void updateCamera(sl::Mat&); + void setCameraPose(int, sl::Transform); + + int getKey() { + const int key = last_key; + last_key = -1; + return key; + } + + void exit(); +private: + // Rendering loop method called each frame by glutDisplayFunc + void render(); + // Everything that needs to be updated before rendering must be done in this method + void update(); + // Once everything is updated, every renderable objects must be drawn in this method + void draw(); + // Clear and refresh inputs' data + void clearInputs(); + + void setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar); + void printText(); + + void createBboxRendering(std::vector &bbox, sl::float4 bbox_clr); + sl::float3 getColor(int, bool); + + // Glut functions callbacks + static void drawCallback(); + static void mouseButtonCallback(int button, int state, int x, int y); + static void mouseMotionCallback(int x, int y); + static void reshapeCallback(int width, int height); + static void 
keyPressedCallback(unsigned char c, int x, int y); + static void keyReleasedCallback(unsigned char c, int x, int y); + static void idle(); + + bool available; + + enum MOUSE_BUTTON { + LEFT = 0, + MIDDLE = 1, + RIGHT = 2, + WHEEL_UP = 3, + WHEEL_DOWN = 4 + }; + + enum KEY_STATE { + UP = 'u', + DOWN = 'd', + FREE = 'f' + }; + + bool mouseButton_[3]; + int mouseWheelPosition_; + int mouseCurrentPosition_[2]; + int mouseMotion_[2]; + int previousMouseMotion_[2]; + KEY_STATE keyStates_[256]; + + std::mutex mtx; + std::mutex mtx_clr; + + std::map point_clouds; + std::map viewers; + std::map poses; + CameraGL camera_; + ShaderData shaderLine; + ShaderData shader; + sl::float4 bckgrnd_clr; + + sl::Transform projection_; + + std::map BBox_edges_per_cam; + std::map BBox_faces_per_cam; + std::map colors; + std::map colors_bbox; + std::vector fusionStats; + Simple3DObject BBox_edges; + Simple3DObject BBox_faces; + Simple3DObject floor_grid; + + bool show_pc = true; + bool show_raw = false; + bool show_fused = true; + bool draw_flat_color = false; + + std::uniform_int_distribution uint_dist360; + std::mt19937 rng; + + bool play = true; + int last_key = -1; +}; + +#endif /* __VIEWER_INCLUDE__ */ diff --git a/object detection/multi-camera_multi-model/cpp/include/utils.hpp b/object detection/multi-camera_multi-model/cpp/include/utils.hpp new file mode 100755 index 00000000..d8b45c5e --- /dev/null +++ b/object detection/multi-camera_multi-model/cpp/include/utils.hpp @@ -0,0 +1,113 @@ +#pragma once + +#include + +inline bool renderObject(const sl::ObjectData& i, const bool isTrackingON) { + if (isTrackingON) + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK); + else + return (i.tracking_state == sl::OBJECT_TRACKING_STATE::OK || i.tracking_state == sl::OBJECT_TRACKING_STATE::OFF); +} + +float const id_colors[5][3] = { + { 232.0f, 176.0f ,59.0f }, + { 175.0f, 208.0f ,25.0f }, + { 102.0f, 205.0f ,105.0f}, + { 185.0f, 0.0f ,255.0f}, + { 99.0f, 107.0f ,252.0f} +}; + +inline 
sl::float4 generateColorID_u(int idx) { + if (idx < 0) return sl::float4(236, 184, 36, 255); + int color_idx = idx % 5; + return sl::float4(id_colors[color_idx][0], id_colors[color_idx][1], id_colors[color_idx][2], 255); +} + +inline sl::float4 generateColorID_f(int idx) { + auto clr_u = generateColorID_u(idx); + return sl::float4(static_cast(clr_u[0]) / 255.f, static_cast(clr_u[1]) / 255.f, static_cast(clr_u[2]) / 255.f, 1.f); +} + +float const class_colors[6][3] = { + { 44.0f, 117.0f, 255.0f}, // PEOPLE + { 255.0f, 0.0f, 255.0f}, // VEHICLE + { 0.0f, 0.0f, 255.0f}, + { 0.0f, 255.0f, 255.0f}, + { 0.0f, 255.0f, 0.0f}, + { 255.0f, 255.0f, 255.0f} +}; + +inline sl::float4 getColorClass(int idx) { + idx = std::min(5, idx); + sl::float4 clr(class_colors[idx][0], class_colors[idx][1], class_colors[idx][2], 1.f); + return clr / 255.f; +} + +/** +* @brief Compute the start frame of each SVO for playback to be synced +* +* @param svo_files Map camera index to SVO file path +* @return Map camera index to starting SVO frame for synced playback +*/ +static std::map syncDATA(std::map svo_files) { + std::map output; // map of camera index and frame index of the starting point for each + + // Open all SVO + std::map> p_zeds; + + for (auto &it : svo_files) { + auto p_zed = std::make_shared(); + + sl::InitParameters init_param; + init_param.depth_mode = sl::DEPTH_MODE::NONE; + init_param.camera_disable_self_calib = true; + init_param.input.setFromSVOFile(it.second.c_str()); + + auto error = p_zed->open(init_param); + if (error == sl::ERROR_CODE::SUCCESS) + p_zeds.insert(std::make_pair(it.first, p_zed)); + else { + std::cerr << "Could not open file " << it.second.c_str() << ": " << sl::toString(error) << ". 
Skipping" << std::endl; + } + } + + // Compute the starting point, we have to take the latest one + sl::Timestamp start_ts = 0; + for (auto &it : p_zeds) { + it.second->grab(); + auto ts = it.second->getTimestamp(sl::TIME_REFERENCE::IMAGE); + + if (ts > start_ts) + start_ts = ts; + } + + std::cout << "Found SVOs common starting time: " << start_ts << std::endl; + + // The starting point is now known, let's find the frame idx for all corresponding + for (auto &it : p_zeds) { + auto frame_position_at_ts = it.second->getSVOPositionAtTimestamp(start_ts); + + if (frame_position_at_ts != -1) + output.insert(std::make_pair(it.first, frame_position_at_ts)); + } + + for (auto &it : p_zeds) it.second->close(); + + return output; +} + +inline bool hasExtension(const std::string& fileName, const std::string& extension) { + // Find the position of the last '.' in the file name + size_t dotPos = fileName.find_last_of('.'); + + // If no '.' is found or it's at the start of the string, return false + if (dotPos == std::string::npos || dotPos == 0) { + return false; + } + + // Extract the extension from the file name + std::string fileExtension = fileName.substr(dotPos); + + // Check if the extracted extension matches the provided extension + return fileExtension == extension; +} \ No newline at end of file diff --git a/object detection/multi-camera_multi-model/cpp/src/ClientPublisher.cpp b/object detection/multi-camera_multi-model/cpp/src/ClientPublisher.cpp new file mode 100755 index 00000000..45a9b3c2 --- /dev/null +++ b/object detection/multi-camera_multi-model/cpp/src/ClientPublisher.cpp @@ -0,0 +1,149 @@ +#include "ClientPublisher.hpp" + +ClientPublisher::ClientPublisher() { +} + +ClientPublisher::~ClientPublisher() { + zed.close(); +} + +bool ClientPublisher::open(const sl::InputType& input, Trigger* ref) { + // already running + if (runner.joinable()) + return false; + + p_trigger = ref; + + sl::InitParameters init_parameters; + init_parameters.depth_mode = 
sl::DEPTH_MODE::ULTRA; + init_parameters.input = input; + init_parameters.coordinate_units = sl::UNIT::METER; + init_parameters.depth_stabilization = 5; + auto state = zed.open(init_parameters); + if (state != sl::ERROR_CODE::SUCCESS) { + std::cout << "Error: " << state << std::endl; + return false; + } + + serial = zed.getCameraInformation().serial_number; + p_trigger->states[serial] = false; + + // In most cases in object detection setup, the cameras are static + sl::PositionalTrackingParameters positional_tracking_parameters; + positional_tracking_parameters.set_as_static = true; + state = zed.enablePositionalTracking(positional_tracking_parameters); + if (state != sl::ERROR_CODE::SUCCESS) { + std::cout << "Error: " << state << std::endl; + return false; + } + + object_detection_parameters.clear(); + object_detection_runtime_parameters.clear(); + + // Parameters for a MULTI CLASS object detection model instance + sl::ObjectDetectionParameters param_instance_0; + param_instance_0.instance_module_id = 0; + param_instance_0.detection_model = sl::OBJECT_DETECTION_MODEL::MULTI_CLASS_BOX_FAST; + param_instance_0.enable_tracking = true; + param_instance_0.fused_objects_group_name = "MULTI_CLASS_BOX"; + sl::ObjectDetectionRuntimeParameters runtime_param_instance_0; + runtime_param_instance_0.detection_confidence_threshold = 20; + + object_detection_parameters.insert({param_instance_0.instance_module_id, param_instance_0}); + object_detection_runtime_parameters.insert({param_instance_0.instance_module_id, runtime_param_instance_0}); + + // Parameters for a PERSON HEAD object detection model instance + sl::ObjectDetectionParameters param_instance_1; + param_instance_1.instance_module_id = 1; + param_instance_1.detection_model = sl::OBJECT_DETECTION_MODEL::PERSON_HEAD_BOX_FAST; + param_instance_1.enable_tracking = true; + param_instance_1.fused_objects_group_name = "PERSON_HEAD_BOX"; + sl::ObjectDetectionRuntimeParameters runtime_param_instance_1; + 
runtime_param_instance_1.detection_confidence_threshold = 20; + + object_detection_parameters.insert({param_instance_1.instance_module_id, param_instance_1}); + object_detection_runtime_parameters.insert({param_instance_1.instance_module_id, runtime_param_instance_1}); + + // Parameters for CUSTOM object detection model instance + if (!optional_custom_onnx_yolo_model.empty()) { + sl::ObjectDetectionParameters param_instance_2; + param_instance_2.instance_module_id = 2; + param_instance_2.detection_model = sl::OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS; + param_instance_2.custom_onnx_file = optional_custom_onnx_yolo_model.c_str(); + param_instance_2.custom_onnx_dynamic_input_shape = sl::Resolution(512, 512); + param_instance_2.enable_tracking = true; + param_instance_2.fused_objects_group_name = "MY_CUSTOM_OUTPUT"; + sl::CustomObjectDetectionRuntimeParameters runtime_param_instance_2; + runtime_param_instance_2.object_detection_properties.detection_confidence_threshold = 20; + // runtime_param_instance_2.object_detection_properties.is_static = true; + // runtime_param_instance_2.object_detection_properties.tracking_timeout = 100.f; + + object_detection_parameters.insert({param_instance_2.instance_module_id, param_instance_2}); + custom_object_detection_runtime_parameters.insert({param_instance_2.instance_module_id, runtime_param_instance_2}); + } + + // Enable the object detection instances + for (auto const& params : object_detection_parameters) { + state = zed.enableObjectDetection(params.second); + std::cout << "Enabling " << params.second.detection_model << " " << state << std::endl; + if (state != sl::ERROR_CODE::SUCCESS) { + std::cout << "Error: " << state << std::endl; + return false; + } + } + + return true; +} + +void ClientPublisher::start() { + if (zed.isOpened()) { + // the camera should stream its data so the fusion can subscribe to it to gather the detected body and others metadata needed for the process. 
+ zed.startPublishing(); + // the thread can start to process the camera grab in background + runner = std::thread(&ClientPublisher::work, this); + } +} + +void ClientPublisher::stop() { + if (runner.joinable()) + runner.join(); + zed.close(); +} + +void ClientPublisher::work() { + sl::RuntimeParameters rt; + rt.confidence_threshold = 50; + + // In this sample we use a dummy thread to process the ZED data. + // You can replace it by your own application and use the ZED like you use to, retrieve its images, depth, sensors data and so on. + // as long as you call the grab function and the retrieveObjects (which runs the detection) the camera will be able to seamlessly transmit the data to the fusion module. + while (p_trigger->running) { + std::unique_lock lk(mtx); + p_trigger->cv.wait(lk); + if (p_trigger->running) { + if (zed.grab(rt) == sl::ERROR_CODE::SUCCESS) { + /* + Your App + + Here, as an example, we retrieve the enabled object detection models, but you can retrieve as many as you want and they will seamllessly be transmitted to the fusion module. + It is important to note that for synchronisation purposes, at the moment, the retrieveObjects runs the detection and the sending happens only at the beginning of the next grab. + That way, you can decide to only send to fusion a subset of the possible detection results by only calling a subset of the possible retrieveObjects. 
+ */ + for (auto const& odp : object_detection_runtime_parameters) + zed.retrieveObjects(objects, odp.second, odp.first); + for (auto const& codp : custom_object_detection_runtime_parameters) + zed.retrieveObjects(objects, codp.second, codp.first); + } + } + p_trigger->states[serial] = true; + } +} + +void ClientPublisher::setStartSVOPosition(unsigned pos) { + zed.setSVOPosition(pos); +} + +sl::Objects ClientPublisher::getObjects() const { + return objects; +} + diff --git a/object detection/multi-camera_multi-model/cpp/src/GLViewer.cpp b/object detection/multi-camera_multi-model/cpp/src/GLViewer.cpp new file mode 100755 index 00000000..d9c622c3 --- /dev/null +++ b/object detection/multi-camera_multi-model/cpp/src/GLViewer.cpp @@ -0,0 +1,1464 @@ +#include "GLViewer.hpp" +#include + +#if defined(_DEBUG) && defined(_WIN32) +//#error "This sample should not be built in Debug mode, use RelWithDebInfo if you want to do step by step." +#endif + +#define FADED_RENDERING +const float grid_size = 10.0f; + +const GLchar* VERTEX_SHADER = + "#version 330 core\n" + "layout(location = 0) in vec3 in_Vertex;\n" + "layout(location = 1) in vec4 in_Color;\n" + "uniform mat4 u_mvpMatrix;\n" + "out vec4 b_color;\n" + "void main() {\n" + " b_color = in_Color;\n" + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" + "}"; + +const GLchar* FRAGMENT_SHADER = + "#version 330 core\n" + "in vec4 b_color;\n" + "layout(location = 0) out vec4 out_Color;\n" + "void main() {\n" + " out_Color = b_color;\n" + "}"; + +const GLchar* VERTEX_SHADER_TEXTURE = + "#version 330 core\n" + "layout(location = 0) in vec3 in_Vertex;\n" + "layout(location = 1) in vec2 in_UVs;\n" + "uniform mat4 u_mvpMatrix;\n" + "out vec2 UV;\n" + "void main() {\n" + " gl_Position = u_mvpMatrix * vec4(in_Vertex, 1);\n" + " UV = in_UVs;\n" + "}\n"; + +const GLchar* FRAGMENT_SHADER_TEXTURE = + "#version 330 core\n" + "in vec2 UV;\n" + "uniform sampler2D texture_sampler;\n" + "void main() {\n" + " gl_FragColor = 
vec4(texture(texture_sampler, UV).bgr, 1.0);\n" + "}\n"; + +void addVert(Simple3DObject &obj, float i_f, float limit, float height, sl::float4 &clr) { + auto p1 = sl::float3(i_f, height, -limit); + auto p2 = sl::float3(i_f, height, limit); + auto p3 = sl::float3(-limit, height, i_f); + auto p4 = sl::float3(limit, height, i_f); + + obj.addLine(p1, p2, clr); + obj.addLine(p3, p4, clr); +} + +GLViewer* currentInstance_ = nullptr; + +GLViewer::GLViewer() : available(false) { + currentInstance_ = this; + mouseButton_[0] = mouseButton_[1] = mouseButton_[2] = false; + clearInputs(); + previousMouseMotion_[0] = previousMouseMotion_[1] = 0; +} + +GLViewer::~GLViewer() { +} + +void GLViewer::exit() { + if (currentInstance_ && available) { + available = false; + for (auto& pc : point_clouds) + { + pc.second.close(); + } + } +} + +bool GLViewer::isAvailable() { + if (currentInstance_ && available) { + glutMainLoopEvent(); + } return available; +} + +Simple3DObject createFrustum(sl::CameraParameters param) { + // Create 3D axis + Simple3DObject it(sl::Translation(0, 0, 0), true); + + float Z_ = -150; + sl::float3 cam_0(0, 0, 0); + sl::float3 cam_1, cam_2, cam_3, cam_4; + + float fx_ = 1.f / param.fx; + float fy_ = 1.f / param.fy; + + cam_1.z = Z_; + cam_1.x = (0 - param.cx) * Z_ *fx_; + cam_1.y = (0 - param.cy) * Z_ *fy_; + + cam_2.z = Z_; + cam_2.x = (param.image_size.width - param.cx) * Z_ *fx_; + cam_2.y = (0 - param.cy) * Z_ *fy_; + + cam_3.z = Z_; + cam_3.x = (param.image_size.width - param.cx) * Z_ *fx_; + cam_3.y = (param.image_size.height - param.cy) * Z_ *fy_; + + cam_4.z = Z_; + cam_4.x = (0 - param.cx) * Z_ *fx_; + cam_4.y = (param.image_size.height - param.cy) * Z_ *fy_; + + sl::float4 clr(0.8f, 0.5f, 0.2f, 1.0f); + it.addFace(cam_0, cam_1, cam_2, clr); + it.addFace(cam_0, cam_2, cam_3, clr); + it.addFace(cam_0, cam_3, cam_4, clr); + it.addFace(cam_0, cam_4, cam_1, clr); + + it.setDrawingType(GL_TRIANGLES); + return it; +} + +void CloseFunc(void) { + if 
(currentInstance_) + currentInstance_->exit(); +} + +void GLViewer::init(int argc, char **argv, const std::vector& cameras_id) { + glutInit(&argc, argv); + int wnd_w = glutGet(GLUT_SCREEN_WIDTH); + int wnd_h = glutGet(GLUT_SCREEN_HEIGHT); + + glutInitWindowSize(wnd_w * 0.8, wnd_h * 0.8); + glutInitWindowPosition(wnd_w * 0.1, wnd_h * 0.1); + glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH); + glutCreateWindow("ZED Object Detection"); + + GLenum err = glewInit(); + if (GLEW_OK != err) + std::cout << "ERROR: glewInit failed: " << glewGetErrorString(err) << "\n"; + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); + + // Compile and create the shader + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); + shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); + + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); + + // Create the camera + camera_ = CameraGL(sl::Translation(0, 2, 10), sl::Translation(0, 0, -1)); + + BBox_edges = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_edges.setDrawingType(GL_LINES); + + BBox_faces = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_faces.setDrawingType(GL_QUADS); + + for (const sl::CameraIdentifier& cam_id : cameras_id) { + BBox_edges_per_cam[cam_id.sn] = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_edges_per_cam[cam_id.sn].setDrawingType(GL_LINES); + + BBox_faces_per_cam[cam_id.sn] = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_faces_per_cam[cam_id.sn].setDrawingType(GL_QUADS); + } + + bckgrnd_clr = sl::float4(0.2f, 0.19f, 0.2f, 1.0f); + + floor_grid = Simple3DObject(sl::Translation(0, 0, 0), true); + floor_grid.setDrawingType(GL_LINES); + + float limit = 20.f; + sl::float4 clr_grid(80, 80, 80, 255); + clr_grid /= 255.f; + float height = -3; + for (int i = (int) (-limit); i <= (int) (limit); i++) + addVert(floor_grid, i , limit , height , 
clr_grid); + + floor_grid.pushToGPU(); + + std::random_device dev; + rng = std::mt19937(dev()); + rng.seed(42); + uint_dist360 = std::uniform_int_distribution(0, 360); + + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + + glEnable(GL_LINE_SMOOTH); + glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); + +#ifndef JETSON_STYLE + glEnable(GL_POINT_SMOOTH); +#endif + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); + + // Map glut function on this class methods + glutDisplayFunc(GLViewer::drawCallback); + glutMouseFunc(GLViewer::mouseButtonCallback); + glutMotionFunc(GLViewer::mouseMotionCallback); + glutReshapeFunc(GLViewer::reshapeCallback); + glutKeyboardFunc(GLViewer::keyPressedCallback); + glutKeyboardUpFunc(GLViewer::keyReleasedCallback); + glutCloseFunc(CloseFunc); + + available = true; +} + +void GLViewer::render() { + if (available) { + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + glClearColor(bckgrnd_clr.b, bckgrnd_clr.g, bckgrnd_clr.r, bckgrnd_clr.a); + update(); + draw(); + printText(); + glutSwapBuffers(); + glutPostRedisplay(); + } +} + +sl::float3 newColor(float hh) { + float s = 0.75f; + float v = 0.9f; + + sl::float3 clr; + int i = (int)hh; + float ff = hh - i; + float p = v * (1.f - s); + float q = v * (1.f - (s * ff)); + float t = v * (1.f - (s * (1.f - ff))); + switch (i) { + case 0: + clr.r = v; + clr.g = t; + clr.b = p; + break; + case 1: + clr.r = q; + clr.g = v; + clr.b = p; + break; + case 2: + clr.r = p; + clr.g = v; + clr.b = t; + break; + + case 3: + clr.r = p; + clr.g = q; + clr.b = v; + break; + case 4: + clr.r = t; + clr.g = p; + clr.b = v; + break; + case 5: + default: + clr.r = v; + clr.g = p; + clr.b = q; + break; + } + return clr; +} + +sl::float3 GLViewer::getColor(int id, bool for_bbox) { + const std::lock_guard lock(mtx_clr); + if (for_bbox) { + if (colors_bbox.find(id) == colors_bbox.end()) { + float hh = uint_dist360(rng) / 60.f; + colors_bbox[id] = newColor(hh); + } + return 
colors_bbox[id]; + } + else { + if (colors.find(id) == colors.end()) { + int h_ = uint_dist360(rng); + float hh = h_ / 60.f; + colors[id] = newColor(hh); + } + return colors[id]; + } +} + +void GLViewer::setCameraPose(int id, sl::Transform pose) { + const std::lock_guard lock(mtx); + getColor(id, false); + poses[id] = pose; +} + +void GLViewer::updateCamera(int id, sl::Mat& view, sl::Mat& pc) { + mtx.lock(); + auto clr = getColor(id, false); + if (view.isInit() && viewers.find(id) == viewers.end()) + viewers[id].initialize(view, clr); + + if (pc.isInit() && point_clouds.find(id) == point_clouds.end()) + point_clouds[id].initialize(pc, clr); + + mtx.unlock(); +} + +void GLViewer::updateCamera(sl::Mat& pc) { + const std::lock_guard lock(mtx); + int id = 0; + auto clr = getColor(id, false); + + // we need to release old pc and initialize new one because fused point cloud don't have the same number of points for each process + // I used close but it crashed in draw. Not yet investigated + point_clouds[id].initialize(pc, clr); +} + +void GLViewer::setRenderCameraProjection(sl::CameraParameters params, float znear, float zfar) { + // Just slightly up the ZED camera FOV to make a small black border + float fov_y = (params.v_fov + 0.5f) * M_PI / 180.f; + float fov_x = (params.h_fov + 0.5f) * M_PI / 180.f; + + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); + projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); + projection_(2, 2) = -(zfar + znear) / (zfar - znear); + projection_(3, 2) = -1; + projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); + projection_(3, 3) = 0; + + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); //Horizontal FoV. + projection_(0, 1) = 0; + projection_(0, 2) = 2.0f * ((params.image_size.width - 1.0f * params.cx) / params.image_size.width) - 1.0f; //Horizontal offset. + projection_(0, 3) = 0; + + projection_(1, 0) = 0; + projection_(1, 1) = 1.0f / tanf(fov_y * 0.5f); //Vertical FoV. 
+ projection_(1, 2) = -(2.0f * ((params.image_size.height - 1.0f * params.cy) / params.image_size.height) - 1.0f); //Vertical offset. + projection_(1, 3) = 0; + + projection_(2, 0) = 0; + projection_(2, 1) = 0; + projection_(2, 2) = -(zfar + znear) / (zfar - znear); //Near and far planes. + projection_(2, 3) = -(2.0f * zfar * znear) / (zfar - znear); //Near and far planes. + + projection_(3, 0) = 0; + projection_(3, 1) = 0; + projection_(3, 2) = -1; + projection_(3, 3) = 0.0f; +} + +static void createBboxRendering(std::vector& bbox, + sl::float4& bbox_clr, + Simple3DObject& BBox_edges, + Simple3DObject& BBox_faces) +{ + // First create top and bottom full edges + BBox_edges.addFullEdges(bbox, bbox_clr); + // Add faded vertical edges + BBox_edges.addVerticalEdges(bbox, bbox_clr); + // Add faces + BBox_faces.addVerticalFaces(bbox, bbox_clr); + // Add top face + BBox_faces.addTopFace(bbox, bbox_clr); +} + +static void addObjects(const sl::Objects objs, + Simple3DObject& BBox_edges, + Simple3DObject& BBox_faces, + const sl::float4 clr_id_ = sl::float4(-1.f, -1.f, -1.f, 1.f), + const bool draw_cross = false) +{ + for (unsigned int i = 0; i < objs.object_list.size(); i++) { + if (renderObject(objs.object_list[i], objs.is_tracked)) { + auto bb_ = objs.object_list[i].bounding_box; + if (!bb_.empty()) { + sl::float4 clr_id{clr_id_}; + if (clr_id.x == -1.f) { + if (objs.object_list[i].tracking_state != sl::OBJECT_TRACKING_STATE::OK) { + clr_id = getColorClass((int)objs.object_list[i].label); + } + else { + clr_id = generateColorID_f(objs.object_list[i].id); + } + } + + if (draw_cross) { // display centroid as a cross + sl::float3 centroid = objs.object_list[i].position; + const float size_cendtroid = 0.005f; // m + sl::float3 centroid1 = centroid; + centroid1.y += size_cendtroid; + sl::float3 centroid2 = objs.object_list[i].position; + centroid2.y -= size_cendtroid; + sl::float3 centroid3 = objs.object_list[i].position; + centroid3.x += size_cendtroid; + sl::float3 centroid4 
= objs.object_list[i].position; + centroid4.x -= size_cendtroid; + + BBox_edges.addLine(centroid1, centroid2, sl::float4(1.f, 0.5f, 0.5f, 1.f)); + BBox_edges.addLine(centroid3, centroid4, sl::float4(1.f, 0.5f, 0.5f, 1.f)); + } + + createBboxRendering(bb_, clr_id, BBox_edges, BBox_faces); + } + } + } +} + +static void addObjects(const std::unordered_map& objs, + Simple3DObject& BBox_edges, + Simple3DObject& BBox_faces, + const sl::float4 clr_id_ = sl::float4(-1.f, -1.f, -1.f, 1.f), + const bool draw_cross = false) +{ + for (const std::pair& it : objs) { + if (it.second.is_new) + { + addObjects(it.second, BBox_edges, BBox_faces, clr_id_, draw_cross); + } + } +} + +void GLViewer::updateObjects(const std::unordered_map& objs, + const std::unordered_map>& singledata, + const sl::FusionMetrics& metrics) { + mtx.lock(); + + BBox_edges.clear(); + BBox_faces.clear(); + addObjects(objs, BBox_edges, BBox_faces); + + fusionStats.clear(); + float id = 0.5F; + + ObjectClassName obj_str; + obj_str.name_lineA = "Publishers :" + std::to_string(metrics.mean_camera_fused); + obj_str.name_lineB = "Sync :" + std::to_string(metrics.mean_stdev_between_camera * 1000.f); + obj_str.color = sl::float4(0.9, 0.9, 0.9, 1); + obj_str.position = sl::float3(10, (id * 30), 0); + fusionStats.push_back(obj_str); + + for (auto& it : BBox_edges_per_cam) + it.second.clear(); + for (auto& it : BBox_faces_per_cam) + it.second.clear(); + + for (const std::pair>& it : singledata) { + auto clr = getColor(it.first.sn, false); + ++id; + + if (BBox_edges_per_cam.find(it.first.sn) == BBox_edges_per_cam.end()) { + BBox_edges_per_cam[it.first.sn] = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_faces_per_cam[it.first.sn] = Simple3DObject(sl::Translation(0, 0, 0), false); + BBox_edges_per_cam[it.first.sn].setDrawingType(GL_LINES); + BBox_faces_per_cam[it.first.sn].setDrawingType(GL_QUADS); + } + + ObjectClassName obj_str; + obj_str.name_lineA = "CAM: " + std::to_string(it.first.sn); + obj_str.name_lineA += 
" FPS: " + std::to_string(metrics.camera_individual_stats.at(it.first).received_fps); + obj_str.name_lineB = "Ratio Detection :" + std::to_string(metrics.camera_individual_stats.at(it.first).ratio_detection); + obj_str.name_lineB += " Delta " + std::to_string(metrics.camera_individual_stats.at(it.first).delta_ts * 1000.f); + obj_str.color = clr; + obj_str.position = sl::float3(10, (id * 30), 0); + fusionStats.push_back(obj_str); + + std::swap(clr.r, clr.b); + for (const std::pair& it2 : it.second) { + addObjects(it2.second, BBox_edges_per_cam[it.first.sn], BBox_faces_per_cam[it.first.sn], clr); + } + } + + mtx.unlock(); +} + +void GLViewer::update() { + if (keyStates_['q'] == KEY_STATE::UP || keyStates_['Q'] == KEY_STATE::UP || keyStates_[27] == KEY_STATE::UP) { + currentInstance_->exit(); + return; + } + + if (keyStates_['r'] == KEY_STATE::UP || keyStates_['R'] == KEY_STATE::UP) + currentInstance_->show_raw = !currentInstance_->show_raw; + + if (keyStates_['f'] == KEY_STATE::UP || keyStates_['F'] == KEY_STATE::UP) + currentInstance_->show_fused = !currentInstance_->show_fused; + + if (keyStates_['c'] == KEY_STATE::UP || keyStates_['C'] == KEY_STATE::UP) + currentInstance_->draw_flat_color = !currentInstance_->draw_flat_color; + + if (keyStates_['s'] == KEY_STATE::UP || keyStates_['S'] == KEY_STATE::UP) + currentInstance_->show_pc = !currentInstance_->show_pc; + + if (keyStates_['p'] == KEY_STATE::UP || keyStates_['P'] == KEY_STATE::UP || keyStates_[32] == KEY_STATE::UP) + play = !play; + + // Rotate camera with mouse + if (mouseButton_[MOUSE_BUTTON::LEFT]) { + camera_.rotate(sl::Rotation((float)mouseMotion_[1] * MOUSE_R_SENSITIVITY, camera_.getRight())); + camera_.rotate(sl::Rotation((float)mouseMotion_[0] * MOUSE_R_SENSITIVITY, camera_.getVertical() * -1.f)); + } + + // Translate camera with mouse + if (mouseButton_[MOUSE_BUTTON::RIGHT]) { + camera_.translate(camera_.getUp() * (float)mouseMotion_[1] * MOUSE_T_SENSITIVITY); + camera_.translate(camera_.getRight() * 
(float)mouseMotion_[0] * MOUSE_T_SENSITIVITY); + } + + // Zoom in with mouse wheel + if (mouseWheelPosition_ != 0) { + //float distance = sl::Translation(camera_.getOffsetFromPosition()).norm(); + if (mouseWheelPosition_ > 0 /* && distance > camera_.getZNear()*/) { // zoom + camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f * -1); + } + else if (/*distance < camera_.getZFar()*/ mouseWheelPosition_ < 0) {// unzoom + //camera_.setOffsetFromPosition(camera_.getOffsetFromPosition() * MOUSE_DZ_SENSITIVITY); + camera_.translate(camera_.getForward() * MOUSE_UZ_SENSITIVITY * 0.5f); + } + } + + camera_.update(); + mtx.lock(); + // Update point cloud buffers + BBox_edges.pushToGPU(); + BBox_faces.pushToGPU(); + for(auto &it: BBox_edges_per_cam) + it.second.pushToGPU(); + for(auto &it: BBox_faces_per_cam) + it.second.pushToGPU(); + + // Update point cloud buffers + for (auto& it : point_clouds) + it.second.pushNewPC(); + + for (auto& it : viewers) + { + it.second.pushNewImage(); + } + + + mtx.unlock(); + clearInputs(); +} + +void GLViewer::draw() { + glEnable(GL_DEPTH_TEST); + sl::Transform vpMatrix = camera_.getViewProjectionMatrix(); + + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); + glUseProgram(shader.it.getProgramId()); + glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, vpMatrix.m); + glLineWidth(1.f); + floor_grid.draw(); + + glLineWidth(1.5f); + if (show_fused) + BBox_edges.draw(); + if (show_raw) + for (auto& it : BBox_edges_per_cam) + it.second.draw(); + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); + + if (show_fused) + BBox_faces.draw(); + if (show_raw) + for (auto& it : BBox_faces_per_cam) + it.second.draw(); + glLineWidth(2.f); + glPointSize(1.f); + glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); + for (auto& it : viewers) { + sl::Transform pose_ = vpMatrix * poses[it.first]; + glUniformMatrix4fv(shader.MVP_Mat, 1, GL_TRUE, pose_.m); + it.second.frustum.draw(); + } + glUseProgram(0); + + for (auto& it : poses) { + sl::Transform vpMatrix_world = vpMatrix * 
it.second; + + if (show_pc) + if (point_clouds.find(it.first) != point_clouds.end()) + point_clouds[it.first].draw(vpMatrix_world, draw_flat_color); + + if (viewers.find(it.first) != viewers.end()) + viewers[it.first].draw(vpMatrix_world); + } +} + +sl::float2 compute3Dprojection(sl::float3& pt, const sl::Transform& cam, sl::Resolution wnd_size) { + sl::float4 pt4d(pt.x, pt.y, pt.z, 1.); + auto proj3D_cam = pt4d * cam; + sl::float2 proj2D; + proj2D.x = ((proj3D_cam.x / pt4d.w) * wnd_size.width) / (2.f * proj3D_cam.w) + wnd_size.width / 2.f; + proj2D.y = ((proj3D_cam.y / pt4d.w) * wnd_size.height) / (2.f * proj3D_cam.w) + wnd_size.height / 2.f; + return proj2D; +} + +void GLViewer::printText() { + + sl::Resolution wnd_size(glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT)); + for (auto& it : fusionStats) { + + sl::float2 pt2d(it.position.x, it.position.y); + glColor4f(it.color.b, it.color.g, it.color.r, .85f); + const auto* string = it.name_lineA.c_str(); + glWindowPos2f(pt2d.x, pt2d.y + 15); + int len = (int)strlen(string); + for (int i = 0; i < len; i++) + glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); + + string = it.name_lineB.c_str(); + glWindowPos2f(pt2d.x, pt2d.y); + len = (int)strlen(string); + for (int i = 0; i < len; i++) + glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12, string[i]); + } +} + +void GLViewer::clearInputs() { + mouseMotion_[0] = mouseMotion_[1] = 0; + mouseWheelPosition_ = 0; + for (unsigned int i = 0; i < 256; ++i) { + if (keyStates_[i] == KEY_STATE::UP) + last_key = i; + if (keyStates_[i] != KEY_STATE::DOWN) + keyStates_[i] = KEY_STATE::FREE; + } +} + +void GLViewer::drawCallback() { + currentInstance_->render(); +} + +void GLViewer::mouseButtonCallback(int button, int state, int x, int y) { + if (button < 5) { + if (button < 3) { + currentInstance_->mouseButton_[button] = state == GLUT_DOWN; + } else { + currentInstance_->mouseWheelPosition_ += button == MOUSE_BUTTON::WHEEL_UP ? 
1 : -1; + } + currentInstance_->mouseCurrentPosition_[0] = x; + currentInstance_->mouseCurrentPosition_[1] = y; + currentInstance_->previousMouseMotion_[0] = x; + currentInstance_->previousMouseMotion_[1] = y; + } +} + +void GLViewer::mouseMotionCallback(int x, int y) { + currentInstance_->mouseMotion_[0] = x - currentInstance_->previousMouseMotion_[0]; + currentInstance_->mouseMotion_[1] = y - currentInstance_->previousMouseMotion_[1]; + currentInstance_->previousMouseMotion_[0] = x; + currentInstance_->previousMouseMotion_[1] = y; +} + +void GLViewer::reshapeCallback(int width, int height) { + glViewport(0, 0, width, height); + float hfov = (180.0f / M_PI) * (2.0f * atan(width / (2.0f * 500))); + float vfov = (180.0f / M_PI) * (2.0f * atan(height / (2.0f * 500))); + currentInstance_->camera_.setProjection(hfov, vfov, currentInstance_->camera_.getZNear(), currentInstance_->camera_.getZFar()); +} + +void GLViewer::keyPressedCallback(unsigned char c, int x, int y) { + currentInstance_->keyStates_[c] = KEY_STATE::DOWN; +} + +void GLViewer::keyReleasedCallback(unsigned char c, int x, int y) { + currentInstance_->keyStates_[c] = KEY_STATE::UP; +} + +void GLViewer::idle() { + glutPostRedisplay(); +} + + +Simple3DObject::Simple3DObject(sl::Translation position, bool isStatic) : isStatic_(isStatic) { + vaoID_ = 0; + drawingType_ = GL_TRIANGLES; + // isStatic_ is set from the constructor argument in the initializer list + position_ = position; + rotation_.setIdentity(); +} + +Simple3DObject::~Simple3DObject() { + clear(); + if (vaoID_ != 0) { + glDeleteBuffers(3, vboID_); + glDeleteVertexArrays(1, &vaoID_); + } +} + +void Simple3DObject::addBBox(std::vector &pts, sl::float4 clr) { + int start_id = vertices_.size(); + + float transparency_top = 0.05f, transparency_bottom = 0.75f; + for (unsigned int i = 0; i < pts.size(); i++) { + addPt(pts[i]); + clr.a = (i < 4 ? 
transparency_top : transparency_bottom); + addClr(clr); + } + + const std::vector boxLinks = {4, 5, 5, 6, 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7}; + + for (unsigned int i = 0; i < boxLinks.size(); i += 2) { + indices_.push_back(start_id + boxLinks[i]); + indices_.push_back(start_id + boxLinks[i + 1]); + } +} + +void Simple3DObject::addPt(sl::float3 pt) { + vertices_.push_back(pt); +} + +void Simple3DObject::addClr(sl::float4 clr) { + colors_.push_back(clr); +} + +void Simple3DObject::addPoint(sl::float3 pt, sl::float4 clr) { + vertices_.push_back(pt); + colors_.push_back(clr); + indices_.push_back((int)indices_.size()); +} + +void Simple3DObject::addLine(sl::float3 p1, sl::float3 p2, sl::float4 clr) { + addPoint(p1, clr); + addPoint(p2, clr); +} + +void Simple3DObject::addFace(sl::float3 p1, sl::float3 p2, sl::float3 p3, sl::float4 clr) { + addPoint(p1, clr); + addPoint(p2, clr); + addPoint(p3, clr); +} + +void Simple3DObject::addFullEdges(std::vector &pts, sl::float4 clr) { + clr.w = 0.4f; + int start_id = vertices_.size(); + + for (unsigned int i = 0; i < pts.size(); i++) { + addPt(pts[i]); + addClr(clr); + } + + const std::vector boxLinksTop = {0, 1, 1, 2, 2, 3, 3, 0}; + for (unsigned int i = 0; i < boxLinksTop.size(); i += 2) { + indices_.push_back(start_id + boxLinksTop[i]); + indices_.push_back(start_id + boxLinksTop[i + 1]); + } + + const std::vector boxLinksBottom = {4, 5, 5, 6, 6, 7, 7, 4}; + for (unsigned int i = 0; i < boxLinksBottom.size(); i += 2) { + indices_.push_back(start_id + boxLinksBottom[i]); + indices_.push_back(start_id + boxLinksBottom[i + 1]); + } +} + +void Simple3DObject::addVerticalEdges(std::vector &pts, sl::float4 clr) { + auto addSingleVerticalLine = [&](sl::float3 top_pt, sl::float3 bot_pt) { + std::vector current_pts{ + top_pt, + ((grid_size - 1.0f) * top_pt + bot_pt) / grid_size, + ((grid_size - 2.0f) * top_pt + bot_pt * 2.0f) / grid_size, + (2.0f * top_pt + bot_pt * (grid_size - 2.0f)) / grid_size, + (top_pt + bot_pt * (grid_size - 
1.0f)) / grid_size, + bot_pt}; + + int start_id = vertices_.size(); + for (unsigned int i = 0; i < current_pts.size(); i++) { + addPt(current_pts[i]); + clr.a = (i == 2 || i == 3) ? 0.0f : 0.4f; + addClr(clr); + } + + const std::vector boxLinks = {0, 1, 1, 2, 2, 3, 3, 4, 4, 5}; + for (unsigned int i = 0; i < boxLinks.size(); i += 2) { + indices_.push_back(start_id + boxLinks[i]); + indices_.push_back(start_id + boxLinks[i + 1]); + } + }; + + addSingleVerticalLine(pts[0], pts[4]); + addSingleVerticalLine(pts[1], pts[5]); + addSingleVerticalLine(pts[2], pts[6]); + addSingleVerticalLine(pts[3], pts[7]); +} + +void Simple3DObject::addTopFace(std::vector &pts, sl::float4 clr) { + clr.a = 0.3f; + for (auto it : pts) + addPoint(it, clr); +} + +void Simple3DObject::addVerticalFaces(std::vector &pts, sl::float4 clr) { + auto addQuad = [&](std::vector quad_pts, float alpha1, float alpha2) { // To use only with 4 points + for (unsigned int i = 0; i < quad_pts.size(); ++i) { + addPt(quad_pts[i]); + clr.a = (i < 2 ? 
alpha1 : alpha2); + addClr(clr); + } + + indices_.push_back((int) indices_.size()); + indices_.push_back((int) indices_.size()); + indices_.push_back((int) indices_.size()); + indices_.push_back((int) indices_.size()); + }; + + // For each face, we need to add 4 quads (the first 2 indexes are always the top points of the quad) + std::vector> quads + { + { + 0, 3, 7, 4 + }, // front face + { + 3, 2, 6, 7 + }, // right face + { + 2, 1, 5, 6 + }, // back face + { + 1, 0, 4, 5 + } // left face + }; + float alpha = 0.5f; + + for (const auto quad : quads) { + + // Top quads + std::vector quad_pts_1{ + pts[quad[0]], + pts[quad[1]], + ((grid_size - 0.5f) * pts[quad[1]] + 0.5f * pts[quad[2]]) / grid_size, + ((grid_size - 0.5f) * pts[quad[0]] + 0.5f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_1, alpha, alpha); + + std::vector quad_pts_2{ + ((grid_size - 0.5f) * pts[quad[0]] + 0.5f * pts[quad[3]]) / grid_size, + ((grid_size - 0.5f) * pts[quad[1]] + 0.5f * pts[quad[2]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[1]] + pts[quad[2]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[0]] + pts[quad[3]]) / grid_size}; + addQuad(quad_pts_2, alpha, 2 * alpha / 3); + + std::vector quad_pts_3{ + ((grid_size - 1.0f) * pts[quad[0]] + pts[quad[3]]) / grid_size, + ((grid_size - 1.0f) * pts[quad[1]] + pts[quad[2]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[1]] + 1.5f * pts[quad[2]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[0]] + 1.5f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_3, 2 * alpha / 3, alpha / 3); + + std::vector quad_pts_4{ + ((grid_size - 1.5f) * pts[quad[0]] + 1.5f * pts[quad[3]]) / grid_size, + ((grid_size - 1.5f) * pts[quad[1]] + 1.5f * pts[quad[2]]) / grid_size, + ((grid_size - 2.0f) * pts[quad[1]] + 2.0f * pts[quad[2]]) / grid_size, + ((grid_size - 2.0f) * pts[quad[0]] + 2.0f * pts[quad[3]]) / grid_size}; + addQuad(quad_pts_4, alpha / 3, 0.0f); + + // Bottom quads + std::vector quad_pts_5{ + (pts[quad[1]] * 2.0f + (grid_size - 2.0f) * pts[quad[2]]) / 
grid_size, + (pts[quad[0]] * 2.0f + (grid_size - 2.0f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] * 1.5f + (grid_size - 1.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 1.5f + (grid_size - 1.5f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_5, 0.0f, alpha / 3); + + std::vector quad_pts_6{ + (pts[quad[1]] * 1.5f + (grid_size - 1.5f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] * 1.5f + (grid_size - 1.5f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] + (grid_size - 1.0f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] + (grid_size - 1.0f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_6, alpha / 3, 2 * alpha / 3); + + std::vector quad_pts_7{ + (pts[quad[1]] + (grid_size - 1.0f) * pts[quad[2]]) / grid_size, + (pts[quad[0]] + (grid_size - 1.0f) * pts[quad[3]]) / grid_size, + (pts[quad[0]] * 0.5f + (grid_size - 0.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 0.5f + (grid_size - 0.5f) * pts[quad[2]]) / grid_size}; + addQuad(quad_pts_7, 2 * alpha / 3, alpha); + + std::vector quad_pts_8{ + (pts[quad[0]] * 0.5f + (grid_size - 0.5f) * pts[quad[3]]) / grid_size, + (pts[quad[1]] * 0.5f + (grid_size - 0.5f) * pts[quad[2]]) / grid_size, + pts[quad[2]], + pts[quad[3]]}; + addQuad(quad_pts_8, alpha, alpha); + } +} + +void Simple3DObject::pushToGPU() { + if (!isStatic_ || vaoID_ == 0) { + if (vaoID_ == 0) { + glGenVertexArrays(1, &vaoID_); + glGenBuffers(3, vboID_); + } + + if (vertices_.size() > 0) { + glBindVertexArray(vaoID_); + glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); + glBufferData(GL_ARRAY_BUFFER, vertices_.size() * sizeof (sl::float3), &vertices_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + } + + if (colors_.size() > 0) { + glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); + glBufferData(GL_ARRAY_BUFFER, colors_.size() * sizeof (sl::float4), &colors_[0], isStatic_ ? 
GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 4, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); + } + + if (indices_.size() > 0) { + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_.size() * sizeof (unsigned int), &indices_[0], isStatic_ ? GL_STATIC_DRAW : GL_DYNAMIC_DRAW); + } + + glBindVertexArray(0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + glBindBuffer(GL_ARRAY_BUFFER, 0); + } +} + +void Simple3DObject::clear() { + vertices_.clear(); + colors_.clear(); + indices_.clear(); +} + +void Simple3DObject::setDrawingType(GLenum type) { + drawingType_ = type; +} + +void Simple3DObject::draw() { + if (indices_.size() && vaoID_) { + glBindVertexArray(vaoID_); + glDrawElements(drawingType_, (GLsizei) indices_.size(), GL_UNSIGNED_INT, 0); + glBindVertexArray(0); + } +} + +void Simple3DObject::translate(const sl::Translation& t) { + position_ = position_ + t; +} + +void Simple3DObject::setPosition(const sl::Translation& p) { + position_ = p; +} + +void Simple3DObject::setRT(const sl::Transform& mRT) { + position_ = mRT.getTranslation(); + rotation_ = mRT.getOrientation(); +} + +void Simple3DObject::rotate(const sl::Orientation& rot) { + rotation_ = rot * rotation_; +} + +void Simple3DObject::rotate(const sl::Rotation& m) { + this->rotate(sl::Orientation(m)); +} + +void Simple3DObject::setRotation(const sl::Orientation& rot) { + rotation_ = rot; +} + +void Simple3DObject::setRotation(const sl::Rotation& m) { + this->setRotation(sl::Orientation(m)); +} + +const sl::Translation& Simple3DObject::getPosition() const { + return position_; +} + +sl::Transform Simple3DObject::getModelMatrix() const { + sl::Transform tmp; + tmp.setOrientation(rotation_); + tmp.setTranslation(position_); + return tmp; +} + +Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { + if 
(!compile(verterxId_, GL_VERTEX_SHADER, vs)) { + std::cout << "ERROR: while compiling vertex shader" << std::endl; + } + if (!compile(fragmentId_, GL_FRAGMENT_SHADER, fs)) { + std::cout << "ERROR: while compiling fragment shader" << std::endl; + } + + programId_ = glCreateProgram(); + + glAttachShader(programId_, verterxId_); + glAttachShader(programId_, fragmentId_); + + glBindAttribLocation(programId_, ATTRIB_VERTICES_POS, "in_vertex"); + glBindAttribLocation(programId_, ATTRIB_COLOR_POS, "in_texCoord"); + + glLinkProgram(programId_); + + GLint errorlk(0); + glGetProgramiv(programId_, GL_LINK_STATUS, &errorlk); + if (errorlk != GL_TRUE) { + std::cout << "ERROR: while linking Shader :" << std::endl; + GLint errorSize(0); + glGetProgramiv(programId_, GL_INFO_LOG_LENGTH, &errorSize); + + char *error = new char[errorSize + 1]; + glGetProgramInfoLog(programId_, errorSize, &errorSize, error); + error[errorSize] = '\0'; + std::cout << error << std::endl; + + delete[] error; + glDeleteProgram(programId_); + } +} + +Shader::~Shader() { + if (verterxId_ != 0 && glIsShader(verterxId_)) + glDeleteShader(verterxId_); + if (fragmentId_ != 0 && glIsShader(fragmentId_)) + glDeleteShader(fragmentId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); +} + +GLuint Shader::getProgramId() { + return programId_; +} + +bool Shader::compile(GLuint &shaderId, GLenum type, const GLchar* src) { + shaderId = glCreateShader(type); + if (shaderId == 0) { + std::cout << "ERROR: shader type (" << type << ") does not exist" << std::endl; + return false; + } + glShaderSource(shaderId, 1, (const char**) &src, 0); + glCompileShader(shaderId); + + GLint errorCp(0); + glGetShaderiv(shaderId, GL_COMPILE_STATUS, &errorCp); + if (errorCp != GL_TRUE) { + std::cout << "ERROR: while compiling Shader :" << std::endl; + GLint errorSize(0); + glGetShaderiv(shaderId, GL_INFO_LOG_LENGTH, &errorSize); + + char *error = new char[errorSize + 1]; + glGetShaderInfoLog(shaderId, 
errorSize, &errorSize, error); + error[errorSize] = '\0'; + std::cout << error << std::endl; + + delete[] error; + glDeleteShader(shaderId); + return false; + } + return true; +} + +const GLchar* POINTCLOUD_VERTEX_SHADER = + "#version 330 core\n" + "layout(location = 0) in vec4 in_VertexRGBA;\n" + "out vec4 b_color;\n" + "uniform mat4 u_mvpMatrix;\n" + "uniform vec3 u_color;\n" + "uniform bool u_drawFlat;\n" + "void main() {\n" + " if(u_drawFlat)\n" + " b_color = vec4(u_color.bgr, .85f);\n" + "else{" + // Decompose the 4th channel of the XYZRGBA buffer to retrieve the color of the point (1float to 4uint) + " uint vertexColor = floatBitsToUint(in_VertexRGBA.w); \n" + " vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16);\n" + " b_color = vec4(clr_int.b / 255.0f, clr_int.g / 255.0f, clr_int.r / 255.0f, .85f);\n" + " }" + " gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1);\n" + "}"; + +const GLchar* POINTCLOUD_FRAGMENT_SHADER = + "#version 330 core\n" + "in vec4 b_color;\n" + "layout(location = 0) out vec4 out_Color;\n" + "void main() {\n" + " out_Color = b_color;\n" + "}"; + +PointCloud::PointCloud() : numBytes_(0), xyzrgbaMappedBuf_(nullptr) { + +} + +PointCloud::~PointCloud() { + close(); +} + +void PointCloud::close() { + if (refMat.isInit()) { + auto err = cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + glDeleteBuffers(1, &bufferGLID_); + refMat.free(); + } +} + +void PointCloud::initialize(sl::Mat& ref, sl::float3 clr_) +{ + refMat = ref; + clr = clr_; +} + +void PointCloud::pushNewPC() { + if (refMat.isInit()) { + + int sizebytes = refMat.getResolution().area() * sizeof(sl::float4); + if (numBytes_ != sizebytes) { + + if (numBytes_ == 0) { + glGenBuffers(1, 
&bufferGLID_); + + shader_.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); + shColor = glGetUniformLocation(shader_.getProgramId(), "u_color"); + shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat"); + } + else { + cudaGraphicsUnmapResources(1, &bufferCudaID_, 0); + cudaGraphicsUnregisterResource(bufferCudaID_); + } + + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glBufferData(GL_ARRAY_BUFFER, sizebytes, 0, GL_STATIC_DRAW); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + cudaError_t err = cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsNone); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsMapResources(1, &bufferCudaID_, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + err = cudaGraphicsResourceGetMappedPointer((void**)&xyzrgbaMappedBuf_, &numBytes_, bufferCudaID_); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + } + + cudaMemcpy(xyzrgbaMappedBuf_, refMat.getPtr(sl::MEM::CPU), numBytes_, cudaMemcpyHostToDevice); + } +} + +void PointCloud::draw(const sl::Transform& vp, bool draw_flat) { + if (refMat.isInit()) { +#ifndef JETSON_STYLE + glDisable(GL_BLEND); +#endif + + glUseProgram(shader_.getProgramId()); + glUniformMatrix4fv(shMVPMatrixLoc_, 1, GL_TRUE, vp.m); + + glUniform3fv(shColor, 1, clr.v); + glUniform1i(shDrawColor, draw_flat); + + glBindBuffer(GL_ARRAY_BUFFER, bufferGLID_); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 4, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glDrawArrays(GL_POINTS, 0, refMat.getResolution().area()); + glBindBuffer(GL_ARRAY_BUFFER, 0); + glUseProgram(0); + +#ifndef JETSON_STYLE + glEnable(GL_BLEND); +#endif + } +} + + +CameraViewer::CameraViewer() { + +} + +CameraViewer::~CameraViewer() { + close(); +} + +void 
CameraViewer::close() { + if (ref.isInit()) { + + auto err = cudaGraphicsUnmapResources(1, &cuda_gl_ressource, 0); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + err = cudaGraphicsUnregisterResource(cuda_gl_ressource); + if (err) std::cerr << "Error CUDA " << cudaGetErrorString(err) << std::endl; + + glDeleteTextures(1, &texture); + glDeleteBuffers(3, vboID_); + glDeleteVertexArrays(1, &vaoID_); + ref.free(); + } +} + +bool CameraViewer::initialize(sl::Mat& im, sl::float3 clr) { + + frustum = Simple3DObject(sl::Translation(0, 0, 0), false); + + // Create 3D axis + float fx, fy, cx, cy; + fx = fy = 1400; + float width, height; + width = 2208; + height = 1242; + cx = width / 2; + cy = height / 2; + + float Z_ = .5f; + sl::float3 toOGL(1, -1, -1); + sl::float3 cam_0(0, 0, 0); + sl::float3 cam_1, cam_2, cam_3, cam_4; + + float fx_ = 1.f / fx; + float fy_ = 1.f / fy; + + cam_1.z = Z_; + cam_1.x = (0 - cx) * Z_ * fx_; + cam_1.y = (0 - cy) * Z_ * fy_; + cam_1 *= toOGL; + + cam_2.z = Z_; + cam_2.x = (width - cx) * Z_ * fx_; + cam_2.y = (0 - cy) * Z_ * fy_; + cam_2 *= toOGL; + + cam_3.z = Z_; + cam_3.x = (width - cx) * Z_ * fx_; + cam_3.y = (height - cy) * Z_ * fy_; + cam_3 *= toOGL; + + cam_4.z = Z_; + cam_4.x = (0 - cx) * Z_ * fx_; + cam_4.y = (height - cy) * Z_ * fy_; + cam_4 *= toOGL; + + frustum.addFace(cam_0, cam_1, cam_2, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.addFace(cam_0, cam_2, cam_3, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.addFace(cam_0, cam_3, cam_4, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.addFace(cam_0, cam_4, cam_1, sl::float4(clr.b, clr.g, clr.r, 1.f)); + frustum.setDrawingType(GL_TRIANGLES); + + frustum.pushToGPU(); + + vert.push_back(cam_1); + vert.push_back(cam_2); + vert.push_back(cam_3); + vert.push_back(cam_4); + + uv.push_back(sl::float2(0, 0)); + uv.push_back(sl::float2(1, 0)); + uv.push_back(sl::float2(1, 1)); + uv.push_back(sl::float2(0, 1)); + + faces.push_back(sl::uint3(0, 1, 2)); + 
faces.push_back(sl::uint3(0, 2, 3)); + + ref = im; + shader.set(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); + shMVPMatrixLocTex_ = glGetUniformLocation(shader.getProgramId(), "u_mvpMatrix"); + + glGenVertexArrays(1, &vaoID_); + glGenBuffers(3, vboID_); + + glBindVertexArray(vaoID_); + glBindBuffer(GL_ARRAY_BUFFER, vboID_[0]); + glBufferData(GL_ARRAY_BUFFER, vert.size() * sizeof(sl::float3), &vert[0], GL_STATIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); + + glBindBuffer(GL_ARRAY_BUFFER, vboID_[1]); + glBufferData(GL_ARRAY_BUFFER, uv.size() * sizeof(sl::float2), &uv[0], GL_STATIC_DRAW); + glVertexAttribPointer(Shader::ATTRIB_COLOR_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); + glEnableVertexAttribArray(Shader::ATTRIB_COLOR_POS); + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_[2]); + glBufferData(GL_ELEMENT_ARRAY_BUFFER, faces.size() * sizeof(sl::uint3), &faces[0], GL_STATIC_DRAW); + + glBindVertexArray(0); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); + glBindBuffer(GL_ARRAY_BUFFER, 0); + + auto res = ref.getResolution(); + glGenTextures(1, &texture); + glBindTexture(GL_TEXTURE_2D, texture); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, res.width, res.height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL); + glBindTexture(GL_TEXTURE_2D, 0); + cudaError_t err = cudaGraphicsGLRegisterImage(&cuda_gl_ressource, texture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard); + if (err) std::cout << "err alloc " << err << " " << cudaGetErrorString(err) << "\n"; + glDisable(GL_TEXTURE_2D); + + err = cudaGraphicsMapResources(1, &cuda_gl_ressource, 0); + if (err) std::cout << "err 0 " << err << " " << cudaGetErrorString(err) << "\n"; + err = cudaGraphicsSubResourceGetMappedArray(&ArrIm, cuda_gl_ressource, 0, 0); + if (err) std::cout << "err 1 " << err << " 
" << cudaGetErrorString(err) << "\n"; + + return (err == cudaSuccess); +} + +void CameraViewer::pushNewImage() { + if (!ref.isInit()) return; + auto err = cudaMemcpy2DToArray(ArrIm, 0, 0, ref.getPtr(sl::MEM::CPU), ref.getStepBytes(sl::MEM::CPU), ref.getPixelBytes() * ref.getWidth(), ref.getHeight(), cudaMemcpyHostToDevice); + if (err) std::cout << "err 2 " << err << " " << cudaGetErrorString(err) << "\n"; +} + +void CameraViewer::draw(sl::Transform vpMatrix) { + + if (!ref.isInit()) return; + + glUseProgram(shader.getProgramId()); + glUniformMatrix4fv(shMVPMatrixLocTex_, 1, GL_FALSE, sl::Transform::transpose(vpMatrix).m); + glBindTexture(GL_TEXTURE_2D, texture); + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); + + glBindVertexArray(vaoID_); + glDrawElements(GL_TRIANGLES, (GLsizei)faces.size() * 3, GL_UNSIGNED_INT, 0); + glBindVertexArray(0); + + glUseProgram(0); +} + +const sl::Translation CameraGL::ORIGINAL_FORWARD = sl::Translation(0, 0, 1); +const sl::Translation CameraGL::ORIGINAL_UP = sl::Translation(0, 1, 0); +const sl::Translation CameraGL::ORIGINAL_RIGHT = sl::Translation(1, 0, 0); + +CameraGL::CameraGL(sl::Translation position, sl::Translation direction, sl::Translation vertical) { + this->position_ = position; + setDirection(direction, vertical); + + offset_ = sl::Translation(0, 0, 0); + view_.setIdentity(); + updateView(); + setProjection(70, 70, 0.200f, 50.f); + updateVPMatrix(); +} + +CameraGL::~CameraGL() { +} + +void CameraGL::update() { + if (sl::Translation::dot(vertical_, up_) < 0) + vertical_ = vertical_ * -1.f; + updateView(); + updateVPMatrix(); +} + +void CameraGL::setProjection(float horizontalFOV, float verticalFOV, float znear, float zfar) { + horizontalFieldOfView_ = horizontalFOV; + verticalFieldOfView_ = verticalFOV; + znear_ = znear; + zfar_ = zfar; + + float fov_y = verticalFOV * M_PI / 180.f; + float fov_x = horizontalFOV * M_PI / 180.f; + + projection_.setIdentity(); + projection_(0, 0) = 1.0f / tanf(fov_x * 0.5f); + projection_(1, 1) = 
1.0f / tanf(fov_y * 0.5f); + projection_(2, 2) = -(zfar + znear) / (zfar - znear); + projection_(3, 2) = -1; + projection_(2, 3) = -(2.f * zfar * znear) / (zfar - znear); + projection_(3, 3) = 0; +} + +const sl::Transform& CameraGL::getViewProjectionMatrix() const { + return vpMatrix_; +} + +float CameraGL::getHorizontalFOV() const { + return horizontalFieldOfView_; +} + +float CameraGL::getVerticalFOV() const { + return verticalFieldOfView_; +} + +void CameraGL::setOffsetFromPosition(const sl::Translation& o) { + offset_ = o; +} + +const sl::Translation& CameraGL::getOffsetFromPosition() const { + return offset_; +} + +void CameraGL::setDirection(const sl::Translation& direction, const sl::Translation& vertical) { + sl::Translation dirNormalized = direction; + dirNormalized.normalize(); + this->rotation_ = sl::Orientation(ORIGINAL_FORWARD, dirNormalized * -1.f); + updateVectors(); + this->vertical_ = vertical; + if (sl::Translation::dot(vertical_, up_) < 0) + rotate(sl::Rotation(M_PI, ORIGINAL_FORWARD)); +} + +void CameraGL::translate(const sl::Translation& t) { + position_ = position_ + t; +} + +void CameraGL::setPosition(const sl::Translation& p) { + position_ = p; +} + +void CameraGL::rotate(const sl::Orientation& rot) { + rotation_ = rot * rotation_; + updateVectors(); +} + +void CameraGL::rotate(const sl::Rotation& m) { + this->rotate(sl::Orientation(m)); +} + +void CameraGL::setRotation(const sl::Orientation& rot) { + rotation_ = rot; + updateVectors(); +} + +void CameraGL::setRotation(const sl::Rotation& m) { + this->setRotation(sl::Orientation(m)); +} + +const sl::Translation& CameraGL::getPosition() const { + return position_; +} + +const sl::Translation& CameraGL::getForward() const { + return forward_; +} + +const sl::Translation& CameraGL::getRight() const { + return right_; +} + +const sl::Translation& CameraGL::getUp() const { + return up_; +} + +const sl::Translation& CameraGL::getVertical() const { + return vertical_; +} + +float 
CameraGL::getZNear() const { + return znear_; +} + +float CameraGL::getZFar() const { + return zfar_; +} + +void CameraGL::updateVectors() { + forward_ = ORIGINAL_FORWARD * rotation_; + up_ = ORIGINAL_UP * rotation_; + right_ = sl::Translation(ORIGINAL_RIGHT * -1.f) * rotation_; +} + +void CameraGL::updateView() { + sl::Transform transformation(rotation_, (offset_ * rotation_) + position_); + view_ = sl::Transform::inverse(transformation); +} + +void CameraGL::updateVPMatrix() { + vpMatrix_ = projection_ * view_; +} \ No newline at end of file diff --git a/object detection/multi-camera_multi-model/cpp/src/main.cpp b/object detection/multi-camera_multi-model/cpp/src/main.cpp new file mode 100755 index 00000000..e6186747 --- /dev/null +++ b/object detection/multi-camera_multi-model/cpp/src/main.cpp @@ -0,0 +1,203 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////// + +// ZED include +#include "ClientPublisher.hpp" +#include "GLViewer.hpp" +#include "utils.hpp" + +int main(int argc, char **argv) { + std::string configuration_json, optional_custom_onnx_yolo_model; + + if (argc < 2 || (argc == 2 && !hasExtension(std::string(argv[1]), ".json"))) { + // this file should be generated by using the tool ZED360 + std::cout << "Need a Configuration file in input" << std::endl; + return 1; + } + + configuration_json = std::string(argv[1]); + if (argc > 2 && hasExtension(std::string(argv[2]), ".onnx")) { + optional_custom_onnx_yolo_model = std::string(argv[2]); + std::cout << "Enabling custom ONNX model with " << optional_custom_onnx_yolo_model << std::endl; + } + + // Defines the Coordinate system and unit used in this sample + constexpr sl::COORDINATE_SYSTEM COORDINATE_SYSTEM = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; + constexpr sl::UNIT UNIT = sl::UNIT::METER; + + // Read json file containing the configuration of your multi-camera setup. + auto configurations = sl::readFusionConfigurationFile(configuration_json.c_str(), COORDINATE_SYSTEM, UNIT); + + if (configurations.empty()) { + std::cout << "Empty configuration File." << std::endl; + return EXIT_FAILURE; + } + + Trigger trigger; + + // Check if the ZED camera should run within the same process or if they are running on the edge. + std::vector clients(configurations.size()); + int id_ = 0; + std::map svo_files; + for (auto conf : configurations) { + // if the ZED camera should run locally, then start a thread to handle it + if (conf.communication_parameters.getType() == sl::CommunicationParameters::COMM_TYPE::INTRA_PROCESS) { + std::cout << "Try to open ZED " << conf.serial_number << ".." 
<< std::endl; + clients[id_].optional_custom_onnx_yolo_model = optional_custom_onnx_yolo_model; + auto state = clients[id_].open(conf.input_type, &trigger); + if (!state) { + std::cerr << "Could not open ZED: " << conf.input_type.getConfiguration() << ". Skipping..." << std::endl; + continue; + } + + if (conf.input_type.getType() == sl::InputType::INPUT_TYPE::SVO_FILE) + svo_files.insert(std::make_pair(id_, conf.input_type.getConfiguration())); + + std::cout << ". ready !" << std::endl; + + id_++; + } + } + + // Synchronize SVO files in SVO mode + bool enable_svo_sync = (svo_files.size() > 1); + if (enable_svo_sync) { + std::cout << "Starting SVO sync process..." << std::endl; + std::map cam_idx_to_svo_frame_idx = syncDATA(svo_files); + + for (auto &it : cam_idx_to_svo_frame_idx) { + std::cout << "Setting camera " << it.first << " to frame " << it.second << std::endl; + clients[it.first].setStartSVOPosition(it.second); + } + } + + // start camera threads + for (auto &it : clients) + it.start(); + + // Now that the ZED camera are running, we need to initialize the fusion module + sl::InitFusionParameters init_params; + init_params.coordinate_units = UNIT; + init_params.coordinate_system = COORDINATE_SYSTEM; + init_params.verbose = true; + + // create and initialize it + sl::Fusion fusion; + fusion.init(init_params); + + // subscribe to every cameras of the setup to internally gather their data + std::vector cameras; + for (auto &it : configurations) { + sl::CameraIdentifier uuid(it.serial_number); + // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. + auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose, it.override_gravity); + if (state != sl::FUSION_ERROR_CODE::SUCCESS) + std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . 
" << state << std::endl; + else + cameras.push_back(uuid); + } + + // check that at least one camera is connected + if (cameras.empty()) { + std::cout << "no connections " << std::endl; + return EXIT_FAILURE; + } + + fusion.enablePositionalTracking(); + + // Enable object detection + sl::ObjectDetectionFusionParameters od_fusion_params; + od_fusion_params.enable_tracking = false; // We do tracking per camera then fuse only in fusion. + // This takes more resources than tracking in Fusion only but yields more accurate tracking. + std::cout << "Enabling Fused Object detection" << std::endl; + const sl::FUSION_ERROR_CODE err = fusion.enableObjectDetection(od_fusion_params); + if (err != sl::FUSION_ERROR_CODE::SUCCESS) { + std::cout << "Error: " << err << std::endl; + return EXIT_FAILURE; + } + + // creation of a 3D viewer + GLViewer viewer; + viewer.init(argc, argv, cameras); + + std::cout << "Viewer Shortcuts\n" + << "\t- 'q': quit the application\n" + << "\t- 'p': play/pause the GLViewer\n" + << "\t- 'f': swicth on/off for fused bbox display\n" + << "\t- 'r': swicth on/off for raw bbox display\n" + << "\t- 's': swicth on/off for live point cloud display\n" + << "\t- 'c': swicth on/off point cloud display with raw color\n" + << std::endl; + + // fusion outputs + std::unordered_map fused_objects; + std::unordered_map> camera_raw_data; + sl::FusionMetrics metrics; + std::map views; + std::map pointClouds; + sl::Resolution low_res(512, 360); + + // run the fusion as long as the viewer is available. 
+ while (viewer.isAvailable()) { + trigger.notifyZED(); + + // run the fusion process (which gather data from all camera, sync them and process them) + if (fusion.process() == sl::FUSION_ERROR_CODE::SUCCESS) { + // Retrieve all the fused objects + fusion.retrieveObjects(fused_objects); + // for debug, you can retrieve the data sent by each camera + for (const sl::CameraIdentifier& id : cameras) { + // Retrieve all the raw objects of a given camera + fusion.retrieveObjects(camera_raw_data[id], id); + sl::Pose pose; + if (fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id) == sl::POSITIONAL_TRACKING_STATE::OK) + viewer.setCameraPose(id.sn, pose.pose_data); + + auto state_view = fusion.retrieveImage(views[id], id, low_res); + auto state_pc = fusion.retrieveMeasure(pointClouds[id], id, sl::MEASURE::XYZBGRA, low_res); + + if (state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS) { + viewer.updateCamera(id.sn, views[id], pointClouds[id]); + } + } + + // get metrics about the fusion process for monitoring purposes + fusion.getProcessMetrics(metrics); + } + + // Update the viewer with the fused objects and the raw objects + viewer.updateObjects(fused_objects, camera_raw_data, metrics); + + while (!viewer.isPlaying() && viewer.isAvailable()) + sl::sleep_ms(10); + } + + viewer.exit(); + + trigger.running = false; + trigger.notifyZED(); + + for (auto &it : clients) + it.stop(); + + fusion.close(); + + return EXIT_SUCCESS; +} diff --git a/plane detection/floor plane/cpp/include/GLViewer.hpp b/plane detection/floor plane/cpp/include/GLViewer.hpp index 0864cd8c..6389769a 100644 --- a/plane detection/floor plane/cpp/include/GLViewer.hpp +++ b/plane detection/floor plane/cpp/include/GLViewer.hpp @@ -97,10 +97,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment 
operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; @@ -178,6 +187,14 @@ class ImageHandler { ImageHandler(); ~ImageHandler(); + // Delete the move constructor and move assignment operator + ImageHandler(ImageHandler&&) = delete; + ImageHandler& operator=(ImageHandler&&) = delete; + + // Delete the copy constructor and copy assignment operator + ImageHandler(const ImageHandler&) = delete; + ImageHandler& operator=(const ImageHandler&) = delete; + // Initialize Opengl and Cuda buffers bool initialize(sl::Resolution res); // Push a new Image + Z buffer and transform into a point cloud diff --git a/plane detection/floor plane/cpp/src/GLViewer.cpp b/plane detection/floor plane/cpp/src/GLViewer.cpp index 5db7857e..71a6586d 100644 --- a/plane detection/floor plane/cpp/src/GLViewer.cpp +++ b/plane detection/floor plane/cpp/src/GLViewer.cpp @@ -147,10 +147,10 @@ void GLViewer::init(int argc, char **argv, sl::CameraParameters ¶m) { floorFind = false; // Compile and create the shader - shader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -527,6 +527,10 @@ sl::Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << 
std::endl; } @@ -562,12 +566,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -651,7 +655,7 @@ void PointCloud::initialize(sl::Resolution res) { checkError(cudaGraphicsGLRegisterBuffer(&bufferCudaID_, bufferGLID_, cudaGraphicsRegisterFlagsWriteDiscard)); - shader.it = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader.it.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); matGPU_.alloc(res, sl::MAT_TYPE::F32_C4, sl::MEM::GPU); @@ -858,7 +862,7 @@ const GLchar* IMAGE_FRAGMENT_SHADER = " color = vec4(texture(texImage, scaler).zyxw);\n" "}"; -ImageHandler::ImageHandler():init(false) {} +ImageHandler::ImageHandler() : vaoID_(0), init(false) {} ImageHandler::~ImageHandler() { close(); @@ -868,15 +872,17 @@ void ImageHandler::close() { if (init) { init = false; glDeleteTextures(1, &imageTex); - glDeleteBuffers(3, vboID_); - glDeleteVertexArrays(1, &vaoID_); + if (vaoID_ != 0) { + glDeleteVertexArrays(1, &vaoID_); + glDeleteBuffers(3, vboID_); + } } } bool ImageHandler::initialize(sl::Resolution res) { glEnable(GL_TEXTURE_2D); glDisable(GL_DEPTH_TEST); - shader = Shader(IMAGE_VERTEX_SHADER, IMAGE_FRAGMENT_SHADER); + shader.set(IMAGE_VERTEX_SHADER, IMAGE_FRAGMENT_SHADER); texID = glGetUniformLocation(shader.getProgramId(), "texImage"); glGenVertexArrays(1, &vaoID_); diff --git a/plane detection/plane detection/cpp/include/GLViewer.hpp b/plane detection/plane detection/cpp/include/GLViewer.hpp index 17f0189f..46781cad 100644 --- a/plane detection/plane 
detection/cpp/include/GLViewer.hpp +++ b/plane detection/plane detection/cpp/include/GLViewer.hpp @@ -30,10 +30,20 @@ struct UserAction { class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); + GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/plane detection/plane detection/cpp/src/GLViewer.cpp b/plane detection/plane detection/cpp/src/GLViewer.cpp index ad4ec005..e4dc6da1 100644 --- a/plane detection/plane detection/cpp/src/GLViewer.cpp +++ b/plane detection/plane detection/cpp/src/GLViewer.cpp @@ -86,7 +86,7 @@ MeshObject::~MeshObject() { void MeshObject::alloc(){ glGenVertexArrays(1, &vaoID_); glGenBuffers(3, vboID_); - shader.it = Shader(MESH_VERTEX_SHADER, MESH_FRAGMENT_SHADER); + shader.it.set(MESH_VERTEX_SHADER, MESH_FRAGMENT_SHADER); shader.MVP_Mat = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); shader.shColorLoc = glGetUniformLocation(shader.it.getProgramId(), "u_color"); } @@ -454,6 +454,10 @@ void GLViewer::printText() { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if(!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -488,12 +492,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if(verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if(fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if(programId_ != 0) - 
glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -528,18 +532,19 @@ bool Shader::compile(GLuint &shaderId, GLenum type, const GLchar* src) { return true; } -ImageHandler::ImageHandler() {} +ImageHandler::ImageHandler() : imageTex(0) {} ImageHandler::~ImageHandler() { close(); } void ImageHandler::close() { - glDeleteTextures(1, &imageTex); + if (imageTex != 0) + glDeleteTextures(1, &imageTex); } bool ImageHandler::initialize(sl::Resolution res) { - shader.it = Shader(IMAGE_VERTEX_SHADER, IMAGE_FRAGMENT_SHADER); + shader.it.set(IMAGE_VERTEX_SHADER, IMAGE_FRAGMENT_SHADER); texID = glGetUniformLocation(shader.it.getProgramId(), "texImage"); static const GLfloat g_quad_vertex_buffer_data[] = { -1.0f, -1.0f, 0.0f, diff --git a/positional tracking/positional tracking/cpp/include/GLViewer.hpp b/positional tracking/positional tracking/cpp/include/GLViewer.hpp index 11bb9ab4..a2fc714c 100644 --- a/positional tracking/positional tracking/cpp/include/GLViewer.hpp +++ b/positional tracking/positional tracking/cpp/include/GLViewer.hpp @@ -77,9 +77,19 @@ class CameraGL { class Shader { public: - Shader() {} + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/positional tracking/positional tracking/cpp/src/GLViewer.cpp b/positional tracking/positional tracking/cpp/src/GLViewer.cpp index c5dacaab..8806afa1 100644 --- a/positional tracking/positional tracking/cpp/src/GLViewer.cpp +++ b/positional tracking/positional 
tracking/cpp/src/GLViewer.cpp @@ -107,10 +107,10 @@ void GLViewer::init(int argc, char **argv, sl::MODEL camera_model) { glHint(GL_LINE_SMOOTH_HINT, GL_NICEST); // Compile and create the shader - mainShader.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + mainShader.it.set(VERTEX_SHADER, FRAGMENT_SHADER); mainShader.MVP_Mat = glGetUniformLocation(mainShader.it.getProgramId(), "u_mvpMatrix"); - shaderLine.it = Shader(VERTEX_SHADER, FRAGMENT_SHADER); + shaderLine.it.set(VERTEX_SHADER, FRAGMENT_SHADER); shaderLine.MVP_Mat = glGetUniformLocation(shaderLine.it.getProgramId(), "u_mvpMatrix"); // Create the camera @@ -538,6 +538,10 @@ Transform Simple3DObject::getModelMatrix() const { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -573,12 +577,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/recording/recording/mono/cpp/src/main.cpp b/recording/recording/mono/cpp/src/main.cpp index 2cbd3eb4..678994a4 100644 --- a/recording/recording/mono/cpp/src/main.cpp +++ b/recording/recording/mono/cpp/src/main.cpp @@ -49,6 +49,7 @@ int main(int argc, char **argv) { // Set configuration parameters for the ZED InitParameters init_parameters; init_parameters.depth_mode = DEPTH_MODE::NONE; + init_parameters.async_image_retrieval = false; //This parameter can be used to record SVO in camera FPS even if the grab loop is running at a lower FPS (due to compute for ex.) 
parseArgs(argc,argv,init_parameters); // Open the camera @@ -61,7 +62,7 @@ int main(int argc, char **argv) { // Enable recording with the filename specified in argument RecordingParameters recording_parameters; recording_parameters.video_filename.set(argv[1]); - recording_parameters.compression_mode = SVO_COMPRESSION_MODE::H264; + recording_parameters.compression_mode = SVO_COMPRESSION_MODE::H265; returned_state = zed.enableRecording(recording_parameters); if (returned_state != ERROR_CODE::SUCCESS) { print("Recording ZED : ", returned_state); @@ -72,15 +73,12 @@ int main(int argc, char **argv) { // Start recording SVO, stop with Ctrl-C command print("SVO is Recording, use Ctrl-C to stop." ); SetCtrlHandler(); - int frames_recorded = 0; sl::RecordingStatus rec_status; while (!exit_app) { if (zed.grab() == ERROR_CODE::SUCCESS) { // Each new frame is added to the SVO file rec_status = zed.getRecordingStatus(); - if (rec_status.status) - frames_recorded++; - print("Frame count: " +to_string(frames_recorded)); + printf(" NFrames SVO: %d / %d\n",rec_status.number_frames_ingested,rec_status.number_frames_encoded); } else break; diff --git a/recording/recording/mono/python/svo_recording.py b/recording/recording/mono/python/svo_recording.py index aa3c8933..3d41275b 100644 --- a/recording/recording/mono/python/svo_recording.py +++ b/recording/recording/mono/python/svo_recording.py @@ -38,6 +38,7 @@ def main(): init = sl.InitParameters() init.depth_mode = sl.DEPTH_MODE.NONE # Set configuration parameters for the ZED + init.async_image_retrieval = False; # This parameter can be used to record SVO in camera FPS even if the grab loop is running at a lower FPS (due to compute for ex.) 
status = cam.open(init) if status != sl.ERROR_CODE.SUCCESS: diff --git a/spatial mapping/multi camera/cpp/include/GLViewer.hpp b/spatial mapping/multi camera/cpp/include/GLViewer.hpp index 2347dba3..e787d88c 100644 --- a/spatial mapping/multi camera/cpp/include/GLViewer.hpp +++ b/spatial mapping/multi camera/cpp/include/GLViewer.hpp @@ -99,10 +99,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/spatial mapping/multi camera/cpp/src/GLViewer.cpp b/spatial mapping/multi camera/cpp/src/GLViewer.cpp index d6ee3369..59d77c99 100644 --- a/spatial mapping/multi camera/cpp/src/GLViewer.cpp +++ b/spatial mapping/multi camera/cpp/src/GLViewer.cpp @@ -119,10 +119,10 @@ void GLViewer::init(int argc, char **argv) { glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glEnable(GL_POINT_SMOOTH); - shader_mesh.it = Shader((GLchar*) MESH_VERTEX_SHADER, (GLchar*) MESH_FRAGMENT_SHADER); + shader_mesh.it.set((GLchar*) MESH_VERTEX_SHADER, (GLchar*) MESH_FRAGMENT_SHADER); shader_mesh.MVPM = glGetUniformLocation(shader_mesh.it.getProgramId(), "u_mvpMatrix"); - shader_fpc.it = Shader((GLchar*) POINTCLOUD_VERTEX_SHADER, (GLchar*) POINTCLOUD_FRAGMENT_SHADER); + shader_fpc.it.set((GLchar*) POINTCLOUD_VERTEX_SHADER, (GLchar*) POINTCLOUD_FRAGMENT_SHADER); shader_fpc.MVPM = glGetUniformLocation(shader_fpc.it.getProgramId(), "u_mvpMatrix"); camera_ = CameraGL(sl::Translation(0, 2, 2.000), sl::Translation(0, 0, -0.1)); @@ -504,6 +504,10 @@ void MeshObject::draw(bool 
draw_wire) { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -539,12 +543,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { diff --git a/spatial mapping/multi camera/cpp/src/main.cpp b/spatial mapping/multi camera/cpp/src/main.cpp index 738d3004..81d2d411 100644 --- a/spatial mapping/multi camera/cpp/src/main.cpp +++ b/spatial mapping/multi camera/cpp/src/main.cpp @@ -104,7 +104,7 @@ int main(int argc, char **argv) { for (auto& it : configurations) { sl::CameraIdentifier uuid(it.serial_number); // to subscribe to a camera you must give its serial number, the way to communicate with it (shared memory or local network), and its world pose in the setup. - auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose); + auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose, it.override_gravity); if (state != sl::FUSION_ERROR_CODE::SUCCESS) std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . 
" << state << std::endl; else diff --git a/spatial mapping/spatial mapping/cpp/include/GLViewer.hpp b/spatial mapping/spatial mapping/cpp/include/GLViewer.hpp index 7008d087..b6b05a6a 100644 --- a/spatial mapping/spatial mapping/cpp/include/GLViewer.hpp +++ b/spatial mapping/spatial mapping/cpp/include/GLViewer.hpp @@ -99,10 +99,19 @@ class CameraGL { class Shader { public: - Shader() { - } + Shader() : verterxId_(0), fragmentId_(0), programId_(0) {} Shader(const GLchar* vs, const GLchar* fs); ~Shader(); + + // Delete the move constructor and move assignment operator + Shader(Shader&&) = delete; + Shader& operator=(Shader&&) = delete; + + // Delete the copy constructor and copy assignment operator + Shader(const Shader&) = delete; + Shader& operator=(const Shader&) = delete; + + void set(const GLchar* vs, const GLchar* fs); GLuint getProgramId(); static const GLint ATTRIB_VERTICES_POS = 0; diff --git a/spatial mapping/spatial mapping/cpp/src/GLViewer.cpp b/spatial mapping/spatial mapping/cpp/src/GLViewer.cpp index f56a5cc2..2dc75f6c 100644 --- a/spatial mapping/spatial mapping/cpp/src/GLViewer.cpp +++ b/spatial mapping/spatial mapping/cpp/src/GLViewer.cpp @@ -124,13 +124,13 @@ void GLViewer::init(int argc, char **argv, sl::Mat &image, sl::Mat & pointcloud, pc_render.initialize(pointcloud); camera_viewer.initialize(image); - shader.it = Shader((GLchar*) MESH_VERTEX_SHADER, (GLchar*) MESH_FRAGMENT_SHADER); + shader.it.set((GLchar*) MESH_VERTEX_SHADER, (GLchar*) MESH_FRAGMENT_SHADER); shader.MVPM = glGetUniformLocation(shader.it.getProgramId(), "u_mvpMatrix"); - shader_mesh.it = Shader((GLchar*) MESH_VERTEX_SHADER, (GLchar*) MESH_FRAGMENT_SHADER); + shader_mesh.it.set((GLchar*) MESH_VERTEX_SHADER, (GLchar*) MESH_FRAGMENT_SHADER); shader_mesh.MVPM = glGetUniformLocation(shader_mesh.it.getProgramId(), "u_mvpMatrix"); - shader_fpc.it = Shader((GLchar*) POINTCLOUD_VERTEX_SHADER, (GLchar*) POINTCLOUD_FRAGMENT_SHADER); + shader_fpc.it.set((GLchar*) POINTCLOUD_VERTEX_SHADER, 
(GLchar*) POINTCLOUD_FRAGMENT_SHADER); shader_fpc.MVPM = glGetUniformLocation(shader_fpc.it.getProgramId(), "u_mvpMatrix"); camera_ = CameraGL(sl::Translation(0, 2, 2.000), sl::Translation(0, 0, -0.1)); @@ -530,6 +530,10 @@ void MeshObject::draw(bool draw_wire) { } Shader::Shader(const GLchar* vs, const GLchar* fs) { + set(vs, fs); +} + +void Shader::set(const GLchar* vs, const GLchar* fs) { if (!compile(verterxId_, GL_VERTEX_SHADER, vs)) { std::cout << "ERROR: while compiling vertex shader" << std::endl; } @@ -565,12 +569,12 @@ Shader::Shader(const GLchar* vs, const GLchar* fs) { } Shader::~Shader() { - if (verterxId_ != 0) + if (verterxId_ != 0 && glIsShader(verterxId_)) glDeleteShader(verterxId_); - if (fragmentId_ != 0) + if (fragmentId_ != 0 && glIsShader(fragmentId_)) glDeleteShader(fragmentId_); - if (programId_ != 0) - glDeleteShader(programId_); + if (programId_ != 0 && glIsProgram(programId_)) + glDeleteProgram(programId_); } GLuint Shader::getProgramId() { @@ -642,7 +646,7 @@ void PointCloud::initialize(sl::Mat &ref) { if (err != cudaSuccess) std::cerr << "Error: CUDA GetMappedPointer (" << err << ")" << std::endl; - shader_ = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); + shader_.set(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER); shMVPMatrixLoc_ = glGetUniformLocation(shader_.getProgramId(), "u_mvpMatrix"); } @@ -893,7 +897,7 @@ bool CameraViewer::initialize(sl::Mat &im) { faces.push_back(sl::uint3(0,2,3)); ref = im; - shader = Shader(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); + shader.set(VERTEX_SHADER_TEXTURE, FRAGMENT_SHADER_TEXTURE); shMVPMatrixLocTex_ = glGetUniformLocation(shader.getProgramId(), "u_mvpMatrix"); glGenVertexArrays(1, &vaoID_); diff --git a/zed one/cpp/CMakeLists.txt b/zed one/cpp/CMakeLists.txt new file mode 100644 index 00000000..8bec73f6 --- /dev/null +++ b/zed one/cpp/CMakeLists.txt @@ -0,0 +1,51 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +PROJECT(ZED_One_live) + +if (WIN32) + message(FATAL_ERROR "ZED 
One is not yet supported on windows.") +endif(WIN32) + +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +SET(CMAKE_BUILD_TYPE "RelWithDebInfo") + +option(LINK_SHARED_ZED "Link with the ZED SDK shared executable" ON) + +if (NOT LINK_SHARED_ZED AND MSVC) + message(FATAL_ERROR "LINK_SHARED_ZED OFF : ZED SDK static libraries not available on Windows") +endif() + +find_package(ZED 4 REQUIRED) +find_package(OpenCV REQUIRED) +find_package(CUDA ${ZED_CUDA_VERSION} REQUIRED) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) +include_directories(${CUDA_INCLUDE_DIRS}) +include_directories(${ZED_INCLUDE_DIRS}) +include_directories(${OpenCV_INCLUDE_DIRS}) + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${OpenCV_LIBRARY_DIRS}) +link_directories(${CUDA_LIBRARY_DIRS}) + +SET(ZED_LIBS ${ZED_LIBRARIES} ${CUDA_CUDA_LIBRARY} ${CUDA_CUDART_LIBRARY}) + +ADD_EXECUTABLE(${PROJECT_NAME} include/utils.hpp live.cpp) +TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${ZED_LIBS} ${OpenCV_LIBRARIES}) + +ADD_EXECUTABLE(ZED_One_SVO_playback include/utils.hpp svo_playback.cpp) +TARGET_LINK_LIBRARIES(ZED_One_SVO_playback ${ZED_LIBS} ${OpenCV_LIBRARIES}) + +ADD_EXECUTABLE(ZED_One_SVO_recording include/utils.hpp svo_recording.cpp) +TARGET_LINK_LIBRARIES(ZED_One_SVO_recording ${ZED_LIBS} ${OpenCV_LIBRARIES}) + +ADD_EXECUTABLE(ZED_One_streaming_sender include/utils.hpp streaming_sender.cpp) +TARGET_LINK_LIBRARIES(ZED_One_streaming_sender ${ZED_LIBS} ${OpenCV_LIBRARIES}) + +ADD_EXECUTABLE(ZED_One_streaming_receiver include/utils.hpp streaming_receiver.cpp) +TARGET_LINK_LIBRARIES(ZED_One_streaming_receiver ${ZED_LIBS} ${OpenCV_LIBRARIES}) + +if(INSTALL_SAMPLES) + LIST(APPEND SAMPLE_LIST ${PROJECT_NAME}) + SET(SAMPLE_LIST "${SAMPLE_LIST}" PARENT_SCOPE) +endif() diff --git a/zed one/cpp/include/utils.hpp b/zed one/cpp/include/utils.hpp new file mode 100644 index 00000000..a7aeef8c --- /dev/null +++ b/zed one/cpp/include/utils.hpp @@ -0,0 +1,104 @@ +#pragma once + +// ZED include 
+#include +#include + +// OpenCV include (for display) +#include +#include + +using namespace std; + +inline cv::Mat slMat2cvMat(sl::Mat& input) { + // Mapping between MAT_TYPE and CV_TYPE + int cv_type = -1; + switch (input.getDataType()) { + case sl::MAT_TYPE::F32_C1: cv_type = CV_32FC1; + break; + case sl::MAT_TYPE::F32_C2: cv_type = CV_32FC2; + break; + case sl::MAT_TYPE::F32_C3: cv_type = CV_32FC3; + break; + case sl::MAT_TYPE::F32_C4: cv_type = CV_32FC4; + break; + case sl::MAT_TYPE::U8_C1: cv_type = CV_8UC1; + break; + case sl::MAT_TYPE::U8_C2: cv_type = CV_8UC2; + break; + case sl::MAT_TYPE::U8_C3: cv_type = CV_8UC3; + break; + case sl::MAT_TYPE::U8_C4: cv_type = CV_8UC4; + break; + default: break; + } + + return cv::Mat(input.getHeight(), input.getWidth(), cv_type, input.getPtr(sl::MEM::CPU)); +} + +void print(std::string msg_prefix, sl::ERROR_CODE err_code = sl::ERROR_CODE::SUCCESS, std::string msg_suffix="") { + std::cout << "[Sample]"; + if (err_code != sl::ERROR_CODE::SUCCESS) + std::cout << "[Error] "; + else + std::cout << " "; + std::cout << msg_prefix << " "; + if (err_code != sl::ERROR_CODE::SUCCESS) { + std::cout << " | " << sl::toString(err_code) << " : "; + std::cout << sl::toVerbose(err_code); + } + if (!msg_suffix.empty()) + std::cout << " " << msg_suffix; + std::cout << std::endl; +} + +int parseArgs(int argc, char **argv, sl::InitParametersOne& param) { + if (argc > 1) { + string arg = string(argv[1]); + unsigned int a, b, c, d, port; + if (sscanf(arg.c_str(), "%u.%u.%u.%u:%d", &a, &b, &c, &d, &port) == 5) { + // Stream input mode - IP + port + string ip_adress = to_string(a) + "." + to_string(b) + "." + to_string(c) + "." 
+ to_string(d); + param.input.setFromStream(sl::String(ip_adress.c_str()), port); + cout << "[Sample] Using Stream input, IP : " << ip_adress << ", port : " << port << endl; + } else if (sscanf(arg.c_str(), "%u.%u.%u.%u", &a, &b, &c, &d) == 4) { + // Stream input mode - IP only + param.input.setFromStream(sl::String(argv[1])); + cout << "[Sample] Using Stream input, IP : " << argv[1] << endl; + } else + return 1; + } else { + // Default + return 1; + } + return 0; +} + +static bool exit_app = false; + +// Handle the CTRL-C keyboard signal +#ifdef _WIN32 +#include + +void CtrlHandler(DWORD fdwCtrlType) { + exit_app = (fdwCtrlType == CTRL_C_EVENT); +} +#else +#include +void nix_exit_handler(int s) { + exit_app = true; +} +#endif + +// Set the function to handle the CTRL-C +void SetCtrlHandler() { +#ifdef _WIN32 + SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE); +#else // unix + struct sigaction sigIntHandler; + sigIntHandler.sa_handler = nix_exit_handler; + sigemptyset(&sigIntHandler.sa_mask); + sigIntHandler.sa_flags = 0; + sigaction(SIGINT, &sigIntHandler, NULL); +#endif +} \ No newline at end of file diff --git a/zed one/cpp/live.cpp b/zed one/cpp/live.cpp new file mode 100644 index 00000000..1a145f3a --- /dev/null +++ b/zed one/cpp/live.cpp @@ -0,0 +1,90 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////// + +/******************************************************************************** + ** This sample demonstrates how to grab images and change the camera settings ** + ** with the ZED SDK ** + ********************************************************************************/ + +// ZED include +#include + +// Sample includes +#include + +// Using std and sl namespaces +using namespace std; +using namespace sl; + +int main(int argc, char **argv) +{ + // Create a ZED Camera object + CameraOne zed; + + InitParametersOne init_parameters; + init_parameters.sdk_verbose = true; + init_parameters.camera_resolution = sl::RESOLUTION::AUTO; + + // Open the camera + auto returned_state = zed.open(init_parameters); + if (returned_state != ERROR_CODE::SUCCESS) + { + print("Camera Open", returned_state, "Exit program."); + return EXIT_FAILURE; + } + // Create a Mat to store images + Mat zed_image; + + auto _cam_infos = zed.getCameraInformation(); + + std::cout<<"CAM INFOS\n "<< + "MODEL "<<_cam_infos.camera_model<<"\n"<< + "INPUT "<<_cam_infos.input_type<<"\n"<< + "SERIAL "<<_cam_infos.serial_number<<"\n"<< + "Resolution "<<_cam_infos.camera_configuration.resolution.width <<"x"<<_cam_infos.camera_configuration.resolution.height<<"\n"<< + "FPS "<<_cam_infos.camera_configuration.fps< + +// Sample includes +#include "utils.hpp" + +// Using std and sl namespaces +using 
namespace std; +using namespace sl; + +// Sample functions +void updateCameraSettings(char key, sl::CameraOne &zed); +void switchCameraSettings(); +void printHelp(); + +// Sample variables +VIDEO_SETTINGS camera_settings_ = VIDEO_SETTINGS::BRIGHTNESS; +string str_camera_settings = "BRIGHTNESS"; +int step_camera_setting = 1; + +bool selectInProgress = false; +sl::Rect selection_rect; +cv::Point origin_rect; +static void onMouse(int event, int x, int y, int, void*) +{ + switch (event) + { + case cv::EVENT_LBUTTONDOWN: + { + origin_rect = cv::Point(x, y); + selectInProgress = true; + break; + } + + case cv::EVENT_LBUTTONUP: + { + selectInProgress = false; + break; + } + + case cv::EVENT_RBUTTONDOWN: + { + //Reset selection + selectInProgress = false; + selection_rect = sl::Rect(0,0,0,0); + break; + } + } + + if (selectInProgress) + { + selection_rect.x = MIN(x, origin_rect.x); + selection_rect.y = MIN(y, origin_rect.y); + selection_rect.width = abs(x - origin_rect.x) + 1; + selection_rect.height = abs(y - origin_rect.y) + 1; + } +} + +vector< string> split(const string& s, char seperator) { + vector< string> output; + string::size_type prev_pos = 0, pos = 0; + + while ((pos = s.find(seperator, pos)) != string::npos) { + string substring(s.substr(prev_pos, pos - prev_pos)); + output.push_back(substring); + prev_pos = ++pos; + } + + output.push_back(s.substr(prev_pos, pos - prev_pos)); + return output; +} + +void setStreamParameter(InitParametersOne& init_p, string& argument) { + vector< string> configStream = split(argument, ':'); + String ip(configStream.at(0).c_str()); + if (configStream.size() == 2) { + init_p.input.setFromStream(ip, atoi(configStream.at(1).c_str())); + } else init_p.input.setFromStream(ip); +} + +int main(int argc, char **argv) { + CameraOne zed; + // Set configuration parameters for the ZED + InitParametersOne init_parameters; + init_parameters.sdk_verbose = true; + + string stream_params; + if (argc > 1) { + stream_params = string(argv[1]); + } 
else { + cout << "\nOpening the stream requires the IP of the sender\n"; + cout << "Usage : ./ZED_Streaming_Receiver IP:[port]\n"; + cout << "You can specify it now, then press ENTER, 'IP:[port]': "; + cin >> stream_params; + } + + setStreamParameter(init_parameters, stream_params); + + cv::String win_name = "Camera Remote Control"; + cv::namedWindow(win_name); + cv::setMouseCallback(win_name, onMouse); + + // Open the camera + auto returned_state = zed.open(init_parameters); + if (returned_state != ERROR_CODE::SUCCESS) { + print("Camera Open", returned_state, "Exit program."); + return EXIT_FAILURE; + } + + // Print camera information + auto camera_info = zed.getCameraInformation(); + cout << endl; + cout << "ZED Model : " << camera_info.camera_model << endl; + cout << "ZED Serial Number : " << camera_info.serial_number << endl; + cout << "ZED Camera Firmware : " << camera_info.camera_configuration.firmware_version << "/" << camera_info.sensors_configuration.firmware_version << endl; + cout << "ZED Camera Resolution : " << camera_info.camera_configuration.resolution.width << "x" << camera_info.camera_configuration.resolution.height << endl; + cout << "ZED Camera FPS : " << zed.getInitParameters().camera_fps << endl; + + // Print help in console + printHelp(); + + // Create a Mat to store images + Mat image; + + // Initialise camera setting + switchCameraSettings(); + + // Capture new images until 'q' is pressed + char key = ' '; + while (key != 'q') { + // Check that a new image is successfully acquired + returned_state = zed.grab(); + if (returned_state == ERROR_CODE::SUCCESS) { + // Retrieve left image + zed.retrieveImage(image); + + // Convert sl::Mat to cv::Mat (share buffer) + cv::Mat cvImage = slMat2cvMat(image); + + //Check that selection rectangle is valid and draw it on the image + if (!selection_rect.isEmpty() && selection_rect.isContained(sl::Resolution(cvImage.cols, cvImage.rows))) + cv::rectangle(cvImage, 
cv::Rect(selection_rect.x,selection_rect.y,selection_rect.width,selection_rect.height),cv::Scalar(0, 255, 0), 2); + + // Display image with OpenCV + cv::imshow(win_name, cvImage); + + } else { + print("Error during capture : ", returned_state); + break; + } + + key = cv::waitKey(5); + // Change camera settings with keyboard + updateCameraSettings(key, zed); + } + + // Exit + zed.close(); + return EXIT_SUCCESS; +} + +/** + This function updates camera settings + **/ +void updateCameraSettings(char key, sl::CameraOne &zed) { + int current_value; + + // Keyboard shortcuts + switch (key) { + // Switch to the next camera parameter + case 's': + switchCameraSettings(); + zed.getCameraSettings(camera_settings_,current_value); + break; + + // Increase camera settings value ('+' key) + case '+': + zed.getCameraSettings(camera_settings_,current_value); + zed.setCameraSettings(camera_settings_, current_value + step_camera_setting); + zed.getCameraSettings(camera_settings_,current_value); + print(str_camera_settings+": "+std::to_string(current_value)); + break; + + // Decrease camera settings value ('-' key) + case '-': + zed.getCameraSettings(camera_settings_,current_value); + current_value = current_value > 0 ? 
current_value - step_camera_setting : 0; // take care of the 'default' value parameter: VIDEO_SETTINGS_VALUE_AUTO + zed.setCameraSettings(camera_settings_, current_value); + zed.getCameraSettings(camera_settings_,current_value); + print(str_camera_settings+": "+std::to_string(current_value)); + break; + + // Reset to default parameters + case 'r': + print("Reset all settings to default"); + for (int s = (int) VIDEO_SETTINGS::BRIGHTNESS; s <= (int) VIDEO_SETTINGS::WHITEBALANCE_TEMPERATURE; s++) + zed.setCameraSettings(static_cast (s), sl::VIDEO_SETTINGS_VALUE_AUTO); + break; + + default : + break; + } +} + +/** + This function toggles between camera settings + **/ +void switchCameraSettings() { + camera_settings_ = static_cast ((int) camera_settings_ + 1); + + // reset to 1st setting + if (camera_settings_ == VIDEO_SETTINGS::LED_STATUS) + camera_settings_ = VIDEO_SETTINGS::BRIGHTNESS; + + // select the right step + step_camera_setting = (camera_settings_ == VIDEO_SETTINGS::WHITEBALANCE_TEMPERATURE) ? 100 : 1; + + // get the name of the selected SETTING + str_camera_settings = string(sl::toString(camera_settings_).c_str()); + + print("Switch to camera settings: ", ERROR_CODE::SUCCESS, str_camera_settings); +} + +/** + This function displays help + **/ +void printHelp() { + cout << "\n\nCamera controls hotkeys:\n"; + cout << "* Increase camera settings value: '+'\n"; + cout << "* Decrease camera settings value: '-'\n"; + cout << "* Toggle camera settings: 's'\n"; + cout << "* Reset all parameters: 'r'\n"; + cout << "* Exit : 'q'\n\n"; +} diff --git a/zed one/cpp/streaming_sender.cpp b/zed one/cpp/streaming_sender.cpp new file mode 100644 index 00000000..e43ec388 --- /dev/null +++ b/zed one/cpp/streaming_sender.cpp @@ -0,0 +1,78 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////// + +/************************************************************************************************ + ** This sample shows how to stream remotely the video of a ZED camera. Any application using ** + ** the ZED SDK can receive and process this stream. See Camera Streaming/Receiver example. 
** + *************************************************************************************************/ + +// ZED includes +#include + +// Sample includes +#include "utils.hpp" + +// Using namespace +using namespace sl; +using namespace std; + +int main(int argc, char **argv) { + // Create a ZED camera + CameraOne zed; + + // Set configuration parameters for the ZED + InitParametersOne init_parameters; + init_parameters.camera_resolution = sl::RESOLUTION::AUTO; + init_parameters.sdk_verbose = 1; + int res_arg = parseArgs(argc, argv, init_parameters); + + // Open the camera + auto returned_state = zed.open(init_parameters); + if (returned_state != ERROR_CODE::SUCCESS) { + print("Camera Open", returned_state, "Exit program."); + return EXIT_FAILURE; + } + + StreamingParameters stream_params; + if (argc == 2 && res_arg == 1) stream_params.port = atoi(argv[1]); + if (argc > 2) stream_params.port = atoi(argv[2]); + + returned_state = zed.enableStreaming(stream_params); + if (returned_state != ERROR_CODE::SUCCESS) { + print("Streaming initialization error: ", returned_state); + return EXIT_FAILURE; + } + + print("Streaming on port " + to_string(stream_params.port)); + + SetCtrlHandler(); + + while (!exit_app) { + if (zed.grab() != ERROR_CODE::SUCCESS) + sleep_ms(5); + } + + // disable Streaming + zed.disableStreaming(); + + // close the Camera + zed.close(); + return EXIT_SUCCESS; +} diff --git a/zed one/cpp/svo_playback.cpp b/zed one/cpp/svo_playback.cpp new file mode 100644 index 00000000..3ff73539 --- /dev/null +++ b/zed one/cpp/svo_playback.cpp @@ -0,0 +1,89 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////// + +/******************************************************************************** + ** This sample demonstrates how to grab images and change the camera settings ** + ** with the ZED SDK ** + ********************************************************************************/ + +// ZED include +#include + +// Sample includes +#include + +// Using std and sl namespaces +using namespace std; +using namespace sl; + +int main(int argc, char **argv) +{ + if (argc < 2) { + cout << "No arguments provided, a SVO name is expected.\n"; + return EXIT_FAILURE; + } + + // Create a ZED Camera object + CameraOne zed; + + InitParametersOne init_parameters; + init_parameters.sdk_verbose = true; + init_parameters.input.setFromSVOFile(argv[1]); + + // Open the camera + auto returned_state = zed.open(init_parameters); + if (returned_state != ERROR_CODE::SUCCESS) + { + print("Camera Open", returned_state, "Exit program."); + return EXIT_FAILURE; + } + + int frames = zed.getSVONumberOfFrames(); + std::cout << "Number of frames: " << frames << std::endl; + + // Create a Mat to store images + Mat zed_image; + + // Capture new images until 'q' is pressed + char key = ' '; + while (key != 'q') + { + // Check that a new image is successfully acquired + returned_state = zed.grab(); + if (returned_state == ERROR_CODE::SUCCESS) + { + // Retrieve image + zed.retrieveImage(zed_image); 
+ // Display the image + cv::imshow("ZED-One", slMat2cvMat(zed_image)); + } + else + { + print("Grab", returned_state); + break; + } + + key = cv::waitKey(10); + // Change camera settings with keyboard + } + + zed.close(); + return EXIT_SUCCESS; +} diff --git a/zed one/cpp/svo_recording.cpp b/zed one/cpp/svo_recording.cpp new file mode 100644 index 00000000..3272fea9 --- /dev/null +++ b/zed one/cpp/svo_recording.cpp @@ -0,0 +1,93 @@ +/////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2024, STEREOLABS. +// +// All rights reserved. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////// + +/**************************************************************************************** +** This sample shows how to record video in Stereolabs SVO format. 
** +** SVO video files can be played with the ZED API and used with its different modules ** +*****************************************************************************************/ + +// ZED includes +#include + +// Sample includes +#include "utils.hpp" + +// Using namespace +using namespace sl; +using namespace std; + +int main(int argc, char **argv) { + + if (argc < 2) { + cout << "No arguments provided, an output SVO name is expected.\n"; + return EXIT_FAILURE; + } + + // Create a ZED camera + CameraOne zed; + + // Set configuration parameters for the ZED + InitParametersOne init_parameters; + + // Open the camera + auto returned_state = zed.open(init_parameters); + if (returned_state != ERROR_CODE::SUCCESS) { + print("Camera Open", returned_state, "Exit program."); + return EXIT_FAILURE; + } + + // Enable recording with the filename specified in argument + RecordingParameters recording_parameters; + recording_parameters.video_filename.set(argv[1]); + recording_parameters.compression_mode = SVO_COMPRESSION_MODE::H265; + returned_state = zed.enableRecording(recording_parameters); + if (returned_state != ERROR_CODE::SUCCESS) { + print("Camera enable Recording", returned_state, "Exit program."); + zed.close(); + return EXIT_FAILURE; + } + + // Start recording SVO, stop with Ctrl-C command + std::cout << "SVO is Recording, use Ctrl-C to stop." 
<< std::endl; + SetCtrlHandler(); + sl::RecordingStatus rec_status; + auto start = std::chrono::high_resolution_clock::now(); + while (!exit_app) { + if (zed.grab() == ERROR_CODE::SUCCESS) { + + // Each new frame is automatically added to the SVO file + + if(std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - start).count() > 1000){ + rec_status = zed.getRecordingStatus(); + std::cout<<"Recording: "<= 1: + cam.set_camera_settings(camera_settings, current_value - step_camera_settings) + print(str_camera_settings + ": " + str(current_value - step_camera_settings)) + elif key == 114: # for 'r' key + # Reset all camera settings to default. + cam.set_camera_settings(sl.VIDEO_SETTINGS.BRIGHTNESS, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.CONTRAST, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.HUE, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.SATURATION, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.SHARPNESS, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.EXPOSURE, -1) + cam.set_camera_settings(sl.VIDEO_SETTINGS.WHITEBALANCE_TEMPERATURE, -1) + print("[Sample] Reset all settings to default") + elif key == 108: # for 'l' key + # Turn on or off camera LED. + led_on = not led_on + cam.set_camera_settings(sl.VIDEO_SETTINGS.LED_STATUS, led_on) + elif key == 97 : # for 'a' key + # Set exposure region of interest (ROI) on a target area. + print("[Sample] set AEC_AGC_ROI on target [",selection_rect.x,",",selection_rect.y,",",selection_rect.width,",",selection_rect.height,"]") + cam.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI,selection_rect,sl.SIDE.BOTH) + elif key == 102: #for 'f' key + # Reset exposure ROI to full resolution. + print("[Sample] reset AEC_AGC_ROI to full res") + cam.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI,selection_rect,sl.SIDE.BOTH,True) + +# Function to switch between different camera settings (brightness, contrast, etc.). 
+def switch_camera_settings(): + global camera_settings + global str_camera_settings + if camera_settings == sl.VIDEO_SETTINGS.BRIGHTNESS: + camera_settings = sl.VIDEO_SETTINGS.CONTRAST + str_camera_settings = "Contrast" + print("[Sample] Switch to camera settings: CONTRAST") + elif camera_settings == sl.VIDEO_SETTINGS.CONTRAST: + camera_settings = sl.VIDEO_SETTINGS.HUE + str_camera_settings = "Hue" + print("[Sample] Switch to camera settings: HUE") + elif camera_settings == sl.VIDEO_SETTINGS.HUE: + camera_settings = sl.VIDEO_SETTINGS.SATURATION + str_camera_settings = "Saturation" + print("[Sample] Switch to camera settings: SATURATION") + elif camera_settings == sl.VIDEO_SETTINGS.SATURATION: + camera_settings = sl.VIDEO_SETTINGS.SHARPNESS + str_camera_settings = "Sharpness" + print("[Sample] Switch to camera settings: Sharpness") + elif camera_settings == sl.VIDEO_SETTINGS.SHARPNESS: + camera_settings = sl.VIDEO_SETTINGS.GAIN + str_camera_settings = "Gain" + print("[Sample] Switch to camera settings: GAIN") + elif camera_settings == sl.VIDEO_SETTINGS.GAIN: + camera_settings = sl.VIDEO_SETTINGS.EXPOSURE + str_camera_settings = "Exposure" + print("[Sample] Switch to camera settings: EXPOSURE") + elif camera_settings == sl.VIDEO_SETTINGS.EXPOSURE: + camera_settings = sl.VIDEO_SETTINGS.WHITEBALANCE_TEMPERATURE + str_camera_settings = "White Balance" + print("[Sample] Switch to camera settings: WHITEBALANCE") + elif camera_settings == sl.VIDEO_SETTINGS.WHITEBALANCE_TEMPERATURE: + camera_settings = sl.VIDEO_SETTINGS.BRIGHTNESS + str_camera_settings = "Brightness" + print("[Sample] Switch to camera settings: BRIGHTNESS") + +def valid_ip_or_hostname(ip_or_hostname): + try: + host, port = ip_or_hostname.split(':') + socket.inet_aton(host) # Vérifier si c'est une adresse IP valide + port = int(port) + return f"{host}:{port}" + except (socket.error, ValueError): + raise argparse.ArgumentTypeError("Invalid IP address or hostname format. 
Use format a.b.c.d:p or hostname:p") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--ip_address', type=valid_ip_or_hostname, help='IP address or hostname of the sender. Should be in format a.b.c.d:p or hostname:p', required=True) + opt = parser.parse_args() + main() + diff --git a/zed one/python/streaming_sender.py b/zed one/python/streaming_sender.py new file mode 100644 index 00000000..e5c2a5b5 --- /dev/null +++ b/zed one/python/streaming_sender.py @@ -0,0 +1,92 @@ +######################################################################## +# +# Copyright (c) 2022, STEREOLABS. +# +# All rights reserved. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +######################################################################## + +""" + Open the camera and start streaming images using H264 codec +""" +import sys +import pyzed.sl as sl +import argparse +from time import sleep + +def parse_args(init): + if ("HD2K" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD2K + print("[Sample] Using Camera in resolution HD2K") + elif ("HD1200" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD1200 + print("[Sample] Using Camera in resolution HD1200") + elif ("HD1080" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD1080 + print("[Sample] Using Camera in resolution HD1080") + elif ("HD720" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD720 + print("[Sample] Using Camera in resolution HD720") + elif ("SVGA" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.SVGA + print("[Sample] Using Camera in resolution SVGA") + elif ("VGA" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.VGA + print("[Sample] Using Camera in resolution VGA") + elif len(opt.resolution)>0: + print("[Sample] No valid resolution entered. Using default") + else : + print("[Sample] Using default resolution") + +def main(): + + init = sl.InitParametersOne() + init.camera_resolution = sl.RESOLUTION.AUTO + init.sdk_verbose = 1 + parse_args(init) + cam = sl.CameraOne() + status = cam.open(init) + if status != sl.ERROR_CODE.SUCCESS: #Ensure the camera has opened succesfully + print("Camera Open : "+repr(status)+". 
Exit program.") + exit() + stream_params = sl.StreamingParameters() + print("Streaming on port ",stream_params.port) #Get the port used to stream + stream_params.codec = sl.STREAMING_CODEC.H264 + stream_params.bitrate = 4000 + status_streaming = cam.enable_streaming(stream_params) #Enable streaming + if status_streaming != sl.ERROR_CODE.SUCCESS: + print("Streaming initialization error: ", status_streaming) + cam.close() + exit() + exit_app = False + try : + while not exit_app: + err = cam.grab() + if err == sl.ERROR_CODE.SUCCESS: + sleep(0.001) + except KeyboardInterrupt: + exit_app = True + + # disable Streaming + cam.disable_streaming() + # close the Camera + cam.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--resolution', type=str, help='Resolution, can be either HD2K, HD1200, HD1080, HD720, SVGA or VGA', default = '') + opt = parser.parse_args() + main() diff --git a/zed one/python/svo_playback.py b/zed one/python/svo_playback.py new file mode 100644 index 00000000..7a07dab7 --- /dev/null +++ b/zed one/python/svo_playback.py @@ -0,0 +1,117 @@ +######################################################################## +# +# Copyright (c) 2022, STEREOLABS. +# +# All rights reserved. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +######################################################################## + +""" + Read SVO sample to read the video and the information of the camera. It can pick a frame of the svo and save it as + a JPEG or PNG file. Depth map and Point Cloud can also be saved into files. +""" +import sys +import pyzed.sl as sl +import cv2 +import argparse +import os + +def progress_bar(percent_done, bar_length=50): + #Display progress bar + done_length = int(bar_length * percent_done / 100) + bar = '=' * done_length + '-' * (bar_length - done_length) + sys.stdout.write('[%s] %i%s\r' % (bar, percent_done, '%')) + sys.stdout.flush() + +def main(): + filepath = opt.input_svo_file # Path to the .svo file to be playbacked + input_type = sl.InputType() + input_type.set_from_svo_file(filepath) #Set init parameter to run from the .svo + init = sl.InitParametersOne() + init.input=input_type + cam = sl.CameraOne() + print("Opening camera") + status = cam.open(init) + if status != sl.ERROR_CODE.SUCCESS: #Ensure the camera opened succesfully + print("Camera Open", status, "Exit program.") + exit(1) + + print("Camera open.") + + # Set a maximum resolution, for visualisation confort + resolution = cam.get_camera_information().camera_configuration.resolution + low_resolution = sl.Resolution(min(720,resolution.width) * 2, min(404,resolution.height)) + svo_image = sl.Mat(min(720,resolution.width) * 2,min(404,resolution.height), sl.MAT_TYPE.U8_C4, sl.MEM.CPU) + + mat 
= sl.Mat() + + key = ' ' + print(" Press 's' to save SVO image as a PNG") + print(" Press 'f' to jump forward in the video") + print(" Press 'b' to jump backward in the video") + print(" Press 'q' to exit...") + + svo_frame_rate = cam.get_init_parameters().camera_fps + nb_frames = cam.get_svo_number_of_frames() + print("[Info] SVO contains " ,nb_frames," frames") + + + key = '' + + while key != 113: # for 'q' key + err = cam.grab() + if err == sl.ERROR_CODE.SUCCESS: + cam.retrieve_image(svo_image,sl.VIEW.LEFT,sl.MEM.CPU,low_resolution) #retrieve image left and right + svo_position = cam.get_svo_position() + cv2.imshow("View", svo_image.get_data()) #dislay both images to cv2 + key = cv2.waitKey(10) + if key == 115 :# for 's' key + #save .svo image as a png + cam.retrieve_image(mat) + filepath = "capture_" + str(svo_position) + ".png" + img = mat.write(filepath) + if img == sl.ERROR_CODE.SUCCESS: + print("Saved image : ",filepath) + else: + print("Something wrong happened in image saving... ") + if key == 102: # for 'f' key + #move forward one second + cam.set_svo_position(svo_position+svo_frame_rate) + if key == 98: #for 'b' key + #move backward one second + cam.set_svo_position(svo_position-svo_frame_rate) + progress_bar(svo_position /nb_frames*100, 30) + elif err == sl.ERROR_CODE.END_OF_SVOFILE_REACHED: #Check if the .svo has ended + progress_bar(100, 30) + print("SVO end has been reached. 
Looping back to 0") + cam.set_svo_position(0) + else: + print("Grab ZED : ", err) + break + cv2.destroyAllWindows() + cam.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--input_svo_file', type=str, help='Path to the SVO file', required= True) + opt = parser.parse_args() + if not opt.input_svo_file.endswith(".svo") and not opt.input_svo_file.endswith(".svo2"): + print("--input_svo_file parameter should be a .svo file but is not : ",opt.input_svo_file,"Exit program.") + exit() + if not os.path.isfile(opt.input_svo_file): + print("--input_svo_file parameter should be an existing file but is not : ",opt.input_svo_file,"Exit program.") + exit() + main() diff --git a/zed one/python/svo_recording.py b/zed one/python/svo_recording.py new file mode 100644 index 00000000..772f6b4a --- /dev/null +++ b/zed one/python/svo_recording.py @@ -0,0 +1,71 @@ +######################################################################## +# +# Copyright (c) 2022, STEREOLABS. +# +# All rights reserved. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +######################################################################## + +import sys +import pyzed.sl as sl +from signal import signal, SIGINT +import argparse +import os + +cam = sl.CameraOne() + +#Handler to deal with CTRL+C properly +def handler(signal_received, frame): + cam.disable_recording() + cam.close() + sys.exit(0) + +signal(SIGINT, handler) + +def main(): + + init = sl.InitParametersOne() + + status = cam.open(init) + if status != sl.ERROR_CODE.SUCCESS: + print("Camera Open", status, "Exit program.") + exit(1) + + recording_param = sl.RecordingParameters(opt.output_svo_file, sl.SVO_COMPRESSION_MODE.H265) # Enable recording with the filename specified in argument + err = cam.enable_recording(recording_param) + if err != sl.ERROR_CODE.SUCCESS: + print("Recording ZED : ", err) + exit(1) + + print("SVO is Recording, use Ctrl-C to stop.") # Start recording SVO, stop with Ctrl-C command + frames_recorded = 0 + + while frames_recorded < 200: + if cam.grab() == sl.ERROR_CODE.SUCCESS : # Check that a new image is successfully acquired + frames_recorded += 1 + print("Frame count: " + str(frames_recorded), end="\r") + + print("Done!") + cam.disable_recording() + cam.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--output_svo_file', type=str, help='Path to the SVO file that will be written', required= True) + opt = parser.parse_args() + if not opt.output_svo_file.endswith(".svo") and not opt.output_svo_file.endswith(".svo2"): + print("--output_svo_file parameter should be a .svo file but is not : ",opt.output_svo_file,"Exit program.") + exit() + main() \ No newline at end of file