diff --git a/.devcontainer/Dockerfile_Jammy b/.devcontainer/Dockerfile_Jammy index 94122462c..ac02f60ba 100644 --- a/.devcontainer/Dockerfile_Jammy +++ b/.devcontainer/Dockerfile_Jammy @@ -31,23 +31,25 @@ RUN echo "${TZ}" > /etc/localtime && \ RUN echo "CUDA Version ${CUDA_MAJOR}.${CUDA_MINOR}.${CUDA_PATCH}" > /usr/local/cuda/version.txt # Add APT Repo for PCIe drivers. -RUN apt update && apt install -y curl && \ +RUN apt update && apt install -y wget gnupg && \ echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list && \ - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ - curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg && \ + wget -qO - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ + wget -qO - https://bazel.build/bazel-release.pub.gpg | gpg --dearmor > bazel-archive-keyring.gpg && \ mv bazel-archive-keyring.gpg /usr/share/keyrings && \ echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list # Install Required Ubuntu Packages RUN apt-get update && apt-get install --no-install-recommends -y iputils-ping \ - build-essential gdb wget less udev zstd sudo libgomp1 \ + build-essential gdb less udev zstd sudo libgomp1 \ cmake git libgtk2.0-dev pkg-config libx264-dev libdrm-dev ssh \ libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev tzdata net-tools \ yasm libatlas-base-dev gfortran libpq-dev libpostproc-dev \ libxine2-dev libglew-dev libtiff5-dev zlib1g-dev cowsay lolcat locales usbutils \ libeigen3-dev python3-dev python3-pip python3-numpy libx11-dev xauth libssl-dev \ valgrind doxygen graphviz htop nano fortune fortunes \ - vim-common gasket-dkms nlohmann-json3-dev gcovr lcov + vim-common gasket-dkms nlohmann-json3-dev gcovr lcov curl \ + libaom-dev libass-dev libfdk-aac-dev libdav1d-dev libmp3lame-dev \ + libopus-dev libvorbis-dev libvpx-dev libx264-dev libx265-dev # Nice to have RUN apt-get update && apt-get install --no-install-recommends -y bat \ @@ -55,7 +57,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y bat \ # Install Required Python Packages and link python3 executable to python. RUN ln -s /usr/bin/python3 /usr/bin/python && \ - python -m pip install numpy opencv-python pyopengl + python -m pip install numpy opencv-python pyopengl matplotlib # Set Timezone RUN echo "${TZ}" > /etc/localtime && \ diff --git a/.devcontainer/Dockerfile_JetPack b/.devcontainer/Dockerfile_JetPack index a5a6712ee..0bac4860f 100644 --- a/.devcontainer/Dockerfile_JetPack +++ b/.devcontainer/Dockerfile_JetPack @@ -25,26 +25,32 @@ RUN echo "# R${L4T_MAJOR} (release), REVISION: ${L4T_MINOR}.${L4T_PATCH}" > /etc # Clean APT Cache RUN rm /var/lib/dpkg/info/libc-bin.* # Add APT Repo for PCIe drivers and Bazel. 
-RUN apt update && apt install -y curl && \ +RUN apt update && apt install -y wget && \ echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list && \ - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - # Install Required Ubuntu Packages RUN apt-get update && apt-get install --no-install-recommends -y \ build-essential gfortran cmake git gdb file tar libatlas-base-dev apt-transport-https iputils-ping \ - libswresample-dev libcanberra-gtk3-module zstd wget less libx264-dev libdrm-dev \ + libswresample-dev libcanberra-gtk3-module zstd less libx264-dev libdrm-dev \ libeigen3-dev libglew-dev libgstreamer-plugins-base1.0-dev udev net-tools \ libgstreamer-plugins-good1.0-dev libgstreamer1.0-dev libgtk-3-dev libjpeg-dev sudo usbutils \ libjpeg8-dev libjpeg-turbo8-dev liblapack-dev liblapacke-dev libopenblas-dev libpng-dev tzdata \ libpostproc-dev libtbb-dev libtbb2 libtesseract-dev libtiff-dev libv4l-dev \ libxine2-dev libxvidcore-dev libx264-dev libgtkglext1 libgtkglext1-dev pkg-config qv4l2 \ v4l-utils zlib1g-dev python3-dev libboost-all-dev valgrind doxygen graphviz nano \ - vim-common libedgetpu1-std gasket-dkms ca-certificates nlohmann-json3-dev + vim-common libedgetpu1-std gasket-dkms ca-certificates nlohmann-json3-dev curl \ + python3-dev python3-pip python3-numpy libaom-dev libass-dev libfdk-aac-dev libdav1d-dev \ + libmp3lame-dev libopus-dev libvorbis-dev libvpx-dev libx264-dev libx265-dev # Nice to have RUN apt-get update && apt-get install --no-install-recommends -y bat \ bash-completion fish git-lfs +# Install Required Python Packages and link python3 executable to python. +RUN ln -s /usr/bin/python3 /usr/bin/python && \ + python -m pip install numpy opencv-python pyopengl matplotlib + # This symbolic link is needed to use the streaming features on Jetson inside a container RUN ln -sf /usr/lib/aarch64-linux-gnu/tegra/libv4l2.so.0 /usr/lib/aarch64-linux-gnu/libv4l2.so diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index dbc65cfb2..7089535f8 100755 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -71,7 +71,8 @@ "gruntfuggly.todo-tree", "streetsidesoftware.code-spell-checker", "vscode-icons-team.vscode-icons", - "ryanluker.vscode-coverage-gutters" + "ryanluker.vscode-coverage-gutters", + "GitHub.vscode-pull-request-github" ], "settings": { // VSCode settings. diff --git a/.gitignore b/.gitignore index e1cb219fd..fb3af0533 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,9 @@ valgrind.rpt tools/action-runners/compose.yml !tools/package-builders/ +# Logging tools. 
+tools/logging/*.png + # Compiled Program Autonomy_Software diff --git a/CMakeLists.txt b/CMakeLists.txt index 82a83a5d5..b3b7705f1 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,7 +27,7 @@ cmake_policy(SET CMP0153 OLD) # Allows use of "Exec_Program" function #################################################################################################################### ## Enable or Disable Simulation Mode -option(BUILD_SIM_MODE "Enable Simulation Mode" ON) +option(BUILD_SIM_MODE "Enable Simulation Mode" OFF) ## Enable or Disable Code Coverage Mode option(BUILD_CODE_COVERAGE "Enable Code Coverage Mode" OFF) @@ -186,6 +186,9 @@ endif() ## Find Threads find_package(Threads REQUIRED) +## Find OpenMP +find_package(OpenMP REQUIRED) + ## Find Quill find_package(quill REQUIRED) @@ -309,6 +312,7 @@ endif() ## Link Libraries set(AUTONOMY_LIBRARIES Threads::Threads + OpenMP::OpenMP_CXX Eigen3::Eigen RoveComm_CPP quill::quill @@ -343,15 +347,16 @@ if (BUILD_TESTS_MODE) file(GLOB_RECURSE IntegrationTests_SRC CONFIGURE_DEPENDS "tests/Integration/*.cc") file(GLOB_RECURSE Algorithms_SRC CONFIGURE_DEPENDS "src/algorithms/*.cpp") file(GLOB_RECURSE Drivers_SRC CONFIGURE_DEPENDS "src/drivers/*.cpp") - # file(GLOB_RECURSE Handlers_SRC CONFIGURE_DEPENDS "src/handlers/*.cpp") + file(GLOB_RECURSE Vision_SRC CONFIGURE_DEPENDS "src/vision/*.cpp") file(GLOB Network_SRC CONFIGURE_DEPENDS "src/AutonomyNetworking.cpp") file(GLOB Logging_SRC CONFIGURE_DEPENDS "src/AutonomyLogging.cpp") + file(GLOB Globals_SRC CONFIGURE_DEPENDS "src/AutonomyGlobals.cpp") list(LENGTH UnitTests_SRC UnitTests_LEN) list(LENGTH IntegrationTests_SRC IntegrationTests_LEN) if (UnitTests_LEN GREATER 0) - add_executable(${EXE_NAME}_UnitTests ${UnitTests_SRC} ${Algorithms_SRC} ${Drivers_SRC} ${Network_SRC} ${Logging_SRC}) + add_executable(${EXE_NAME}_UnitTests ${UnitTests_SRC} ${Algorithms_SRC} ${Drivers_SRC} ${Vision_SRC} ${Network_SRC} ${Logging_SRC} ${Globals_SRC}) target_link_libraries(${EXE_NAME}_UnitTests GTest::gtest GTest::gtest_main ${AUTONOMY_LIBRARIES}) add_test(Unit_Tests ${EXE_NAME}_UnitTests) else() @@ -359,7 +364,7 @@ if (BUILD_TESTS_MODE) endif() if (IntegrationTests_LEN GREATER 0) - add_executable(${EXE_NAME}_IntegrationTests ${IntegrationTests_SRC} ${Algorithms_SRC} ${Drivers_SRC} ${Network_SRC} ${Logging_SRC}) + add_executable(${EXE_NAME}_IntegrationTests ${IntegrationTests_SRC} ${Algorithms_SRC} ${Drivers_SRC} ${Vision_SRC} ${Network_SRC} ${Logging_SRC} ${Globals_SRC}) target_link_libraries(${EXE_NAME}_IntegrationTests GTest::gtest GTest::gtest_main ${AUTONOMY_LIBRARIES}) add_test(Integration_Tests ${EXE_NAME}_IntegrationTests) else() diff --git a/data/Custom_Dictionaries/Autonomy-Dictionary.txt b/data/Custom_Dictionaries/Autonomy-Dictionary.txt index 6513f5c8a..fb1037b9c 100644 --- a/data/Custom_Dictionaries/Autonomy-Dictionary.txt +++ b/data/Custom_Dictionaries/Autonomy-Dictionary.txt @@ -41,6 +41,7 @@ CAFFE CALIB CELLVOLTAGE charconv +chromaprint chrono clayjay CLEARWAYPOINTS @@ -202,6 +203,7 @@ Geospatial gfortran gmock GNSS +gnutls GPSLATLONALT GPSMDRS GPSSDELC @@ -254,53 +256,111 @@ jthread kbits keyrings keyscan +ladspa latlng latlon ldconfig LEDRGB LEFTCAM +libaom +libass libatlas libavcodec libavdevice libavfilter libavformat libavutil +libbluray libboost +libbs libc +libcaca libcanberra +libcdio +libchromaprint +libcodec libdatachannel +libdav +libdc libdrm libedgetpu libeigen +libfdk +libflite +libfontconfig +libfreetype +libfribidi libglew +libglslang +libgme +libgnutls libgomp +libgsm libgstreamer 
libgtkglext +libharfbuzz +libiec +libjack libjpeg +libjxl liblapack liblapacke +libmfx +libmp +libmysofa libopenblas +libopenjpeg +libopenmpt +libopus +libplacebo libpostproc libpq +libpulse +librabbitmq +librav +librist +librsvg +librubberband +libshine +libsnappy +libsoxr +libspeex +libsrt +libssh +libsvtav libswresample libswscale libtbb libtesseract +libtheora +libtwolame libusb libv +libvidstab +libvorbis +libvpl +libvpx +libwebp libx libxine +libxvid libxvidcore +libzimg +libzmq +libzvbi localtime LOGNAME +lpthread m_muWebRTCRGBImageCopyMutex MAINCAM MAINCAN mainpage +Makefiles MAKEFLAGS MAPANGLE MAPRANGE marsrover +matplotlib matx Memcheck MERICA @@ -321,6 +381,7 @@ NAVIGATIONBOARD navpvt nJter nlohmann +nonfree noninteractive nullptr numops @@ -330,9 +391,11 @@ OBJDETECTION OBJECTDETECT Objectness odometry +openal OPENCL opencontainers opencv +opengl OPENMP ostream OTSU @@ -347,6 +410,7 @@ pispcl PIXELTYPE PKNS PNSD +pocketsphinx POSETRACK Pranswer pthread @@ -393,6 +457,7 @@ SIMBASICCAM SIMCAM SIMZED SIMZEDCAM +sndio sockaddr softprops SPSC diff --git a/examples/vision/cameras/OpenBasicCam.hpp b/examples/vision/cameras/OpenBasicCam.hpp index cf7d2e7e4..8b573a808 100644 --- a/examples/vision/cameras/OpenBasicCam.hpp +++ b/examples/vision/cameras/OpenBasicCam.hpp @@ -38,7 +38,7 @@ void RunExample() globals::g_pCameraHandler = new CameraHandler(); // Get reference to camera. - ZEDCamera* ExampleBasicCam1 = globals::g_pCameraHandler->GetBasicCam(CameraHandler::BasicCamName::eHeadGroundCam); + BasicCamera* ExampleBasicCam1 = globals::g_pCameraHandler->GetBasicCam(CameraHandler::BasicCamName::eHeadGroundCam); // Start basic cam. ExampleBasicCam1->Start(); diff --git a/examples/vision/dnn/InferenceYOLOModel.hpp b/examples/vision/dnn/InferenceYOLOModel.hpp index c849c7f0f..89fa061a7 100644 --- a/examples/vision/dnn/InferenceYOLOModel.hpp +++ b/examples/vision/dnn/InferenceYOLOModel.hpp @@ -37,7 +37,7 @@ void RunExample() globals::g_pCameraHandler = new CameraHandler(); // Get reference to camera. - ZEDCamera* ExampleBasicCam1 = globals::g_pCameraHandler->GetBasicCam(CameraHandler::eHeadGroundCam); + BasicCamera* ExampleBasicCam1 = globals::g_pCameraHandler->GetBasicCam(CameraHandler::BasicCamName::eHeadGroundCam); // Start basic cam. ExampleBasicCam1->Start(); diff --git a/examples/vision/tagdetection/ArucoDetectionBasicCam.hpp b/examples/vision/tagdetection/ArucoDetectionBasicCam.hpp index 68c2a475f..b6b985946 100644 --- a/examples/vision/tagdetection/ArucoDetectionBasicCam.hpp +++ b/examples/vision/tagdetection/ArucoDetectionBasicCam.hpp @@ -29,7 +29,7 @@ void RunExample() globals::g_pTagDetectionHandler = new TagDetectionHandler(); // Get pointer to camera. - ZEDCamera* ExampleBasicCam1 = globals::g_pCameraHandler->GetBasicCam(CameraHandler::eHeadGroundCam); + BasicCamera* ExampleBasicCam1 = globals::g_pCameraHandler->GetBasicCam(CameraHandler::BasicCamName::eHeadGroundCam); // Start basic cam. ExampleBasicCam1->Start(); diff --git a/examples/vision/tagdetection/ArucoDetectionZED.hpp b/examples/vision/tagdetection/ArucoDetectionZED.hpp index abc3d5a1a..2605c7397 100644 --- a/examples/vision/tagdetection/ArucoDetectionZED.hpp +++ b/examples/vision/tagdetection/ArucoDetectionZED.hpp @@ -30,12 +30,12 @@ void RunExample() globals::g_pTagDetectionHandler = new TagDetectionHandler(); // Get pointer to camera. 
- ZEDCamera* ExampleZEDCam1 = globals::g_pCameraHandler->GetZED(CameraHandler::eHeadMainCam); + ZEDCamera* ExampleZEDCam1 = globals::g_pCameraHandler->GetZED(CameraHandler::ZEDCamName::eHeadMainCam); // Start basic cam. ExampleZEDCam1->Start(); // Get pointer to the tag detector for the basic cam. - TagDetector* ExampleTagDetector1 = globals::g_pTagDetectionHandler->GetTagDetector(TagDetectionHandler::eHeadMainCam); + TagDetector* ExampleTagDetector1 = globals::g_pTagDetectionHandler->GetTagDetector(TagDetectionHandler::TagDetectors::eHeadMainCam); // Start the basic cam detector. ExampleTagDetector1->Start(); diff --git a/src/AutonomyConstants.h b/src/AutonomyConstants.h index 3b501e6d9..347763283 100755 --- a/src/AutonomyConstants.h +++ b/src/AutonomyConstants.h @@ -41,6 +41,7 @@ namespace constants const bool MODE_SIM = false; // REG MODE ENABLED: Toggle RoveComm and Cameras to use standard configuration. #endif const std::string SIM_IP_ADDRESS = "192.168.69.48"; // The IP address to use for simulation mode. + const uint SIM_WEBRTC_QP = 25; // The QP value to use for WebRTC in simulation mode. 0-51, 0 is lossless. If too high for network, frames drop. // Safety constants. const double BATTERY_MINIMUM_CELL_VOLTAGE = 3.2; // The minimum cell voltage of the battery before autonomy will forcefully enter Idle state. @@ -224,8 +225,8 @@ namespace constants // OpenCV ArUco detection config. const cv::aruco::PredefinedDictionaryType ARUCO_DICTIONARY = cv::aruco::DICT_4X4_50; // The predefined ArUco dictionary to use for detections. - const float ARUCO_TAG_SIDE_LENGTH = 0.015f; // Size of the white borders around the tag. - const int ARUCO_VALIDATION_THRESHOLD = 20; // How many times does the tag need to be detected(hit) before being validated as an actual aruco tag. + const float ARUCO_TAG_SIDE_LENGTH = 0.015f; // Size of the white borders around the tag in meters. + const int ARUCO_VALIDATION_THRESHOLD = 10; // How many times does the tag need to be detected(hit) before being validated as an actual aruco tag. const int ARUCO_UNVALIDATED_TAG_FORGET_THRESHOLD = 5; // How many times can an unvalidated tag be missing from frame before being forgotten. const int ARUCO_VALIDATED_TAG_FORGET_THRESHOLD = 10; // How many times can a validated tag be missing from frame before being forgotten. const double ARUCO_PIXEL_THRESHOLD = 175; // Pixel value threshold for pre-process threshold mask @@ -296,7 +297,7 @@ namespace constants /////////////////////////////////////////////////////////////////////////// // Approaching Marker State - const int APPROACH_MARKER_DETECT_ATTEMPTS_LIMIT = 60; // How many consecutive failed attempts at detecting a tag before giving up on marker. + const int APPROACH_MARKER_DETECT_ATTEMPTS_LIMIT = 5; // How many consecutive failed attempts at detecting a tag before giving up on marker. const double APPROACH_MARKER_MOTOR_POWER = 0.3; // The amount of power the motors use when approaching the marker. const double APPROACH_MARKER_PROXIMITY_THRESHOLD = 2.0; // How close in meters the rover must be to the target marker before completing its approach. const double APPROACH_MARKER_TF_CONFIDENCE_THRESHOLD = 0.5; // What is the minimal confidence necessary to consider a tensorflow tag as a target. @@ -330,7 +331,7 @@ namespace constants const double STATEMACHINE_ZED_REALIGN_THRESHOLD = 0.5; // The threshold in meters that the error between GPS and ZED must be before realigning the ZED cameras. // Navigating State. 
- const double NAVIGATING_REACHED_GOAL_RADIUS = 1.0; // The radius in meters that the rover should get to the goal waypoint. + const double NAVIGATING_REACHED_GOAL_RADIUS = 2.0; // The radius in meters that the rover should get to the goal waypoint. // Avoidance State. const double AVOIDANCE_STATE_MOTOR_POWER = DRIVE_MAX_POWER; // Drive speed of avoidance state diff --git a/src/handlers/WaypointHandler.cpp b/src/handlers/WaypointHandler.cpp index 1def4bcc5..416ef0f2e 100755 --- a/src/handlers/WaypointHandler.cpp +++ b/src/handlers/WaypointHandler.cpp @@ -840,10 +840,10 @@ geoops::RoverPose WaypointHandler::SmartRetrieveRoverPose(bool bVIOTracking) } } - // Submit a debug print for the current rover pose. The pose stores both the + // Submit a debug print for the current rover pose. geoops::UTMCoordinate stCurrentUTMPosition = geoops::ConvertGPSToUTM(stCurrentVIOPosition); LOG_DEBUG(logging::g_qSharedLogger, - "Camera VIO Pose is currently: {} (northing), {} (easting), {} (alt), {} (degrees), GNSS/VIO FUSED? = {}", + "Camera VIO Pose is currently: {} (easting), {} (northing), {} (alt), {} (degrees), GNSS/VIO FUSED? = {}", stCurrentUTMPosition.dEasting, stCurrentUTMPosition.dNorthing, stCurrentUTMPosition.dAltitude, diff --git a/src/interfaces/BasicCamera.hpp b/src/interfaces/BasicCamera.hpp index d92ecf1eb..5e4a33c7c 100644 --- a/src/interfaces/BasicCamera.hpp +++ b/src/interfaces/BasicCamera.hpp @@ -59,8 +59,9 @@ class BasicCamera : public Camera nNumFrameRetrievalThreads) { // Initialize member variables. - m_nCameraIndex = -1; - m_szCameraPath = szCameraPath; + m_nCameraIndex = -1; + m_szCameraPath = szCameraPath; + m_bCameraIsConnectedOnVideoIndex = false; } /****************************************************************************** @@ -95,8 +96,9 @@ class BasicCamera : public Camera nNumFrameRetrievalThreads) { // Initialize member variables. - m_nCameraIndex = nCameraIndex; - m_szCameraPath = ""; + m_nCameraIndex = nCameraIndex; + m_szCameraPath = ""; + m_bCameraIsConnectedOnVideoIndex = true; } /****************************************************************************** @@ -142,7 +144,7 @@ class BasicCamera : public Camera * @author clayjay3 (claytonraycowen@gmail.com) * @date 2024-12-22 ******************************************************************************/ - virtual std::future RequestFrameCopy(cv::Mat& cvFrame) override = 0; + std::future RequestFrameCopy(cv::Mat& cvFrame) override = 0; /****************************************************************************** * @brief Accessor for the cameras path or video index. @@ -158,6 +160,7 @@ class BasicCamera : public Camera // Declare protected methods and member variables. int m_nCameraIndex; std::string m_szCameraPath; + bool m_bCameraIsConnectedOnVideoIndex; private: // Declare private methods and member variables. diff --git a/src/interfaces/ZEDCamera.hpp b/src/interfaces/ZEDCamera.hpp index 371d05b99..dba640aca 100644 --- a/src/interfaces/ZEDCamera.hpp +++ b/src/interfaces/ZEDCamera.hpp @@ -216,7 +216,7 @@ class ZEDCamera : public Camera * @author clayjay3 (claytonraycowen@gmail.com) * @date 2024-12-22 ******************************************************************************/ - virtual std::future RequestFrameCopy(cv::Mat& cvFrame) override = 0; + std::future RequestFrameCopy(cv::Mat& cvFrame) override = 0; /****************************************************************************** * @brief Puts a frame pointer into a queue so a copy of a frame from the camera can be written to it. 
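The RequestFrameCopy cleanups in BasicCamera.hpp and ZEDCamera.hpp above are purely stylistic: override already requires a matching virtual function in the base class, so repeating virtual on the re-declaration is redundant, and = 0 keeps the method pure so the interface stays abstract. A minimal sketch of the pattern, with the Camera base simplified and the future's bool result type assumed for illustration (the template arguments are not shown above):

// Sketch only; the bool result type and this trimmed Camera base are assumptions, not the project's exact declarations.
#include <future>
#include <opencv2/core.hpp>

struct Camera
{
    virtual ~Camera() = default;
    // Original pure virtual declared by the base interface.
    virtual std::future<bool> RequestFrameCopy(cv::Mat& cvFrame) = 0;
};

struct BasicCamera : public Camera
{
    // 'override' already implies this matches a virtual in Camera, so 'virtual' is redundant;
    // '= 0' re-declares the method as pure, keeping BasicCamera abstract for its SIM/real subclasses.
    std::future<bool> RequestFrameCopy(cv::Mat& cvFrame) override = 0;
};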
diff --git a/src/states/ApproachingMarkerState.cpp b/src/states/ApproachingMarkerState.cpp index 77a56b4ce..f97c8d23f 100644 --- a/src/states/ApproachingMarkerState.cpp +++ b/src/states/ApproachingMarkerState.cpp @@ -328,8 +328,9 @@ namespace statemachine bool ApproachingMarkerState::IdentifyTargetArucoMarker(arucotag::ArucoTag& stTarget) { // Load all detected tags in the rover's vision. - std::vector vDetectedTags; - tagdetectutils::LoadDetectedArucoTags(vDetectedTags, m_vTagDetectors, true); + std::vector vDetectedArucoTags; + std::vector vDetectedTensorflowTags; + tagdetectutils::LoadDetectedTags(vDetectedArucoTags, vDetectedTensorflowTags, m_vTagDetectors); arucotag::ArucoTag stBestTag; stBestTag.dStraightLineDistance = std::numeric_limits::max(); @@ -339,7 +340,7 @@ namespace statemachine std::string szIdentifiedTags = ""; // Select the tag that is the closest to the rover's current position. - for (const arucotag::ArucoTag& stCandidate : vDetectedTags) + for (const arucotag::ArucoTag& stCandidate : vDetectedArucoTags) { szIdentifiedTags += "\tID: " + std::to_string(stCandidate.nID) + " Hits: " + std::to_string(stCandidate.nHits) + "\n"; @@ -385,8 +386,9 @@ namespace statemachine bool ApproachingMarkerState::IdentifyTargetTensorflowMarker(tensorflowtag::TensorflowTag& stTarget) { // Load all detected tags in the rover's vision. - std::vector vDetectedTags; - tagdetectutils::LoadDetectedTensorflowTags(vDetectedTags, m_vTagDetectors); + std::vector vDetectedArucoTags; + std::vector vDetectedTensorflowTags; + tagdetectutils::LoadDetectedTags(vDetectedArucoTags, vDetectedTensorflowTags, m_vTagDetectors); tensorflowtag::TensorflowTag stBestTag; stBestTag.dStraightLineDistance = std::numeric_limits::max(); @@ -394,7 +396,7 @@ namespace statemachine // stBestTag.nID = -1; // Select the tag that is the closest to the rover's current position and above the confidence threshold. - for (const tensorflowtag::TensorflowTag& stCandidate : vDetectedTags) + for (const tensorflowtag::TensorflowTag& stCandidate : vDetectedTensorflowTags) { if (stCandidate.dStraightLineDistance < stBestTag.dStraightLineDistance && stCandidate.dConfidence >= constants::APPROACH_MARKER_TF_CONFIDENCE_THRESHOLD) { @@ -413,19 +415,5 @@ namespace statemachine } return bTagIdentified; - - // LEAD: Since TensorflowTag no longer has ID, commented out. - // // A tag was found. - // if (stBestTag.nID >= 0) - // { - // // Save it to the passed in reference. - // stTarget = stBestTag; - // return true; - // } - // // No target tag was found. - // else - // { - // return false; - // } } } // namespace statemachine diff --git a/src/states/NavigatingState.cpp b/src/states/NavigatingState.cpp index d3d9bbe87..e5f5c732e 100644 --- a/src/states/NavigatingState.cpp +++ b/src/states/NavigatingState.cpp @@ -221,8 +221,7 @@ namespace statemachine // Get a list of the currently detected tags, and their stats. std::vector vDetectedArucoTags; std::vector vDetectedTensorflowTags; - tagdetectutils::LoadDetectedArucoTags(vDetectedArucoTags, m_vTagDetectors, false); - tagdetectutils::LoadDetectedTensorflowTags(vDetectedTensorflowTags, m_vTagDetectors); + tagdetectutils::LoadDetectedTags(vDetectedArucoTags, vDetectedTensorflowTags, m_vTagDetectors, false); // Check if we have detected any tags. 
if (vDetectedArucoTags.size() || vDetectedTensorflowTags.size()) diff --git a/src/states/SearchPatternState.cpp b/src/states/SearchPatternState.cpp index 5b879d52a..7608ea3a8 100644 --- a/src/states/SearchPatternState.cpp +++ b/src/states/SearchPatternState.cpp @@ -36,8 +36,6 @@ namespace statemachine LOG_INFO(logging::g_qSharedLogger, "SearchPatternState: Scheduling next run of state logic."); // Initialize member variables. - m_nMaxDataPoints = 100; - m_vRoverPosition.reserve(m_nMaxDataPoints); m_eCurrentSearchPatternType = eSpiral; m_nSearchPathIdx = 0; @@ -48,7 +46,7 @@ namespace statemachine m_stSearchPatternCenter = globals::g_pWaypointHandler->PeekNextWaypoint().GetGPSCoordinate(); m_vSearchPath = searchpattern::CalculateSpiralPatternWaypoints(m_stSearchPatternCenter, constants::SEARCH_ANGULAR_STEP_DEGREES, - constants::SEARCH_MAX_RADIUS * 2, + constants::SEARCH_MAX_RADIUS, stCurrentRoverPose.GetCompassHeading(), constants::SEARCH_SPIRAL_SPACING); @@ -124,16 +122,6 @@ namespace statemachine // Get the current rover pose. geoops::RoverPose stCurrentRoverPose = globals::g_pWaypointHandler->SmartRetrieveRoverPose(); - ////////////////////////// - /* --- Log Position --- */ - ////////////////////////// - - if (m_vRoverPosition.size() == m_nMaxDataPoints) - { - m_vRoverPosition.erase(m_vRoverPosition.begin()); - } - m_vRoverPosition.emplace_back(stCurrentRoverPose.GetUTMCoordinate().dEasting, stCurrentRoverPose.GetUTMCoordinate().dNorthing); - /* The overall flow of this state is as follows. 1. Is there a tag -> MarkerSeen @@ -151,8 +139,7 @@ namespace statemachine // Get a list of the currently detected tags, and their stats. std::vector vDetectedArucoTags; std::vector vDetectedTensorflowTags; - tagdetectutils::LoadDetectedArucoTags(vDetectedArucoTags, m_vTagDetectors, false); - tagdetectutils::LoadDetectedTensorflowTags(vDetectedTensorflowTags, m_vTagDetectors); + tagdetectutils::LoadDetectedTags(vDetectedArucoTags, vDetectedTensorflowTags, m_vTagDetectors, false); // Check if we have detected any tags. if (vDetectedArucoTags.size() || vDetectedTensorflowTags.size()) diff --git a/src/states/SearchPatternState.h b/src/states/SearchPatternState.h index 867902522..ae1a0b00b 100644 --- a/src/states/SearchPatternState.h +++ b/src/states/SearchPatternState.h @@ -59,8 +59,6 @@ namespace statemachine std::vector m_vSearchPath; int m_nSearchPathIdx; SearchPatternType m_eCurrentSearchPatternType; - std::vector> m_vRoverPosition; - size_t m_nMaxDataPoints; statemachine::TimeIntervalBasedStuckDetector m_StuckDetector; protected: diff --git a/src/util/vision/TagDetectionUtilty.hpp b/src/util/vision/TagDetectionUtilty.hpp index f05627e0c..b17c99800 100644 --- a/src/util/vision/TagDetectionUtilty.hpp +++ b/src/util/vision/TagDetectionUtilty.hpp @@ -35,29 +35,33 @@ namespace tagdetectutils { /****************************************************************************** - * @brief Aggregates all detected tags from each provided tag detector for OpenCV detection. + * @brief Aggregates all detected tags from each provided tag detector for both OpenCV and Tensorflow detection. * * @note When using bUnique, if you wish to prioritize one tag detector's detections over another put that tag detector earlier in the vTagDetectors. * - * @param vDetectedTags - Reference vector that will hold all of the aggregated detected tags. + * @param vDetectedArucoTags - Reference vector that will hold all of the aggregated detected Aruco tags. 
+ * @param vDetectedTensorflowTags - Reference vector that will hold all of the aggregated detected Tensorflow tags. * @param vTagDetectors - Vector of pointers to tag detectors that will be used to request their detected tags. - * @param bUnique - Ensure vDetectedTags is a unique list of tags (unique by ID). + * @param bUnique - Ensure vDetectedArucoTags is a unique list of tags (unique by ID). * * @author JSpencerPittman (jspencerpittman@gmail.com) * @date 2024-03-07 ******************************************************************************/ - inline void LoadDetectedArucoTags(std::vector& vDetectedTags, const std::vector& vTagDetectors, bool bUnique = false) + inline void LoadDetectedTags(std::vector& vDetectedArucoTags, + std::vector& vDetectedTensorflowTags, + const std::vector& vTagDetectors, + bool bUnique = false) { // Number of tag detectors. size_t siNumTagDetectors = vTagDetectors.size(); // Initialize vectors to store detected tags temporarily. - // Using pointers the interference between vectors being updated across different threads should be minimal. - std::vector> vDetectedTagBuffers; - vDetectedTagBuffers.resize(siNumTagDetectors); + std::vector> vDetectedArucoTagBuffers(siNumTagDetectors); + std::vector> vDetectedTensorflowTagBuffers(siNumTagDetectors); - // Initialize vectors to stored detected tags - std::vector> vDetectedTagsFuture; + // Initialize vectors to store detected tags futures. + std::vector> vDetectedArucoTagsFuture; + std::vector> vDetectedTensorflowTagsFuture; // Request tags from each detector. for (size_t siIdx = 0; siIdx < siNumTagDetectors; ++siIdx) @@ -65,112 +69,60 @@ namespace tagdetectutils // Check if this tag detector is ready. if (vTagDetectors[siIdx]->GetIsReady()) { - // Request detected tags from detector. - vDetectedTagsFuture.emplace_back(vTagDetectors[siIdx]->RequestDetectedArucoTags(vDetectedTagBuffers[siIdx])); + // Request detected Aruco tags from detector. + vDetectedArucoTagsFuture.emplace_back(vTagDetectors[siIdx]->RequestDetectedArucoTags(vDetectedArucoTagBuffers[siIdx])); + // Request detected Tensorflow tags from detector. + vDetectedTensorflowTagsFuture.emplace_back(vTagDetectors[siIdx]->RequestDetectedTensorflowTags(vDetectedTensorflowTagBuffers[siIdx])); } } // Ensure all requests have been fulfilled. - // Then transfer tags from the buffer to vDetectedTags for the user to access. - for (size_t siIdx = 0; siIdx < vDetectedTagsFuture.size(); ++siIdx) + // Then transfer tags from the buffer to vDetectedArucoTags and vDetectedTensorflowTags for the user to access. + for (size_t siIdx = 0; siIdx < vDetectedArucoTagsFuture.size(); ++siIdx) { - vDetectedTagsFuture[siIdx].get(); - vDetectedTags.insert(vDetectedTags.end(), vDetectedTagBuffers[siIdx].begin(), vDetectedTagBuffers[siIdx].end()); + // Wait for the request to be fulfilled. + vDetectedArucoTagsFuture[siIdx].get(); + vDetectedTensorflowTagsFuture[siIdx].get(); + + // Loop through the detected Aruco tags and add them to the vDetectedArucoTags vector. + for (const arucotag::ArucoTag& tTag : vDetectedArucoTagBuffers[siIdx]) + { + vDetectedArucoTags.emplace_back(tTag); + } + + // Loop through the detected Tensorflow tags and add them to the vDetectedTensorflowTags vector. + for (const tensorflowtag::TensorflowTag& tTag : vDetectedTensorflowTagBuffers[siIdx]) + { + vDetectedTensorflowTags.emplace_back(tTag); + } } if (bUnique) { - // Remove all tags with a duplicate ID. - // This is done in ascending order. 
This means that if a user wishes to prioritize tags detected from - // specific tag detectors they should be first in the vector. + // Remove all Aruco tags with a duplicate ID. std::set setIds; size_t szIdx = 0; - while (szIdx < vDetectedTags.size()) + while (szIdx < vDetectedArucoTags.size()) { // Tag was detected by another tag detector. - if (setIds.count(vDetectedTags[szIdx].nID)) + if (setIds.count(vDetectedArucoTags[szIdx].nID)) { - vDetectedTags.erase(vDetectedTags.begin() + szIdx); + vDetectedArucoTags.erase(vDetectedArucoTags.begin() + szIdx); } else { + setIds.insert(vDetectedArucoTags[szIdx].nID); ++szIdx; } } } } - /****************************************************************************** - * @brief Aggregates all detected tags from each provided tag detector for Tensorflow detection. - * - * @note When using bUnique, if you wish to prioritize one tag detector's detections over another put that tag detector earlier in the vTagDetectors. - * - * @param vDetectedTags - Reference vector that will hold all of the aggregated detected tags. - * @param vTagDetectors - Vector of pointers to tag detectors that will be used to request their detected tags. - * - * @author JSpencerPittman (jspencerpittman@gmail.com) - * @date 2024-03-07 - ******************************************************************************/ - inline void LoadDetectedTensorflowTags(std::vector& vDetectedTags, const std::vector& vTagDetectors) - { - // Number of tag detectors. - size_t siNumTagDetectors = vTagDetectors.size(); - - // Initialize vectors to store detected tags temporarily. - // Using pointers the interference between vectors being updated across different threads should be minimal. - std::vector> vDetectedTagBuffers; - vDetectedTagBuffers.resize(siNumTagDetectors); - - // Initialize vectors to stored detected tags - std::vector> vDetectedTagsFuture; - - // Request tags from each detector. - for (size_t siIdx = 0; siIdx < siNumTagDetectors; ++siIdx) - { - // Check if this tag detector is ready. - if (vTagDetectors[siIdx]->GetIsReady()) - { - // Request detected tags from detector. - vDetectedTagsFuture.emplace_back(vTagDetectors[siIdx]->RequestDetectedTensorflowTags(vDetectedTagBuffers[siIdx])); - } - } - - // Ensure all requests have been fulfilled. - // Then transfer tags from the buffer to vDetectedTags for the user to access. - for (size_t siIdx = 0; siIdx < vDetectedTagsFuture.size(); ++siIdx) - { - vDetectedTagsFuture[siIdx].get(); - vDetectedTags.insert(vDetectedTags.end(), vDetectedTagBuffers[siIdx].begin(), vDetectedTagBuffers[siIdx].end()); - } - - // LEAD: Commented out since TensorflowTag no longer has ID. - // if (bUnique) - // { - // // Remove all tags with a duplicate ID. - // // This is done in ascending order. This means that if a user wishes to prioritize tags detected from - // // specific tag detectors they should be first in the vector. - // std::set setIds; - // size_t szIdx = 0; - // while (szIdx < vDetectedTags.size()) - // { - // // Tag was detected by another tag detector. - // if (setIds.count(vDetectedTags[szIdx].nID)) - // { - // vDetectedTags.erase(vDetectedTags.begin() + szIdx); - // } - // else - // { - // ++szIdx; - // } - // } - // } - } - /****************************************************************************** * @brief Find a tag in the rover's vision with the specified ID, using OpenCV detection. * * @param nID - The ID of the tag being looked for. - * @param tIdentifiedTag - Reference to save the identified tag. 
+ * @param stIdentifiedArucoTag - Reference to save the identified tag. * @param vTagDetectors - Vector of pointers to tag detectors that will be used to request their detected tags. * @return true - The tag was found. * @return false - The tag was not found. @@ -178,19 +130,20 @@ namespace tagdetectutils * @author JSpencerPittman (jspencerpittman@gmail.com) * @date 2024-03-08 ******************************************************************************/ - inline bool FindArucoTagByID(int nID, arucotag::ArucoTag& stIdentifiedTag, const std::vector& vTagDetectors) + inline bool FindArucoTagByID(int nID, arucotag::ArucoTag& stIdentifiedArucoTag, const std::vector& vTagDetectors) { // Load all detected tags in the rover's vision. - std::vector vDetectedTags; - LoadDetectedArucoTags(vDetectedTags, vTagDetectors, true); + std::vector vDetectedArucoTags; + std::vector vDetectedTensorflowTags; + LoadDetectedTags(vDetectedArucoTags, vDetectedTensorflowTags, vTagDetectors, true); // Find the tag with the corresponding id. - for (const arucotag::ArucoTag& tTag : vDetectedTags) + for (const arucotag::ArucoTag& tTag : vDetectedArucoTags) { // Is this the tag being searched for. if (tTag.nID == nID) { - stIdentifiedTag = tTag; + stIdentifiedArucoTag = tTag; return true; } } diff --git a/src/vision/aruco/TagDetector.cpp b/src/vision/aruco/TagDetector.cpp index 5c8e80e0b..7f517b7ac 100644 --- a/src/vision/aruco/TagDetector.cpp +++ b/src/vision/aruco/TagDetector.cpp @@ -263,7 +263,7 @@ void TagDetector::ThreadedContinuousCode() } else if (!m_cvFrame.empty() && m_cvFrame.channels() > 3) { - // Drop the Alpha channel from the image copy to preproc frame. + // Drop the Alpha channel from the image. This is necessary for the Aruco detection. cv::cvtColor(m_cvFrame, m_cvFrame, cv::COLOR_BGRA2RGB); } } @@ -342,7 +342,7 @@ void TagDetector::ThreadedContinuousCode() // Check if the detected tag copy queue is empty. if (!m_qDetectedArucoTagCopySchedule.empty() || !m_qDetectedTensorflowTagCopySchedule.empty() || !m_qDetectedTagDrawnOverlayFrames.empty()) { - size_t siQueueLength = std::max({m_qDetectedArucoTagCopySchedule.size(), m_qDetectedTensorflowTagCopySchedule.size(), m_qDetectedTagDrawnOverlayFrames.size()}); + size_t siQueueLength = m_qDetectedArucoTagCopySchedule.size() + m_qDetectedTensorflowTagCopySchedule.size() + m_qDetectedTagDrawnOverlayFrames.size(); // Start the thread pool to store multiple copies of the detected tags to the requesting threads this->RunDetachedPool(siQueueLength, m_nNumDetectedTagsRetrievalThreads); // Wait for thread pool to finish. diff --git a/src/vision/cameras/BasicCam.cpp b/src/vision/cameras/BasicCam.cpp index 705af0e6a..e95599f69 100644 --- a/src/vision/cameras/BasicCam.cpp +++ b/src/vision/cameras/BasicCam.cpp @@ -47,19 +47,24 @@ BasicCam::BasicCam(const std::string szCameraPath, bEnableRecordingFlag, nNumFrameRetrievalThreads) { - // Set flag specifying that the camera is located at a dev/video index. - m_bCameraIsConnectedOnVideoIndex = false; + // Initialize the OpenCV mat to a black/empty image the size of the camera resolution. + m_cvFrame = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_8UC4); + + // Set video cap properties. + m_cvCamera.set(cv::CAP_PROP_FRAME_WIDTH, nPropResolutionX); + m_cvCamera.set(cv::CAP_PROP_FRAME_HEIGHT, nPropResolutionY); + m_cvCamera.set(cv::CAP_PROP_FPS, nPropFramesPerSecond); // Attempt to open camera with OpenCV's VideoCapture and print if successfully opened or not. 
if (m_cvCamera.open(szCameraPath)) { // Submit logger message. - LOG_INFO(logging::g_qSharedLogger, "Camera {} at path/URL {} has been successfully opened.", m_cvCamera.getBackendName(), m_szCameraPath); + LOG_INFO(logging::g_qSharedLogger, "Camera {} at path/URL {} has been successfully opened.", m_cvCamera.getBackendName(), szCameraPath); } else { // Submit logger message. - LOG_ERROR(logging::g_qSharedLogger, "Unable to open camera at path/URL {}", m_szCameraPath); + LOG_ERROR(logging::g_qSharedLogger, "Unable to open camera at path/URL {}", szCameraPath); } // Set max FPS of the ThreadedContinuousCode method. @@ -105,9 +110,6 @@ BasicCam::BasicCam(const int nCameraIndex, // Initialize the OpenCV mat to a black/empty image the size of the camera resolution. m_cvFrame = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_8UC4); - // Set flag specifying that the camera is located at a dev/video index. - m_bCameraIsConnectedOnVideoIndex = true; - // Set video cap properties. m_cvCamera.set(cv::CAP_PROP_FRAME_WIDTH, nPropResolutionX); m_cvCamera.set(cv::CAP_PROP_FRAME_HEIGHT, nPropResolutionY); @@ -147,8 +149,17 @@ BasicCam::~BasicCam() // Release camera capture object. m_cvCamera.release(); - // Submit logger message. - LOG_INFO(logging::g_qSharedLogger, "Basic camera at video index {} has been successfully closed.", m_nCameraIndex); + // Check if camera was connected on a video index. + if (m_bCameraIsConnectedOnVideoIndex) + { + // Submit logger message. + LOG_INFO(logging::g_qSharedLogger, "Basic camera at video index {} has been successfully closed.", m_nCameraIndex); + } + else + { + // Submit logger message. + LOG_INFO(logging::g_qSharedLogger, "Basic camera at path/URL {} has been successfully closed.", m_szCameraPath); + } } /****************************************************************************** diff --git a/src/vision/cameras/BasicCam.h b/src/vision/cameras/BasicCam.h index 36f8c6128..06d9bf98b 100644 --- a/src/vision/cameras/BasicCam.h +++ b/src/vision/cameras/BasicCam.h @@ -62,7 +62,7 @@ class BasicCam : public BasicCamera ///////////////////////////////////////// bool GetCameraIsOpen() override; - std::string GetCameraLocation() const; + std::string GetCameraLocation() const override; private: ///////////////////////////////////////// @@ -70,10 +70,6 @@ class BasicCam : public BasicCamera ///////////////////////////////////////// // Basic Camera specific. cv::VideoCapture m_cvCamera; - std::string m_szCameraPath; - bool m_bCameraIsConnectedOnVideoIndex; - int m_nCameraIndex; - int m_nNumFrameRetrievalThreads; // Mats for storing frames. cv::Mat m_cvFrame; diff --git a/src/vision/cameras/sim/SIMBasicCam.cpp b/src/vision/cameras/sim/SIMBasicCam.cpp index be9b40c59..5d56a74c2 100644 --- a/src/vision/cameras/sim/SIMBasicCam.cpp +++ b/src/vision/cameras/sim/SIMBasicCam.cpp @@ -48,9 +48,6 @@ SIMBasicCam::SIMBasicCam(const std::string szCameraPath, bEnableRecordingFlag, nNumFrameRetrievalThreads) { - // Set flag specifying that the camera is located at a dev/video index. - m_bCameraIsConnectedOnVideoIndex = false; - // Initialize OpenCV mats to a black/empty image the size of the camera resolution. m_cvFrame = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_8UC4); @@ -58,12 +55,12 @@ SIMBasicCam::SIMBasicCam(const std::string szCameraPath, if (m_cvCamera.open(szCameraPath)) { // Submit logger message. 
- LOG_DEBUG(logging::g_qSharedLogger, "SIMCamera {} at path/URL {} has been successfully opened.", m_cvCamera.getBackendName(), m_szCameraPath); + LOG_DEBUG(logging::g_qSharedLogger, "SIMCamera {} at path/URL {} has been successfully opened.", m_cvCamera.getBackendName(), szCameraPath); } else { // Submit logger message. - LOG_ERROR(logging::g_qSharedLogger, "Unable to open SIMCamera at path/URL {}", m_szCameraPath); + LOG_ERROR(logging::g_qSharedLogger, "Unable to open SIMCamera at path/URL {}", szCameraPath); } // Set max FPS of the ThreadedContinuousCode method. diff --git a/src/vision/cameras/sim/SIMBasicCam.h b/src/vision/cameras/sim/SIMBasicCam.h index 9c514dea7..5e33dffbf 100644 --- a/src/vision/cameras/sim/SIMBasicCam.h +++ b/src/vision/cameras/sim/SIMBasicCam.h @@ -51,8 +51,8 @@ class SIMBasicCam : public BasicCamera // Getters. ///////////////////////////////////////// - std::string GetCameraLocation() const; bool GetCameraIsOpen() override; + std::string GetCameraLocation() const override; private: ///////////////////////////////////////// @@ -61,10 +61,6 @@ class SIMBasicCam : public BasicCamera // Basic Camera specific. cv::VideoCapture m_cvCamera; - std::string m_szCameraPath; - bool m_bCameraIsConnectedOnVideoIndex; - int m_nCameraIndex; - int m_nNumFrameRetrievalThreads; // Mats for storing frames. diff --git a/src/vision/cameras/sim/SIMZEDCam.cpp b/src/vision/cameras/sim/SIMZEDCam.cpp index 314a7fbbf..517b633b7 100644 --- a/src/vision/cameras/sim/SIMZEDCam.cpp +++ b/src/vision/cameras/sim/SIMZEDCam.cpp @@ -17,6 +17,7 @@ /// \cond #include "../../../util/NumberOperations.hpp" #include +#include /// \endcond @@ -65,14 +66,14 @@ SIMZEDCam::SIMZEDCam(const std::string szCameraPath, // Initialize OpenCV mats to a black/empty image the size of the camera resolution. m_cvFrame = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_8UC4); m_cvDepthImage = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_8UC3); - m_cvDepthMeasure = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_16UC1); + m_cvDepthMeasure = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_32FC1); m_cvDepthBuffer = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_8UC3); m_cvPointCloud = cv::Mat::zeros(nPropResolutionY, nPropResolutionX, CV_32FC4); // Construct camera stream objects. Append proper camera path arguments to each URL camera path. - m_pRGBStream = std::make_unique(szCameraPath, "ZEDFrontRGB"); - // m_pDepthImageStream = std::make_unique(szCameraPath, "ZEDFrontDepthImage"); - // m_pDepthMeasureStream = std::make_unique(szCameraPath, "ZEDFrontDepthMeasure"); + m_pRGBStream = std::make_unique(szCameraPath, "ZEDFrontRGB"); + m_pDepthImageStream = std::make_unique(szCameraPath, "ZEDFrontDepthImage"); + m_pDepthMeasureStream = std::make_unique(szCameraPath, "ZEDFrontDepthMeasure"); // Set callbacks for the WebRTC connections. this->SetCallbacks(); @@ -118,22 +119,138 @@ void SIMZEDCam::SetCallbacks() // Deep copy the frame. m_cvFrame = cvFrame.clone(); }); - // m_pDepthImageStream->SetOnFrameReceivedCallback( - // [this](cv::Mat& cvFrame) - // { - // // Acquire a lock on the webRTC copy mutex. - // std::unique_lock lkWebRTC(m_muWebRTCDepthImageCopyMutex); - // // Deep copy the frame. - // m_cvDepthImage = cvFrame.clone(); - // }); - // m_pDepthMeasureStream->SetOnFrameReceivedCallback( - // [this](cv::Mat& cvFrame) - // { - // // Acquire a lock on the webRTC copy mutex. - // std::unique_lock lkWebRTC(m_muWebRTCDepthMeasureCopyMutex); - // // Deep copy the frame. 
- // m_cvDepthBuffer = cvFrame.clone(); - // }); + m_pDepthImageStream->SetOnFrameReceivedCallback( + [this](cv::Mat& cvFrame) + { + // Acquire a lock on the webRTC copy mutex. + std::unique_lock lkWebRTC(m_muWebRTCDepthImageCopyMutex); + // Deep copy the frame. + m_cvDepthImage = cvFrame.clone(); + }); + m_pDepthMeasureStream->SetOnFrameReceivedCallback( + [this](cv::Mat& cvFrame) + { + // Acquire a lock on the webRTC copy mutex. + std::unique_lock lkWebRTC(m_muWebRTCDepthMeasureCopyMutex); + // Deep copy the frame. + m_cvDepthBuffer = cvFrame.clone(); + }); +} + +/****************************************************************************** + * @brief This method decodes the encoded depth measure data from the simulator. + * We receive the depth measure from an H264 stream so it has to be encoded + * and packed in a special way to ensure things like compression and + * transmission are efficient. + * + * @param cvDepthBuffer - The encoded depth buffer. + * @param cvDepthMeasure - The decoded depth measure that will be written to. + * + * @note http://reality.cs.ucl.ac.uk/projects/depth-streaming/depth-streaming.pdf + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-04 + ******************************************************************************/ +void SIMZEDCam::DecodeDepthMeasure(const cv::Mat& cvDepthBuffer, cv::Mat& cvDepthMeasure) +{ + // Declare instance variables. + float fW = 65536.0; + float fNP = 512.0; + +// TEST: Even though this speeds up the code, it might be too much CPU work as the codebase grows. Use a GpuMat instead. +// This is a parallel for loop that decodes the depth measure from the encoded depth buffer. +#pragma omp parallel for collapse(2) + + // Iterate over each pixel in the cvDepthMeasure image + for (int nY = 0; nY < cvDepthBuffer.rows; ++nY) + { + for (int nX = 0; nX < cvDepthBuffer.cols; ++nX) + { + // Extract the encoded depth values + cv::Vec3b cvEncodedDepth = cvDepthBuffer.at(nY, nX); + + // Extract encoded values + float fL = cvEncodedDepth[2] / 255.0; + float fHa = cvEncodedDepth[1] / 255.0; + float fHb = cvEncodedDepth[0] / 255.0; + + // Period for triangle waves + float fP = fNP / fW; + + // Determine offset and fine-grain correction + int fM = fmod((4.0 * (fL / fP)) - 0.5, 4.0); + float fL0 = fL - fmod(fL - (fP / 8.0), fP) + ((fP / 4.0) * fM) - (fP / 8.0); + + float fDelta = 0.0f; + if (fM == 0) + fDelta = (fP / 2.0) * fHa; + else if (fM == 1) + fDelta = (fP / 2.0) * fHb; + else if (fM == 2) + fDelta = (fP / 2.0) * (1.0 - fHa); + else if (fM == 3) + fDelta = (fP / 2.0) * (1.0 - fHb); + + // Combine to compute the original depth + float fDepth = fW * (fL0 + fDelta); + + // Check if the depth is within the bounds of the depth image + if (fDepth < 0.0) + fDepth = 0.0; + else if (fDepth > 65535.0) + fDepth = 65535.0; + + // Check if nY and nX are within the bounds of the depth image + if (nY < cvDepthMeasure.rows && nX < cvDepthMeasure.cols) + { + // Store the decoded depth in the new cv::Mat. Convert cm to m. + cvDepthMeasure.at(nY, nX) = static_cast(fDepth / 100.0); + } + } + } +} + +/****************************************************************************** + * @brief This method calculates a point cloud from the decoded depth measure + * use some simple trig and the camera FOV. + * + * @param cvDepthMeasure - The decoded depth measure. + * @param cvPointCloud - The point cloud that will be written to. 
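For reference, DecodeDepthMeasure above inverts the depth-streaming packing cited in its docstring: the simulator spreads a 16-bit depth value across the three 8-bit channels as a coarse ramp (L) plus two phase-shifted triangle waves (Ha, Hb), and the loop reconstructs the depth per pixel from those three values. A self-contained per-pixel sketch of the same math, handy for checking the decode without a live WebRTC stream, follows; the helper name is illustrative, and the constants mirror the function above (depth is packed in centimeters and converted to meters, per the in-code comment):

// Standalone per-pixel version of the decode above, for reference/testing only.
// Inputs are the 8-bit B, G, R channel values of one encoded pixel; output is depth in meters.
#include <cmath>
#include <cstdint>

inline float DecodeDepthPixel(uint8_t ucB, uint8_t ucG, uint8_t ucR)
{
    const float fW  = 65536.0f;    // Full 16-bit depth range (centimeters).
    const float fNP = 512.0f;      // Triangle wave period in depth units.
    const float fP  = fNP / fW;    // Period normalized to [0, 1].

    float fL  = ucR / 255.0f;      // Coarse (low frequency) ramp.
    float fHa = ucG / 255.0f;      // Triangle wave A.
    float fHb = ucB / 255.0f;      // Triangle wave B (phase shifted).

    // Which quarter-period the pixel falls in, then the coarse depth snapped to that quarter.
    int nM    = static_cast<int>(std::fmod((4.0f * (fL / fP)) - 0.5f, 4.0f));
    float fL0 = fL - std::fmod(fL - (fP / 8.0f), fP) + ((fP / 4.0f) * nM) - (fP / 8.0f);

    // Fine-grain correction taken from whichever triangle wave is monotonic in this quarter.
    float fDelta = 0.0f;
    if (nM == 0)      fDelta = (fP / 2.0f) * fHa;
    else if (nM == 1) fDelta = (fP / 2.0f) * fHb;
    else if (nM == 2) fDelta = (fP / 2.0f) * (1.0f - fHa);
    else if (nM == 3) fDelta = (fP / 2.0f) * (1.0f - fHb);

    // Recombine, clamp to the 16-bit range, and convert centimeters to meters.
    float fDepth = fW * (fL0 + fDelta);
    if (fDepth < 0.0f) fDepth = 0.0f;
    if (fDepth > 65535.0f) fDepth = 65535.0f;
    return fDepth / 100.0f;
}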
+ * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-04 + ******************************************************************************/ +void SIMZEDCam::CalculatePointCloud(const cv::Mat& cvDepthMeasure, cv::Mat& cvPointCloud) +{ +// TEST: Even though this speeds up the code, it might be too much CPU work as the codebase grows. Use a GpuMat instead. +// This is a parallel for loop that calculates the point cloud from the decoded depth measure. +#pragma omp parallel for collapse(2) + + // Use the decoded depth measure to create a point cloud. + for (int nY = 0; nY < cvDepthMeasure.rows; ++nY) + { + for (int nX = 0; nX < cvDepthMeasure.cols; ++nX) + { + // Get the depth value. + float fDepth = cvDepthMeasure.at(nY, nX); + + // Get the horizontal and vertical angles. + double dHorizontalAngle = (nX - cvDepthMeasure.cols / 2.0) * m_dPropHorizontalFOV / cvDepthMeasure.cols; + double dVerticalAngle = (nY - cvDepthMeasure.rows / 2.0) * m_dPropVerticalFOV / cvDepthMeasure.rows; + + // Convert angles to radians. + double dHorizontalAngleRad = dHorizontalAngle * M_PI / 180.0; + double dVerticalAngleRad = dVerticalAngle * M_PI / 180.0; + + // Calculate the Cartesian coordinates. + float fX = fDepth * sin(dHorizontalAngleRad); + float fY = fDepth * sin(dVerticalAngleRad); + float fZ = fDepth * cos(dHorizontalAngleRad) * cos(dVerticalAngleRad); + + // Store the decoded depth in the new cv::Mat + cvPointCloud.at(nY, nX) = cv::Vec4f(fX, fY, fZ, 1.0); + } + } } /****************************************************************************** @@ -153,7 +270,7 @@ void SIMZEDCam::ThreadedContinuousCode() // Acquire a lock on the rover pose mutex. std::unique_lock lkRoverPoseLock(m_muCurrentRoverPoseMutex); // Check if the NavBoard pointer is valid. - if (globals::g_pNavigationBoard) + if (globals::g_pNavigationBoard != nullptr) { // Get the current rover pose from the NavBoard. m_stCurrentRoverPose = geoops::RoverPose(globals::g_pNavigationBoard->GetGPSData(), globals::g_pNavigationBoard->GetHeading()); @@ -161,6 +278,20 @@ void SIMZEDCam::ThreadedContinuousCode() // Release lock. lkRoverPoseLock.unlock(); + // Check if the depth measure WebRTC connection is open. + if (m_pDepthMeasureStream != nullptr && m_pDepthMeasureStream->GetIsConnected()) + { + // Acquire a lock on the WebRTC mutex. + std::shared_lock lkWebRTC3(m_muWebRTCDepthMeasureCopyMutex); + // Decode the depth measure. + this->DecodeDepthMeasure(m_cvDepthBuffer, m_cvDepthMeasure); + // Release lock. + lkWebRTC3.unlock(); + + // Calculate the point cloud from the decoded depth measure. + this->CalculatePointCloud(m_cvDepthMeasure, m_cvPointCloud); + } + // Acquire a shared_lock on the frame copy queue. std::shared_lock lkSchedulers(m_muPoolScheduleMutex); // Check if the frame copy queue is empty. @@ -200,6 +331,11 @@ void SIMZEDCam::ThreadedContinuousCode() // Wait for thread pool to finish. this->JoinPool(); + + // Release lock on WebRTC mutex. + lkWebRTC.unlock(); + lkWebRTC2.unlock(); + lkWebRTC3.unlock(); } // Release lock on frame copy queue. @@ -239,66 +375,7 @@ void SIMZEDCam::PooledLinearCode() { case PIXEL_FORMATS::eBGRA: *(stContainer.pFrame) = m_cvFrame.clone(); break; case PIXEL_FORMATS::eDepthImage: *(stContainer.pFrame) = m_cvDepthImage.clone(); break; - case PIXEL_FORMATS::eDepthMeasure: - { - // Copy depth buffer. - - // The Simulator uses this a special method of packing the depth measure data as defined in this paper. 
- // http://reality.cs.ucl.ac.uk/projects/depth-streaming/depth-streaming.pdf - // Here we will decode it. - float fW = 65536.0; - float fNP = 512.0; - - // Iterate over each pixel in the cvDepthMeasure image - for (int nY = 0; nY < m_cvDepthBuffer.rows; ++nY) - { - for (int nX = 0; nX < m_cvDepthBuffer.cols; ++nX) - { - // Extract the encoded depth values - cv::Vec3b cvEncodedDepth = m_cvDepthBuffer.at(nY, nX); - - // Extract encoded values - float fL = cvEncodedDepth[2] / 255.0; - float fHa = cvEncodedDepth[1] / 255.0; - float fHb = cvEncodedDepth[0] / 255.0; - - // Period for triangle waves - float fP = fNP / fW; - - // Determine offset and fine-grain correction - int fM = fmod((4.0 * (fL / fP)) - 0.5, 4.0); - float fL0 = fL - fmod(fL - (fP / 8.0), fP) + ((fP / 4.0) * fM) - (fP / 8.0); - - float fDelta = 0.0f; - if (fM == 0) - fDelta = (fP / 2.0) * fHa; - else if (fM == 1) - fDelta = (fP / 2.0) * fHb; - else if (fM == 2) - fDelta = (fP / 2.0) * (1.0 - fHa); - else if (fM == 3) - fDelta = (fP / 2.0) * (1.0 - fHb); - - // Combine to compute the original depth - float fDepth = fW * (fL0 + fDelta); - - // Check if the depth is within the bounds of the depth image - if (fDepth < 0.0) - fDepth = 0.0; - else if (fDepth > 65535.0) - fDepth = 65535.0; - - // Check if nY and nX are within the bounds of the depth image - if (nY < m_cvDepthMeasure.rows && nX < m_cvDepthMeasure.cols) - { - // Store the decoded depth in the new cv::Mat - m_cvDepthMeasure.at(nY, nX) = static_cast(fDepth); - } - } - } - *(stContainer.pFrame) = m_cvDepthMeasure.clone(); - break; - } + case PIXEL_FORMATS::eDepthMeasure: *(stContainer.pFrame) = m_cvDepthMeasure.clone(); break; case PIXEL_FORMATS::eXYZ: *(stContainer.pFrame) = m_cvPointCloud.clone(); break; default: *(stContainer.pFrame) = m_cvFrame.clone(); break; } @@ -666,7 +743,7 @@ void SIMZEDCam::SetPositionalPose(const double dX, const double dY, const double ******************************************************************************/ bool SIMZEDCam::GetCameraIsOpen() { - return m_pRGBStream->GetIsConnected(); //&& m_pDepthImageStream->GetIsConnected() && m_pDepthMeasureStream->GetIsConnected(); + return m_pRGBStream->GetIsConnected() && m_pDepthImageStream->GetIsConnected() && m_pDepthMeasureStream->GetIsConnected(); } /****************************************************************************** diff --git a/src/vision/cameras/sim/SIMZEDCam.h b/src/vision/cameras/sim/SIMZEDCam.h index c0ec53f3e..563dc8bf2 100644 --- a/src/vision/cameras/sim/SIMZEDCam.h +++ b/src/vision/cameras/sim/SIMZEDCam.h @@ -81,8 +81,9 @@ class SIMZEDCam : public ZEDCamera void ThreadedContinuousCode() override; void PooledLinearCode() override; - void SetCallbacks(); + void DecodeDepthMeasure(const cv::Mat& cvDepthBuffer, cv::Mat& cvDepthMeasure); + void CalculatePointCloud(const cv::Mat& cvDepthMeasure, cv::Mat& cvPointCloud); ///////////////////////////////////////// // Declare private member variables. diff --git a/src/vision/cameras/sim/WebRTC.cpp b/src/vision/cameras/sim/WebRTC.cpp index 96dddd96b..7f0cd7ace 100644 --- a/src/vision/cameras/sim/WebRTC.cpp +++ b/src/vision/cameras/sim/WebRTC.cpp @@ -11,8 +11,11 @@ #include "WebRTC.h" #include "../../../AutonomyLogging.h" +/// \cond #include +/// \endcond + /****************************************************************************** * @brief Construct a new Web RTC::WebRTC object. 
* @@ -57,14 +60,26 @@ WebRTC::WebRTC(const std::string& szSignallingServerURL, const std::string& szSt ******************************************************************************/ WebRTC::~WebRTC() { - // Close the video track, peer connection, data channel, and websocket. - m_pVideoTrack1->close(); - m_pPeerConnection->close(); - m_pDataChannel->close(); - m_pWebSocket->close(); + // Check if the smart pointers are valid before calling any methods. + if (m_pVideoTrack1) + { + m_pVideoTrack1->close(); + } + if (m_pPeerConnection) + { + m_pPeerConnection->close(); + } + if (m_pDataChannel) + { + m_pDataChannel->close(); + } + if (m_pWebSocket) + { + m_pWebSocket->close(); + } // Wait for all connections to close. - while (!m_pVideoTrack1->isClosed() || !m_pDataChannel->isClosed() || !m_pWebSocket->isClosed()) + while ((m_pVideoTrack1 && !m_pVideoTrack1->isClosed()) || (m_pDataChannel && !m_pDataChannel->isClosed()) || (m_pWebSocket && !m_pWebSocket->isClosed())) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); } @@ -95,6 +110,8 @@ WebRTC::~WebRTC() // Set dangling pointers to nullptr. m_pAVCodecContext = nullptr; + m_pFrame = nullptr; + m_pPacket = nullptr; m_pSWSContext = nullptr; } @@ -126,7 +143,15 @@ void WebRTC::SetOnFrameReceivedCallback(std::function fnOnFrameR ******************************************************************************/ bool WebRTC::GetIsConnected() const { - return m_pWebSocket->isOpen(); + // Check if the datachannel shared pointer is valid. + if (m_pWebSocket != nullptr) + { + // Check if the datachannel is open. + return m_pWebSocket->isOpen(); + } + + // Return false if the datachannel is not valid. + return false; } /****************************************************************************** @@ -218,7 +243,7 @@ bool WebRTC::ConnectToSignallingServer(const std::string& szSignallingServerURL) LOG_DEBUG(logging::g_qSharedLogger, "Received config message from signalling server: {}", jsnMessage.dump()); } // If the message from the server is an offer, set the remote description offer. - if (szType == "offer") + else if (szType == "offer") { // Get the SDP offer and set it as the remote description. std::string sdp = jsnMessage["sdp"]; @@ -371,6 +396,8 @@ bool WebRTC::ConnectToSignallingServer(const std::string& szSignallingServerURL) vH264EncodedBytes.push_back(static_cast(stdByte)); } + // Acquire a mutex lock on the shared_mutex before calling sws_scale. + std::unique_lock lkDecoderLock(m_muDecoderMutex); // Pass to FFmpeg decoder this->DecodeH264BytesToCVMat(vH264EncodedBytes, m_cvFrame, m_eOutputPixelFormat); @@ -380,6 +407,8 @@ bool WebRTC::ConnectToSignallingServer(const std::string& szSignallingServerURL) // Call the user's callback function. m_fnOnFrameReceivedCallback(m_cvFrame); } + // Release the lock on the shared_mutex. + lkDecoderLock.unlock(); } }); }); @@ -575,6 +604,9 @@ bool WebRTC::InitializeH264Decoder() return false; } + // Set the SWSScale context to nullptr. + m_pSWSContext = nullptr; + return true; } @@ -592,9 +624,6 @@ bool WebRTC::InitializeH264Decoder() ******************************************************************************/ bool WebRTC::DecodeH264BytesToCVMat(const std::vector& vH264EncodedBytes, cv::Mat& cvDecodedFrame, const AVPixelFormat eOutputPixelFormat) { - // Acquire the lock to prevent multiple threads from accessing the decoder at the same time. - std::lock_guard lkDecoder(m_muDecoderMutex); - // Initialize packet data. 
m_pPacket->data = const_cast<uint8_t*>(vH264EncodedBytes.data()); m_pPacket->size = static_cast<int>(vH264EncodedBytes.size());
@@ -607,7 +636,11 @@ bool WebRTC::DecodeH264BytesToCVMat(const std::vector<uint8_t>& vH264EncodedByte char aErrorBuffer[AV_ERROR_MAX_STRING_SIZE]; av_strerror(nReturnCode, aErrorBuffer, AV_ERROR_MAX_STRING_SIZE); // Submit logger message. - LOG_WARNING(logging::g_qSharedLogger, "Failed to send packet to decoder! Error code: {} {}", nReturnCode, aErrorBuffer); + LOG_NOTICE(logging::g_qSharedLogger, + "Failed to send packet to decoder! Error code: {} {}. This is usually not a problem; it typically just means some of the RTP packets " + "were lost in transit.", + nReturnCode, + aErrorBuffer); // Request a new keyframe from the video track. this->RequestKeyFrame();
@@ -658,7 +691,7 @@ bool WebRTC::DecodeH264BytesToCVMat(const std::vector<uint8_t>& vH264EncodedByte else { // Convert the decoded frame to cv::Mat using sws_scale. - if (!m_pSWSContext) + if (m_pSWSContext == nullptr) { m_pSWSContext = sws_getContext(m_pFrame->width, m_pFrame->height,
@@ -670,7 +703,7 @@ bool WebRTC::DecodeH264BytesToCVMat(const std::vector<uint8_t>& vH264EncodedByte nullptr, nullptr, nullptr); - if (!m_pSWSContext) + if (m_pSWSContext == nullptr) { // Submit logger message. LOG_WARNING(logging::g_qSharedLogger, "Failed to initialize SwsContext!");
@@ -679,13 +712,24 @@ bool WebRTC::DecodeH264BytesToCVMat(const std::vector<uint8_t>& vH264EncodedByte return false; } + + // Allocate a buffer for the frame's data. + int nRetCode = av_image_alloc(m_pFrame->data, m_pFrame->linesize, m_pFrame->width, m_pFrame->height, static_cast<AVPixelFormat>(m_pFrame->format), 32); + if (nRetCode < 0) + { + // Submit logger message. + LOG_WARNING(logging::g_qSharedLogger, "Failed to allocate image buffer!"); + return false; + } } // Create new mat for the decoded frame. cvDecodedFrame.create(m_pFrame->height, m_pFrame->width, CV_8UC3); - uint8_t* aDest[4] = {cvDecodedFrame.data, nullptr, nullptr, nullptr}; - int aDestLinesize[4] = {static_cast<int>(cvDecodedFrame.step[0]), 0, 0, 0}; - sws_scale(m_pSWSContext, m_pFrame->data, m_pFrame->linesize, 0, m_pAVCodecContext->height, aDest, aDestLinesize); + std::array<uint8_t*, 4> aDest = {cvDecodedFrame.data, nullptr, nullptr, nullptr}; + std::array<int, 4> aDestLinesize = {static_cast<int>(cvDecodedFrame.step[0]), 0, 0, 0}; + + // Convert the frame to the output pixel format. + sws_scale(m_pSWSContext, m_pFrame->data, m_pFrame->linesize, 0, m_pFrame->height, aDest.data(), aDestLinesize.data()); } // Calculate the time since the last key frame request.
@@ -697,7 +741,7 @@ bool WebRTC::DecodeH264BytesToCVMat(const std::vector<uint8_t>& vH264EncodedByte // this->RequestKeyFrame(); // Set the encoder's max QP from constants::SIM_WEBRTC_QP (lower values mean higher quality). - this->SendCommandToStreamer(R"({"Encoder.MaxQP":15})"); + this->SendCommandToStreamer("{\"Encoder.MaxQP\":" + std::to_string(constants::SIM_WEBRTC_QP) + "}"); // Set the bitrate limits. this->SendCommandToStreamer(R"({"WebRTC.MinBitrate":99999})"); this->SendCommandToStreamer(R"({"WebRTC.MaxBitrate":99999999})");
diff --git a/src/vision/cameras/sim/WebRTC.h b/src/vision/cameras/sim/WebRTC.h index 8006f4e49..a2686fa16 100644 --- a/src/vision/cameras/sim/WebRTC.h +++ b/src/vision/cameras/sim/WebRTC.h
@@ -15,6 +15,7 @@ #include #include #include +#include <shared_mutex> extern "C" {
@@ -85,7 +86,7 @@ class WebRTC AVPacket* m_pPacket; SwsContext* m_pSWSContext; AVPixelFormat m_eOutputPixelFormat; - std::mutex m_muDecoderMutex; + std::shared_mutex m_muDecoderMutex; // OpenCV Mat for storing the frame.
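// Illustrative sketch, not part of this patch: the sws_scale call above writes directly into a cv::Mat's
// buffer through std::array destination pointers. A minimal standalone version of that conversion,
// assuming a decoded AVFrame and a BGR24 output (the function name is hypothetical):
#include <array>
#include <cstdint>
#include <opencv2/core.hpp>
extern "C"
{
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
}

cv::Mat AVFrameToBGRMatSketch(const AVFrame* pFrame)
{
    // Build a conversion context from the frame's native pixel format to BGR24.
    SwsContext* pSWSContext = sws_getContext(pFrame->width,
                                             pFrame->height,
                                             static_cast<AVPixelFormat>(pFrame->format),
                                             pFrame->width,
                                             pFrame->height,
                                             AV_PIX_FMT_BGR24,
                                             SWS_BILINEAR,
                                             nullptr,
                                             nullptr,
                                             nullptr);
    if (pSWSContext == nullptr)
    {
        return cv::Mat();
    }

    // Let OpenCV own the destination buffer; sws_scale writes the converted rows straight into it.
    cv::Mat cvDecodedFrame(pFrame->height, pFrame->width, CV_8UC3);
    std::array<uint8_t*, 4> aDest         = {cvDecodedFrame.data, nullptr, nullptr, nullptr};
    std::array<int, 4>      aDestLinesize = {static_cast<int>(cvDecodedFrame.step[0]), 0, 0, 0};
    sws_scale(pSWSContext, pFrame->data, pFrame->linesize, 0, pFrame->height, aDest.data(), aDestLinesize.data());

    // The sketch rebuilds and frees the context per call; the patch instead caches it in m_pSWSContext.
    sws_freeContext(pSWSContext);
    return cvDecodedFrame;
}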
cv::Mat m_cvFrame; diff --git a/tests/Unit/src/aruco/TagDetectionOpenCV.cc b/tests/Unit/src/vision/aruco/TagDetectionOpenCV.cc similarity index 97% rename from tests/Unit/src/aruco/TagDetectionOpenCV.cc rename to tests/Unit/src/vision/aruco/TagDetectionOpenCV.cc index c08def527..fd17e7210 100644 --- a/tests/Unit/src/aruco/TagDetectionOpenCV.cc +++ b/tests/Unit/src/vision/aruco/TagDetectionOpenCV.cc @@ -8,7 +8,7 @@ * @copyright Copyright MRDT 2023 - All Rights Reserved ******************************************************************************/ -#include "../../../../src/vision/aruco/ArucoDetection.hpp" +#include "../../../../../src/vision/aruco/ArucoDetection.hpp" /// \cond #include @@ -79,7 +79,7 @@ TEST(TagDetectOpenCVTest, SingleCleanTagDetect) cv::aruco::ArucoDetector cvDetector(cvDictionary); // Load the image containing the sample ArUco tag - cv::Mat cvTestImageMat = LoadImageFromRelativePath("../../../../data/Tests/aruco/cleanArucoMarker0.png"); + cv::Mat cvTestImageMat = LoadImageFromRelativePath("../../../../../data/Tests/aruco/cleanArucoMarker0.png"); // Detect tags in the image std::vector vDetectedTags; @@ -122,7 +122,7 @@ TEST(TagDetectOpenCVTest, MultiCleanTagDetect) cv::aruco::ArucoDetector cvDetector(cvDictionary); // Load the image containing the sample ArUco tags - cv::Mat cvTestImageMat = LoadImageFromRelativePath("../../../../data/Tests/aruco/cleanArucoMarkersMultiple.png"); + cv::Mat cvTestImageMat = LoadImageFromRelativePath("../../../../../data/Tests/aruco/cleanArucoMarkersMultiple.png"); // Detect tags in the image std::vector vecDetectedTags; diff --git a/tests/Unit/src/vision/cameras/BasicCam.cc b/tests/Unit/src/vision/cameras/BasicCam.cc new file mode 100644 index 000000000..47800afc9 --- /dev/null +++ b/tests/Unit/src/vision/cameras/BasicCam.cc @@ -0,0 +1,98 @@ +/****************************************************************************** + * @brief BasicCam unit tests. + * + * @file BasicCam.cc + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + * + * @copyright Copyright Mars Rover Design Team 2025 - All Rights Reserved + ******************************************************************************/ + +#include "../../../../../src/vision/cameras/BasicCam.h" + +/// \cond +#include +#include + +/// \endcond + +/****************************************************************************** + * @brief Test the functionality of the BasicCam constructor and destructor. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(BasicCamTest, ConstructorDestructor) +{ + // Create a BasicCam object. + BasicCam basicCam("/dev/video0", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + + // Check initial camera open status. + EXPECT_FALSE(basicCam.GetCameraIsOpen()); + + // Destructor will be called automatically when the object goes out of scope. +} + +/****************************************************************************** + * @brief Check that BasicCam doesn't leak any memory. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(BasicCamTest, DoesNotLeak) +{ + // Create a new BasicCam object. + BasicCam* pBasicCam = new BasicCam("/dev/video0", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + // Delete object. + delete pBasicCam; + // Point to null. 
+ pBasicCam = nullptr; +} + +/****************************************************************************** + * @brief This should fail when the --check_for_leaks command line flag is specified. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(BasicCamTest, Leaks) +{ + // Create a new BasicCam object. + BasicCam* pBasicCam = new BasicCam("/dev/video0", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + EXPECT_TRUE(pBasicCam != nullptr); +} + +/****************************************************************************** + * @brief Test the GetCameraIsOpen method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(BasicCamTest, GetCameraIsOpen) +{ + // Create a BasicCam object. + BasicCam basicCam("/dev/video0", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + + // Check initial camera open status. + EXPECT_FALSE(basicCam.GetCameraIsOpen()); +} + +/****************************************************************************** + * @brief Test the GetCameraLocation method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(BasicCamTest, GetCameraLocation) +{ + // Create a BasicCam object. + BasicCam basicCam("/dev/video0", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + + // Check camera location. + EXPECT_EQ(basicCam.GetCameraLocation(), "/dev/video0"); +} diff --git a/tests/Unit/src/vision/cameras/ZEDCam.cc b/tests/Unit/src/vision/cameras/ZEDCam.cc new file mode 100644 index 000000000..a6d05e7d0 --- /dev/null +++ b/tests/Unit/src/vision/cameras/ZEDCam.cc @@ -0,0 +1,83 @@ +/****************************************************************************** + * @brief ZEDCam unit tests. + * + * @file ZEDCam.cc + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + * + * @copyright Copyright Mars Rover Design Team 2025 - All Rights Reserved + ******************************************************************************/ + +#include "../../../../../src/vision/cameras/ZEDCam.h" + +/// \cond +#include +#include +#include + +/// \endcond + +/****************************************************************************** + * @brief Test the functionality of the ZEDCam constructor and destructor. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(ZEDCamTest, ConstructorDestructor) +{ + // Create a ZEDCam object. + ZEDCam zedCam(1280, 720, 30, 90.0, 60.0, false, 0.5f, 20.0f, false, false, false, 1, 0); + + // Check initial camera open status. + EXPECT_FALSE(zedCam.GetCameraIsOpen()); + + // Destructor will be called automatically when the object goes out of scope. +} + +/****************************************************************************** + * @brief Check that ZEDCam doesn't leak any memory. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(ZEDCamTest, DoesNotLeak) +{ + // Create a new ZEDCam object. + ZEDCam* pZEDCam = new ZEDCam(1280, 720, 30, 90.0, 60.0, false, 0.5f, 20.0f, false, false, false, 1, 0); + // Delete object. + delete pZEDCam; + // Point to null. 
+ pZEDCam = nullptr; +} + +/****************************************************************************** + * @brief This should fail when the --check_for_leaks command line flag is specified. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(ZEDCamTest, Leaks) +{ + // Create a new ZEDCam object. + ZEDCam* pZEDCam = new ZEDCam(1280, 720, 30, 90.0, 60.0, false, 0.5f, 20.0f, false, false, false, 1, 0); + EXPECT_TRUE(pZEDCam != nullptr); +} + +/****************************************************************************** + * @brief Test the GetCameraIsOpen method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(ZEDCamTest, GetCameraIsOpen) +{ + // Create a ZEDCam object. + ZEDCam zedCam(1280, 720, 30, 90.0, 60.0, false, 0.5f, 20.0f, false, false, false, 1, 0); + + // Check initial camera open status. + EXPECT_FALSE(zedCam.GetCameraIsOpen()); +} diff --git a/tests/Unit/src/vision/cameras/sim/SIMBasicCam.cc b/tests/Unit/src/vision/cameras/sim/SIMBasicCam.cc new file mode 100644 index 000000000..5237d59c4 --- /dev/null +++ b/tests/Unit/src/vision/cameras/sim/SIMBasicCam.cc @@ -0,0 +1,87 @@ +/****************************************************************************** + * @brief SIMBasicCam unit tests. + * + * @file test_SIMBasicCam.cc + * @date 2025-01-05 + * + * @copyright Copyright Mars Rover Design Team 2025 - All Rights Reserved + ******************************************************************************/ + +#include "../../../../../../src/vision/cameras/sim/SIMBasicCam.h" + +/// \cond +#include +#include + +/// \endcond + +/****************************************************************************** + * @brief Test the functionality of the SIMBasicCam constructor and destructor. + * + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMBasicCamTest, ConstructorDestructor) +{ + // Create a SIMBasicCam object. + SIMBasicCam simBasicCam("ws://127.0.0.1:80", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + + // Check initial camera open status. + EXPECT_FALSE(simBasicCam.GetCameraIsOpen()); + + // Destructor will be called automatically when the object goes out of scope. +} + +/****************************************************************************** + * @brief Check that SIMBasicCam doesn't leak any memory. + * + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMBasicCamTest, DoesNotLeak) +{ + // Create a new SIMBasicCam object. + SIMBasicCam* pSIMBasicCam = new SIMBasicCam("ws://127.0.0.1:80", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + // Delete object. + delete pSIMBasicCam; + // Point to null. + pSIMBasicCam = nullptr; +} + +/****************************************************************************** + * @brief This should fail when the --check_for_leaks command line flag is specified. + * + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMBasicCamTest, Leaks) +{ + // Create a new SIMBasicCam object. 
+ SIMBasicCam* pSIMBasicCam = new SIMBasicCam("ws://127.0.0.1:80", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + EXPECT_TRUE(pSIMBasicCam != nullptr); +} + +/****************************************************************************** + * @brief Test the GetCameraIsOpen method. + * + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMBasicCamTest, GetCameraIsOpen) +{ + // Create a SIMBasicCam object. + SIMBasicCam simBasicCam("ws://127.0.0.1:80", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + + // Check initial camera open status. + EXPECT_FALSE(simBasicCam.GetCameraIsOpen()); +} + +/****************************************************************************** + * @brief Test the GetCameraLocation method. + * + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMBasicCamTest, GetCameraLocation) +{ + // Create a SIMBasicCam object. + SIMBasicCam simBasicCam("ws://127.0.0.1:80", 1280, 720, 30, PIXEL_FORMATS::eBGRA, 90.0, 60.0, false, 1); + + // Check camera location. + EXPECT_EQ(simBasicCam.GetCameraLocation(), "ws://127.0.0.1:80"); +} diff --git a/tests/Unit/src/vision/cameras/sim/SIMZEDCam.cc b/tests/Unit/src/vision/cameras/sim/SIMZEDCam.cc new file mode 100644 index 000000000..4f0e4f71b --- /dev/null +++ b/tests/Unit/src/vision/cameras/sim/SIMZEDCam.cc @@ -0,0 +1,143 @@ +/****************************************************************************** + * @brief SIMZEDCam unit tests. + * + * @file SIMZEDCam.cc + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + * + * @copyright Copyright Mars Rover Design Team 2025 - All Rights Reserved + ******************************************************************************/ + +#include "../../../../../../src/vision/cameras/sim/SIMZEDCam.h" + +/// \cond +#include +#include + +/// \endcond + +/****************************************************************************** + * @brief Test the functionality of the SIMZEDCam constructor and destructor. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, ConstructorDestructor) +{ + // Create a SIMZEDCam object. + SIMZEDCam simZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + + // Check initial camera open status. + EXPECT_FALSE(simZEDCam.GetCameraIsOpen()); + + // Destructor will be called automatically when the object goes out of scope. +} + +/****************************************************************************** + * @brief Check that SIMZEDCam doesn't leak any memory. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, DoesNotLeak) +{ + // Create a new SIMZEDCam object. + SIMZEDCam* pSimZEDCam = new SIMZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + // Delete object. + delete pSimZEDCam; + // Point to null. + pSimZEDCam = nullptr; +} + +/****************************************************************************** + * @brief This should fail when the --check_for_leaks command line flag is specified. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, Leaks) +{ + // Create a new SIMZEDCam object. 
+ SIMZEDCam* pSimZEDCam = new SIMZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + EXPECT_TRUE(pSimZEDCam != nullptr); +} + +/****************************************************************************** + * @brief Test the ResetPositionalTracking method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, ResetPositionalTracking) +{ + // Create a SIMZEDCam object. + SIMZEDCam simZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + + // Call ResetPositionalTracking. + auto errorCode = simZEDCam.ResetPositionalTracking(); + + // Check that the error code is SUCCESS. + EXPECT_EQ(errorCode, sl::ERROR_CODE::SUCCESS); +} + +/****************************************************************************** + * @brief Test the RebootCamera method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, RebootCamera) +{ + // Create a SIMZEDCam object. + SIMZEDCam simZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + + // Call RebootCamera. + auto errorCode = simZEDCam.RebootCamera(); + + // Check that the error code is SUCCESS. + EXPECT_EQ(errorCode, sl::ERROR_CODE::SUCCESS); +} + +/****************************************************************************** + * @brief Test the EnablePositionalTracking method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, EnablePositionalTracking) +{ + // Create a SIMZEDCam object. + SIMZEDCam simZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + + // Call EnablePositionalTracking. + auto errorCode = simZEDCam.EnablePositionalTracking(1.0f); + + // Check that the error code is SUCCESS. + EXPECT_EQ(errorCode, sl::ERROR_CODE::SUCCESS); +} + +/****************************************************************************** + * @brief Test the DisablePositionalTracking method. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(SIMZEDCamTest, DisablePositionalTracking) +{ + // Create a SIMZEDCam object. + SIMZEDCam simZEDCam("/dev/video0", 1280, 720, 30, 90.0, 60.0, false, 1, 12345); + + // Call DisablePositionalTracking. + simZEDCam.DisablePositionalTracking(); + + // Check that positional tracking is disabled. + // Assuming there is a method to check this, e.g., IsPositionalTrackingEnabled(). + EXPECT_FALSE(simZEDCam.GetPositionalTrackingEnabled()); +} diff --git a/tests/Unit/src/vision/cameras/sim/WebRTC.cc b/tests/Unit/src/vision/cameras/sim/WebRTC.cc new file mode 100644 index 000000000..f740b59c1 --- /dev/null +++ b/tests/Unit/src/vision/cameras/sim/WebRTC.cc @@ -0,0 +1,69 @@ +/****************************************************************************** + * @brief WebRTC unit tests. 
+ * + * @file WebRTC.cc + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + * + * @copyright Copyright Mars Rover Design Team 2025 - All Rights Reserved + ******************************************************************************/ + +#include "../../../../../../src/vision/cameras/sim/WebRTC.h" + +/// \cond +#include +#include +#include +#include +#include + +/// \endcond + +/****************************************************************************** + * @brief Test the functionality of the WebRTC constructor and destructor. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(WebRTCTest, ConstructorDestructor) +{ + // Create a WebRTC object. + WebRTC webrtc("ws://localhost:8080", "streamer1"); + + // Check initial connection status. + EXPECT_FALSE(webrtc.GetIsConnected()); + + // Destructor will be called automatically when the object goes out of scope. +} + +/****************************************************************************** + * @brief Check that WebRTC doesn't leak any memory. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(WebRTCTest, DoesNotLeak) +{ + // Create a new WebRTC object. + WebRTC* pWebRTC = new WebRTC("ws://localhost:8080", "streamer1"); + // Delete object. + delete pWebRTC; + // Point to null. + pWebRTC = nullptr; +} + +/****************************************************************************** + * @brief This should fail when the --check_for_leaks command line flag is specified. + * + * + * @author clayjay3 (claytonraycowen@gmail.com) + * @date 2025-01-05 + ******************************************************************************/ +TEST(WebRTCTest, Leaks) +{ + // Create a new WebRTC object. + WebRTC* pWebRTC = new WebRTC("ws://localhost:8080", "streamer1"); + EXPECT_TRUE(pWebRTC != nullptr); +} diff --git a/tools/logging/path_with_distances.png b/tools/logging/path_with_distances.png deleted file mode 100644 index b64832603..000000000 Binary files a/tools/logging/path_with_distances.png and /dev/null differ diff --git a/tools/package-builders/ffmpeg/ffmpeg-amd64-pkg.sh b/tools/package-builders/ffmpeg/ffmpeg-amd64-pkg.sh index 960fd15c8..c50645787 100755 --- a/tools/package-builders/ffmpeg/ffmpeg-amd64-pkg.sh +++ b/tools/package-builders/ffmpeg/ffmpeg-amd64-pkg.sh @@ -17,6 +17,20 @@ else echo "Package version ${FFMPEG_VERSION} does not exist in the repository. Building the package." echo "rebuilding_pkg=true" >> $GITHUB_OUTPUT + # Install Dependencies + apt update + apt install -y \ + libaom-dev \ + libass-dev \ + libfdk-aac-dev \ + libdav1d-dev \ + libmp3lame-dev \ + libopus-dev \ + libvorbis-dev \ + libvpx-dev \ + libx264-dev \ + libx265-dev + # Delete Old Packages rm -rf /tmp/pkg rm -rf /tmp/ffmpeg @@ -36,12 +50,46 @@ else echo "Description: A prebuilt version of ffmpeg. Made by the Mars Rover Design Team." } > /tmp/pkg/ffmpeg_${FFMPEG_VERSION}_amd64/DEBIAN/control + # This is a workaround for the libsvtav1-dev package not being available in the repository. The package is installed manually. + git clone --depth=1 https://gitlab.com/AOMediaCodec/SVT-AV1.git + cd SVT-AV1 + cd Build + # We need to install to system first. Then we can install to the package directory. + cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release + make -j 8 + make install + cmake .. 
-G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/tmp/pkg/ffmpeg_${FFMPEG_VERSION}_amd64/usr/local + make -j 8 + make install + cd ../.. + rm -rf SVT-AV1 + # Download FFMPEG git clone --recurse-submodules --depth 1 --branch n${FFMPEG_VERSION} https://github.com/FFmpeg/FFmpeg.git ffmpeg cd ffmpeg # Configure FFMPEG - ./configure --prefix=/tmp/pkg/ffmpeg_${FFMPEG_VERSION}_amd64/usr/local --enable-static --disable-shared --disable-doc --enable-gpl --enable-libx264 --enable-pic + ./configure --prefix=/tmp/pkg/ffmpeg_${FFMPEG_VERSION}_amd64/usr/local \ + --enable-static \ + --disable-shared \ + --disable-doc \ + --enable-pic \ + --extra-libs="-lpthread -lm" \ + --ld="g++" \ + --enable-gpl \ + --enable-libaom \ + --enable-libass \ + --enable-libfdk-aac \ + --enable-libfreetype \ + --enable-libmp3lame \ + --enable-libopus \ + --enable-libsvtav1 \ + --enable-libdav1d \ + --enable-libvorbis \ + --enable-libvpx \ + --enable-libx264 \ + --enable-libx265 \ + --enable-nonfree # Install FFMPEG make diff --git a/tools/package-builders/ffmpeg/ffmpeg-arm64-pkg.sh b/tools/package-builders/ffmpeg/ffmpeg-arm64-pkg.sh index 1dd7947b7..c0ecb2ca0 100755 --- a/tools/package-builders/ffmpeg/ffmpeg-arm64-pkg.sh +++ b/tools/package-builders/ffmpeg/ffmpeg-arm64-pkg.sh @@ -17,6 +17,20 @@ else echo "Package version ${FFMPEG_VERSION} does not exist in the repository. Building the package." echo "rebuilding_pkg=true" >> $GITHUB_OUTPUT + # Install Dependencies + apt update + apt install -y \ + libaom-dev \ + libass-dev \ + libfdk-aac-dev \ + libdav1d-dev \ + libmp3lame-dev \ + libopus-dev \ + libvorbis-dev \ + libvpx-dev \ + libx264-dev \ + libx265-dev + # Delete Old Packages rm -rf /tmp/pkg rm -rf /tmp/ffmpeg @@ -36,12 +50,46 @@ else echo "Description: A prebuilt version of ffmpeg. Made by the Mars Rover Design Team." } > /tmp/pkg/ffmpeg_${FFMPEG_VERSION}_arm64/DEBIAN/control + # This is a workaround for the libsvtav1-dev package not being available in the repository. The package is installed manually. + git clone --depth=1 https://gitlab.com/AOMediaCodec/SVT-AV1.git + cd SVT-AV1 + cd Build + # We need to install to system first. Then we can install to the package directory. + cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release + make -j 8 + make install + cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/tmp/pkg/ffmpeg_${FFMPEG_VERSION}_arm64/usr/local + make -j 8 + make install + cd ../.. 
+ rm -rf SVT-AV1 + # Download FFMPEG git clone --recurse-submodules --depth 1 --branch n${FFMPEG_VERSION} https://github.com/FFmpeg/FFmpeg.git ffmpeg cd ffmpeg # Configure FFMPEG - ./configure --prefix=/tmp/pkg/ffmpeg_${FFMPEG_VERSION}_amd64/usr/local --enable-static --disable-shared --disable-doc --enable-gpl --enable-libx264 --enable-pic + ./configure --prefix=/tmp/pkg/ffmpeg_${FFMPEG_VERSION}_arm64/usr/local \ + --enable-static \ + --disable-shared \ + --disable-doc \ + --enable-pic \ + --extra-libs="-lpthread -lm" \ + --ld="g++" \ + --enable-gpl \ + --enable-libaom \ + --enable-libass \ + --enable-libfdk-aac \ + --enable-libfreetype \ + --enable-libmp3lame \ + --enable-libopus \ + --enable-libsvtav1 \ + --enable-libdav1d \ + --enable-libvorbis \ + --enable-libvpx \ + --enable-libx264 \ + --enable-libx265 \ + --enable-nonfree # Install FFMPEG make diff --git a/tools/package-builders/libdatachannel/libdatachannel-arm64-pkg.sh b/tools/package-builders/libdatachannel/libdatachannel-arm64-pkg.sh index a5c914add..1367fc9fe 100755 --- a/tools/package-builders/libdatachannel/libdatachannel-arm64-pkg.sh +++ b/tools/package-builders/libdatachannel/libdatachannel-arm64-pkg.sh @@ -4,7 +4,7 @@ cd /tmp # Install Variables -LIBDATACHANNEL_VERSION="0.22" +LIBDATACHANNEL_VERSION="0.22.3" # Define Package URL FILE_URL="https://github.com/MissouriMRDT/Autonomy_Packages/raw/main/libdatachannel/arm64/libdatachannel_${LIBDATACHANNEL_VERSION}_arm64.deb"