From 4ec64d189d5ad4612307a66d0cd7a393be572fe8 Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Thu, 16 Dec 2021 15:08:27 -0300 Subject: [PATCH 01/13] Exiting processor when processing is finished --- config-x86-openvino.ini | 130 ++++++++++++++++++++++++--------------- config-x86.ini | 75 +++++++--------------- libs/engine_threading.py | 7 ++- libs/processor_core.py | 27 ++++---- run_processor_core.py | 6 +- share/commands.py | 3 +- supervisord.conf | 40 ++++++------ 7 files changed, 148 insertions(+), 140 deletions(-) diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini index e7daee9c..8a808675 100644 --- a/config-x86-openvino.ini +++ b/config-x86-openvino.ini @@ -3,8 +3,8 @@ Host = 0.0.0.0 Port = 8000 UseAuthToken = False SSLEnabled = False -SSLCertificateFile = -SSLKeyFile = +SSLCertificateFile = +SSLKeyFile = [CORE] Host = 0.0.0.0 @@ -12,72 +12,47 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] -HasBeenConfigured = False +HasBeenConfigured = True Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast MaxProcesses = 2 -; WIP https://github.com/neuralet/neuralet/issues/91 -;Encoder: videoconvert ! vaapih264enc -DashboardURL = https://app.lanthorn.ai/ -DashboardAuthorizationToken = +DashboardURL = https://hybrid.sta.lanthorn.ai/ +DashboardAuthorizationToken = Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjQ0LCJleHAiOjE2NjY5NzI5MDl9.qfwY0pioVwLL2UKPYgkMF0nFB2O05sRp20wIS_pA2Oc SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = +GlobalReportingEmails = GlobalReportTime = 06:00 DailyGlobalReport = False WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False -LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics +LogPerformanceMetricsDirectory = EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True +ProcessAreas = False [Area_0] -Id = 0 +Id = 1 Name = Kitchen Cameras = 0 NotifyEveryMinutes = 0 -Emails = +Emails = EnableSlackNotifications = False OccupancyThreshold = 300 ViolationThreshold = 60 DailyReport = False DailyReportTime = 06:00 -[Source_0] -VideoPath = /repo/data/softbio_vid.mp4 -Tags = kitchen -Name = Garden-Camera -Id = 0 -Emails = -EnableSlackNotifications = False -NotifyEveryMinutes = 0 -ViolationThreshold = 60 -; Distance measurement method: -; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. -; - CenterPointsDistance: compare center of pedestrian boxes together -; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. 
-; - If left empty the DefaultDistMethod will be employed -DistMethod = -DailyReport = False -DailyReportTime = 06:00 -LiveFeedEnabled = True - [Detector] -; Supported devices: Jetson , EdgeTPU, Dummy, x86 Device = x86 Name = openvino -;ImageSize should be 3 numbers seperated by commas, no spaces: 300,300,3 ImageSize = 300,300,3 -ModelPath = +ModelPath = ClassID = 1 MinScore = 0.25 [Tracker] Name = IOUTracker -; Number of times tracker was lost while tracking MaxLost = 5 TrackerIOUThreshold = 0.5 @@ -88,10 +63,6 @@ Enabled = True [SourcePostProcessor_1] Name = social_distance -; Default distance measurement method (used when no DistMethod is specified for the source): -; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. -; - CenterPointsDistance: compare center of pedestrian boxes together -; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. DefaultDistMethod = CenterPointsDistance DistThreshold = 150 Enabled = True @@ -106,7 +77,6 @@ Enabled = True [SourceLogger_1] Name = s3_logger -; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 0 ScreenshotS3Bucket = my-screenshot-bucket Enabled = False @@ -115,15 +85,14 @@ Enabled = False Name = file_system_logger TimeInterval = 0.5 LogDirectory = /repo/data/processor/static/data/sources -; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 5 ScreenshotsDirectory = /repo/data/processor/static/screenshots Enabled = True [SourceLogger_3] Name = web_hook_logger -Endpoint = -Authorization = +Endpoint = https://hybrid.sta.lanthorn.ai/api/processor_logger/raw_data +Authorization = Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjQ0LCJleHAiOjE2NjY5NzI5MDl9.qfwY0pioVwLL2UKPYgkMF0nFB2O05sRp20wIS_pA2Oc TimeInterval = 0.5 Enabled = False SendingInterval = 5 @@ -133,17 +102,82 @@ Name = file_system_logger LogDirectory = /repo/data/processor/static/data/areas Enabled = True -; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics Enabled = True -; Expressed in minutes LiveInterval = 10 -; Enable the PeriodicTask_1 if you want to backup your files in S3 [PeriodicTask_1] Name = s3_backup Enabled = False -; Expressed in minutes BackupInterval = 30 BackupS3Bucket = your-s3-bucket + +[Source_0] +ViolationThreshold = 0 +NotifyEveryMinutes = 0 +Emails = +EnableSlackNotifications = False +DailyReport = False +DailyReportTime = 06:00 +Id = 0 +Name = cam_1 +VideoPath = /repo/data/historical_data +Tags = +DistMethod = +LiveFeedEnabled = True +HasBeenCalibrated = False +HasDefinedRoi = False +HasInOutBorder = False + +[Source_1] +ViolationThreshold = 0 +NotifyEveryMinutes = 0 +Emails = +EnableSlackNotifications = False +DailyReport = False +DailyReportTime = 06:00 +Id = 1 +Name = spaces rule +VideoPath = http://149.43.156.105/mjpg/video.mjpg +Tags = +DistMethod = +LiveFeedEnabled = True +HasBeenCalibrated = False +HasDefinedRoi = False +HasInOutBorder = False + +[Source_2] +ViolationThreshold = 0 +NotifyEveryMinutes = 0 +Emails = +EnableSlackNotifications = False +DailyReport = False +DailyReportTime = 06:00 +Id = 2 +Name = another_cam +VideoPath = http://cam1.infolink.ru/mjpg/video.mjpg +Tags = +DistMethod = CalibratedDistance +LiveFeedEnabled = True +HasBeenCalibrated = False +HasDefinedRoi = False +HasInOutBorder = False + 
+[Source_3] +ViolationThreshold = 0 +NotifyEveryMinutes = 0 +Emails = +EnableSlackNotifications = False +DailyReport = False +DailyReportTime = 06:00 +Id = 3 +Name = blablabla +VideoPath = http://webcam4.mmto.arizona.edu/mjpg/video.mjpg +Tags = +DistMethod = CalibratedDistance +LiveFeedEnabled = True +HasBeenCalibrated = False +HasDefinedRoi = False +HasInOutBorder = False + diff --git a/config-x86.ini b/config-x86.ini index abbeefde..b52c7c52 100644 --- a/config-x86.ini +++ b/config-x86.ini @@ -3,8 +3,8 @@ Host = 0.0.0.0 Port = 8000 UseAuthToken = False SSLEnabled = False -SSLCertificateFile = -SSLKeyFile = +SSLCertificateFile = +SSLKeyFile = [CORE] Host = 0.0.0.0 @@ -12,35 +12,32 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] -HasBeenConfigured = False +HasBeenConfigured = True Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast MaxProcesses = 1 -; WIP https://github.com/neuralet/neuralet/issues/91 -;Encoder: videoconvert ! vaapih264enc -DashboardURL = https://app.lanthorn.ai/ -DashboardAuthorizationToken = +DashboardURL = http://192.168.1.8:8000/ +DashboardAuthorizationToken = Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjIsImV4cCI6MTYyODk0NzI1NX0.tmRC0_ywYgghXfdWsZ93NtoEBZA6poA5vtTnjUNAJNY EnableSlackNotifications = no SlackChannel = lanthorn-notifications -; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = +GlobalReportingEmails = GlobalReportTime = 06:00 DailyGlobalReport = False WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False -LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics +LogPerformanceMetricsDirectory = EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = True +ProcessAreas = False [Area_0] Id = 0 Name = Kitchen Cameras = 0 NotifyEveryMinutes = 0 -Emails = +Emails = EnableSlackNotifications = False OccupancyThreshold = 300 ViolationThreshold = 60 @@ -48,51 +45,32 @@ DailyReport = False DailyReportTime = 06:00 [Source_0] -VideoPath = /repo/data/softbio_vid.mp4 -Tags = kitchen +VideoPath = /repo/data/historical_data/videos/camera15 +Tags = Name = Garden-Camera Id = 0 -Emails = +Emails = EnableSlackNotifications = False NotifyEveryMinutes = 0 -ViolationThreshold = 60 -; Distance measurement method: -; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. -; - CenterPointsDistance: compare center of pedestrian boxes together -; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. 
-; - If left empty the DefaultDistMethod will be employed -DistMethod = +ViolationThreshold = 0 +DistMethod = DailyReport = False DailyReportTime = 06:00 LiveFeedEnabled = True +HasBeenCalibrated = False +HasDefinedRoi = False +HasInOutBorder = False [Detector] -; Supported devices: Jetson , EdgeTPU, Dummy, x86 Device = x86 -; Supported models: mobilenet_ssd_v2, openpifpaf, and yolov3 Name = yolov3 -; ImageSize should be 3 numbers seperated by commas, no spaces: 300,300,3 (for better accuracy use higher resolution when -; using openpifpaf (openpifpaf detects both faces and pedestrians) -; For yolov3 model the ImageSize MUST be w = h = 32x e.g: x= 13=> ImageSize = 416,416,3 -; Set the ImageSize to 1281,721,3 for openpifpaf ImageSize = 416,416,3 -ModelPath = +ModelPath = ClassID = 1 MinScore = 0.25 -; Uncomment this if you want facemask detection. Only works for openpifpaf. -; [Classifier] -; Device = x86 -; Name = OFMClassifier -; ModelPath = -; ImageSize = 45,45,3 -; MinScore = 0.15 -; MinImageSize = - - [Tracker] Name = IOUTracker -; Number of times tracker was lost while tracking MaxLost = 5 TrackerIOUThreshold = 0.5 @@ -103,10 +81,6 @@ Enabled = True [SourcePostProcessor_1] Name = social_distance -; Default distance measurement method (used when no DistMethod is specified for the source): -; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. -; - CenterPointsDistance: compare center of pedestrian boxes together -; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. DefaultDistMethod = CenterPointsDistance DistThreshold = 150 Enabled = True @@ -121,7 +95,6 @@ Enabled = True [SourceLogger_1] Name = s3_logger -; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 0 ScreenshotS3Bucket = my-screenshot-bucket Enabled = False @@ -129,16 +102,15 @@ Enabled = False [SourceLogger_2] Name = file_system_logger TimeInterval = 0.5 -LogDirectory = /repo/data/processor/static/data/sources -; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) +LogDirectory = /repo/data/historical_data/sources ScreenshotPeriod = 5 ScreenshotsDirectory = /repo/data/processor/static/screenshots Enabled = True [SourceLogger_3] Name = web_hook_logger -Endpoint = -Authorization = +Endpoint = +Authorization = Bearer TimeInterval = 0.5 Enabled = False SendingInterval = 5 @@ -148,17 +120,14 @@ Name = file_system_logger LogDirectory = /repo/data/processor/static/data/areas Enabled = True -; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics Enabled = True -; Expressed in minutes LiveInterval = 10 -; Enable the PeriodicTask_1 if you want to backup your files in S3 [PeriodicTask_1] Name = s3_backup Enabled = False -; Expressed in minutes BackupInterval = 30 BackupS3Bucket = your-s3-bucket + diff --git a/libs/engine_threading.py b/libs/engine_threading.py index ec606ec4..01d5060a 100644 --- a/libs/engine_threading.py +++ b/libs/engine_threading.py @@ -2,8 +2,9 @@ import logging import time +import sys + from datetime import datetime -from pathlib import Path from shutil import rmtree from threading import Thread from libs.cv_engine import CvEngine @@ -54,8 +55,8 @@ def run(self): last_restart_time = datetime.now() self.engine.process_video(self.source['url']) if os.path.isdir(self.source['url']): - # Source is a video. 
Stop the iteration - break + logging.info("SE TERMINARON LOS VIDEOS") + sys.exit() except Exception as e: logging.error(e, exc_info=True) logging.info(f"Exception processing video for source {self.source['name']}") diff --git a/libs/processor_core.py b/libs/processor_core.py index a67923dc..6b35d26e 100644 --- a/libs/processor_core.py +++ b/libs/processor_core.py @@ -3,7 +3,6 @@ from multiprocessing.managers import BaseManager import logging from share.commands import Commands -from queue import Empty import schedule from libs.engine_threading import run_video_processing from libs.area_threading import run_area_processing @@ -12,6 +11,7 @@ logger = logging.getLogger(__name__) logging.getLogger().setLevel(logging.INFO) + class QueueManager(BaseManager): pass @@ -74,15 +74,18 @@ def _setup_scheduled_tasks(self): logger.info(f"should not send notification for camera {area.id}") def _serve(self): - logger.info("Core is listening for commands ... ") - while True: - try: - cmd_code = self._cmd_queue.get(timeout=10) - logger.info("command received: " + str(cmd_code)) - self._handle_command(cmd_code) - except Empty: - # Run pending tasks - schedule.run_pending() + logger.info("Starting process") + + self._start_processing() + + # while True: + # try: + # cmd_code = self._cmd_queue.get(timeout=10) + # logger.info("command received: " + str(cmd_code)) + # self._handle_command(cmd_code) + # except Empty: + # # Run pending tasks + # schedule.run_pending() def _handle_command(self, cmd_code): if cmd_code == Commands.PROCESS_VIDEO_CFG: @@ -145,8 +148,8 @@ def start_processing_areas(self): def _start_processing(self): self._engines = self.start_processing_sources() - area_engine = self.start_processing_areas() - self._engines.append(area_engine) + # area_engine = self.start_processing_areas() + # self._engines.append(area_engine) def _stop_processing(self): for (conn, proc) in self._engines: diff --git a/run_processor_core.py b/run_processor_core.py index 8bd38fc4..bca47619 100644 --- a/run_processor_core.py +++ b/run_processor_core.py @@ -1,20 +1,20 @@ #!/usr/bin/python3 import argparse -from multiprocessing import Process -import threading from libs.config_engine import ConfigEngine import logging logger = logging.getLogger(__name__) + def start_core(config): from libs.processor_core import ProcessorCore core = ProcessorCore(config) - + logger.info("Core Started.") core.start() logger.info("Core Terminated.") + def main(config): logging.basicConfig(level=logging.INFO) if isinstance(config, str): diff --git a/share/commands.py b/share/commands.py index 339448fb..bb9988f1 100644 --- a/share/commands.py +++ b/share/commands.py @@ -1,5 +1,6 @@ from enum import Enum + class Commands(Enum): - PROCESS_VIDEO_CFG = 1 + PROCESS_VIDEO_CFG = 1 STOP_PROCESS_VIDEO = 2 diff --git a/supervisord.conf b/supervisord.conf index a9ab549d..d97035bb 100644 --- a/supervisord.conf +++ b/supervisord.conf @@ -10,13 +10,13 @@ supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] serverurl=unix:///var/run/supervisor.sock -[program:api] -command=python3 run_processor_api.py --config %(ENV_CONFIG_FILE)s -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 -autorestart=true +# [program:api] +# command=python3 run_processor_api.py --config %(ENV_CONFIG_FILE)s +# stdout_logfile=/dev/stdout +# stdout_logfile_maxbytes=0 +# stderr_logfile=/dev/stderr +# stderr_logfile_maxbytes=0 +# autorestart=true [program:core] command=python3 
run_processor_core.py --config %(ENV_CONFIG_FILE)s @@ -26,17 +26,17 @@ stderr_logfile=/dev/stderr stderr_logfile_maxbytes=0 autorestart=true -[program:startup] -command=/repo/sample_startup.bash %(ENV_CONFIG_FILE)s -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +# [program:startup] +# command=/repo/sample_startup.bash %(ENV_CONFIG_FILE)s +# stdout_logfile=/dev/stdout +# stdout_logfile_maxbytes=0 +# stderr_logfile=/dev/stderr +# stderr_logfile_maxbytes=0 -[program:periodic_task] -command=python3 run_periodic_task.py --config %(ENV_CONFIG_FILE)s -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 -autorestart=true +# [program:periodic_task] +# command=python3 run_periodic_task.py --config %(ENV_CONFIG_FILE)s +# stdout_logfile=/dev/stdout +# stdout_logfile_maxbytes=0 +# stderr_logfile=/dev/stderr +# stderr_logfile_maxbytes=0 +# autorestart=true From 4c1b2192795bea9503913a328b619cfc064b3a0b Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Thu, 16 Dec 2021 15:35:35 -0300 Subject: [PATCH 02/13] Removed bearer tokens --- config-x86-openvino.ini | 4 ++-- config-x86.ini | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini index 8a808675..a9228f59 100644 --- a/config-x86-openvino.ini +++ b/config-x86-openvino.ini @@ -17,7 +17,7 @@ Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast MaxProcesses = 2 DashboardURL = https://hybrid.sta.lanthorn.ai/ -DashboardAuthorizationToken = Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjQ0LCJleHAiOjE2NjY5NzI5MDl9.qfwY0pioVwLL2UKPYgkMF0nFB2O05sRp20wIS_pA2Oc +DashboardAuthorizationToken = Bearer SlackChannel = lanthorn-notifications OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 @@ -92,7 +92,7 @@ Enabled = True [SourceLogger_3] Name = web_hook_logger Endpoint = https://hybrid.sta.lanthorn.ai/api/processor_logger/raw_data -Authorization = Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjQ0LCJleHAiOjE2NjY5NzI5MDl9.qfwY0pioVwLL2UKPYgkMF0nFB2O05sRp20wIS_pA2Oc +Authorization = Bearer TimeInterval = 0.5 Enabled = False SendingInterval = 5 diff --git a/config-x86.ini b/config-x86.ini index b52c7c52..f39a0b51 100644 --- a/config-x86.ini +++ b/config-x86.ini @@ -17,7 +17,7 @@ Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! 
x264enc speed-preset=ultrafast MaxProcesses = 1 DashboardURL = http://192.168.1.8:8000/ -DashboardAuthorizationToken = Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjIsImV4cCI6MTYyODk0NzI1NX0.tmRC0_ywYgghXfdWsZ93NtoEBZA6poA5vtTnjUNAJNY +DashboardAuthorizationToken = Bearer EnableSlackNotifications = no SlackChannel = lanthorn-notifications OccupancyAlertsMinInterval = 180 From a97dede7df2be67884206d1825277b47dc3a2127 Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Mon, 20 Dec 2021 09:21:36 -0300 Subject: [PATCH 03/13] Run video processing without creating extra process, don't wait for signal to die --- config-coral.ini | 1 + config-jetson-nano.ini | 1 + config-jetson-tx2.ini | 1 + config-x86-gpu-tensorrt.ini | 1 + config-x86-gpu.ini | 1 + config-x86-openvino.ini | 1 + config-x86.ini | 1 + libs/engine_threading.py | 37 ++++++++-------- libs/processor_core.py | 29 ++++++++----- run_historical_metrics.sh | 2 + start-x86-historical.sh | 3 ++ x86-historical.Dockerfile | 87 +++++++++++++++++++++++++++++++++++++ 12 files changed, 137 insertions(+), 28 deletions(-) create mode 100755 run_historical_metrics.sh create mode 100755 start-x86-historical.sh create mode 100644 x86-historical.Dockerfile diff --git a/config-coral.ini b/config-coral.ini index 4124f801..0cb33d98 100644 --- a/config-coral.ini +++ b/config-coral.ini @@ -1,4 +1,5 @@ [App] +HistoricalDataMode = False HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast diff --git a/config-jetson-nano.ini b/config-jetson-nano.ini index fd74862a..e8659def 100644 --- a/config-jetson-nano.ini +++ b/config-jetson-nano.ini @@ -1,4 +1,5 @@ [App] +HistoricalDataMode = False HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast diff --git a/config-jetson-tx2.ini b/config-jetson-tx2.ini index 014c8b7f..fb269699 100644 --- a/config-jetson-tx2.ini +++ b/config-jetson-tx2.ini @@ -1,4 +1,5 @@ [App] +HistoricalDataMode = False HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast diff --git a/config-x86-gpu-tensorrt.ini b/config-x86-gpu-tensorrt.ini index 4c7465a6..65ce9bcb 100644 --- a/config-x86-gpu-tensorrt.ini +++ b/config-x86-gpu-tensorrt.ini @@ -13,6 +13,7 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] +HistoricalDataMode = False HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast diff --git a/config-x86-gpu.ini b/config-x86-gpu.ini index f9c20259..607d513c 100644 --- a/config-x86-gpu.ini +++ b/config-x86-gpu.ini @@ -13,6 +13,7 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] +HistoricalDataMode = False HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini index a9228f59..2f67f49d 100644 --- a/config-x86-openvino.ini +++ b/config-x86-openvino.ini @@ -12,6 +12,7 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] +HistoricalDataMode = False HasBeenConfigured = True Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! 
x264enc speed-preset=ultrafast diff --git a/config-x86.ini b/config-x86.ini index f39a0b51..65741f7a 100644 --- a/config-x86.ini +++ b/config-x86.ini @@ -12,6 +12,7 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] +HistoricalDataMode = True HasBeenConfigured = True Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast diff --git a/libs/engine_threading.py b/libs/engine_threading.py index 01d5060a..81d41e55 100644 --- a/libs/engine_threading.py +++ b/libs/engine_threading.py @@ -12,7 +12,7 @@ logger = logging.getLogger(__name__) -def run_video_processing(config, pipe, sources): +def run_video_processing(config, pipe, sources, historical_data_mode: bool = False): pid = os.getpid() logger.info(f"[{pid}] taking on {len(sources)} cameras") threads = [] @@ -21,21 +21,23 @@ def run_video_processing(config, pipe, sources): engine.start() threads.append(engine) - # Wait for a signal to die - pipe.recv() - logger.info(f"[{pid}] will stop cameras and die") - for t in threads: - t.stop() + if not historical_data_mode: + # Wait for a signal to die + pipe.recv() - for src in sources: - logger.info("Clean up video output") - playlist_path = os.path.join('/repo/data/processor/static/gstreamer/', src['id']) - birdseye_path = os.path.join('/repo/data/processor/static/gstreamer/', src['id'] + '-birdseye') - if os.path.exists(playlist_path): - rmtree(playlist_path) - if os.path.exists(birdseye_path): - rmtree(birdseye_path) - logger.info(f"[{pid}] Goodbye!") + logger.info(f"[{pid}] will stop cameras and die") + for t in threads: + t.stop() + + for src in sources: + logger.info("Clean up video output") + playlist_path = os.path.join('/repo/data/processor/static/gstreamer/', src['id']) + birdseye_path = os.path.join('/repo/data/processor/static/gstreamer/', src['id'] + '-birdseye') + if os.path.exists(playlist_path): + rmtree(playlist_path) + if os.path.exists(birdseye_path): + rmtree(birdseye_path) + logger.info(f"[{pid}] Goodbye!") class EngineThread(Thread): @@ -55,8 +57,8 @@ def run(self): last_restart_time = datetime.now() self.engine.process_video(self.source['url']) if os.path.isdir(self.source['url']): - logging.info("SE TERMINARON LOS VIDEOS") - sys.exit() + logging.info("Finished processing") + break except Exception as e: logging.error(e, exc_info=True) logging.info(f"Exception processing video for source {self.source['name']}") @@ -69,6 +71,7 @@ def run(self): time.sleep(5) logging.info("Restarting the video processing") restarts += 1 + sys.exit() except Exception as e: logging.error(e, exc_info=True) raise e diff --git a/libs/processor_core.py b/libs/processor_core.py index 6b35d26e..88369ff0 100644 --- a/libs/processor_core.py +++ b/libs/processor_core.py @@ -3,6 +3,7 @@ from multiprocessing.managers import BaseManager import logging from share.commands import Commands +from queue import Empty import schedule from libs.engine_threading import run_video_processing from libs.area_threading import run_area_processing @@ -74,18 +75,20 @@ def _setup_scheduled_tasks(self): logger.info(f"should not send notification for camera {area.id}") def _serve(self): - logger.info("Starting process") - self._start_processing() - - # while True: - # try: - # cmd_code = self._cmd_queue.get(timeout=10) - # logger.info("command received: " + str(cmd_code)) - # self._handle_command(cmd_code) - # except Empty: - # # Run pending tasks - # schedule.run_pending() + if self.config.get_section_dict("App")["HistoricalDataMode"]: + logger.info("Starting historical data 
processing") + self.start_processing_historical_data() + else: + logger.info("Starting process") + while True: + try: + cmd_code = self._cmd_queue.get(timeout=10) + logger.info("command received: " + str(cmd_code)) + self._handle_command(cmd_code) + except Empty: + # Run pending tasks + schedule.run_pending() def _handle_command(self, cmd_code): if cmd_code == Commands.PROCESS_VIDEO_CFG: @@ -118,6 +121,10 @@ def _handle_command(self, cmd_code): logger.warning("Invalid core command " + str(cmd_code)) self._result_queue.put("invalid_cmd_code") + def start_processing_historical_data(self): + sources = self.config.get_video_sources() + run_video_processing(self.config, None, sources, True) + def start_processing_sources(self): sources = self.config.get_video_sources() if len(sources) == 0: diff --git a/run_historical_metrics.sh b/run_historical_metrics.sh new file mode 100755 index 00000000..3ed80b0a --- /dev/null +++ b/run_historical_metrics.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python3 run_processor_core.py --config $CONFIG_FILE \ No newline at end of file diff --git a/start-x86-historical.sh b/start-x86-historical.sh new file mode 100755 index 00000000..b54df505 --- /dev/null +++ b/start-x86-historical.sh @@ -0,0 +1,3 @@ +#!/bin/bash +docker build -f x86-historical.Dockerfile -t "neuralet/smart-social-distancing:latest-x86_64" . +docker run -it -p 8300:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 \ No newline at end of file diff --git a/x86-historical.Dockerfile b/x86-historical.Dockerfile new file mode 100644 index 00000000..bbbe302a --- /dev/null +++ b/x86-historical.Dockerfile @@ -0,0 +1,87 @@ +FROM tensorflow/tensorflow:latest-py3 + +# The `python3-opencv` package isn't built with gstreamer on Ubuntu. So we need to manually build opencv. 
+ARG OPENCV_VERSION=4.3.0 +# http://amritamaz.net/blog/opencv-config +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ + curl \ + git \ + gstreamer1.0-plugins-bad \ + gstreamer1.0-plugins-good \ + gstreamer1.0-plugins-ugly \ + gstreamer1.0-vaapi \ + libavcodec-dev \ + libavformat-dev \ + libgstreamer-plugins-base1.0-dev \ + libgstreamer1.0-dev \ + libsm6 \ + libswscale-dev \ + libxext6 \ + libxrender-dev \ + mesa-va-drivers \ + python3-dev \ + python3-numpy \ + && rm -rf /var/lib/apt/lists/* \ + && cd /tmp/ \ + && curl -L https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.tar.gz -o opencv.tar.gz \ + && tar zxvf opencv.tar.gz && rm opencv.tar.gz \ + && cd /tmp/opencv-${OPENCV_VERSION} \ + && mkdir build \ + && cd build \ + && cmake \ + -DBUILD_opencv_python3=yes \ + -DPYTHON_EXECUTABLE=$(which python3) \ + -DCMAKE_BUILD_TYPE=RELEASE \ + -DBUILD_TESTS=OFF \ + -DBUILD_PERF_TESTS=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DINSTALL_TESTS=OFF \ + -DBUILD_opencv_apps=OFF \ + -DBUILD_DOCS=OFF \ + ../ \ + && make -j$(nproc) \ + && make install \ + && cd /tmp \ + && rm -rf opencv-${OPENCV_VERSION} \ + && apt-get purge -y \ + cmake \ + git \ + libgstreamer-plugins-base1.0-dev \ + libgstreamer1.0-dev \ + libxrender-dev \ + python3-dev \ + && apt-get autoremove -y + +# https://askubuntu.com/questions/909277/avoiding-user-interaction-with-tzdata-when-installing-certbot-in-a-docker-contai +ARG DEBIAN_FRONTEND=noninteractive + +COPY api/requirements.txt / + +RUN apt-get update && apt-get install -y --no-install-recommends \ + tzdata \ + pkg-config \ + python3-dev \ + python3-numpy \ + python3-pillow \ + python3-pip \ + python3-scipy \ + python3-wget \ + supervisor \ + && rm -rf /var/lib/apt/lists/* \ + && python3 -m pip install --upgrade pip setuptools==41.0.0 && pip install -r /requirements.txt \ + && apt-get purge -y \ + python3-dev \ + && apt-get autoremove -y + +RUN apt-get update && apt-get install -y python3-dev && pip3 install torch==1.5 torchvision==0.6.0 openpifpaf + +ENV DEV_ALLOW_ALL_ORIGINS=true +ENV CONFIG_FILE=config-x86.ini + +COPY . 
/repo +WORKDIR /repo + +HEALTHCHECK --interval=30s --retries=2 --start-period=15s CMD bash healthcheck.bash +CMD bash /repo/run_historical_metrics.sh From 248ca83a8dd8208d6987f59e63e734ae969dabf3 Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Mon, 20 Dec 2021 09:45:44 -0300 Subject: [PATCH 04/13] Removed unused dockerfile, modified getting boolean config --- libs/processor_core.py | 6 +-- start-x86-historical.sh | 3 +- x86-historical.Dockerfile | 87 --------------------------------------- 3 files changed, 4 insertions(+), 92 deletions(-) delete mode 100644 x86-historical.Dockerfile diff --git a/libs/processor_core.py b/libs/processor_core.py index 88369ff0..560cb895 100644 --- a/libs/processor_core.py +++ b/libs/processor_core.py @@ -76,7 +76,7 @@ def _setup_scheduled_tasks(self): def _serve(self): - if self.config.get_section_dict("App")["HistoricalDataMode"]: + if self.config.get_boolean("App", "HistoricalDataMode"): logger.info("Starting historical data processing") self.start_processing_historical_data() else: @@ -155,8 +155,8 @@ def start_processing_areas(self): def _start_processing(self): self._engines = self.start_processing_sources() - # area_engine = self.start_processing_areas() - # self._engines.append(area_engine) + area_engine = self.start_processing_areas() + self._engines.append(area_engine) def _stop_processing(self): for (conn, proc) in self._engines: diff --git a/start-x86-historical.sh b/start-x86-historical.sh index b54df505..aa9cd559 100755 --- a/start-x86-historical.sh +++ b/start-x86-historical.sh @@ -1,3 +1,2 @@ #!/bin/bash -docker build -f x86-historical.Dockerfile -t "neuralet/smart-social-distancing:latest-x86_64" . -docker run -it -p 8300:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 \ No newline at end of file +docker run -it -p 8300:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh \ No newline at end of file diff --git a/x86-historical.Dockerfile b/x86-historical.Dockerfile deleted file mode 100644 index bbbe302a..00000000 --- a/x86-historical.Dockerfile +++ /dev/null @@ -1,87 +0,0 @@ -FROM tensorflow/tensorflow:latest-py3 - -# The `python3-opencv` package isn't built with gstreamer on Ubuntu. So we need to manually build opencv. 
-ARG OPENCV_VERSION=4.3.0 -# http://amritamaz.net/blog/opencv-config -RUN apt-get update && apt-get install -y --no-install-recommends \ - build-essential \ - cmake \ - curl \ - git \ - gstreamer1.0-plugins-bad \ - gstreamer1.0-plugins-good \ - gstreamer1.0-plugins-ugly \ - gstreamer1.0-vaapi \ - libavcodec-dev \ - libavformat-dev \ - libgstreamer-plugins-base1.0-dev \ - libgstreamer1.0-dev \ - libsm6 \ - libswscale-dev \ - libxext6 \ - libxrender-dev \ - mesa-va-drivers \ - python3-dev \ - python3-numpy \ - && rm -rf /var/lib/apt/lists/* \ - && cd /tmp/ \ - && curl -L https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.tar.gz -o opencv.tar.gz \ - && tar zxvf opencv.tar.gz && rm opencv.tar.gz \ - && cd /tmp/opencv-${OPENCV_VERSION} \ - && mkdir build \ - && cd build \ - && cmake \ - -DBUILD_opencv_python3=yes \ - -DPYTHON_EXECUTABLE=$(which python3) \ - -DCMAKE_BUILD_TYPE=RELEASE \ - -DBUILD_TESTS=OFF \ - -DBUILD_PERF_TESTS=OFF \ - -DBUILD_EXAMPLES=OFF \ - -DINSTALL_TESTS=OFF \ - -DBUILD_opencv_apps=OFF \ - -DBUILD_DOCS=OFF \ - ../ \ - && make -j$(nproc) \ - && make install \ - && cd /tmp \ - && rm -rf opencv-${OPENCV_VERSION} \ - && apt-get purge -y \ - cmake \ - git \ - libgstreamer-plugins-base1.0-dev \ - libgstreamer1.0-dev \ - libxrender-dev \ - python3-dev \ - && apt-get autoremove -y - -# https://askubuntu.com/questions/909277/avoiding-user-interaction-with-tzdata-when-installing-certbot-in-a-docker-contai -ARG DEBIAN_FRONTEND=noninteractive - -COPY api/requirements.txt / - -RUN apt-get update && apt-get install -y --no-install-recommends \ - tzdata \ - pkg-config \ - python3-dev \ - python3-numpy \ - python3-pillow \ - python3-pip \ - python3-scipy \ - python3-wget \ - supervisor \ - && rm -rf /var/lib/apt/lists/* \ - && python3 -m pip install --upgrade pip setuptools==41.0.0 && pip install -r /requirements.txt \ - && apt-get purge -y \ - python3-dev \ - && apt-get autoremove -y - -RUN apt-get update && apt-get install -y python3-dev && pip3 install torch==1.5 torchvision==0.6.0 openpifpaf - -ENV DEV_ALLOW_ALL_ORIGINS=true -ENV CONFIG_FILE=config-x86.ini - -COPY . 
/repo -WORKDIR /repo - -HEALTHCHECK --interval=30s --retries=2 --start-period=15s CMD bash healthcheck.bash -CMD bash /repo/run_historical_metrics.sh From 1cdf65b32f2467013fb131f9cdfd761e60e894dc Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Mon, 20 Dec 2021 09:46:46 -0300 Subject: [PATCH 05/13] Added whitespace --- start-x86-historical.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/start-x86-historical.sh b/start-x86-historical.sh index aa9cd559..0e811bcb 100755 --- a/start-x86-historical.sh +++ b/start-x86-historical.sh @@ -1,2 +1,2 @@ #!/bin/bash -docker run -it -p 8300:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh \ No newline at end of file +docker run -it -p 8300:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh From 5b169aa23bb9b0524a05223507921b4c17bb86b2 Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Mon, 20 Dec 2021 10:15:27 -0300 Subject: [PATCH 06/13] Removed unused bash file, removed unused config, removed comments on supervisord conf --- config-x86-openvino.ini | 130 ++++++++++++++------------------------ config-x86.ini | 77 +++++++++++++++------- run_historical_metrics.sh | 2 +- start-x86-historical.sh | 2 - supervisord.conf | 40 ++++++------ 5 files changed, 123 insertions(+), 128 deletions(-) delete mode 100755 start-x86-historical.sh diff --git a/config-x86-openvino.ini b/config-x86-openvino.ini index 2f67f49d..095e6183 100644 --- a/config-x86-openvino.ini +++ b/config-x86-openvino.ini @@ -3,8 +3,8 @@ Host = 0.0.0.0 Port = 8000 UseAuthToken = False SSLEnabled = False -SSLCertificateFile = -SSLKeyFile = +SSLCertificateFile = +SSLKeyFile = [CORE] Host = 0.0.0.0 @@ -13,47 +13,72 @@ QueueAuthKey = shibalba [App] HistoricalDataMode = False -HasBeenConfigured = True +HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast MaxProcesses = 2 -DashboardURL = https://hybrid.sta.lanthorn.ai/ -DashboardAuthorizationToken = Bearer +; WIP https://github.com/neuralet/neuralet/issues/91 +;Encoder: videoconvert ! vaapih264enc +DashboardURL = https://app.lanthorn.ai/ +DashboardAuthorizationToken = SlackChannel = lanthorn-notifications +; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = +GlobalReportingEmails = GlobalReportTime = 06:00 DailyGlobalReport = False WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False -LogPerformanceMetricsDirectory = +LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = False +ProcessAreas = True [Area_0] -Id = 1 +Id = 0 Name = Kitchen Cameras = 0 NotifyEveryMinutes = 0 -Emails = +Emails = EnableSlackNotifications = False OccupancyThreshold = 300 ViolationThreshold = 60 DailyReport = False DailyReportTime = 06:00 +[Source_0] +VideoPath = /repo/data/softbio_vid.mp4 +Tags = kitchen +Name = Garden-Camera +Id = 0 +Emails = +EnableSlackNotifications = False +NotifyEveryMinutes = 0 +ViolationThreshold = 60 +; Distance measurement method: +; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. 
+; - CenterPointsDistance: compare center of pedestrian boxes together +; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. +; - If left empty the DefaultDistMethod will be employed +DistMethod = +DailyReport = False +DailyReportTime = 06:00 +LiveFeedEnabled = True + [Detector] +; Supported devices: Jetson , EdgeTPU, Dummy, x86 Device = x86 Name = openvino +;ImageSize should be 3 numbers seperated by commas, no spaces: 300,300,3 ImageSize = 300,300,3 -ModelPath = +ModelPath = ClassID = 1 MinScore = 0.25 [Tracker] Name = IOUTracker +; Number of times tracker was lost while tracking MaxLost = 5 TrackerIOUThreshold = 0.5 @@ -64,6 +89,10 @@ Enabled = True [SourcePostProcessor_1] Name = social_distance +; Default distance measurement method (used when no DistMethod is specified for the source): +; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. +; - CenterPointsDistance: compare center of pedestrian boxes together +; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. DefaultDistMethod = CenterPointsDistance DistThreshold = 150 Enabled = True @@ -78,6 +107,7 @@ Enabled = True [SourceLogger_1] Name = s3_logger +; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 0 ScreenshotS3Bucket = my-screenshot-bucket Enabled = False @@ -86,14 +116,15 @@ Enabled = False Name = file_system_logger TimeInterval = 0.5 LogDirectory = /repo/data/processor/static/data/sources +; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 5 ScreenshotsDirectory = /repo/data/processor/static/screenshots Enabled = True [SourceLogger_3] Name = web_hook_logger -Endpoint = https://hybrid.sta.lanthorn.ai/api/processor_logger/raw_data -Authorization = Bearer +Endpoint = +Authorization = TimeInterval = 0.5 Enabled = False SendingInterval = 5 @@ -103,82 +134,17 @@ Name = file_system_logger LogDirectory = /repo/data/processor/static/data/areas Enabled = True +; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics Enabled = True +; Expressed in minutes LiveInterval = 10 +; Enable the PeriodicTask_1 if you want to backup your files in S3 [PeriodicTask_1] Name = s3_backup Enabled = False +; Expressed in minutes BackupInterval = 30 BackupS3Bucket = your-s3-bucket - -[Source_0] -ViolationThreshold = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -DailyReport = False -DailyReportTime = 06:00 -Id = 0 -Name = cam_1 -VideoPath = /repo/data/historical_data -Tags = -DistMethod = -LiveFeedEnabled = True -HasBeenCalibrated = False -HasDefinedRoi = False -HasInOutBorder = False - -[Source_1] -ViolationThreshold = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -DailyReport = False -DailyReportTime = 06:00 -Id = 1 -Name = spaces rule -VideoPath = http://149.43.156.105/mjpg/video.mjpg -Tags = -DistMethod = -LiveFeedEnabled = True -HasBeenCalibrated = False -HasDefinedRoi = False -HasInOutBorder = False - -[Source_2] -ViolationThreshold = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -DailyReport = False -DailyReportTime = 06:00 -Id = 2 -Name = another_cam -VideoPath = http://cam1.infolink.ru/mjpg/video.mjpg -Tags = -DistMethod = CalibratedDistance -LiveFeedEnabled = True 
-HasBeenCalibrated = False -HasDefinedRoi = False -HasInOutBorder = False - -[Source_3] -ViolationThreshold = 0 -NotifyEveryMinutes = 0 -Emails = -EnableSlackNotifications = False -DailyReport = False -DailyReportTime = 06:00 -Id = 3 -Name = blablabla -VideoPath = http://webcam4.mmto.arizona.edu/mjpg/video.mjpg -Tags = -DistMethod = CalibratedDistance -LiveFeedEnabled = True -HasBeenCalibrated = False -HasDefinedRoi = False -HasInOutBorder = False - diff --git a/config-x86.ini b/config-x86.ini index 65741f7a..bee14477 100644 --- a/config-x86.ini +++ b/config-x86.ini @@ -3,8 +3,8 @@ Host = 0.0.0.0 Port = 8000 UseAuthToken = False SSLEnabled = False -SSLCertificateFile = -SSLKeyFile = +SSLCertificateFile = +SSLKeyFile = [CORE] Host = 0.0.0.0 @@ -12,33 +12,36 @@ QueuePort = 8010 QueueAuthKey = shibalba [App] -HistoricalDataMode = True -HasBeenConfigured = True +HistoricalDataMode = False +HasBeenConfigured = False Resolution = 640,480 Encoder = videoconvert ! video/x-raw,format=I420 ! x264enc speed-preset=ultrafast MaxProcesses = 1 -DashboardURL = http://192.168.1.8:8000/ -DashboardAuthorizationToken = Bearer +; WIP https://github.com/neuralet/neuralet/issues/91 +;Encoder: videoconvert ! vaapih264enc +DashboardURL = https://app.lanthorn.ai/ +DashboardAuthorizationToken = EnableSlackNotifications = no SlackChannel = lanthorn-notifications +; OccupancyAlertsMinInterval time is measured in seconds (if interval < 0 then no occupancy alerts are triggered) OccupancyAlertsMinInterval = 180 MaxThreadRestarts = 5 -GlobalReportingEmails = +GlobalReportingEmails = GlobalReportTime = 06:00 DailyGlobalReport = False WeeklyGlobalReport = False HeatmapResolution = 150,150 LogPerformanceMetrics = False -LogPerformanceMetricsDirectory = +LogPerformanceMetricsDirectory = /repo/data/processor/static/data/performace-metrics EntityConfigDirectory = /repo/data/processor/config -ProcessAreas = False +ProcessAreas = True [Area_0] Id = 0 Name = Kitchen Cameras = 0 NotifyEveryMinutes = 0 -Emails = +Emails = EnableSlackNotifications = False OccupancyThreshold = 300 ViolationThreshold = 60 @@ -46,32 +49,51 @@ DailyReport = False DailyReportTime = 06:00 [Source_0] -VideoPath = /repo/data/historical_data/videos/camera15 -Tags = +VideoPath = /repo/data/softbio_vid.mp4 +Tags = kitchen Name = Garden-Camera Id = 0 -Emails = +Emails = EnableSlackNotifications = False NotifyEveryMinutes = 0 -ViolationThreshold = 0 -DistMethod = +ViolationThreshold = 60 +; Distance measurement method: +; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. +; - CenterPointsDistance: compare center of pedestrian boxes together +; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. 
+; - If left empty the DefaultDistMethod will be employed +DistMethod = DailyReport = False DailyReportTime = 06:00 LiveFeedEnabled = True -HasBeenCalibrated = False -HasDefinedRoi = False -HasInOutBorder = False [Detector] +; Supported devices: Jetson , EdgeTPU, Dummy, x86 Device = x86 +; Supported models: mobilenet_ssd_v2, openpifpaf, and yolov3 Name = yolov3 +; ImageSize should be 3 numbers seperated by commas, no spaces: 300,300,3 (for better accuracy use higher resolution when +; using openpifpaf (openpifpaf detects both faces and pedestrians) +; For yolov3 model the ImageSize MUST be w = h = 32x e.g: x= 13=> ImageSize = 416,416,3 +; Set the ImageSize to 1281,721,3 for openpifpaf ImageSize = 416,416,3 -ModelPath = +ModelPath = ClassID = 1 MinScore = 0.25 +; Uncomment this if you want facemask detection. Only works for openpifpaf. +; [Classifier] +; Device = x86 +; Name = OFMClassifier +; ModelPath = +; ImageSize = 45,45,3 +; MinScore = 0.15 +; MinImageSize = + + [Tracker] Name = IOUTracker +; Number of times tracker was lost while tracking MaxLost = 5 TrackerIOUThreshold = 0.5 @@ -82,6 +104,10 @@ Enabled = True [SourcePostProcessor_1] Name = social_distance +; Default distance measurement method (used when no DistMethod is specified for the source): +; - CalibratedDistance: calculate the distance with 3-d transformed points, note that by choosing this method you should specify the inverse calibration matrix of your environment. +; - CenterPointsDistance: compare center of pedestrian boxes together +; - FourCornerPointsDistance: compare four corresponding points of pedestrian boxes and get the minimum of them. DefaultDistMethod = CenterPointsDistance DistThreshold = 150 Enabled = True @@ -96,6 +122,7 @@ Enabled = True [SourceLogger_1] Name = s3_logger +; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 0 ScreenshotS3Bucket = my-screenshot-bucket Enabled = False @@ -103,15 +130,16 @@ Enabled = False [SourceLogger_2] Name = file_system_logger TimeInterval = 0.5 -LogDirectory = /repo/data/historical_data/sources +LogDirectory = /repo/data/processor/static/data/sources +; Screenshot time is measured in minutes (if period <= 0 then no screenshots are uploaded) ScreenshotPeriod = 5 ScreenshotsDirectory = /repo/data/processor/static/screenshots Enabled = True [SourceLogger_3] Name = web_hook_logger -Endpoint = -Authorization = Bearer +Endpoint = +Authorization = TimeInterval = 0.5 Enabled = False SendingInterval = 5 @@ -121,14 +149,17 @@ Name = file_system_logger LogDirectory = /repo/data/processor/static/data/areas Enabled = True +; Enable the PeriodicTask_0 if you want to generate metrics [PeriodicTask_0] Name = metrics Enabled = True +; Expressed in minutes LiveInterval = 10 +; Enable the PeriodicTask_1 if you want to backup your files in S3 [PeriodicTask_1] Name = s3_backup Enabled = False +; Expressed in minutes BackupInterval = 30 BackupS3Bucket = your-s3-bucket - diff --git a/run_historical_metrics.sh b/run_historical_metrics.sh index 3ed80b0a..dc5cce2e 100755 --- a/run_historical_metrics.sh +++ b/run_historical_metrics.sh @@ -1,2 +1,2 @@ #!/bin/bash -python3 run_processor_core.py --config $CONFIG_FILE \ No newline at end of file +python3 run_processor_core.py --config $CONFIG_FILE diff --git a/start-x86-historical.sh b/start-x86-historical.sh deleted file mode 100755 index 0e811bcb..00000000 --- a/start-x86-historical.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -docker run -it -p 8300:8000 -v "$PWD":/repo -e 
TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh diff --git a/supervisord.conf b/supervisord.conf index d97035bb..a9ab549d 100644 --- a/supervisord.conf +++ b/supervisord.conf @@ -10,13 +10,13 @@ supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] serverurl=unix:///var/run/supervisor.sock -# [program:api] -# command=python3 run_processor_api.py --config %(ENV_CONFIG_FILE)s -# stdout_logfile=/dev/stdout -# stdout_logfile_maxbytes=0 -# stderr_logfile=/dev/stderr -# stderr_logfile_maxbytes=0 -# autorestart=true +[program:api] +command=python3 run_processor_api.py --config %(ENV_CONFIG_FILE)s +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +autorestart=true [program:core] command=python3 run_processor_core.py --config %(ENV_CONFIG_FILE)s @@ -26,17 +26,17 @@ stderr_logfile=/dev/stderr stderr_logfile_maxbytes=0 autorestart=true -# [program:startup] -# command=/repo/sample_startup.bash %(ENV_CONFIG_FILE)s -# stdout_logfile=/dev/stdout -# stdout_logfile_maxbytes=0 -# stderr_logfile=/dev/stderr -# stderr_logfile_maxbytes=0 +[program:startup] +command=/repo/sample_startup.bash %(ENV_CONFIG_FILE)s +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 -# [program:periodic_task] -# command=python3 run_periodic_task.py --config %(ENV_CONFIG_FILE)s -# stdout_logfile=/dev/stdout -# stdout_logfile_maxbytes=0 -# stderr_logfile=/dev/stderr -# stderr_logfile_maxbytes=0 -# autorestart=true +[program:periodic_task] +command=python3 run_periodic_task.py --config %(ENV_CONFIG_FILE)s +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +autorestart=true From 2da15299196068499363581cd09ebe24d4c15b3b Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Mon, 20 Dec 2021 10:18:14 -0300 Subject: [PATCH 07/13] Restored deleted log message --- libs/processor_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/processor_core.py b/libs/processor_core.py index 560cb895..95c369ca 100644 --- a/libs/processor_core.py +++ b/libs/processor_core.py @@ -80,7 +80,7 @@ def _serve(self): logger.info("Starting historical data processing") self.start_processing_historical_data() else: - logger.info("Starting process") + logger.info("Core is listening for commands ... ") while True: try: cmd_code = self._cmd_queue.get(timeout=10) From 29c2c173a0e8dd4fa1d9316b9d7c972d201b3bd8 Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Wed, 29 Dec 2021 12:27:09 -0300 Subject: [PATCH 08/13] Added section for running historical data, fixed other link --- README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cd15ce95..d50cd76d 100644 --- a/README.md +++ b/README.md @@ -434,7 +434,7 @@ docker-compose -f docker-compose.yml -f docker-compose-x86-openvino.yml up ### Optional Parameters This is a list of optional parameters for the `docker run` commands. -They are included in the examples of the [Run the processor](#run-the-processor) section. +They are included in the examples of the [Run the processor](#running-the-processor) section. #### Logging in the system's timezone @@ -450,6 +450,16 @@ If you are running the processor directly from the Docker Hub repository, rememb We recommend adding the projects folder as a mounted volume (`-v "$PWD":/repo`) if you are building the docker image. 
If you are using the already built one we recommend creating a directory named `data` and mount it (`-v $PWD/data:/repo/data`). +#### Processing historical data + +If you'd like to process historical data (videos stored on the device instead of a stream), you must follow two steps: +- Enable the `HistoricalDataMode` parameter on the device's `config-*.ini` file (see [Change the default configuration](#change-the-default-configuration)) +- Run the `/repo/run_historical_metrics.sh` script on the `docker run` command. + +Example using `x86`: + + docker run -it -p HOST_PORT:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh + ### Configuring AWS credentials Some of the implemented features allow you to upload files into an S3 bucket. To do that you need to provide the envs `AWS_BUCKET_REGION`, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. An easy way to do that is to create a `.env` file (following the template `.env.example`) and pass the flag ```--env-file .env ``` when you run the processor. @@ -543,6 +553,7 @@ If you choose this option, make sure to mount the config file as a volume to kee All the configurations are grouped in *sections* and some of them can vary depending on the chosen device. - `[App]` + - `HistoricalDataMode`: A boolean parameter that determines wheter to process historical data instead of a video stream. - `HasBeenConfigured`: A boolean parameter that states whether the *config.ini* was set up or not. - `Resolution`: Specifies the image resolution that the whole processor will use. If you are using a single camera we recommend using that resolution. - `Encoder`: Specifies the video encoder used by the processing pipeline. From 06bbc6df3289f6c04317090ae40eae74821637a5 Mon Sep 17 00:00:00 2001 From: Luca Benvenuto Date: Thu, 30 Dec 2021 10:30:14 -0300 Subject: [PATCH 09/13] Changed README example to bash code block --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d50cd76d..bb9fcd22 100644 --- a/README.md +++ b/README.md @@ -457,9 +457,9 @@ If you'd like to process historical data (videos stored on the device instead of - Run the `/repo/run_historical_metrics.sh` script on the `docker run` command. Example using `x86`: - - docker run -it -p HOST_PORT:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh - +```bash +docker run -it -p HOST_PORT:8000 -v "$PWD":/repo -e TZ=`./timezone.sh` neuralet/smart-social-distancing:latest-x86_64 /repo/run_historical_metrics.sh +``` ### Configuring AWS credentials Some of the implemented features allow you to upload files into an S3 bucket. To do that you need to provide the envs `AWS_BUCKET_REGION`, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. An easy way to do that is to create a `.env` file (following the template `.env.example`) and pass the flag ```--env-file .env ``` when you run the processor. 
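
Taken together, the README steps above and the core changes from [PATCH 03/13] give the processor a one-shot mode: when `HistoricalDataMode` is enabled, `ProcessorCore._serve()` skips the command-queue loop, hands every configured source to `run_video_processing(..., historical_data_mode=True)`, and the run ends once the stored videos are exhausted. The sketch below condenses that control flow for readers of this series; it is illustrative only and assumes the `ConfigEngine` helpers already used in the diffs (`get_boolean`, `get_video_sources`) and the `run_video_processing` signature introduced in [PATCH 03/13].

```python
# Illustrative sketch of the historical-data path added by this series
# (not part of the patches). `config` is a ConfigEngine instance, as built
# in run_processor_core.py.
from libs.engine_threading import run_video_processing

def process_historical_data(config) -> bool:
    if not config.get_boolean("App", "HistoricalDataMode"):
        return False  # live mode: ProcessorCore keeps listening on the command queue
    # In historical mode each [Source_N] VideoPath is expected to point at a
    # directory of stored videos (e.g. /repo/data/historical_data/videos/<camera>);
    # EngineThread stops instead of restarting once such a directory is exhausted.
    sources = config.get_video_sources()
    # pipe=None: no stop signal is awaited, the call returns when all sources finish.
    run_video_processing(config, None, sources, historical_data_mode=True)
    return True
```

In live mode (`HistoricalDataMode = False`) nothing changes for the caller: `_serve()` keeps polling the command queue and running the scheduled tasks, as before this series.
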
From 08a2a2f6c2308e058142a650c21fefd3abfc4b28 Mon Sep 17 00:00:00 2001 From: LazaroM2019 Date: Thu, 30 Dec 2021 17:08:40 -0300 Subject: [PATCH 10/13] Adding processor's automatic cameras setting --- run_historical_conf.py | 56 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 run_historical_conf.py diff --git a/run_historical_conf.py b/run_historical_conf.py new file mode 100644 index 00000000..9fc17822 --- /dev/null +++ b/run_historical_conf.py @@ -0,0 +1,56 @@ +from configparser import ConfigParser, RawConfigParser +import io +import os + + +def new_camera(camera, camera_index, config_file): + add_config = f""" + [Source_{camera_index}] + VideoPath = /repo/data/historical_data/videos/{camera} + Tags = tags_{camera} + Name = name_{camera} + Id = {camera_index} + Emails = + EnableSlackNotifications = False + NotifyEveryMinutes = 0 + ViolationThreshold = 60 + DistMethod = + DailyReport = False + DailyReportTime = 06:00 + LiveFeedEnabled = True + """ + new_config = RawConfigParser(allow_no_value=True) + new_config.readfp(io.StringIO(add_config)) + + with open(config_file, 'a') as add_configfile: + new_config.write(add_configfile) + add_configfile.close() + + +if __name__ == '__main__': + config = RawConfigParser(allow_no_value=True) + config.read('config-x86.ini') + list_cameras = os.listdir('data/historical_data/') + if len(list_cameras) > 1: + for i, camera in enumerate(list_cameras): + if i == 0: + config.set(f'Source_{0}', 'VideoPath', f'/repo/data/historical_data/videos/{camera}') + + with open('config-x86.ini', 'w') as configfile: + config.write(configfile) + configfile.close() + elif i > 0: + new_camera(camera, i, 'config-x86.ini') + elif len(list_cameras) == 1: + if 'Source_0' in config.sections(): + config.set('Source_0', 'VideoPath', f'/repo/data/historical_data/videos/{list_cameras[0]}') + + print(config.get('Source_0', 'VideoPath')) + + with open('config-x86.ini', 'w') as edit_configfile: + config.write(edit_configfile) + edit_configfile.close() + else: + new_camera(list_cameras[0], 0, 'config-x86.ini') + + \ No newline at end of file From 6510c0d4586bae6fa507bc5a265be8ae4ec0f730 Mon Sep 17 00:00:00 2001 From: LazaroM2019 Date: Thu, 30 Dec 2021 19:24:20 -0300 Subject: [PATCH 11/13] Adding processor's automatical setting --- run_historical_conf.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/run_historical_conf.py b/run_historical_conf.py index 9fc17822..769283b5 100644 --- a/run_historical_conf.py +++ b/run_historical_conf.py @@ -26,31 +26,31 @@ def new_camera(camera, camera_index, config_file): new_config.write(add_configfile) add_configfile.close() +def edit_camera(camera, camera_index, config_file): + config.set(f'Source_{camera_index}', 'VideoPath', f'/repo/data/historical_data/videos/{camera}') + config.set(f'Source_{camera_index}', 'Tags', f'tags_{camera}') + config.set(f'Source_{camera_index}', 'Name', f'name_{camera}') + + with open(config_file, 'w') as configfile: + config.write(configfile) + configfile.close() + if __name__ == '__main__': config = RawConfigParser(allow_no_value=True) - config.read('config-x86.ini') + ini_file = 'config-x86.ini' + config.read(ini_file) list_cameras = os.listdir('data/historical_data/') if len(list_cameras) > 1: for i, camera in enumerate(list_cameras): if i == 0: - config.set(f'Source_{0}', 'VideoPath', f'/repo/data/historical_data/videos/{camera}') - - with open('config-x86.ini', 'w') as configfile: - config.write(configfile) - 
configfile.close() + edit_camera(camera, i, ini_file) elif i > 0: - new_camera(camera, i, 'config-x86.ini') + new_camera(camera, i, ini_file) elif len(list_cameras) == 1: if 'Source_0' in config.sections(): - config.set('Source_0', 'VideoPath', f'/repo/data/historical_data/videos/{list_cameras[0]}') - - print(config.get('Source_0', 'VideoPath')) - - with open('config-x86.ini', 'w') as edit_configfile: - config.write(edit_configfile) - edit_configfile.close() + edit_camera(list_cameras[0], 0, ini_file) else: - new_camera(list_cameras[0], 0, 'config-x86.ini') + new_camera(list_cameras[0], 0, ini_file) \ No newline at end of file From b665b2c844badcf11ebaf1a8c866a3bd63c2a1d6 Mon Sep 17 00:00:00 2001 From: LazaroM2019 Date: Mon, 3 Jan 2022 14:38:23 -0300 Subject: [PATCH 12/13] refactoring issues found PR 199 --- run_historical_conf.py | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/run_historical_conf.py b/run_historical_conf.py index 769283b5..02f5636b 100644 --- a/run_historical_conf.py +++ b/run_historical_conf.py @@ -1,4 +1,5 @@ -from configparser import ConfigParser, RawConfigParser +from configparser import RawConfigParser +import argparse import io import os @@ -26,31 +27,29 @@ def new_camera(camera, camera_index, config_file): new_config.write(add_configfile) add_configfile.close() -def edit_camera(camera, camera_index, config_file): - config.set(f'Source_{camera_index}', 'VideoPath', f'/repo/data/historical_data/videos/{camera}') - config.set(f'Source_{camera_index}', 'Tags', f'tags_{camera}') - config.set(f'Source_{camera_index}', 'Name', f'name_{camera}') +def delete_camera(config_file): + config.read(config_file) + for section in config.sections(): + if 'Source_' in section: + config.remove_section(section) - with open(config_file, 'w') as configfile: - config.write(configfile) - configfile.close() + with open(config_file, 'w') as del_configfile: + config.write(del_configfile) + del_configfile.close() if __name__ == '__main__': + parser = argparse.ArgumentParser(description="Processor's automatic setting.") + parser.add_argument('--ini_file', type=str, default='config-x86.ini') + parser.add_argument('--data_hist', type=str, default='data/historical_data/') + args = parser.parse_args() config = RawConfigParser(allow_no_value=True) - ini_file = 'config-x86.ini' + ini_file = args.ini_file config.read(ini_file) - list_cameras = os.listdir('data/historical_data/') - if len(list_cameras) > 1: - for i, camera in enumerate(list_cameras): - if i == 0: - edit_camera(camera, i, ini_file) - elif i > 0: - new_camera(camera, i, ini_file) - elif len(list_cameras) == 1: - if 'Source_0' in config.sections(): - edit_camera(list_cameras[0], 0, ini_file) - else: - new_camera(list_cameras[0], 0, ini_file) + list_cameras = os.listdir(args.data_hist) + delete_camera(ini_file) + for i, camera in enumerate(list_cameras): + new_camera(camera, i, ini_file) + \ No newline at end of file From bcca1f8ff8c9c5d8ae8e735d5361c7461961da67 Mon Sep 17 00:00:00 2001 From: LazaroM2019 Date: Mon, 3 Jan 2022 15:37:32 -0300 Subject: [PATCH 13/13] Adding delete sections funcion and refactoring adding camera sections --- run_historical_conf.py | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/run_historical_conf.py b/run_historical_conf.py index 769283b5..043ec63d 100644 --- a/run_historical_conf.py +++ b/run_historical_conf.py @@ -1,4 +1,5 @@ -from configparser import ConfigParser, RawConfigParser +from 
configparser import RawConfigParser +import argparse import io import os @@ -26,31 +27,26 @@ def new_camera(camera, camera_index, config_file): new_config.write(add_configfile) add_configfile.close() -def edit_camera(camera, camera_index, config_file): - config.set(f'Source_{camera_index}', 'VideoPath', f'/repo/data/historical_data/videos/{camera}') - config.set(f'Source_{camera_index}', 'Tags', f'tags_{camera}') - config.set(f'Source_{camera_index}', 'Name', f'name_{camera}') +def delete_camera(config_file): + config.read(config_file) + for section in config.sections(): + if 'Source_' in section: + config.remove_section(section) - with open(config_file, 'w') as configfile: - config.write(configfile) - configfile.close() + with open(config_file, 'w') as del_configfile: + config.write(del_configfile) + del_configfile.close() if __name__ == '__main__': + parser = argparse.ArgumentParser(description="Processor's automatic setting.") + parser.add_argument('--ini_file', type=str, default='config-x86.ini') + parser.add_argument('--data_hist', type=str, default='data/historical_data/') + args = parser.parse_args() config = RawConfigParser(allow_no_value=True) - ini_file = 'config-x86.ini' + ini_file = args.ini_file config.read(ini_file) - list_cameras = os.listdir('data/historical_data/') - if len(list_cameras) > 1: - for i, camera in enumerate(list_cameras): - if i == 0: - edit_camera(camera, i, ini_file) - elif i > 0: - new_camera(camera, i, ini_file) - elif len(list_cameras) == 1: - if 'Source_0' in config.sections(): - edit_camera(list_cameras[0], 0, ini_file) - else: - new_camera(list_cameras[0], 0, ini_file) - - \ No newline at end of file + list_cameras = os.listdir(args.data_hist) + delete_camera(ini_file) + for i, camera in enumerate(list_cameras): + new_camera(camera, i, ini_file)
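
For reference, once the final version of `run_historical_conf.py` above has been run (e.g. `python3 run_historical_conf.py`, optionally with `--ini_file` / `--data_hist`), every directory entry found under `data/historical_data/` becomes its own `[Source_N]` section whose `VideoPath` points below `/repo/data/historical_data/videos/`, replacing any existing `Source_` sections. The snippet below is an illustrative read-back check of that result, not part of the patch series; the camera folder name in the comments (`camera15`) is just an example taken from the earlier config diff.

```python
# Illustrative check (not part of the patches): inspect the [Source_N] sections
# that run_historical_conf.py wrote into config-x86.ini before starting the
# processor with run_historical_metrics.sh.
from configparser import RawConfigParser

config = RawConfigParser(allow_no_value=True)
config.read("config-x86.ini")

for section in config.sections():
    if section.startswith("Source_"):
        print(section,
              config.get(section, "Name"),       # e.g. name_camera15
              config.get(section, "VideoPath"))  # e.g. /repo/data/historical_data/videos/camera15
```
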