diff --git a/k8s/configmap.yaml b/k8s/configmap.yaml
new file mode 100644
index 0000000..408d58c
--- /dev/null
+++ b/k8s/configmap.yaml
@@ -0,0 +1,334 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: deepstream
+  namespace: deepstream
+data:
+  rtsp_setup.sh: |
+    #!/bin/bash
+
+    cd /opt/nvidia/deepstream/deepstream-6.4
+    git clone https://github.com/marcoslucianops/DeepStream-Yolo
+    cd /opt/nvidia/deepstream/deepstream-6.4/DeepStream-Yolo
+    CUDA_VER=12.2 make -C nvdsinfer_custom_impl_Yolo
+
+    echo -n "--------------FINISHED--------------"
+
+  rtsp_config.txt: |
+    [application]
+    enable-perf-measurement=1
+    perf-measurement-interval-sec=5
+
+    [tiled-display]
+    enable=1
+    rows=1
+    columns=1
+    width=1280
+    height=720
+    gpu-id=0
+    nvbuf-memory-type=0
+
+    [source0]
+    enable=1
+    type=3
+    uri=http://89.106.109.144:12060/mjpg/video.mjpg
+    num-sources=1
+    gpu-id=0
+    cudadec-memtype=0
+
+    [sink0]
+    enable=0
+    type=2
+    sync=0
+    gpu-id=0
+    nvbuf-memory-type=0
+
+    [sink1]
+    enable=1
+    type=4
+    rtsp-port=8553
+    sync=0
+    source-id=0
+
+    [osd]
+    enable=1
+    gpu-id=0
+    border-width=5
+    text-size=15
+    text-color=1;1;1;1;
+    text-bg-color=0.3;0.3;0.3;1
+    font=Serif
+    show-clock=0
+    clock-x-offset=800
+    clock-y-offset=820
+    clock-text-size=12
+    clock-color=1;0;0;0
+    nvbuf-memory-type=0
+
+    [streammux]
+    gpu-id=0
+    live-source=0
+    batch-size=1
+    batched-push-timeout=40000
+    width=1920
+    height=1080
+    enable-padding=0
+    nvbuf-memory-type=0
+
+    # [primary-gie]
+    # enable=1
+    # gpu-id=0
+    # gie-unique-id=1
+    # nvbuf-memory-type=0
+    # config-file=config_infer_primary.txt
+
+    [tests]
+    file-loop=0
+
+  deepstream_setup.sh: |
+    #!/bin/bash
+
+    # from deepstream-test5
+    cd /opt/nvidia/deepstream/deepstream-6.4
+    apt-get install -y \
+        libgstreamer-plugins-base1.0-dev \
+        libgstreamer1.0-dev \
+        libgstrtspserver-1.0-dev \
+        libx11-dev \
+        libjson-glib-dev \
+        libyaml-cpp-dev
+
+    # from /opt/nvidia/deepstream/deepstream/sources/libs/kafka_protocol_adaptor
+    cd /opt/nvidia/deepstream/deepstream-6.4
+    git clone https://github.com/confluentinc/librdkafka.git
+    cd librdkafka
+    git checkout tags/v2.2.0
+    # --enable-ssl
+    ./configure
+    make
+    make install
+    cp /usr/local/lib/librdkafka* /opt/nvidia/deepstream/deepstream/lib/
+    ldconfig
+
+    apt-get install -y \
+        libglib2.0 \
+        libglib2.0-dev \
+        libjansson4 \
+        libjansson-dev \
+        libssl-dev \
+        protobuf-compiler
+
+    # from deepstream-test5
+    cd /opt/nvidia/deepstream/deepstream-6.4/sources/apps/sample_apps/deepstream-test5
+    export CUDA_VER=12.2
+    make
+
+    cd /opt/nvidia/deepstream/deepstream-6.4
+    git clone https://github.com/WongKinYiu/yolov7.git
+    git clone https://github.com/marcoslucianops/DeepStream-Yolo
+    cd yolov7
+    pip3 install -r requirements.txt
+    pip3 install onnx onnxsim onnxruntime
+    cp ../DeepStream-Yolo/utils/export_yoloV7.py .
+    wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
+    python3 export_yoloV7.py -w yolov7.pt --dynamic
+    cp labels.txt ../DeepStream-Yolo
+    cp yolov7.onnx ../DeepStream-Yolo
+    cd /opt/nvidia/deepstream/deepstream-6.4/DeepStream-Yolo
+    CUDA_VER=12.2 make -C nvdsinfer_custom_impl_Yolo
+
+    cd /opt/nvidia/deepstream/deepstream-6.4/sources/apps/sample_apps/deepstream-test5
+
+    # move artifacts from DeepStream-Yolo repository to deepstream-test5
+    cp /opt/nvidia/deepstream/deepstream-6.4/DeepStream-Yolo/config_infer_primary_yoloV7.txt .
+    cp /opt/nvidia/deepstream/deepstream-6.4/DeepStream-Yolo/labels.txt .
+    cp /opt/nvidia/deepstream/deepstream-6.4/DeepStream-Yolo/yolov7.onnx .
+    cp -r /opt/nvidia/deepstream/deepstream-6.4/DeepStream-Yolo/nvdsinfer_custom_impl_Yolo/ .
+
+    echo -n "--------------FINISHED--------------"
+
+  inference_config.txt: |
+    # file content of rl_test5_dec_infer-resnet_tracker_sgie_tiled_display_int8.txt
+    [application]
+    enable-perf-measurement=1
+    perf-measurement-interval-sec=5
+    #gie-kitti-output-dir=streamscl
+
+    [tiled-display]
+    enable=1
+    rows=1
+    columns=1
+    width=960
+    height=540
+    gpu-id=0
+    nvbuf-memory-type=0
+
+    [source0]
+    enable=1
+    #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP
+    type=4
+    uri=rtsp://3.127.21.251:30007/ds-test
+    num-sources=1
+    gpu-id=0
+    nvbuf-memory-type=0
+    # smart record specific fields, valid only for source type=4
+    # 0 = disable, 1 = through cloud events, 2 = through cloud + local events
+    #smart-record=1
+    # 0 = mp4, 1 = mkv
+    #smart-rec-container=0
+    #smart-rec-file-prefix
+    #smart-rec-dir-path
+    # smart record cache size in seconds
+    #smart-rec-cache=30
+    # default duration of recording in seconds.
+    #smart-rec-default-duration=10
+    # duration of recording in seconds.
+    # this will override default value.
+    #smart-rec-duration
+    # seconds before the current time to start recording.
+    #smart-rec-start-time
+    # value in seconds to dump video stream.
+    #smart-rec-interval
+
+    [sink0]
+    enable=0
+    #Type - 1=FakeSink 2=EglSink 3=File
+    type=2
+    sync=0
+    source-id=0
+    gpu-id=0
+    nvbuf-memory-type=0
+
+    [sink1]
+    enable=1
+    #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
+    type=6
+    msg-conv-config=dstest5_msgconv_sample_config.txt
+    #(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
+    #(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
+    #(2): PAYLOAD_DEEPSTREAM_PROTOBUF - Deepstream schema protobuf encoded payload
+    #(256): PAYLOAD_RESERVED - Reserved type
+    #(257): PAYLOAD_CUSTOM - Custom schema payload
+    msg-conv-payload-type=1
+    #(0): Create payload using NvdsEventMsgMeta
+    #(1): New Api to create payload using NvDsFrameMeta
+    msg-conv-msg2p-new-api=0
+    #Frame interval at which payload is generated
+    msg-conv-frame-interval=30
+    msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
+    #Provide your msg-broker-conn-str here
+    msg-broker-conn-str=3.127.21.251;30352;my-topic
+    topic=my-topic
+    #Optional:
+    #msg-broker-config=/opt/nvidia/deepstream/deepstream/sources/libs/kafka_protocol_adaptor/cfg_kafka.txt
+    #new-api=0
+    #(0) Use message adapter library api's
+    #(1) Use new msgbroker library api's
+
+    [sink2]
+    enable=1
+    type=4
+    rtsp-port=8554
+    sync=0
+    source-id=0
+
+    # sink type = 6 by default creates msg converter + broker.
+    # To use multiple brokers use this group for converter and use
+    # sink type = 6 with disable-msgconv = 1
+    [message-converter]
+    enable=0
+    msg-conv-config=dstest5_msgconv_sample_config.txt
+    #(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
+    #(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
+    #(2): PAYLOAD_DEEPSTREAM_PROTOBUF - Deepstream schema protobuf payload
+    #(256): PAYLOAD_RESERVED - Reserved type
+    #(257): PAYLOAD_CUSTOM - Custom schema payload
+    msg-conv-payload-type=0
+    # Name of library having custom implementation.
+    #msg-conv-msg2p-lib=
+    # Id of component in case only selected message to parse.
+    #msg-conv-comp-id=
+
+    # Configure this group to enable cloud message consumer.
+    [message-consumer0]
+    enable=0
+    proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
+    conn-str=;
+    config-file=/opt/nvidia/deepstream/deepstream/sources/libs/kafka_protocol_adaptor/cfg_kafka.txt
+    subscribe-topic-list=;;
+    # Use this option if message has sensor name as id instead of index (0,1,2 etc.).
+    #sensor-list-file=dstest5_msgconv_sample_config.txt
+
+    [osd]
+    enable=1
+    gpu-id=0
+    border-width=1
+    text-size=15
+    text-color=1;1;1;1;
+    text-bg-color=0.3;0.3;0.3;1
+    font=Arial
+    show-clock=0
+    clock-x-offset=800
+    clock-y-offset=820
+    clock-text-size=12
+    clock-color=1;0;0;0
+    nvbuf-memory-type=0
+
+    [streammux]
+    gpu-id=0
+    ##Boolean property to inform muxer that sources are live
+    live-source=0
+    batch-size=2
+    ##time out in usec, to wait after the first buffer is available
+    ##to push the batch even if the complete batch is not formed
+    batched-push-timeout=40000
+    ## Set muxer output width and height
+    width=1920
+    height=1080
+    ##Enable to maintain aspect ratio wrt source, and allow black borders, works
+    ##along with width, height properties
+    enable-padding=0
+    nvbuf-memory-type=0
+    ## If set to TRUE, system timestamp will be attached as ntp timestamp
+    ## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
+    # attach-sys-ts-as-ntp=1
+
+    # config-file property is mandatory for any gie section.
+    # Other properties are optional and if set will override the properties set in
+    # the infer config file.
+    [primary-gie]
+    enable=1
+    gpu-id=0
+    #Required to display the PGIE labels, should be added even when using config-file
+    #property
+    batch-size=1
+    #Required by the app for OSD, not a plugin property
+    bbox-border-color0=1;0;0;1
+    bbox-border-color1=0;1;1;1
+    bbox-border-color2=0;0;1;1
+    bbox-border-color3=0;1;0;1
+    interval=0
+    #Required by the app for SGIE, when used along with config-file property
+    gie-unique-id=1
+    nvbuf-memory-type=0
+    labelfile-path=/opt/nvidia/deepstream/deepstream-6.4/sources/apps/sample_apps/deepstream-test5/labels.txt
+    config-file=/opt/nvidia/deepstream/deepstream-6.4/sources/apps/sample_apps/deepstream-test5/config_infer_primary_yoloV7.txt
+    #infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/
+
+    [tracker]
+    enable=1
+    # For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
+    tracker-width=960
+    tracker-height=544
+    ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
+    # ll-config-file required to set different tracker types
+    # ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
+    # ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
+    ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
+    # ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
+    # ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
+    gpu-id=0
+    display-tracking-id=1
+
+    [tests]
+    file-loop=0
\ No newline at end of file
diff --git a/k8s/deployment.yaml b/k8s/deployment.yaml
new file mode 100644
index 0000000..0975abe
--- /dev/null
+++ b/k8s/deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: deepstream
+  namespace: deepstream
+  labels:
+    app: deepstream
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: deepstream
+  template:
+    metadata:
+      labels:
+        app: deepstream
+    spec:
+      runtimeClassName: nvidia
+      containers:
+        - name: rtsp
+          image: nvcr.io/nvidia/deepstream:6.4-gc-triton-devel
+          command: ["/bin/bash", "-c", "/etc/config/rtsp_setup.sh && deepstream-app -c /etc/config/rtsp_config.txt"]
+          ports:
+            - containerPort: 8553
+              name: rtsp
+          volumeMounts:
+            - name: config
+              mountPath: /etc/config
+        - name: deepstream
+          image: nvcr.io/nvidia/deepstream:6.4-gc-triton-devel
+          command: ["/bin/bash", "-c", "/etc/config/deepstream_setup.sh && cd /opt/nvidia/deepstream/deepstream-6.4/sources/apps/sample_apps/deepstream-test5 && ./deepstream-test5-app -c config/inference_config.txt -p 0"]
+          ports:
+            - containerPort: 8554
+              name: rtsp-out
+          resources:
+            limits:
+              nvidia.com/gpu: 1
+          volumeMounts:
+            - name: config
+              mountPath: /etc/config
+            - name: inference-config
+              mountPath: /opt/nvidia/deepstream/deepstream-6.4/sources/apps/sample_apps/deepstream-test5/config
+      volumes:
+        - name: config
+          configMap:
+            name: deepstream
+            defaultMode: 0777
+        - name: inference-config
+          configMap:
+            name: deepstream
+            defaultMode: 0777
diff --git a/k8s/service.yaml b/k8s/service.yaml
new file mode 100644
index 0000000..420bec4
--- /dev/null
+++ b/k8s/service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: deepstream
+  namespace: deepstream
+spec:
+  type: NodePort
+  selector:
+    app: deepstream
+  ports:
+    - name: rtsp
+      protocol: TCP
+      port: 8553
+      nodePort: 30007
+    - name: rtsp-out
+      protocol: TCP
+      port: 8554
+      nodePort: 30008
\ No newline at end of file
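
A minimal usage sketch for these manifests, assuming a cluster that already provides the NVIDIA device plugin and the `nvidia` RuntimeClass that the Deployment requests (the namespace name comes from the manifests above; the exact RTSP stream path served on the NodePorts is an assumption based on the /ds-test path used in inference_config.txt):

#!/bin/bash
# Create the namespace referenced by all three manifests, then apply them.
kubectl create namespace deepstream
kubectl apply -f k8s/configmap.yaml -f k8s/deployment.yaml -f k8s/service.yaml

# Wait for the pod; the setup scripts clone and build on first start, so this can take a while.
kubectl -n deepstream rollout status deployment/deepstream

# The re-streamed camera feed is exposed on nodePort 30007 and the detector output on 30008,
# e.g. rtsp://<node-ip>:30007/ds-test (path assumed from the config above).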