diff --git a/.github/workflows/ci_linux.yml b/.github/workflows/ci_linux.yml index 0e6d0c0c1..dd5191e38 100644 --- a/.github/workflows/ci_linux.yml +++ b/.github/workflows/ci_linux.yml @@ -59,12 +59,12 @@ jobs: chmod +x scripts/bash/install_opencv.sh - name: install pip_dependencies run: | - pip install -U pip wheel numpy - pip install -U .[asyncio] + pip install -U pip wheel + pip install "numpy<2.0.0" + pip install -U .[asyncio] six httpx yt_dlp paramiko pip uninstall opencv-python -y - pip install -U flake8 six codecov httpx pytest pytest-asyncio pytest-cov yt_dlp mpegdash paramiko m3u8 async-asgi-testclient + pip install -U flake8 codecov pytest pytest-asyncio pytest-cov mpegdash m3u8 async-asgi-testclient pip install -U deffcode - pip install cryptography==38.0.4 if: success() - name: run prepare_dataset_script run: bash scripts/bash/prepare_dataset.sh diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index de74a6c73..08a20b045 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -47,7 +47,9 @@ jobs: if: success() - name: install_dependencies run: | - pip install -U mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-minify-plugin mkdocs-exclude mike mkdocstrings mkdocstrings-python-legacy + pip install -U mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-minify-plugin + pip install -U mkdocs-exclude mike mkdocstrings mkdocstrings-python-legacy + pip install -U mkdocs-git-authors-plugin pip install jinja2==3.0.* if: success() - name: git configure @@ -65,7 +67,7 @@ jobs: - name: mike deploy docs release run: | echo "${{ env.NAME_RELEASE }}" - mike deploy --push --update-aliases --no-redirect ${{ env.NAME_RELEASE }} ${{ env.RELEASE_NAME }} --title=${{ env.RELEASE_NAME }} + mike deploy --push --update-aliases --alias-type=copy ${{ env.NAME_RELEASE }} ${{ env.RELEASE_NAME }} --title=${{ env.RELEASE_NAME }} env: NAME_RELEASE: "v${{ env.RELEASE_NAME 
}}-release" if: success() @@ -88,7 +90,9 @@ jobs: if: success() - name: install_dependencies run: | - pip install -U mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-minify-plugin mkdocs-exclude mike mkdocstrings mkdocstrings-python-legacy + pip install -U mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-minify-plugin + pip install -U mkdocs-exclude mike mkdocstrings mkdocstrings-python-legacy + pip install -U mkdocs-git-authors-plugin pip install jinja2==3.0.* if: success() - name: git configure @@ -106,7 +110,7 @@ jobs: - name: mike deploy docs stable run: | echo "${{ env.NAME_STABLE }}" - mike deploy --push --update-aliases --no-redirect ${{ env.NAME_STABLE }} latest --title=latest + mike deploy --push --update-aliases --alias-type=copy ${{ env.NAME_STABLE }} latest --title=latest mike set-default --push latest env: NAME_STABLE: "v${{ env.RELEASE_NAME }}-stable" @@ -131,7 +135,9 @@ jobs: if: success() - name: install_dependencies run: | - pip install -U mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-minify-plugin mkdocs-exclude mike mkdocstrings mkdocstrings-python-legacy + pip install -U mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-minify-plugin + pip install -U mkdocs-exclude mike mkdocstrings mkdocstrings-python-legacy + pip install -U mkdocs-git-authors-plugin pip install jinja2==3.0.* if: success() - name: git configure @@ -149,7 +155,7 @@ jobs: - name: mike deploy docs dev run: | echo "Releasing ${{ env.NAME_DEV }}" - mike deploy --push --update-aliases --no-redirect ${{ env.NAME_DEV }} dev --title=dev + mike deploy --push --update-aliases --alias-type=copy ${{ env.NAME_DEV }} dev --title=dev env: NAME_DEV: "v${{ env.RELEASE_NAME }}-dev" if: success() diff --git a/.gitignore b/.gitignore index f08fe54d7..92bb22f08 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ venv Pipfile.lock env3.* env +.cache .coverage coverage.xml .netlify diff --git a/README.md 
b/README.md index c5b8e8b71..051993e66 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ limitations under the License.   -VidGear is a **High-Performance Video Processing Python Library** that provides an easy-to-use, highly extensible, thoroughly optimised **Multi-Threaded + Asyncio API Framework** on top of many state-of-the-art specialized libraries like _[OpenCV][opencv], [FFmpeg][ffmpeg], [ZeroMQ][zmq], [picamera][picamera], [starlette][starlette], [yt_dlp][yt_dlp], [pyscreenshot][pyscreenshot], [dxcam][dxcam], [aiortc][aiortc] and [python-mss][mss]_ serving at its backend, and enable us to flexibly exploit their internal parameters and methods, while silently delivering **robust error-handling and real-time performance 🔥** +VidGear is a **High-Performance Video Processing Python Library** that provides an easy-to-use, highly extensible, thoroughly optimised **Multi-Threaded + Asyncio API Framework** on top of many state-of-the-art specialized libraries like _[OpenCV][opencv], [FFmpeg][ffmpeg], [ZeroMQ][zmq], [picamera2][picamera2], [starlette][starlette], [yt_dlp][yt_dlp], [pyscreenshot][pyscreenshot], [dxcam][dxcam], [aiortc][aiortc] and [python-mss][mss]_ serving at its backend, and enable us to flexibly exploit their internal parameters and methods, while silently delivering **robust error-handling and real-time performance 🔥** VidGear primarily focuses on simplicity, and thereby lets programmers and software developers to easily integrate and perform Complex Video Processing Tasks, in just a few lines of code. @@ -122,7 +122,7 @@ Each API is designed exclusively to handle/control/process different data-specif **A. Video-Capture Gears:** - [**CamGear:**](#camgear) Multi-Threaded API targeting various IP-USB-Cameras/Network-Streams/Streaming-Sites-URLs. -- [**PiGear:**](#pigear) Multi-Threaded API targeting various Raspberry-Pi Camera Modules. 
+- [**PiGear:**](#pigear) Multi-Threaded API targeting various Camera Modules and _(limited)_ USB cameras on Raspberry Pis :grapes:. - [**ScreenGear:**](#screengear) High-performance API targeting rapid Screencasting Capabilities. - [**VideoGear:**](#videogear) Common Video-Capture API with internal [Video Stabilizer](https://abhitronix.github.io/vidgear/latest/gears/stabilizer/overview/) wrapper. @@ -265,27 +265,35 @@ stream_stab.stop() PiGear

-> _PiGear is similar to CamGear but made to support various Raspberry Pi Camera Modules _(such as [OmniVision OV5647 Camera Module][ov5647-picam] and [Sony IMX219 Camera Module][imx219-picam])_._ +> _PiGear is a specialized API similar to the [CamGear API](#camgear) but optimized for **Raspberry Pi :grapes: Boards**, offering comprehensive **support for camera modules** _(e.g., [OmniVision OV5647 Camera Module][ov5647-picam], [Sony IMX219 Camera Module][imx219-picam])_, along with **limited compatibility for USB cameras**._ -PiGear provides a flexible multi-threaded framework around complete [picamera](https://picamera.readthedocs.io/en/release-1.13/index.html) python library, and provide us the ability to exploit almost all of its parameters like `brightness, saturation, sensor_mode, iso, exposure, etc.` effortlessly. Furthermore, PiGear also supports multiple camera modules, such as in the case of Raspberry-Pi Compute Module IO boards. +PiGear implements a seamless and robust wrapper around the [picamera2][picamera2] python library, simplifying integration with minimal code changes and ensuring a smooth transition for developers already familiar with the Picamera2 API. PiGear leverages the `libcamera` API under the hood with multi-threading, providing high-performance :fire:, enhanced control and functionality for Raspberry Pi camera modules. + +PiGear handles common configuration parameters and non-standard settings for various camera types, simplifying the integration process. PiGear currently supports PiCamera2 API parameters such as `sensor`, `controls`, `transform`, and `format` etc., with internal type and sanity checks for robust performance. + +While primarily focused on Raspberry Pi camera modules, PiGear also provides **basic functionality for USB webcams** only with Picamera2 API, along with the ability to accurately differentiate between USB and Raspberry Pi cameras using metadata. 
+ +PiGear seamlessly switches to the legacy [picamera][picamera] library if the `picamera2` library is unavailable, ensuring seamless backward compatibility. For this, PiGear also provides a flexible multi-threaded framework around complete `picamera` API, allowing developers to effortlessly exploit a wide range of parameters, such as `brightness`, `saturation`, `sensor_mode`, `iso`, `exposure`, and more. + +Furthermore, PiGear supports the use of multiple camera modules, including those found on Raspberry Pi Compute Module IO boards and USB cameras _(only with Picamera2 API)_. Best of all, PiGear contains **Threaded Internal Timer** - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur. That means that if you're running PiGear API in your script and someone accidentally pulls the Camera-Module cable out, instead of going into possible kernel panic, API will exit safely to save resources. -**Code to open picamera stream with variable parameters in PiGear API:** +**Code to open picamera2 stream with variable parameters in PiGear API:** ```python # import required libraries from vidgear.gears import PiGear +from libcamera import Transform import cv2 -# add various Picamera tweak parameters to dictionary +# formulate various Picamera2 API +# configurational parameters options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "sensor": {"output_size": (480, 320)}, # will override `resolution` + "format": "RGB888", # 8-bit BGR } # open pi video stream with defined parameters @@ -316,7 +324,6 @@ cv2.destroyAllWindows() # safely close video stream stream.stop() - ``` ### PiGear API Guide: @@ -420,21 +427,21 @@ In addition to this, WriteGear also provides flexible access to [**OpenCV's Vide NetGear API

-> _StreamGear automates transcoding workflow for generating Ultra-Low Latency, High-Quality, Dynamic & Adaptive Streaming Formats (such as MPEG-DASH and Apple HLS) in just few lines of python code._ +> _StreamGear streamlines and simplifies the transcoding workflow to generate Ultra-Low Latency, High-Quality, Dynamic & Adaptive Streaming Formats like MPEG-DASH and Apple HLS with just a few lines of Python code, allowing developers to focus on their application logic rather than dealing with the complexities of transcoding and chunking media files._ -StreamGear provides a standalone, highly extensible, and flexible wrapper around [**FFmpeg**][ffmpeg] multimedia framework for generating chunked-encoded media segments of the content. +StreamGear API provides a standalone, highly extensible, and flexible wrapper around the [**FFmpeg**](https://ffmpeg.org/) multimedia framework for generating chunk-encoded media segments from your multimedia content effortlessly. -SteamGear is an out-of-the-box solution for transcoding source videos/audio files & real-time video frames and breaking them into a sequence of multiple smaller chunks/segments of suitable lengths. These segments make it possible to stream videos at different quality levels _(different bitrates or spatial resolutions)_ and can be switched in the middle of a video from one quality level to another – if bandwidth permits – on a per-segment basis. A user can serve these segments on a web server that makes it easier to download them through HTTP standard-compliant GET requests. +With StreamGear, you can transcode source video/audio files and real-time video frames into a sequence of multiple smaller chunks/segments of suitable lengths. These segments facilitate streaming at different quality levels _(bitrates or spatial resolutions)_ and allow for seamless switching between quality levels during playback based on available bandwidth. 
You can serve these segments on a web server, making them easily accessible via standard **HTTP GET** requests. -SteamGear currently supports [**MPEG-DASH**](https://www.encoding.com/mpeg-dash/) _(Dynamic Adaptive Streaming over HTTP, ISO/IEC 23009-1)_ and [**Apple HLS**](https://developer.apple.com/documentation/http_live_streaming) _(HTTP Live Streaming)_. But, Multiple DRM support is yet to be implemented. +StreamGear currently supports both [**MPEG-DASH**](https://www.encoding.com/mpeg-dash/) _(Dynamic Adaptive Streaming over HTTP, ISO/IEC 23009-1)_ and [**Apple HLS**](https://developer.apple.com/documentation/http_live_streaming) _(HTTP Live Streaming)_. -SteamGear also creates a Manifest file _(such as MPD in-case of DASH)_ or a Master Playlist _(such as M3U8 in-case of Apple HLS)_ besides segments that describe these segment information _(timing, URL, media characteristics like video resolution and bit rates)_ and is provided to the client before the streaming session. +Additionally, StreamGear generates a manifest file _(such as MPD for DASH)_ or a master playlist _(such as M3U8 for Apple HLS)_ alongside the segments. These files contain essential segment information, _including timing, URLs, and media characteristics like video resolution and adaptive bitrates_. They are provided to the client before the streaming session begins. **StreamGear primarily works in two Independent Modes for transcoding which serves different purposes:** -- **Single-Source Mode:** In this mode, StreamGear **transcodes entire video file** _(as opposed to frame-by-frame)_ into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you're transcoding long-duration lossless videos(with audio) for streaming that required no interruptions. But on the downside, the provided source cannot be flexibly manipulated or transformed before sending onto FFmpeg Pipeline for processing.
**_Learn more about this mode [here ➶][ss-mode-doc]_** +- **Single-Source Mode 💿 :** In this mode, StreamGear **transcodes entire video file** _(as opposed to frame-by-frame)_ into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you're transcoding long-duration lossless videos(with audio) for streaming that requires no interruptions. But on the downside, the provided source cannot be flexibly manipulated or transformed before sending onto FFmpeg Pipeline for processing. **_Learn more about this mode [here ➶][ss-mode-doc]_** -- **Real-time Frames Mode:** In this mode, StreamGear directly **transcodes frame-by-frame** _(as opposed to a entire video file)_, into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you desire to flexibility manipulate or transform [`numpy.ndarray`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) frames in real-time before sending them onto FFmpeg Pipeline for processing. But on the downside, audio has to added manually _(as separate source)_ for streams. **_Learn more about this mode [here ➶][rtf-mode-doc]_** +- **Real-time Frames Mode 🎞️ :** In this mode, StreamGear directly **transcodes frame-by-frame** _(as opposed to an entire video file)_, into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you desire to flexibly manipulate or transform [`numpy.ndarray`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) frames in real-time before sending them onto FFmpeg Pipeline for processing. But on the downside, audio has to be added manually _(as separate source)_ for streams.
**_Learn more about this mode [here ➶][rtf-mode-doc]_** ### StreamGear API Guide: @@ -508,9 +515,9 @@ from vidgear.gears.asyncio import WebGear # various performance tweaks options = { "frame_size_reduction": 40, - "frame_jpeg_quality": 80, - "frame_jpeg_optimize": True, - "frame_jpeg_progressive": False, + "jpeg_compression_quality": 80, + "jpeg_compression_fastdct": True, + "jpeg_compression_fastupsample": False, } # initialize WebGear app @@ -650,7 +657,7 @@ It is something I am doing with my own free time. But so much more needs to be d Here is a Bibtex entry you can use to cite this project in a publication: -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8174694.svg)](https://doi.org/10.5281/zenodo.8174694) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8332548.svg)](https://doi.org/10.5281/zenodo.8332548) ```BibTeX @software{vidgear, @@ -660,18 +667,19 @@ Here is a Bibtex entry you can use to cite this project in a publication: Christian Hollinger and Ian Max Andolina and Vincent Boivin and - enarche-ahn and + Kyle Ahn and freol35241 and Benjamin Lowe and Mickaël Schoentgen and - Renaud Bouckenooghe}, - title = {abhiTronix/vidgear: VidGear v0.3.1}, - month = jul, + Renaud Bouckenooghe and + Ibtsam Ahmad}, + title = {abhiTronix/vidgear: VidGear Stable v0.3.2}, + month = sep, year = 2023, publisher = {Zenodo}, - version = {vidgear-0.3.1}, - doi = {10.5281/zenodo.8174694}, - url = {https://doi.org/10.5281/zenodo.8174694} + version = {vidgear-0.3.2}, + doi = {10.5281/zenodo.8332548}, + url = {https://doi.org/10.5281/zenodo.8332548} } ``` @@ -807,5 +815,6 @@ External URLs [zmq-req-rep]: https://learning-0mq-with-pyzmq.readthedocs.io/en/latest/pyzmq/patterns/client_server.html [zmq-pub-sub]: https://learning-0mq-with-pyzmq.readthedocs.io/en/latest/pyzmq/patterns/pubsub.html [zmq-pull-push]: https://learning-0mq-with-pyzmq.readthedocs.io/en/latest/pyzmq/patterns/pushpull.html#push-pull +[picamera2]:https://github.com/raspberrypi/picamera2 
[picamera-setting]: https://picamera.readthedocs.io/en/release-1.13/quickstart.html [webrtc]: https://webrtc.org/ diff --git a/appveyor.yml b/appveyor.yml index 4b5e528c2..656054500 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -60,9 +60,10 @@ install: - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - "python --version" - "python -m pip install --upgrade pip wheel" - - "python -m pip install .[asyncio] six codecov httpx pytest-cov pytest-asyncio yt_dlp aiortc paramiko m3u8 async-asgi-testclient" + - cmd: python -m pip install "numpy<2.0.0" + - "python -m pip install --upgrade .[asyncio] six httpx yt_dlp aiortc" + - "python -m pip install --upgrade pytest codecov pytest-cov pytest-asyncio m3u8 async-asgi-testclient paramiko" - "python -m pip install --upgrade deffcode" - - "python -m pip install cryptography==38.0.4" - "python -m pip install https://github.com/abhiTronix/python-mpegdash/releases/download/0.3.0-dev2/mpegdash-0.3.0.dev2-py3-none-any.whl" - cmd: chmod +x scripts/bash/prepare_dataset.sh - cmd: bash scripts/bash/prepare_dataset.sh diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 23cbde702..93286a688 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -66,11 +66,11 @@ steps: displayName: "Prepare dataset" - script: | - python -m pip install --upgrade pip wheel - pip install --upgrade .[asyncio] six codecov yt_dlp httpx mpegdash paramiko m3u8 async-asgi-testclient - pip install --upgrade deffcode - pip install cryptography==38.0.4 - pip install --upgrade pytest pytest-asyncio pytest-cov pytest-azurepipelines + python -m pip install -U pip wheel + python -m pip install "numpy<2.0.0" + python -m pip install -U .[asyncio] yt_dlp httpx six paramiko + python -m pip install -U codecov pytest pytest-asyncio pytest-cov mpegdash m3u8 async-asgi-testclient + python -m pip install -U deffcode displayName: "Install pip dependencies" - script: | diff --git a/codecov.yml b/codecov.yml index 0be02ec86..212a70454 100644 --- a/codecov.yml +++ 
b/codecov.yml @@ -32,6 +32,7 @@ ignore: - "scripts" - "vidgear/gears/__init__.py" #trivial - "vidgear/gears/asyncio/__main__.py" #trivial + - "vidgear/gears/pigear.py" #HW limits - "setup.py" - "**/*.md" - "**/*.html" diff --git a/docs/changelog.md b/docs/changelog.md index 916bf42a8..a68d1220f 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -20,9 +20,282 @@ limitations under the License. # Release Notes -## v0.3.2 (2023-09-10) +## v0.3.3 (2024-06-22) ???+ tip "New Features" + - [x] **PiGear:** + * ⚡️ Official Support for [**Picamera2**](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) API backend. (Fixes #342) + + This massive update brings official support for the new Picamera2 API, unlocking powerful features for Raspberry Pi Camera Modules and limited USB camera support. + + **Seamless Python wrapper:** A robust wrapper around Picamera2 API library simplifies integration with minimal code changes for existing PiGear users. + + **Enhanced camera control:** Leverages libcamera API under the hood for Raspberry Pi Camera Modules. + + **Existing compatibility:** Maintains compatibility with PiGear's existing super-charged multi-threaded and colorspace manipulation framework. + + **Proper Resource management:** Ensures proper resource release during PiGear termination. + + **USB camera support (limited):** Provides basic functionality for USB webcams. PiGear can accurately differentiate between USB and Raspberry Pi cameras using metadata. + + **Backward compatibility:** Seamlessly switches to the legacy Picamera library backend if Picamera2 is unavailable. + + **Standalone functionalities:** Standalone functionalities for both legacy `picamera` and newer `picamera2` backends for clarity. + + **Advanced optional parameters handling:** Handles camera configurational parameters and user-defined settings for various camera types. 
+ * **New optional configurational parameters:** Currently Supports `sensor`, `format`, `controls`, `transform`, `stride`, `buffer_count`, and `queue` with sanity checks. + * **New user-defined optional parameters:** Such as `auto_align_output_config`, `enable_verbose_logs`, and more. + - [x] **StreamGear:** + * Introduced new `-enable_force_termination` attribute for immediate FFmpeg process termination. + - [x] **Helper:** + * Added support for SRTP/RTSPS in `is_valid_url` function (Fixes #410) + + Enhanced `is_valid_url` in `helper.py` to recognize and support both `rtsp` and `rtsps` protocols. (Suggested by @jonra1993) + + SRTP/RTSPS extends RTSP/RTP to encrypt video and audio data using the same ciphers as HTTPS, typically AES with a 128-bit key length. + * Added a custom deprecated decorator to mark deprecated functions and parameters to display a warning message when a deprecated one is used. + - [x] **Docs:** + * Overhauled mkdocs material theme: + + Added `unrecognized_links: ignore` to `mkdocs.yml` for validations. + + Added custom admonition icons. + + Added new `git-authors` plugin. + + Added new tables markdown extension. + + Added custom fences to `pymdownx.superfences` markdown extension. + + Added `line_spans: __span` and `pygments_lang_class: true` parameters to `pymdownx.highlight` markdown extension. + + Added `normalize_issue_symbols: true` to `pymdownx.magiclink` markdown extension. + + Added new mkdocs feature dependency `mkdocs-git-authors-plugin`. + * Added the use of new `-enable_force_termination` parameter. + * Added a new FAQ entry about the deprecated `rgb_mode` parameter. + * Added new `screengear_error11.png` asset. + - [x] **CI:** + * Added test cases for `import_dependency_safe` function to validate different scenarios and error handling in `import_dependency_safe`. + +??? 
success "Updates/Improvements" + - [x] Core: + * Improved exception handling for module imports: + + Updated `import_dependency_safe` in `helper.py`: + * Added specific handling for `ModuleNotFoundError`. + * Included original exception in `ImportError` for better error tracing. + * Enhanced logging to include exception traceback when error is set to "log". + + Enhanced `import_core_dependency` in `__init__.py`: + * Added specific handling for `ModuleNotFoundError`. + * Included original exception in `ImportError` for better error tracing. + * Improved colorspace handling in videocapture gears: + + Logged a warning and discarded invalid colorspace values instead of raising an exception. + + Consolidated colorspace logging into a single line using a ternary operation. + - [x] Asyncio: + * Replaced deprecated Starlette's `on_shutdown` parameter with an async context manager `lifespan` in WebGear and WebGear_RTC APIs. (Fixes #397) + + Moved shutdown logic for VideoGear and peer RTC connections to this new `lifespan` context manager. + + Added new `contextlib` import for using `asynccontextmanager`. + - [x] NetGear_Async API: + * Modified `__init__` method to handle event loop more robustly: + + Try to get the running event loop using `asyncio.get_running_loop()` + + If no running event loop found, create a new one with `asyncio.new_event_loop()` + + Log if creating a new event loop + * Changed launch method to use `self.loop.create_task()` instead of `asyncio.ensure_future()` + + Ensures the task is created using the correct event loop instance. + * Moved the event loop initialization code to an earlier point before setting event loop policy to ensure it is set up correctly before selecting `WindowsSelectorEventLoop` policy. + + On Windows, vidgear requires the `WindowsSelectorEventLoop`, but Python 3.8 and above defaults to the `ProactorEventLoop` which is not compatible. 
+ * Removed redundant python version check to set `WindowsSelectorEventLoop` policy, as minimum supported version is already `3.8`. + * Move event loop setup and policy assignment to the beginning of `__init__` before zmq Context creation. + * Refactored return data handling. + - [x] StreamGear: + * Updated `close()` methods for handling gracefully signal interruptions based on different operating systems with device audio streams. + * Deprecated `terminate()` method, introducing `close()` for safer process termination. + * Enhanced stream copy support in Single Source mode (Fixes #396). + + Moved settings for "-vf" and "-aspect" inside conditional blocks. + + Added warnings and discarded these parameters in stream copy mode. + + Ignored stream copy parameter in Real-time Frames Mode or Custom Streams with appropriate warnings. + + Updated `-acodec` handling: + * Default to `aac` for Custom Streams. + * Use stream copy (`-acodec copy`) for input video’s audio when Custom Streams are disabled. + + Refined `-livestream` parameter usage to Real-time Frames Mode only. + + Adjusted video and audio bitrate assignment to skip when stream copy is enabled. + + Improved log message for `-clear_prev_assets` parameter. + * Restricted `-livestream` parameter to Real-time Frames Mode only. + + Disabled live streaming for video files and updated relevant logging. + * Enhanced warning messages and clarified description. + - [x] PiGear: + * Logging optimization with warning for common `libcamera` messages. + * Lowered `framerate` minimum value to `0.0`. + * Moved `sensor` optional parameter to commonly supported picamera2 configurational parameters. + * Removed unsupported `bit_depth` optional parameters. + * Updated PiGear API tagline and introduction. + - [x] NetGear: + * Enhanced logging and error handling for secure mode. + * Logged Authenticator start/stop events. + * Handled socket session expiration more gracefully in `recv_handler`. 
+ * Ensured proper termination of the ZMQ context and socket when closing the NetGear instance. + - [x] WebGear: + * Enhanced error messages for WebGear auto-generation workflow (Fixes #403) + + Updated `homepage`, `not_found`, and `server_error` methods to include more detailed JSON error messages. + + Added specific error and message prefixes to improve clarity. + - [x] WebGear_RTC: + * Optimized peer connection closure to avoid redundant closures. + * Reduced unnecessary logging by only logging ICE connection state changes when they are not in a "failed" state. + - [x] WriteGear: + * Simplified the logic for formatting output parameters. + * Improved error handling in `execute_ffmpeg_cmd` method: + + Raised `ValueError` with descriptive messages for `BrokenPipeError` or `IOError`. + + Updated error handling per PEP 409 to preserve original exception context or suppress it based on logging settings. + - [x] CamGear: + * Removed GStreamer support check. + * Improved readability of livestream warning logs. + - [x] Setup.py: + * Dropped legacy picamera dependency in `setup.py`. + * Updated setup.py to use the latest `pyzmq` version to address installation issues (Fixes #399). + - [x] Helper: + * Added patch for substring index bug in `get_supported_demuxers` helper method. + * Updated `extract_time` helper function regex to handle milliseconds. + - [x] Docs: + * Update StreamGear documentation: + + Updated documentation to deprecated `terminate()` method, and introducing `close()` for safer process termination. + + Improved the overview section's description and wording. + + Updated usage examples for both Single-Source Mode and Real-time Frames Mode. + + Updated StreamGear usage examples for device audio input. + + Refactored sections for Live Streaming usage. + * Added warning for unsupported `-livestream` parameter in Single-Source Mode. + + Added a tip box on benefits of using stream copy (`-vcodec copy`) for faster HLS/DASH transcoding. 
+ * Highlighted limitations of stream copy, including incompatibility with Real-time Frames Mode and Custom Streams. + * Clarified automatic audio stream copy (`-acodec copy`) usage with input video’s audio stream. + + Updated usage example for device video source. + + Addressed deprecation of the `terminate()` method in favor of the new `close()` method. + + Updated respective notices for the deprecated `terminate()` method and `rgb_mode` parameter. + + Added a deprecation warning admonition for the `rgb_mode` parameter in the `stream()` method. + + Removed the obsolete usage example for deprecation RGB mode with StreamGear. + + Added documentation and usage of the new `-enable_force_termination` parameter. + + Modified the warning message to mention that forced termination can cause corrupted output in certain scenarios. + + Updated the docstring for the `stream()` method and `transcode_source()` method. + + Refactored the StreamGear API Parameters documentation to enhance clarity and readability. + + Refined the description of the `-streams` attribute of the StreamGear API. + * Update PiGear documentation: + + Added a warning advising users to disable common `libcamera` messages when logging is disabled. + + Updated Picamera2 installation instructions _(including `apt`, `pip`, pre-installation on Raspberry Pi images, and compatibility warnings)_ + + Moved legacy Picamera library installation instructions to an admonition. + + Removed Importing section from overview to avoid confusion. + * Update NetGear documentation: + + Added Admonition for warning users about the Client's end must run before the Server's end to establish a secure connection in Secure Mode. + + Added warning log for potential issues with `flag=1` (NOBLOCK). (Fixes #390) + * Changed default value of `copy` to `True` in NetGear API documentation. + + Noted that `track` option is ignored when `copy=True`. 
+ * Update WriteGear documentation: + + Updated the documentation for the `-disable_force_termination` parameter. + * Update `README.md`: + + Replaced deprecated options (`frame_jpeg_quality`, `frame_jpeg_optimize`, `frame_jpeg_progressive`) with their newer equivalents (`jpeg_compression_quality,` `jpeg_compression_fastdct`, `jpeg_compression_fastupsample`) in WebGear usage example. + * Update `mkdocs.yml`: + + Set `edit_uri` for GitHub edit links. + + Add new theme features like content actions, tooltips, etc. + + Update palette settings for light/dark mode. + + Enable new markdown extensions. + + Add custom javascript hook support. + + Migrated to new Google Analytics 4. + + Replaced depreciated `materialx `with supported emoji extension. + + Replaced permalink icon with default one. + + Change system mode toggle icon and name in `mkdocs.yml`. + * Improved overall documentation quality by added detailed explanations, practical examples, following best practices, and clearer usage patterns. + * Updated sections, code examples, admonitions, and comments for better readability, consistency, and precision. + * Added missing version contributors to `changelog.md`. + * Added new icons to make headings more readable. + * Replaced unsupported admonitions with supported ones. + * Removed all custom admonition icons and color CSS from `custom.css`. + * Removed Twitter section from help and docs site metadata. + * Updated Zenodo badge and BibTeX entry. + * Added workaround for 'AttributeError: 'DXCamera' object has no attribute 'is_capturing'' error on Windows. + * Remove script tags from `main.html` and use a custom hook for adding javascripts on certain pages. + * Refactored all APIs and bonus examples to use `linenums` and `hl_lines` which makes it easier to highlight specific lines in code blocks. + * Removed Gitter community chat sidecard javascript file. + * Redefined spacing between sections. 
+ * Add failure warning in various docs about `picamera` incompatibility on 64-bit OS. + * Update announcement icon in `main.html`. + * Remove `site.webmanifest` file. + - [x] Maintenance: + * Improved logging, parameter validation, and added descriptive dialogs across various APIs. + * Moved logging enablement before version logging for consistency in vidgear APIs. + * Removed redundant boolean assignment for various APIs internal logging. + * Simplified conditional statements and assignments using short-circuiting, Boolean operations, and ternary operators across various APIs and tests. + * Refactored vidgear code to improve readability, maintainability, and performance. + * Added `.cache` directory to `.gitignore`, + * Updated vidgear library version to `v0.3.3`. + * Improved code efficiency with short-circuiting and formatting. + * Updated logging practices to be more developer-friendly. + * Removed unnecessary parentheses and type checks. + * Removed unused imports. + * Updated code comments. + - [x] CI: + * Temporarily removed PiGear API from code coverage due to hardware limitations. + * Deprecated custom `event_loop` fixture overrides in WebGear_RTC and NetGear_Async tests + + Removed redundant `pytest.mark.asyncio` decorators from several test functions. + * Add a new `event_loop_policy` fixture for pytest to override the event loop policy: + + Added new recommended approach of using `pytest.mark.asyncio(scope="module")` to mark all WebGear_RTC and NetGear_Async tests as asynchronous and utilize the same event loop throughout the module. + + Log the event loop being used for debugging. + * Updated NetGear unit tests to reflect the new default for `copy`. + * Ensured coverage for `raise`, `log`, `silent`, and unknown error types. + * Improved parameterized test cases to support floating point values. + * Updated StreamGear tests to use the new `close()` method instead of the deprecated terminate() method. 
+ * Updated tests of various APIs for better coverage and reliability. + * Enabled `kill=True` in `close()` in NetGear Tests. + * Removed pinned `cryptography==38.0.4` dependency. + * Remove unused imports and code cleanup. + * Rearranged the dependencies. + +??? danger "Breaking Updates/Changes" + - StreamGear: + - [ ] Deprecated `terminate()` method and introduced `close()` method. + + The `terminate()` method in StreamGear is now deprecated and will be removed in a future release. Developers should use the new `close()` method instead, which provides a more descriptive name like in WriteGear API for terminating StreamGear processes safely. + - [ ] Deprecated `rgb_mode` parameter in `stream()` method. + + This parameter will be removed in a future version, and only BGR format frames will be supported. + - [ ] Restricted `-livestream` parameter to Real-time Frames Mode only. + + Live streaming is intended for low-latency streaming of real-time frames, where chunks contain only the most recent frames. It doesn't make sense when streaming from a video file, as the entire file can be streamed normally without the need for live streaming. + + +??? bug "Bug-fixes" + - [x] PiGear: + * Modify PiGear class behavior when `enforce_legacy_picamera=True` on unsupported system + + Instead of silently disabling `picamera2` API directly, PiGear now raises an error if `picamera` is unavailable or unsupported + + Prevented incorrect initialization of `PiGear` class on unsupported 64-bit OS systems. + * Fixed `UnboundLocalError` bug for 'picamera2' variable assignment. + * Fixed `UnboundLocalError` bug for 'queue' variable assignment. + * Fixed colorspace typo bug. + - [x] StreamGear: + * Fixed incompatibility of stream copy with Real-time Frames Mode. + + Added warnings and discarded `-vcodec copy` when using this mode. + * Removed non-essential aspect ratio parameter to prevent unwanted distortions (Fixes #385). 
+ * Moved handle streaming format to beginning to fix 'StreamGear' object has no attribute '_StreamGear__format' bug. + - [x] NetGear: + * Fixed Secure Mode failing to work on conflicting ZMQ Contexts: + + Handled "Address in use" error more gracefully and disabled secure mode if errors occur. + + Improved handling of ZMQ Authenticator and Certificates. + * Fixed `msg_json` undefined when terminating context in the `recv_handler` method. + - [x] CamGear: + * Fixed logging condition for yt-dlp (Fixes #394) + + Updated `no_warnings` parameter in `CamGear` to be `False` when logging is enabled and `True` otherwise. + - [x] Docs: + * Replaced buggy kofi widget with a button image in `index.md`. + * Fixed Ko-fi sponsorship heart hover effect in footer + + Replaced `twemoji` heart emoji with `heart-pulse` fontawesome SVG + * Fixed titles and indentations in various admonitions. + * Fixed various issues in code comments, and hyperlinks URLs. + * Fixed typos, formatting, code highlighting, and grammar inconsistencies. + * Fixed minor typo in `js_hook.py`. + - [x] CI: + * Fixed simplejpeg and opencv not compatible with `numpy==2.x.x` versions. + + Pinned `numpy<2.0.0` in all CI envs. + * Fixed expected duration value in parameterized test case from `8` to `8.44` since `test_extract_time` function now supports floating point values. + * Fixed `test_secure_mode` NetGear test: + + Added `"127.0.0.1"` address to allow common endpoint for connection. + + Added `"jpeg_compression":False` to disable frame compression, allowing frame to be the same while assertion. + * Fixed `pip install` hash bug in Azure Pipelines CI. + * Fixed various typos and code issues in tests. + * Fixed invalid escape sequence in test case string. + * Fixed python environment bugs in `appveyor.yml`. + +??? question "Pull Requests" + * PR #411 + * PR #409 + * PR #406 + * PR #401 + * PR #398 + * PR #392 + +??? new "New Contributors" + * @jonra1993 + +  + +  + +## v0.3.2 (2023-09-10) + +??? 
tip "New Features" - [x] **NetGear:** * Added new `kill` parameter to `close()` method to forcefully kill ZMQ context instead of graceful exit only in the `receive` mode. * Added new `subscriber_timeout` integer optional parameter to support timeout with `pattern=2` _(or Publisher-Subscriber)_ pattern. @@ -92,6 +365,9 @@ limitations under the License. * PR #375 * PR #370 +??? new "New Contributors" + * @ibtsam3301 +     @@ -374,6 +650,10 @@ limitations under the License. * PR #350 * PR #351 +??? new "New Contributors" + * @sueskind + +     diff --git a/docs/contribution.md b/docs/contribution.md index 19b50b66b..4c9d675b0 100644 --- a/docs/contribution.md +++ b/docs/contribution.md @@ -42,22 +42,22 @@ limitations under the License. ## Submission Contexts -### Got a question or problem? +### Got a question or problem? :material-lightbulb-question: For quick questions, please refrain from opening an issue, instead read our [FAQ & Troubleshooting](../help/get_help/#frequently-asked-questions) section or you can reach us on [Gitter](https://gitter.im/vidgear/community) community channel. -### Found a typo? +### Found a typo? :material-eraser: There's no need to contribute for some typos. Just reach us on [Gitter ➶](https://gitter.im/vidgear/community) community channel, We will correct them in (less than) no time. -### Found a bug? +### Found a bug? :material-bug-outline: If you encountered a bug, you can help us by [submitting an issue](../contribution/issue/) in our GitHub repository. Even better, you can submit a Pull Request(PR) with a fix, but make sure to read the [guidelines ➶](#submission-guidelines). -### Request for a feature/improvement? +### Request for a feature/improvement? :material-new-box: ??? tip "Subscribe to Github Repository" diff --git a/docs/gears.md b/docs/gears.md index 21deb936f..4cef0fdb5 100644 --- a/docs/gears.md +++ b/docs/gears.md @@ -27,7 +27,7 @@ limitations under the License. ## Gears :octicons-gear-24:, What are these? 
-VidGear is built on Standalone APIs - also known as **Gears**, each with some unique functionality. Each Gears is designed exclusively to handle/control/process different data-specific & device-specific video streams, network streams, and media encoders/decoders. +VidGear is built on Standalone APIs - also known as **Gears :fontawesome-solid-gears:**, each with some unique functionality. Each Gears is designed exclusively to handle/control/process different data-specific & device-specific video streams, network streams, and media encoders/decoders. Gears allows users to work with an inherently optimized, easy-to-use, extensible, and exposed API Framework on top of many state-of-the-art libraries, while silently delivering robust error handling and unmatched real-time performance. @@ -40,7 +40,7 @@ These Gears can be classified as follows: > **Basic Function:** Retrieves [`numpy.ndarray`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) frames from various sources. * [CamGear](camgear/overview/): Multi-Threaded API targeting various IP-USB-Cameras/Network-Streams/Streaming-Sites-URLs. -* [PiGear](pigear/overview/): Multi-Threaded API targeting various Raspberry-Pi Camera Modules. +* [PiGear](pigear/overview/): Multi-Threaded API targeting various Camera Modules and _(limited)_ USB cameras on Raspberry Pis :fontawesome-brands-raspberry-pi:. * [ScreenGear](screengear/overview/): High-performance API targeting rapid Screencasting Capabilities. * [VideoGear](videogear/overview/): Common Video-Capture API with internal [Video Stabilizer](stabilizer/overview/) wrapper. 
diff --git a/docs/gears/camgear/overview.md b/docs/gears/camgear/overview.md index b7cdadbed..9b9dc83e6 100644 --- a/docs/gears/camgear/overview.md +++ b/docs/gears/camgear/overview.md @@ -46,23 +46,13 @@ CamGear internally implements [`yt_dlp`][yt_dlp] backend class for seamlessly pi   -## Importing - -You can import CamGear API in your program as follows: - -```python -from vidgear.gears import CamGear -``` - -  - ## Usage Examples
See here 🚀
-!!! experiment "After going through CamGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/camgear_ex/)" +!!! example "After going through CamGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/camgear_ex/)" ## Parameters diff --git a/docs/gears/camgear/usage.md b/docs/gears/camgear/usage.md index 3320eb4b3..b92a7fc66 100644 --- a/docs/gears/camgear/usage.md +++ b/docs/gears/camgear/usage.md @@ -20,7 +20,7 @@ limitations under the License. # CamGear API Usage Examples: -!!! experiment "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/camgear_ex/)" +!!! example "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/camgear_ex/)"   @@ -28,7 +28,7 @@ limitations under the License. Following is the bare-minimum code you need to get started with CamGear API: -```python +```python linenums="1" # import required libraries from vidgear.gears import CamGear import cv2 @@ -79,7 +79,7 @@ The complete usage example for Dailymotion and Twitch URLs are as follows: **Checkout [this FAQ ➶](../../../help/camgear_faqs/#how-to-compile-opencv-with-gstreamer-support) for compiling OpenCV with GStreamer support.** - !!! fail "Not all resolutions are supported with GStreamer Backend. See issue #244" + !!! failure "Not all resolutions are supported with GStreamer Backend. See issue #244" ???+ info "Exclusive CamGear Attributes for `yt_dlp` backend" @@ -124,7 +124,8 @@ The complete usage example for Dailymotion and Twitch URLs are as follows: ``` === "Dailymotion :fontawesome-brands-dailymotion:" - ```python hl_lines="12-13" + + ```python linenums="1" hl_lines="12-13" # import required libraries from vidgear.gears import CamGear import cv2 @@ -172,7 +173,7 @@ The complete usage example for Dailymotion and Twitch URLs are as follows: !!! 
warning "If Twitch user is offline, CamGear will throw ValueError." - ```python hl_lines="12-13" + ```python linenums="1" hl_lines="12-13" # import required libraries from vidgear.gears import CamGear import cv2 @@ -224,7 +225,7 @@ The complete usage example for Dailymotion and Twitch URLs are as follows: CamGear API also provides out-of-the-box support for pipelining live video-frames and metadata from **:fontawesome-brands-youtube: YouTube (Livestream + Normal) Videos**. -!!! fail "YouTube Playlists :material-youtube-subscription: are not supported yet." +!!! failure "YouTube Playlists :material-youtube-subscription: are not supported yet." The complete usage example is as follows: @@ -234,7 +235,7 @@ The complete usage example is as follows: **Checkout [this FAQ ➶](../../../help/camgear_faqs/#how-to-compile-opencv-with-gstreamer-support) for compiling OpenCV with GStreamer support.** - !!! fail "Not all resolutions are supported with GStreamer Backend. See issue #244" + !!! failure "Not all resolutions are supported with GStreamer Backend. See issue #244" ??? info "Exclusive CamGear Attributes for `yt_dlp` backend" @@ -274,7 +275,7 @@ The complete usage example is as follows: print(video_metadata["title"]) ``` -```python hl_lines="8-9" +```python linenums="1" hl_lines="8-9" # import required libraries from vidgear.gears import CamGear import cv2 @@ -325,7 +326,7 @@ The complete usage example is as follows: !!! tip "All the supported Source Tweak Parameters can be found [here ➶](../advanced/source_params/#source-tweak-parameters-for-camgear-api)" -```python hl_lines="8-10" +```python linenums="1" hl_lines="8-10" # import required libraries from vidgear.gears import CamGear import cv2 @@ -377,13 +378,13 @@ CamGear API also supports **Direct Colorspace Manipulation**, which is ideal for !!! 
info "A more detailed information on colorspace manipulation can be found [here ➶](../../../bonus/colorspace_manipulation/)" -In following example code, we will start with [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) as source colorspace, and then we will switch to [**GRAY**](https://en.wikipedia.org/wiki/Grayscale) colorspace when `w` key is pressed, and then [**LAB**](https://en.wikipedia.org/wiki/CIELAB_color_space) colorspace when `e` key is pressed, finally default colorspace _(i.e. **BGR**)_ when `s` key is pressed. Also, quit when `q` key is pressed: +In following example code, we will start with [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) as source colorspace, and then we will switch to [**GRAY**](https://en.wikipedia.org/wiki/Grayscale) colorspace when ++"W"++ key is pressed, and then [**LAB**](https://en.wikipedia.org/wiki/CIELAB_color_space) colorspace when ++"E"++ key is pressed, finally default colorspace _(i.e. **BGR**)_ when ++"S"++ key is pressed. Also, quit when ++"Q"++ key is pressed: -!!! fail "Any incorrect or None-type value, will immediately revert the colorspace to default i.e. `BGR`." +!!! failure "Any incorrect or None-type value, will immediately revert the colorspace to default i.e. `BGR`." -```python hl_lines="7 30 34 38" +```python linenums="1" hl_lines="7 30 34 38" # import required libraries from vidgear.gears import CamGear import cv2 diff --git a/docs/gears/netgear/advanced/bidirectional_mode.md b/docs/gears/netgear/advanced/bidirectional_mode.md index dfb99fd5f..8cbcb56fd 100644 --- a/docs/gears/netgear/advanced/bidirectional_mode.md +++ b/docs/gears/netgear/advanced/bidirectional_mode.md @@ -96,7 +96,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both sides anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="9 31" +```python linenums="1" hl_lines="9 31" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -151,7 +151,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 18" +```python linenums="1" hl_lines="6 18" # import required libraries from vidgear.gears import NetGear import cv2 @@ -219,7 +219,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="11-17" +```python linenums="1" hl_lines="11-17" # import required libraries from vidgear.gears import NetGear import cv2 @@ -290,73 +290,162 @@ Now, Open the terminal on another Server System _(a Raspberry Pi with Camera Mod !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="25-30" -# import required libraries -from vidgear.gears import VideoGear -from vidgear.gears import NetGear -from vidgear.gears import PiGear +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." 
-# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# activate Bidirectional mode -options = {"bidirectional_mode": True} +=== "New Picamera2 backend" -# Define NetGear server at given IP address and define parameters -# !!! change following IP address '192.168.x.xxx' with client's IP address !!! -server = NetGear( - address="192.168.x.xxx", - port="5454", - protocol="tcp", - pattern=1, - logging=True, - **options -) + ```python linenums="1" hl_lines="25-30" + # import required libraries + from vidgear.gears import VideoGear + from vidgear.gears import NetGear + from vidgear.gears import PiGear + from libcamera import Transform -# loop over until KeyBoard Interrupted -while True: + # add various Picamera2 API tweaks + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } - try: - # read frames from stream - frame = stream.read() + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - # check for frame if Nonetype - if frame is None: + # activate Bidirectional mode + options = {"bidirectional_mode": True} + + # Define NetGear server at given IP address and define parameters + # !!! change following IP address '192.168.x.xxx' with client's IP address !!! + server = NetGear( + address="192.168.x.xxx", + port="5454", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # loop over until KeyBoard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # prepare data to be sent(a simple text in our case) + target_data = "Hello, I am a Server." 
+ + # send frame & data and also receive data from Client + recv_data = server.send(frame, message=target_data) # (1) + + # print data just received from Client + if not (recv_data is None): + print(recv_data) + + except KeyboardInterrupt: break - # {do something with the frame here} + # safely close video stream + stream.stop() - # prepare data to be sent(a simple text in our case) - target_data = "Hello, I am a Server." + # safely close server + server.close() + ``` - # send frame & data and also receive data from Client - recv_data = server.send(frame, message=target_data) # (1) + 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter. - # print data just received from Client - if not (recv_data is None): - print(recv_data) + +=== "Legacy Picamera backend" - except KeyboardInterrupt: - break + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." -# safely close video stream -stream.stop() + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." -# safely close server -server.close() -``` + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." 
+ + ```python linenums="1" hl_lines="25-30" + # import required libraries + from vidgear.gears import VideoGear + from vidgear.gears import NetGear + from vidgear.gears import PiGear + + # add various Picamera tweak parameters to dictionary + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # activate Bidirectional mode + options = {"bidirectional_mode": True} + + # Define NetGear server at given IP address and define parameters + # !!! change following IP address '192.168.x.xxx' with client's IP address !!! + server = NetGear( + address="192.168.x.xxx", + port="5454", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # loop over until KeyBoard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # prepare data to be sent(a simple text in our case) + target_data = "Hello, I am a Server." + + # send frame & data and also receive data from Client + recv_data = server.send(frame, message=target_data) # (1) + + # print data just received from Client + if not (recv_data is None): + print(recv_data) + + except KeyboardInterrupt: + break + + # safely close video stream + stream.stop() + + # safely close server + server.close() + ``` + + 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter. -1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter.   
@@ -380,7 +469,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="35-45" +```python linenums="1" hl_lines="35-45" # import required libraries from vidgear.gears import NetGear from vidgear.gears.helper import reducer @@ -447,7 +536,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="29" +```python linenums="1" hl_lines="29" # import required libraries from vidgear.gears import NetGear from vidgear.gears.helper import reducer diff --git a/docs/gears/netgear/advanced/compression.md b/docs/gears/netgear/advanced/compression.md index 41ffbf8b7..030ede0d4 100644 --- a/docs/gears/netgear/advanced/compression.md +++ b/docs/gears/netgear/advanced/compression.md @@ -117,7 +117,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both sides anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="11-14" +```python linenums="1" hl_lines="11-14" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -173,7 +173,7 @@ Then open another terminal on the same system and execute the following python c !!! note "If compression is enabled at Server, then Client will automatically enforce Frame Compression with its performance attributes." -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -230,7 +230,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both sides anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="7 11" +```python linenums="1" hl_lines="7 11" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -288,7 +288,7 @@ Then open another terminal on the same system and execute the following python c !!! info "Client's end also automatically enforces Server's colorspace, there's no need to define it again." -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -340,7 +340,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="9-15" +```python linenums="1" hl_lines="9-15" # import required libraries from vidgear.gears import NetGear import cv2 @@ -395,7 +395,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="20-25" +```python linenums="1" hl_lines="20-25" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -475,7 +475,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="12-16 42" +```python linenums="1" hl_lines="12-16 42" # import required libraries from vidgear.gears import NetGear from vidgear.gears.helper import reducer @@ -548,7 +548,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="8-12 35" +```python linenums="1" hl_lines="8-12 35" # import required libraries from vidgear.gears import NetGear from vidgear.gears.helper import reducer diff --git a/docs/gears/netgear/advanced/multi_client.md b/docs/gears/netgear/advanced/multi_client.md index 7f1ea5fba..101465cb6 100644 --- a/docs/gears/netgear/advanced/multi_client.md +++ b/docs/gears/netgear/advanced/multi_client.md @@ -103,7 +103,7 @@ Now, Open the terminal on a Server System _(with a webcam connected to it at ind !!! tip "You can terminate streaming anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="9 16 39-52" +```python linenums="1" hl_lines="9 16 39-52" # import required libraries from vidgear.gears import NetGear from vidgear.gears import CamGear @@ -176,7 +176,7 @@ Now, Open a terminal on another Client System _(where you want to display the in !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 11-17" +```python linenums="1" hl_lines="6 11-17" # import required libraries from vidgear.gears import NetGear import cv2 @@ -233,7 +233,7 @@ Finally, Open a terminal on another Client System _(where you want to display th !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 11-17" +```python linenums="1" hl_lines="6 11-17" # import required libraries from vidgear.gears import NetGear import cv2 @@ -302,7 +302,7 @@ Now, Open the terminal on a Server System _(with a webcam connected to it at ind !!! tip "You can terminate streaming anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -374,7 +374,7 @@ Now, Open a terminal on another Client System _(where you want to display the in !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -430,7 +430,7 @@ Finally, Open a terminal on another Client System _(also, where you want to disp !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -505,77 +505,167 @@ Now, Open the terminal on a Server System _(with a webcam connected to it at ind !!! tip "You can terminate streaming anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="47-60" -# import required libraries -from vidgear.gears import PiGear -from vidgear.gears import NetGear +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# activate multiclient_mode mode -options = {"multiclient_mode": True} +=== "New Picamera2 backend" -# Define NetGear Client at given IP address and assign list/tuple of all unique Server((5577,5578) in our case) and other parameters -server = NetGear( - address="192.168.x.x", - port=(5577, 5578), - protocol="tcp", - pattern=1, - logging=True, - **options -) # !!! 
change following IP address '192.168.x.xxx' with yours !!! + ```python linenums="1" hl_lines="47-60" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import NetGear -# Define received data dictionary -data_dict = {} + # add various Picamera2 tweak parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } -# loop over until KeyBoard Interrupted -while True: + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - try: - # read frames from stream - frame = stream.read() + # activate multiclient_mode mode + options = {"multiclient_mode": True} - # check for frame if Nonetype - if frame is None: + # Define NetGear Client at given IP address and assign list/tuple of all unique Server((5577,5578) in our case) and other parameters + server = NetGear( + address="192.168.x.x", + port=(5577, 5578), + protocol="tcp", + pattern=1, + logging=True, + **options + ) # !!! change following IP address '192.168.x.xxx' with yours !!! 
+ + # Define received data dictionary + data_dict = {} + + # loop over until KeyBoard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # send frame and also receive data from Client(s) + recv_data = server.send(frame) + + # check if valid data received + if not (recv_data is None): + # extract unique port address and its respective data + unique_address, data = recv_data + # update the extracted data in the data dictionary + data_dict[unique_address] = data + + if data_dict: + # print data just received from Client(s) + for key, value in data_dict.items(): + print("Client at port {} said: {}".format(key, value)) + + except KeyboardInterrupt: break - # {do something with the frame here} + # safely close video stream + stream.stop() - # send frame and also receive data from Client(s) - recv_data = server.send(frame) + # safely close server + server.close() + ``` + +=== "Legacy Picamera backend" - # check if valid data received - if not (recv_data is None): - # extract unique port address and its respective data - unique_address, data = recv_data - # update the extracted data in the data dictionary - data_dict[unique_address] = data + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." - if data_dict: - # print data just received from Client(s) - for key, value in data_dict.items(): - print("Client at port {} said: {}".format(key, value)) + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." - except KeyboardInterrupt: - break + !!! 
failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." -# safely close video stream -stream.stop() + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." -# safely close server -server.close() -``` + ```python linenums="1" hl_lines="47-60" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import NetGear + + # add various Picamera tweak parameters to dictionary + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # activate multiclient_mode mode + options = {"multiclient_mode": True} + + # Define NetGear Client at given IP address and assign list/tuple of all unique Server((5577,5578) in our case) and other parameters + server = NetGear( + address="192.168.x.x", + port=(5577, 5578), + protocol="tcp", + pattern=1, + logging=True, + **options + ) # !!! change following IP address '192.168.x.xxx' with yours !!! 
+ + # Define received data dictionary + data_dict = {} + + # loop over until KeyBoard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # send frame and also receive data from Client(s) + recv_data = server.send(frame) + + # check if valid data received + if not (recv_data is None): + # extract unique port address and its respective data + unique_address, data = recv_data + # update the extracted data in the data dictionary + data_dict[unique_address] = data + + if data_dict: + # print data just received from Client(s) + for key, value in data_dict.items(): + print("Client at port {} said: {}".format(key, value)) + + except KeyboardInterrupt: + break + + # safely close video stream + stream.stop() + + # safely close server + server.close() + ```   @@ -588,7 +678,7 @@ Now, Open a terminal on another Client System _(where you want to display the in !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="27" +```python linenums="1" hl_lines="27" # import required libraries from vidgear.gears import NetGear import cv2 @@ -649,7 +739,7 @@ Finally, Open a terminal on another Client System _(also, where you want to disp !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="27" +```python linenums="1" hl_lines="27" # import required libraries from vidgear.gears import NetGear import cv2 @@ -732,83 +822,180 @@ Now, Open the terminal on a Server System _(with a webcam connected to it at ind !!! tip "You can terminate streaming anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="19 48-64" -# import required libraries -from vidgear.gears import PiGear -from vidgear.gears import NetGear +!!! 
new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# activate both multiclient and bidirectional modes -options = {"multiclient_mode": True, "bidirectional_mode": True} +=== "New Picamera2 backend" -# Define NetGear Client at given IP address and assign list/tuple of -# all unique Server((5577,5578) in our case) and other parameters -server = NetGear( - address="192.168.x.x", - port=(5577, 5578), - protocol="tcp", - pattern=1, - logging=True, - **options -) # !!! change following IP address '192.168.x.xxx' with yours !!! 
+ ```python linenums="1" hl_lines="19 48-64" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import NetGear + from libcamera import Transform -# Define received data dictionary -data_dict = {} + # add various Picamera2 tweak parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } -# loop over until KeyBoard Interrupted -while True: + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - try: - # read frames from stream - frame = stream.read() + # activate both multiclient and bidirectional modes + options = {"multiclient_mode": True, "bidirectional_mode": True} - # check for frame if Nonetype - if frame is None: + # Define NetGear Client at given IP address and assign list/tuple of + # all unique Server((5577,5578) in our case) and other parameters + server = NetGear( + address="192.168.x.x", + port=(5577, 5578), + protocol="tcp", + pattern=1, + logging=True, + **options + ) # !!! change following IP address '192.168.x.xxx' with yours !!! + + # Define received data dictionary + data_dict = {} + + # loop over until KeyBoard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # prepare data to be sent(a simple text in our case) + target_data = "Hello, I am a Server." 
+ + # send frame & data and also receive data from Client(s) + recv_data = server.send(frame, message=target_data) # (1) + + # check if valid data received + if not (recv_data is None): + # extract unique port address and its respective data + unique_address, data = recv_data + # update the extracted data in the data dictionary + data_dict[unique_address] = data + + if data_dict: + # print data just received from Client(s) + for key, value in data_dict.items(): + print("Client at port {} said: {}".format(key, value)) + + except KeyboardInterrupt: break - # {do something with the frame here} + # safely close video stream + stream.stop() - # prepare data to be sent(a simple text in our case) - target_data = "Hello, I am a Server." + # safely close server + server.close() + ``` - # send frame & data and also receive data from Client(s) - recv_data = server.send(frame, message=target_data) # (1) + 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter. + +=== "Legacy Picamera backend" - # check if valid data received - if not (recv_data is None): - # extract unique port address and its respective data - unique_address, data = recv_data - # update the extracted data in the data dictionary - data_dict[unique_address] = data + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." - if data_dict: - # print data just received from Client(s) - for key, value in data_dict.items(): - print("Client at port {} said: {}".format(key, value)) + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." 
- except KeyboardInterrupt: - break + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." -# safely close video stream -stream.stop() + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." -# safely close server -server.close() -``` + ```python linenums="1" hl_lines="19 48-64" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import NetGear + + # add various Picamera tweak parameters to dictionary + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # activate both multiclient and bidirectional modes + options = {"multiclient_mode": True, "bidirectional_mode": True} + + # Define NetGear Client at given IP address and assign list/tuple of + # all unique Server((5577,5578) in our case) and other parameters + server = NetGear( + address="192.168.x.x", + port=(5577, 5578), + protocol="tcp", + pattern=1, + logging=True, + **options + ) # !!! change following IP address '192.168.x.xxx' with yours !!! + + # Define received data dictionary + data_dict = {} + + # loop over until KeyBoard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # prepare data to be sent(a simple text in our case) + target_data = "Hello, I am a Server." 
+ + # send frame & data and also receive data from Client(s) + recv_data = server.send(frame, message=target_data) # (1) + + # check if valid data received + if not (recv_data is None): + # extract unique port address and its respective data + unique_address, data = recv_data + # update the extracted data in the data dictionary + data_dict[unique_address] = data + + if data_dict: + # print data just received from Client(s) + for key, value in data_dict.items(): + print("Client at port {} said: {}".format(key, value)) + + except KeyboardInterrupt: + break + + # safely close video stream + stream.stop() + + # safely close server + server.close() + ``` -1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter. + 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter.   @@ -822,7 +1009,7 @@ Now, Open a terminal on another Client System _(where you want to display the in !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 23-34 42-44" +```python linenums="1" hl_lines="6 23-34 42-44" # import required libraries from vidgear.gears import NetGear import cv2 @@ -894,7 +1081,7 @@ Finally, Open a terminal on another Client System _(also, where you want to disp !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="6 23-34 42-44" +```python linenums="1" hl_lines="6 23-34 42-44" # import required libraries from vidgear.gears import NetGear import cv2 diff --git a/docs/gears/netgear/advanced/multi_server.md b/docs/gears/netgear/advanced/multi_server.md index 761ad0823..c7ca3914a 100644 --- a/docs/gears/netgear/advanced/multi_server.md +++ b/docs/gears/netgear/advanced/multi_server.md @@ -101,7 +101,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7 14 40-52" +```python linenums="1" hl_lines="7 14 40-52" # import required libraries from vidgear.gears import NetGear from imutils import build_montages # (1) @@ -184,7 +184,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="9 14" +```python linenums="1" hl_lines="9 14" # import libraries from vidgear.gears import NetGear from vidgear.gears import CamGear @@ -237,7 +237,7 @@ Finally, Open the terminal on another Server System _(also with a webcam connect !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="9 14" +```python linenums="1" hl_lines="9 14" # import libraries from vidgear.gears import NetGear from vidgear.gears import CamGear @@ -300,7 +300,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear from imutils import build_montages # (1) @@ -381,7 +381,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python +```python linenums="1" # import libraries from vidgear.gears import NetGear import cv2 @@ -434,7 +434,7 @@ Finally, Open the terminal on another Server System _(also with a webcam connect !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import libraries from vidgear.gears import NetGear import cv2 @@ -503,7 +503,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="38-61" +```python linenums="1" hl_lines="38-61" # import required libraries from vidgear.gears import NetGear from imutils import build_montages # (1) @@ -595,7 +595,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="40" +```python linenums="1" hl_lines="40" # import libraries from vidgear.gears import NetGear from vidgear.gears import VideoGear @@ -660,67 +660,148 @@ Finally, Open the terminal on another Server System _(this time a Raspberry Pi w !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="50" -# import libraries -from vidgear.gears import NetGear -from vidgear.gears import PiGear -import cv2 +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! 
warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# activate multiserver_mode -options = {"multiserver_mode": True} +=== "New Picamera2 backend" -# Define NetGear Server at Client's IP address and assign a unique port address and other parameters -# !!! change following IP address '192.168.x.xxx' with yours !!! -server = NetGear( - address="192.168.1.xxx", - port="5578", - protocol="tcp", - pattern=1, - logging=True, - **options -) + ```python linenums="1" hl_lines="50" + # import libraries + from vidgear.gears import NetGear + from vidgear.gears import PiGear + from libcamera import Transform + import cv2 -# loop over until Keyboard Interrupted -while True: + # add various Picamera tweak parameters to dictionary + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } - try: - # read frames from stream - frame = stream.read() + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - # check for frame if Nonetype - if frame is None: + # activate multiserver_mode + options = {"multiserver_mode": True} + + # Define NetGear Server at Client's IP address and assign a unique port address and other parameters + # !!! change following IP address '192.168.x.xxx' with yours !!! 
+ server = NetGear( + address="192.168.1.xxx", + port="5578", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # loop over until Keyboard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with frame and data(to be sent) here} + + # let's prepare a text string as data + text = "I'm Server-2 at Port: 5578" + + # send frame and data through server + server.send(frame, message=text) + + except KeyboardInterrupt: break - # {do something with frame and data(to be sent) here} + # safely close video stream. + stream.stop() - # let's prepare a text string as data - text = "I'm Server-2 at Port: 5578" + # safely close server + server.close() + ``` + +=== "Legacy Picamera backend" - # send frame and data through server - server.send(frame, message=text) + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." - except KeyboardInterrupt: - break + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." -# safely close video stream. -stream.stop() + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." -# safely close server -server.close() -``` + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." 
+ + ```python linenums="1" hl_lines="50" + # import libraries + from vidgear.gears import NetGear + from vidgear.gears import PiGear + import cv2 + + # add various Picamera tweak parameters to dictionary + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # activate multiserver_mode + options = {"multiserver_mode": True} + + # Define NetGear Server at Client's IP address and assign a unique port address and other parameters + # !!! change following IP address '192.168.x.xxx' with yours !!! + server = NetGear( + address="192.168.1.xxx", + port="5578", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # loop over until Keyboard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with frame and data(to be sent) here} + + # let's prepare a text string as data + text = "I'm Server-2 at Port: 5578" + + # send frame and data through server + server.send(frame, message=text) + + except KeyboardInterrupt: + break + + # safely close video stream. + stream.stop() + + # safely close server + server.close() + ```   @@ -753,7 +834,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7 27-31 38 41-64" +```python linenums="1" hl_lines="7 27-31 38 41-64" # import required libraries from vidgear.gears import NetGear from imutils import build_montages # (1) @@ -847,7 +928,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="10 36-44" +```python linenums="1" hl_lines="10 36-44" # import libraries from vidgear.gears import NetGear from vidgear.gears import VideoGear @@ -915,71 +996,156 @@ Finally, Open the terminal on another Server System _(this time a Raspberry Pi w !!! tip "You can terminate stream anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="20 46-54" -# import libraries -from vidgear.gears import NetGear -from vidgear.gears import PiGear -import cv2 +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# activate both multiserver and bidirectional modes -options = {"multiserver_mode": True, "bidirectional_mode": True} +=== "New Picamera2 backend" -# Define NetGear Server at Client's IP address and assign a unique port address and other parameters -# !!! change following IP address '192.168.x.xxx' with yours !!! 
-server = NetGear( - address="192.168.1.xxx", - port="5578", - protocol="tcp", - pattern=1, - logging=True, - **options -) + ```python linenums="1" hl_lines="20 46-54" + # import libraries + from vidgear.gears import NetGear + from vidgear.gears import PiGear + from libcamera import Transform + import cv2 -# loop over until Keyboard Interrupted -while True: + # add various Picamera2 tweak parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } - try: - # read frames from stream - frame = stream.read() + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - # check for frame if Nonetype - if frame is None: + # activate both multiserver and bidirectional modes + options = {"multiserver_mode": True, "bidirectional_mode": True} + + # Define NetGear Server at Client's IP address and assign a unique port address and other parameters + # !!! change following IP address '192.168.x.xxx' with yours !!! + server = NetGear( + address="192.168.1.xxx", + port="5578", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # loop over until Keyboard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with frame and data(to be sent) here} + + # let's prepare a text string as data + target_data = "I'm Server-2 at Port: 5578" + + # send frame & data and also receive data from Client + recv_data = server.send(frame, message=target_data) # (1) + + # print data just received from Client + if not (recv_data is None): + print(recv_data) + + except KeyboardInterrupt: break - # {do something with frame and data(to be sent) here} + # safely close video stream. 
+ stream.stop() - # let's prepare a text string as data - target_data = "I'm Server-2 at Port: 5578" + # safely close server + server.close() + ``` + +=== "Legacy Picamera backend" - # send frame & data and also receive data from Client - recv_data = server.send(frame, message=target_data) # (1) + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." - # print data just received from Client - if not (recv_data is None): - print(recv_data) + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." - except KeyboardInterrupt: - break + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." -# safely close video stream. -stream.stop() + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." 
-# safely close server -server.close() -``` + ```python linenums="1" hl_lines="20 46-54" + # import libraries + from vidgear.gears import NetGear + from vidgear.gears import PiGear + import cv2 + + # add various Picamera tweak parameters to dictionary + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # activate both multiserver and bidirectional modes + options = {"multiserver_mode": True, "bidirectional_mode": True} + + # Define NetGear Server at Client's IP address and assign a unique port address and other parameters + # !!! change following IP address '192.168.x.xxx' with yours !!! + server = NetGear( + address="192.168.1.xxx", + port="5578", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # loop over until Keyboard Interrupted + while True: + + try: + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with frame and data(to be sent) here} + + # let's prepare a text string as data + target_data = "I'm Server-2 at Port: 5578" + + # send frame & data and also receive data from Client + recv_data = server.send(frame, message=target_data) # (1) + + # print data just received from Client + if not (recv_data is None): + print(recv_data) + + except KeyboardInterrupt: + break + + # safely close video stream. + stream.stop() + + # safely close server + server.close() + ``` 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted as `target_data` in `message` parameter. 
diff --git a/docs/gears/netgear/advanced/secure_mode.md b/docs/gears/netgear/advanced/secure_mode.md index fa225e25c..51615363b 100644 --- a/docs/gears/netgear/advanced/secure_mode.md +++ b/docs/gears/netgear/advanced/secure_mode.md @@ -33,7 +33,6 @@ Secure Mode uses a new wire protocol, [**ZMTP 3.0**](http://zmtp.org/) that adds Secure Mode can be easily activated in NetGear API through `secure_mode` attribute of its [`options`](../../params/#options) dictionary parameter, during initialization. Furthermore, for managing this mode, NetGear API provides additional `custom_cert_location` & `overwrite_cert` like attribute too. -   ## Supported ZMQ Security Layers @@ -47,11 +46,12 @@ Secure mode supports the two most powerful ZMQ security layers:   - !!! danger "Important Information regarding Secure Mode" * The `secure_mode` attribute value at the Client's end **MUST** match exactly the Server's end _(i.e. **IronHouse** security layer is only compatible with **IronHouse**, and **NOT** with **StoneHouse**)_. + * In Secure Mode, The Client's end **MUST** run before the Server's end to establish a secure connection. + * The Public+Secret Keypairs generated at the Server end **MUST** be made available at the Client's end too for successful authentication. If mismatched, connection failure will occur. * By Default, the Public+Secret Keypairs will be generated/stored at the `$HOME/.vidgear/keys` directory of your machine _(e.g. `/home/foo/.vidgear/keys` on Linux)_. But you can also use [`custom_cert_location`](../../params/#options) attribute to set your own Custom-Path for a directory to generate/store these Keypairs. @@ -60,8 +60,11 @@ Secure mode supports the two most powerful ZMQ security layers: * **IronHouse** is the strongest Security Layer available, but it involves certain security checks that lead to **ADDITIONAL LATENCY**. + * Secure Mode only supports `libzmq` library version `>= 4.0`. 
+ +   @@ -125,13 +128,59 @@ For implementing Secure Mode, NetGear API currently provide following exclusive Following is the bare-minimum code you need to get started with Secure Mode in NetGear API: -#### Server's End +!!! alert "In Secure Mode, Client's end MUST run before the Server's end to establish a secure connection!" + +#### Client's End Open your favorite terminal and execute the following python code: +!!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" + +```python linenums="1" hl_lines="6" +# import required libraries +from vidgear.gears import NetGear +import cv2 + +# activate StoneHouse security mechanism +options = {"secure_mode": 1} + +# define NetGear Client with `receive_mode = True` and defined parameter +client = NetGear(pattern=1, receive_mode=True, logging=True, **options) + +# loop over +while True: + + # receive frames from network + frame = client.recv() + + # check for received frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + +# close output window +cv2.destroyAllWindows() + +# safely close client +client.close() +``` + +#### Server's End + +Then open another terminal on the same system and execute the following python code to send the frames to our client: + !!! tip "You can terminate both sides anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="9" +```python linenums="1" hl_lines="9" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -171,49 +220,7 @@ stream.stop() server.close() ``` -#### Client's End - -Then open another terminal on the same system and execute the following python code and see the output: - -!!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" 
- -```python hl_lines="6" -# import required libraries -from vidgear.gears import NetGear -import cv2 - -# activate StoneHouse security mechanism -options = {"secure_mode": 1} - -# define NetGear Client with `receive_mode = True` and defined parameter -client = NetGear(pattern=1, receive_mode=True, logging=True, **options) - -# loop over -while True: - - # receive frames from network - frame = client.recv() - - # check for received frame if Nonetype - if frame is None: - break - - # {do something with the frame here} - # Show output window - cv2.imshow("Output Frame", frame) - - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break - -# close output window -cv2.destroyAllWindows() - -# safely close client -client.close() -```   @@ -222,18 +229,19 @@ client.close() ### Using Secure Mode with Variable Parameters - #### Client's End Open a terminal on Client System _(where you want to display the input frames received from the Server)_ and execute the following python code: +!!! alert "In Secure Mode, Client's end MUST run before the Server's end to establish a secure connection!" + !!! info "Note down the local IP-address of this system(required at Server's end) and also replace it in the following code. You can follow [this FAQ](../../../../help/netgear_faqs/#how-to-find-local-ip-address-on-different-os-platforms) for this purpose." !!! danger "You need to paste the Public+Secret Keypairs _(generated at the Server End)_ at the `$HOME/.vidgear/keys` directory of your Client machine for a successful authentication!" !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 11-17" +```python linenums="1" hl_lines="6 11-17" # import required libraries from vidgear.gears import NetGear import cv2 @@ -292,7 +300,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! 
tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7 15-20" +```python linenums="1" hl_lines="7 15-20" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear diff --git a/docs/gears/netgear/advanced/ssh_tunnel.md b/docs/gears/netgear/advanced/ssh_tunnel.md index a3383d968..82e52ef80 100644 --- a/docs/gears/netgear/advanced/ssh_tunnel.md +++ b/docs/gears/netgear/advanced/ssh_tunnel.md @@ -73,7 +73,7 @@ SSH Tunnel Mode requires [`pexpect`](http://www.noah.org/wiki/pexpect) or [`para === "Pexpect" - !!! fail "`pexpect` is NOT compatible with Windows Machines." + !!! failure "`pexpect` is NOT compatible with Windows Machines." ```sh # install pexpect @@ -91,7 +91,7 @@ For implementing SSH Tunneling Mode, NetGear API currently provide following exc * **`ssh_tunnel_mode`** (_string_) : This attribute activates SSH Tunneling Mode and assigns the `"@:"` SSH URL for tunneling at Server end. Its usage is as follows: - !!! fail "On Server end, NetGear automatically validates if the `port` is open at specified Client's Public IP Address or not, and if it fails _(i.e. port is closed)_, NetGear will throw `AssertionError`!" + !!! failure "On Server end, NetGear automatically validates if the `port` is open at specified Client's Public IP Address or not, and if it fails _(i.e. port is closed)_, NetGear will throw `AssertionError`!" === "With Default Port" @@ -230,7 +230,7 @@ Open a terminal on Client System _(A Regular PC where you want to display the in For more information on Forwarding Port in Popular Home Routers. See [this document ➶](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/) -??? fail "Secsh channel X open FAILED: open failed: Administratively prohibited" +??? 
failure "Secsh channel X open FAILED: open failed: Administratively prohibited" **Error:** This error means that installed OpenSSH is preventing connections to forwarded ports from outside your Client Machine. @@ -239,7 +239,7 @@ Open a terminal on Client System _(A Regular PC where you want to display the in !!! info "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7" +```python linenums="1" hl_lines="7" # import required libraries from vidgear.gears import NetGear import cv2 @@ -292,7 +292,7 @@ Now, Open the terminal on Remote Server System _(A Raspberry Pi with a webcam co !!! info "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="8-9 17" +```python linenums="1" hl_lines="8-9 17" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear diff --git a/docs/gears/netgear/overview.md b/docs/gears/netgear/overview.md index 82c3dafe3..f2cb749c6 100644 --- a/docs/gears/netgear/overview.md +++ b/docs/gears/netgear/overview.md @@ -125,16 +125,6 @@ In addition to the primary modes, NetGear API also offers application-specific E   -## Importing - -You can import NetGear API in your program as follows: - -```python -from vidgear.gears import NetGear -``` - -  - ## Usage Examples
diff --git a/docs/gears/netgear/params.md b/docs/gears/netgear/params.md index 949069b2f..0be8d45f2 100644 --- a/docs/gears/netgear/params.md +++ b/docs/gears/netgear/params.md @@ -173,11 +173,13 @@ This parameter provides the flexibility to alter various NetGear API's internal * **`subscriber_timeout`**(_integer_): Similar to `request_timeout`, this internal attribute also controls the timeout value _(in seconds)_ but for non-synchronous `zmq.PUB/zmq.SUB` pattern in compression mode, after which the Client(Subscriber) exit itself with `Nonetype` value if it's unable to get any response from the socket. It's value can anything greater than `0`, and its disabled by default _(meaning the client will wait forever for response)_. - * **`flag`**(_integer_): This PyZMQ attribute value can be either `0` or `zmq.NOBLOCK`_( i.e. 1)_. More information can be found [here ➶](https://pyzmq.readthedocs.io/en/latest/api/zmq.html). + * **`flag`**(_integer_): This PyZMQ attribute value can be either `0` or `zmq.NOBLOCK`_( i.e. 1)_. More information can be found [here ➶](https://pyzmq.readthedocs.io/en/latest/api/zmq.html#zmq.Socket.recv). + + !!! warning "With flags=1 (i.e. `NOBLOCK`), NetGear raises `ZMQError` if no messages have arrived; otherwise, this waits until a message arrives." * **`copy`**(_boolean_): This PyZMQ attribute selects if message be received in a copying or non-copying manner. If `False` a object is returned, if `True` a string copy of the message is returned. - * **`track`**(_boolean_): This PyZMQ attribute check if the message is tracked for notification that ZMQ has finished with it. (_ignored if copy=True_). + * **`track`**(_boolean_): This PyZMQ attribute check if the message is tracked for notification that ZMQ has finished with it. _(ignored if `copy=True`)_. 
The desired attributes can be passed to NetGear API as follows: @@ -188,9 +190,9 @@ options = { "secure_mode": 2, "custom_cert_location": "/home/foo/foo1/foo2", "overwrite_cert": True, - "flag": 0, - "copy": False, - "track": False, + "flag": 0, + "copy": True, + "track": False } # assigning it NetGear(logging=True, **options) diff --git a/docs/gears/netgear/usage.md b/docs/gears/netgear/usage.md index b8f9152fa..2fa4847c9 100644 --- a/docs/gears/netgear/usage.md +++ b/docs/gears/netgear/usage.md @@ -20,7 +20,7 @@ limitations under the License. # NetGear API Usage Examples: -!!! danger Important Information +!!! danger "Important Information" * Kindly go through each given examples thoroughly, any incorrect settings/parameter may result in errors or no output at all. @@ -45,7 +45,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both sides anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -89,7 +89,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -137,13 +137,13 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 11-17" +```python linenums="1" hl_lines="6 11-17" # import required libraries from vidgear.gears import NetGear import cv2 # define various tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Define Netgear Client at given IP address and define parameters # !!! change following IP address '192.168.x.xxx' with yours !!! 
@@ -192,13 +192,13 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 14-19" +```python linenums="1" hl_lines="6 14-19" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear # define various tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Open live video stream on webcam at first index(i.e. 0) device stream = VideoGear(source=0).start() @@ -254,13 +254,13 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 # define tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Define Netgear Client at given IP address and define parameters # !!! change following IP address '192.168.x.xxx' with yours !!! @@ -309,7 +309,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 @@ -318,7 +318,7 @@ import cv2 stream = cv2.VideoCapture(0) # define tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Define Netgear Client at given IP address and define parameters # !!! change following IP address '192.168.x.xxx' with yours !!! @@ -371,13 +371,13 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear import cv2 # define various tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Define Netgear Client at given IP address and define parameters # !!! change following IP address '192.168.x.xxx' with yours !!! @@ -426,13 +426,13 @@ Now, Open the terminal on another Server System _(let's say you want to transmit !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import ScreenGear from vidgear.gears import NetGear # define various tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Start capturing live Monitor screen frames with default settings stream = ScreenGear().start() diff --git a/docs/gears/netgear_async/advanced/bidirectional_mode.md b/docs/gears/netgear_async/advanced/bidirectional_mode.md index ba1f88e3f..0ba838c03 100644 --- a/docs/gears/netgear_async/advanced/bidirectional_mode.md +++ b/docs/gears/netgear_async/advanced/bidirectional_mode.md @@ -87,7 +87,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate both sides anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="6 33 40 53" +```python linenums="1" hl_lines="6 33 40 53" # import library from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio @@ -163,7 +163,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="6 15 31" +```python linenums="1" hl_lines="6 15 31" # import libraries from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio @@ -233,7 +233,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="11-17" +```python linenums="1" hl_lines="11-17" # import libraries from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio @@ -305,96 +305,204 @@ Now, Open the terminal on another Server System _(a Raspberry Pi with Camera Mod !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="12-18" -# import library -from vidgear.gears.asyncio import NetGear_Async -from vidgear.gears import VideoGear -import cv2, asyncio +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# activate Bidirectional mode -options = {"bidirectional_mode": True} +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# initialize Server without any source at given IP address and define parameters -# !!! change following IP address '192.168.x.xxx' with client's IP address !!! 
-server = NetGear_Async( - source=None, - address="192.168.x.xxx", - port="5454", - protocol="tcp", - pattern=1, - logging=True, - **options -) +=== "New Picamera2 backend" -# Create a async frame generator as custom source + ```python linenums="1" hl_lines="13-19 23-67 74" -async def my_frame_generator(): + # import libs + from vidgear.gears.asyncio import NetGear_Async + from vidgear.gears import PiGear + from libcamera import Transform - # !!! define your own video source here !!! - # Open any video stream such as live webcam - # video stream on first index(i.e. 0) device + import cv2, asyncio - # add various Picamera tweak parameters to dictionary - options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, - } - - # open pi video stream with defined parameters - stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + # activate Bidirectional mode + options = {"bidirectional_mode": True} - # loop over stream until its terminated - while True: - # read frames - frame = stream.read() + # initialize Server without any source at given IP address and define parameters + # !!! change following IP address '192.168.x.xxx' with client's IP address !!! + server = NetGear_Async( + source=None, + address="192.168.x.xxx", + port="5454", + protocol="tcp", + pattern=1, + logging=True, + **options + ) - # check for frame if Nonetype - if frame is None: - break + # Create a async frame generator as custom source + async def my_frame_generator(): - # {do something with the frame to be sent here} + # !!! define your own video source below !!! - # prepare data to be sent(a simple text in our case) - target_data = "Hello, I am a Server."
+ # define various Picamera2 tweak parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } - # receive data from Client - recv_data = await server.transceive_data() + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - # print data just received from Client - if not (recv_data is None): - print(recv_data) + # loop over stream until its terminated + while True: + # read frames + frame = stream.read() - # send our frame & data - yield (target_data, frame) # (1) + # check for frame if Nonetype + if frame is None: + break - # sleep for sometime - await asyncio.sleep(0) + # {do something with the frame to be sent here} + + # prepare data to be sent(a simple text in our case) + target_data = "Hello, I am a Server." + + # receive data from Client + recv_data = await server.transceive_data() + + # print data just received from Client + if not (recv_data is None): + print(recv_data) + + # send our frame & data + yield (target_data, frame) # (1) + + # sleep for sometime + await asyncio.sleep(0) + + # safely close video stream + stream.stop() + + + if __name__ == "__main__": + # set event loop + asyncio.set_event_loop(server.loop) + # Add your custom source generator to Server configuration + server.config["generator"] = my_frame_generator() + # Launch the Server + server.launch() + try: + # run your main function task until it is complete + server.loop.run_until_complete(server.task) + except (KeyboardInterrupt, SystemExit): + # wait for interrupts + pass + finally: + # finally close the server + server.close() + ``` + + 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted in `target_data`. 
+ +=== "Legacy Picamera backend" + + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. - # safely close video stream - stream.stop() + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." -if __name__ == "__main__": - # set event loop - asyncio.set_event_loop(server.loop) - # Add your custom source generator to Server configuration - server.config["generator"] = my_frame_generator() - # Launch the Server - server.launch() - try: - # run your main function task until it is complete - server.loop.run_until_complete(server.task) - except (KeyboardInterrupt, SystemExit): - # wait for interrupts - pass - finally: - # finally close the server - server.close() -``` + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." -1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted in `target_data`. + + ```python linenums="1" hl_lines="12-18 22-67 74" + # import library + from vidgear.gears.asyncio import NetGear_Async + from vidgear.gears import PiGear + import cv2, asyncio + + # activate Bidirectional mode + options = {"bidirectional_mode": True} + + # initialize Server without any source at given IP address and define parameters + # !!! change following IP address '192.168.x.xxx' with client's IP address !!!
+ server = NetGear_Async( + source=None, + address="192.168.x.xxx", + port="5454", + protocol="tcp", + pattern=1, + logging=True, + **options + ) + + # Create a async frame generator as custom source + async def my_frame_generator(): + + # !!! define your own video source below !!! + + # define various Picamera tweak parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # loop over stream until its terminated + while True: + # read frames + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame to be sent here} + + # prepare data to be sent(a simple text in our case) + target_data = "Hello, I am a Server." + + # receive data from Client + recv_data = await server.transceive_data() + + # print data just received from Client + if not (recv_data is None): + print(recv_data) + + # send our frame & data + yield (target_data, frame) # (1) + + # sleep for sometime + await asyncio.sleep(0) + + # safely close video stream + stream.stop() + + + if __name__ == "__main__": + # set event loop + asyncio.set_event_loop(server.loop) + # Add your custom source generator to Server configuration + server.config["generator"] = my_frame_generator() + # Launch the Server + server.launch() + try: + # run your main function task until it is complete + server.loop.run_until_complete(server.task) + except (KeyboardInterrupt, SystemExit): + # wait for interrupts + pass + finally: + # finally close the server + server.close() + ``` + + 1. :warning: Everything except [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype data is accepted in `target_data`.   
@@ -420,7 +528,7 @@ Open your favorite terminal and execute the following python code: !!! alert "Server end can only send [numpy.ndarray](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) datatype as frame but not as data." -```python hl_lines="8 33-48 54 67" +```python linenums="1" hl_lines="8 33-48 54 67" # import library from vidgear.gears.asyncio import NetGear_Async from vidgear.gears.asyncio.helper import reducer @@ -512,7 +620,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7 18 34-43" +```python linenums="1" hl_lines="7 18 34-43" # import libraries from vidgear.gears.asyncio import NetGear_Async from vidgear.gears.asyncio.helper import reducer diff --git a/docs/gears/netgear_async/overview.md b/docs/gears/netgear_async/overview.md index 102ab6215..569b705f1 100644 --- a/docs/gears/netgear_async/overview.md +++ b/docs/gears/netgear_async/overview.md @@ -57,16 +57,6 @@ Whereas supported protocol are: `tcp` and `ipc`.   -## Importing - -You can import NetGear_Async API in your program as follows: - -```python -from vidgear.gears.asyncio import NetGear_Async -``` - -  - ## Usage Examples
diff --git a/docs/gears/netgear_async/params.md b/docs/gears/netgear_async/params.md index 2abdcbf1f..34f6302ee 100644 --- a/docs/gears/netgear_async/params.md +++ b/docs/gears/netgear_async/params.md @@ -396,9 +396,7 @@ NetGear_Async(source=0, **options) ### **`camera_num`** -This parameter selects the camera module index which will be used as source, if you're having multiple camera modules connected. Its value can only be greater than zero, otherwise, it will throw `ValueError` for any negative value. - -!!! warning "This parameter shouldn't be altered, until unless you using [Raspberry Pi 3/3+ Compute Module IO Board](https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md)."" +This parameter selects the camera index to be used as the source, allowing you to drive these multiple cameras simultaneously from within a single Python session. Its value can only be zero or greater, otherwise, NetGear_Async API will throw `ValueError` for any negative value. **Data-Type:** Integer @@ -407,18 +405,23 @@ This parameter selects the camera module index which will be used as source, if **Usage:** ```python -NetGear_Async(enablePiCamera=True, camera_num=0) +# select Camera Module at index `1` +NetGear_Async(enablePiCamera=True, camera_num=1) ``` + +!!! example "The complete usage example demonstrating the usage of the `camera_num` parameter is available [here ➶](../../../help/pigear_ex/#accessing-multiple-camera-through-its-index-in-pigear-api)." +   ### **`resolution`** -This parameter sets the resolution (i.e. `(width,height)`) of the source. +This parameter controls the **resolution** - a tuple _(i.e. `(width,height)`)_ of two values giving the width and height of the output frames. -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.resolution)" +!!! warning "Make sure both width and height values should be at least `64`." +!!! 
danger "When using the Picamera2 backend, the `resolution` parameter will be **OVERRIDDEN**, if the user explicitly defines the `output_size` property of the [`sensor`](#a-configurational-camera-parameters) configurational parameter." **Data-Type:** Tuple @@ -434,11 +437,8 @@ NetGear_Async(enablePiCamera=True, resolution=(1280,720)) # sets 1280x720 resolu ### **`framerate`** -This parameter sets the framerate of the source. - - -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.framerate)" +This parameter controls the framerate of the source. **Data-Type:** integer/float @@ -455,46 +455,24 @@ NetGear_Async(enablePiCamera=True, framerate=60) # sets 60fps framerate ### **`options`** -This parameter provides the ability to alter various **Tweak Parameters** `like brightness, saturation, senor_mode, resolution, etc.` available within [**Picamera library**](https://picamera.readthedocs.io/en/release-1.13/api_camera.html). +This dictionary parameter in the internal PiGear API backend allows you to control various camera settings for both the `picamera2` and legacy `picamera` backends and some internal API tasks. These settings include: -**Data-Type:** Dictionary +#### A. Configurational Camera Parameters +- [x] These parameters are provided by the underlying backend library _(depending upon backend in use)_, and must be applied to the camera system before the camera can be started. +- [x] **These parameter include:** _Brightness, Contrast, Saturation, Exposure, Colour Temperature, Colour Gains, etc._ +- [x] All supported parameters are listed in this [Usage example ➶](../../pigear/usage/#using-pigear-with-variable-camera-properties) -**Default Value:** Its default value is `{}` -**Usage:** - -!!! tip "All supported parameters are listed in [PiCamera Docs](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" +#### B. 
User-defined Parameters +- [x] These user-defined parameters control specific internal behaviors of the API and perform certain tasks on the camera objects. +- [x] All supported User-defined Parameters are listed [here ➶](../../pigear/params/#b-user-defined-parameters) -The desired parameters can be passed to NetGear_Async API by formatting them as this parameter's attributes, as follows: - -```python -# formatting parameters as dictionary attributes -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} -# assigning it -NetGear_Async(enablePiCamera=True, logging=True, **options) -``` - -**User-specific attributes:** - -Additionally, `options` parameter also support some User-specific attributes, which are as follows: - -* **`HWFAILURE_TIMEOUT`** (float): PiGear contains ==Threaded Internal Timer== - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur at a timeout value. This parameter can be used to control that timeout value i.e. the maximum waiting time _(in seconds)_ after which PiGear exits with a `SystemError` to save resources. Its value can only be between `1.0` _(min)_ and `10.0` _(max)_ and its default value is `2.0`. Its usage is as follows: - - ```python - options = {"HWFAILURE_TIMEOUT": 2.5} # sets timeout to 2.5 seconds - ```     + ## Common Parameters !!! summary "These are common parameters that works with every backend in NetGear_Async." diff --git a/docs/gears/netgear_async/usage.md b/docs/gears/netgear_async/usage.md index fb31159de..7d44db1ef 100644 --- a/docs/gears/netgear_async/usage.md +++ b/docs/gears/netgear_async/usage.md @@ -51,7 +51,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python +```python linenums="1" hl_lines="6 13" # import libraries from vidgear.gears.asyncio import NetGear_Async import asyncio @@ -81,7 +81,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" hl_lines="6 9-20 27" # import libraries from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio @@ -89,7 +89,6 @@ import cv2, asyncio # define and launch Client with `receive_mode=True` client = NetGear_Async(receive_mode=True).launch() - # Create a async function where you want to show/manipulate your received frames async def main(): # loop over Client's Asynchronous Frame Generator @@ -104,7 +103,6 @@ async def main(): # await before continuing await asyncio.sleep(0) - if __name__ == "__main__": # Set event loop to client's asyncio.set_event_loop(client.loop) @@ -136,7 +134,7 @@ Open a terminal on Client System _(where you want to display the input frames re !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7-12" +```python linenums="1" hl_lines="7-12" # import libraries from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio @@ -191,7 +189,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7-12" +```python linenums="1" hl_lines="7-12" # import libraries from vidgear.gears.asyncio import NetGear_Async import asyncio @@ -235,7 +233,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" 
-```python hl_lines="14-31 38" +```python linenums="1" hl_lines="14-31 38" # import library from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio @@ -351,7 +349,7 @@ Open your favorite terminal and execute the following python code: !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python hl_lines="7" +```python linenums="1" hl_lines="7" # import libraries from vidgear.gears.asyncio import NetGear_Async import asyncio @@ -383,7 +381,7 @@ Then open another terminal on the same system and execute the following python c !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" hl_lines="20" # import libraries from vidgear.gears.asyncio import NetGear_Async from vidgear.gears import WriteGear @@ -391,10 +389,10 @@ import cv2, asyncio # define and launch Client with `receive_mode=True` client = NetGear_Async(receive_mode=True).launch() + # Define writer with output filename 'Output.mp4' writer = WriteGear(output="Output.mp4", logging=True) - # Create a async function where you want to show/manipulate your received frames async def main(): # loop over Client's Asynchronous Frame Generator diff --git a/docs/gears/pigear/overview.md b/docs/gears/pigear/overview.md index 0c40f979a..f9277b210 100644 --- a/docs/gears/pigear/overview.md +++ b/docs/gears/pigear/overview.md @@ -27,32 +27,36 @@ limitations under the License. ## Overview -> PiGear is similar to [CamGear API](../../camgear/overview/) but exclusively made to support various Raspberry Pi Camera Modules _(such as OmniVision OV5647 Camera Module and Sony IMX219 Camera Module)_. +> PiGear is a specialized API similar to the [CamGear API](../../camgear/overview/) but optimized for **Raspberry Pi :fontawesome-brands-raspberry-pi: Boards**, offering comprehensive **support for camera modules** _(e.g., OmniVision OV5647, Sony IMX219)_, along with **limited compatibility for USB cameras**. 
-PiGear provides a flexible multi-threaded framework around complete [picamera](https://picamera.readthedocs.io/en/release-1.13/index.html) python library, and provide us the ability to exploit almost all of its parameters like `brightness, saturation, sensor_mode, iso, exposure, etc.` effortlessly. Furthermore, PiGear also supports multiple camera modules, such as in the case of Raspberry-Pi Compute Module IO boards. -Best of all, PiGear contains ==Threaded Internal Timer== - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur. That means that if you're running PiGear API in your script and someone accidentally pulls the Camera-Module cable out, instead of going into possible kernel panic, API will exit safely to save resources. +PiGear implements a seamless and robust wrapper around the [picamera2](https://github.com/raspberrypi/picamera2) python library, simplifying integration with minimal code changes and ensuring a smooth transition for developers already familiar with the Picamera2 API. PiGear leverages the `libcamera` API under the hood with multi-threading, providing high-performance :fire:, enhanced control and functionality for Raspberry Pi camera modules. -!!! error "Make sure to [enable Raspberry Pi hardware-specific settings](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) prior using this API, otherwise nothing will work." +PiGear handles common configuration parameters and non-standard settings for various camera types, simplifying the integration process. PiGear currently supports PiCamera2 API parameters such as `sensor`, `controls`, `transform`, and `format` etc., with internal type and sanity checks for robust performance. -!!! 
tip "Helpful Tips" +While primarily focused on Raspberry Pi camera modules, PiGear also provides basic functionality for USB webcams only with Picamera2 API, along with the ability to accurately differentiate between USB and Raspberry Pi cameras using metadata. - * If you're already familar with [OpenCV](https://github.com/opencv/opencv) library, then see [Switching from OpenCV ➶](../../../switch_from_cv/#switching-videocapture-apis) +???+ info "Backward compatibility with `picamera` library" + PiGear seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) library if the `picamera2` library is unavailable, ensuring seamless backward compatibility. For this, PiGear also provides a flexible multi-threaded framework around complete `picamera` API, allowing developers to effortlessly exploit a wide range of parameters, such as `brightness`, `saturation`, `sensor_mode`, `iso`, `exposure`, and more. - * It is advised to enable logging(`logging = True`) on the first run for easily identifying any runtime errors. + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params/#b-user-defined-parameters) user-defined optional parameter boolean attribute." +Furthermore, PiGear supports the use of multiple camera modules, including those found on Raspberry Pi Compute Module IO boards and USB cameras _(only with Picamera2 API)_. -  +???+ new "Threaded Internal Timer :material-camera-timer:" + PiGear ensures proper resource release during the termination of the API, preventing potential issues or resource leaks. PiGear API internally implements a ==Threaded Internal Timer== that silently keeps active track of any frozen-threads or hardware-failures and exits safely if any do occur. 
This means that if you're running the PiGear API in your script and someone accidentally pulls the Camera-Module cable out, instead of going into a possible kernel panic, the API will exit safely to save resources. -## Importing +!!! failure "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this API, otherwise nothing will work." -You can import PiGear API in your program as follows: +!!! tip "Helpful Tips" + * Follow [PiCamera2 documentation](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) and [Picamera documentation](https://picamera.readthedocs.io/en/release-1.13/) which should help you quickly get started. -```python -from vidgear.gears import PiGear -``` + * If you're already familiar with [OpenCV](https://github.com/opencv/opencv) library, then see [Switching from OpenCV ➶](../../../switch_from_cv/#switching-videocapture-apis). + + * It is advised to enable logging(`logging = True`) on the first run for easily identifying any runtime errors. -  + +  ## Usage Examples @@ -60,9 +64,7 @@ from vidgear.gears import PiGear See here 🚀
-!!! experiment "After going through PiGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/pigear_ex/)" - - +!!! example "After going through PiGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/pigear_ex/)" ## Parameters diff --git a/docs/gears/pigear/params.md b/docs/gears/pigear/params.md index b28e72fd1..9f8fefa5b 100644 --- a/docs/gears/pigear/params.md +++ b/docs/gears/pigear/params.md @@ -24,9 +24,7 @@ limitations under the License. ## **`camera_num`** -This parameter selects the camera module index which will be used as source, if you're having multiple camera modules connected. Its value can only be greater than zero, otherwise, it will throw `ValueError` for any negative value. - -!!! warning "This parameter shouldn't be altered, until unless you using [Raspberry Pi 3/3+ Compute Module IO Board](https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md)."" +This parameter selects the camera index to be used as the source, allowing you to drive these multiple cameras simultaneously from within a single Python session. Its value can only be zero or greater, otherwise, PiGear API will throw `ValueError` for any negative value. **Data-Type:** Integer @@ -35,18 +33,23 @@ This parameter selects the camera module index which will be used as source, if **Usage:** ```python -PiGear(camera_num=0) +# select Camera Module at index `1` +PiGear(camera_num=1) ``` + +!!! example "The complete usage example demonstrating the usage of the `camera_num` parameter is available [here ➶](../../../help/pigear_ex/#accessing-multiple-camera-through-its-index-in-pigear-api)." +   ## **`resolution`** -This parameter sets the resolution (i.e. `(width,height)`) of the source. +This parameter controls the **resolution** - a tuple _(i.e. `(width,height)`)_ of two values giving the width and height of the output frames. -!!! 
info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.resolution)" +!!! warning "Make sure both width and height values should be at least `64`." +!!! danger "When using the Picamera2 backend, the `resolution` parameter will be **OVERRIDDEN**, if the user explicitly defines the `output_size` property of the [`sensor`](#a-configurational-camera-parameters) configurational parameter in PiGear API." **Data-Type:** Tuple @@ -63,11 +66,7 @@ PiGear(resolution=(1280,720)) # sets 1280x720 resolution ## **`framerate`** -This parameter sets the framerate of the source. - - -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.framerate)" - +This parameter controls the framerate of the source. **Data-Type:** integer/float @@ -83,15 +82,17 @@ PiGear(framerate=60) # sets 60fps framerate ## **`colorspace`** -This parameter selects the colorspace of the source stream. +This parameter controls the colorspace of the output frames. + +!!! example "With the Picamera2 backend, you can also define a custom `format` _(format of output frame pixels)_ in PiGear API. Checkout this [bonus example ➶](../../../help/pigear_ex/#changing-output-pixel-format-in-pigear-api-with-picamera2-backend)" **Data-Type:** String -**Default Value:** Its default value is `None`. +**Default Value:** Its default value is `None` _(i.e. Default `BGR` colorspace)_. **Usage:** -!!! tip "All supported `colorspace` values are given [here ➶](../../../bonus/colorspace_manipulation/)" +!!! 
tip "All supported `colorspace` values are described [here ➶](../../../bonus/colorspace_manipulation/)" ```python PiGear(colorspace="COLOR_BGR2HSV") @@ -103,7 +104,61 @@ PiGear(colorspace="COLOR_BGR2HSV") ## **`options`** -This parameter provides the ability to alter various **Tweak Parameters** `like brightness, saturation, senor_mode, resolution, etc.` available within [**Picamera library**](https://picamera.readthedocs.io/en/release-1.13/api_camera.html). +This dictionary parameter in the PiGear API allows you to control various camera settings for both the `picamera2` and legacy `picamera` backends and some internal API tasks. These settings include: + +### A. Configurational Camera Parameters +- [x] These parameters are provided by the underlying backend library _(depending upon backend in use)_, and must be applied to the camera system before the camera can be started. +- [x] **These parameters include:** _Brightness, Contrast, Saturation, Exposure, Colour Temperature, Colour Gains, etc._ +- [x] All supported parameters are listed in this [Usage example ➶](../usage/#using-pigear-with-variable-camera-properties) + + +### B. User-defined Parameters +- [x] These user-defined parameters control specific internal behaviors of the API and perform certain tasks on the camera objects. +- [x] **All supported User-defined Parameters are listed below:** + + * **`enforce_legacy_picamera`** (bool): This user-defined boolean parameter, if `True`, forces the use of the legacy `picamera` backend in PiGear API, even if the newer `picamera2` backend is available on the system. Its default value is `False`. Its usage is as follows: + + !!! info "PiGear API will verify if the `picamera` Python library is installed before enabling the `enforce_legacy_picamera` parameter." 
+ + ```python + options = {"enforce_legacy_picamera": True} # enforces `picamera` backend + ``` + + * **`enable_verbose_logs`** (bool): **[`picamera2` backend only]** This `picamera2` backend specific parameter, if `True`, will set the logging level to output all debug messages from Picamera2 library. This parameter can be used in conjunction with enabling general logging (`logging=True`) in the PiGear API for even more granular control over logging output. Its default value is `False` _(meaning only warning message will be outputted)_. Its usage is as follows: + + !!! warning "This parameter requires logging to be enabled _(i.e. [`logging=True`](#logging))_ in PiGear API, otherwise it will be discarded." + + ```python + options = {"enable_verbose_logs": True} # enables debug logs from `picamera2` backend + ``` + + * **`auto_align_output_size`** (bool): **[`picamera2` backend only]** The Picamera2 backend in PiGear API has certain hardware restrictions and optimal frame size _(or `resolution`)_ for efficient processing. Although user-specified frame sizes are allowed, Picamera2 can make minimal adjustments to the configuration if it detects an invalid or inefficient size. This parameter, if `True`, will request these optimal frame size adjustments from Picamera2. Its default value is `False` _(meaning no changes will be made to user-specified resolution)_. Its usage is explained in detail below: + + !!! danger "This parameter may override any invalid or inefficient size inputted by user through [`resolution`](#resolution) parameter in PiGear API." 
+ + ```python + # auto-aligns output resolution to optimal + options = {"auto_align_output_size": True} + + # open pi video stream with user-specified resolution `(808, 606)` + stream = PiGear(resolution=(808, 606), logging=True, **options).start() + + # read frame from stream + frame = stream.read() + + # print final resolution of frame + print('width: ', frame.shape[1]) # width: 800 (changed) + print('height: ', frame.shape[0]) # height: 606 + # Picamera2 has decided an 800x606 image will be more efficient. + ``` + **Explanation:** In the example code, Picamera2 adjusts the requested output resolution of `(808, 606)` to the more efficient `(800, 606)` size. + + * **`HWFAILURE_TIMEOUT`** (float): PiGear API provides a ==**Threaded Internal Timer**== that silently keeps track of any frozen threads/hardware failures and exits safely if any occur at a timeout value. This parameter controls the timeout value, which is the maximum waiting time _(in seconds)_ after which API exits itself with a `SystemError` to save resources. Its value can only be set between `1.0` _(min)_ and `10.0` _(max)_, with a default value of `2.0`. Its usage is as follows: + + ```python + options = {"HWFAILURE_TIMEOUT": 2.5} # sets timeout to 2.5 seconds + ``` + **Data-Type:** Dictionary @@ -111,32 +166,41 @@ This parameter provides the ability to alter various **Tweak Parameters** `like **Usage:** -!!! tip "All supported parameters are listed in [PiCamera Docs](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" +!!! example "The complete usage example demonstrating the usage of the `options` parameter is available [here ➶](../usage/#using-pigear-with-variable-camera-properties)."
-The desired parameters can be passed to PiGear API by formatting them as this parameter's attributes, as follows: +You can format these user-defined and configurational parameters as attributes of this `options` dictionary parameter as follows: -```python -# formatting parameters as dictionary attributes -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} -# assigning it -PiGear(logging=True, **options) -``` +=== "New Picamera2 backend" -**User-specific attributes:** - -Additionally, `options` parameter also support some User-specific attributes, which are as follows: + ```python + # formulate various Picamera2 API parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "exposure_compensation": 15, + "sensor": {"output_size": (480, 320)}, # !!! will override `resolution` !!! + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + ``` -* **`HWFAILURE_TIMEOUT`** (float): PiGear contains ==Threaded Internal Timer== - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur at a timeout value. This parameter can be used to control that timeout value i.e. the maximum waiting time _(in seconds)_ after which PiGear exits with a `SystemError` to save resources. Its value can only be between `1.0` _(min)_ and `10.0` _(max)_ and its default value is `2.0`. 
Its usage is as follows: +=== "Legacy Picamera backend" ```python - options = {"HWFAILURE_TIMEOUT": 2.5} # sets timeout to 2.5 seconds + # formulate various Picamera API parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() ```   diff --git a/docs/gears/pigear/usage.md b/docs/gears/pigear/usage.md index 69621a168..fb3b36d7e 100644 --- a/docs/gears/pigear/usage.md +++ b/docs/gears/pigear/usage.md @@ -20,10 +20,11 @@ limitations under the License. # PiGear API Usage Examples: +!!! new "PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../../installation/pip_install/#picamera2) for its installation." -!!! warning "Make sure to [enable Raspberry Pi hardware-specific settings](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) prior using this API, otherwise nothing will work." +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this API, otherwise nothing will work." -!!! experiment "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/pigear_ex/)" +!!! example "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/pigear_ex/)" @@ -34,13 +35,47 @@ limitations under the License. Following is the bare-minimum code you need to get started with PiGear API: -```python +??? 
info "Under the hood, PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." + + However, PiGear API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params/#b-user-defined-parameters) user-defined optional parameter boolean attribute." + +??? danger "Disabling common `libcamera` API messages in silent mode." + + The picamera2 backend can be a bit verbose with logging messages from the underlying `libcamera` library, even when logging is disabled (`logging=False`) in the PiGear API. + + - [x] To suppress these messages, you'll need to set `LIBCAMERA_LOG_LEVELS=2` environment variable before running your application. This will disable common `libcamera` API messages, keeping your console output cleaner. 
+ - [x] This can be done on various Operating Systems as follows: + + === ":material-linux: Linux" + + ```sh + export LIBCAMERA_LOG_LEVELS=2 + ``` + + === ":fontawesome-brands-windows: Windows (Powershell)" + + ```powershell + $Env:LIBCAMERA_LOG_LEVELS=2 + ``` + + === ":material-apple: MacOS" + + ```sh + export LIBCAMERA_LOG_LEVELS=2 + ``` + +```python linenums="1" # import required libraries from vidgear.gears import PiGear import cv2 - -# open pi video stream with default parameters +# open stream with default parameters stream = PiGear().start() # loop over @@ -72,58 +107,157 @@ stream.stop()   -## Using PiGear with Variable Camera Module Properties +## Using PiGear with Variable Camera Properties -PiGear supports almost every parameter available within [**Picamera library**](https://picamera.readthedocs.io/en/release-1.13/api_camera.html). These parameters can be easily applied to the source stream in PiGear API through its [`options`](../params/#options) dictionary parameter by formatting them as its attributes. The complete usage example is as follows: +=== "New Picamera2 backend" + > PiGear provides a user-friendly interface for the underlying picamera2 library, offering access to almost all of its important configurational parameters. It simplifies configuration for developers with even basic knowledge of Raspberry Pi camera modules, allowing them to easily configure and control the camera functionality with just a few lines of code. + + This example doc showcases the capabilities of PiGear and demonstrates how it simplifies camera configuration with Picamera2 API backend. -!!! tip "All supported parameters are listed in [PiCamera Docs ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" + ??? info "All supported Picamera2 Library Configurational Parameters [IMPORTANT]" + Following are the list of Picamera2 parameters, i.e. 
if supported, can be applied to the source stream in PiGear API through its [`options`](../params/#options) dictionary parameter by formatting them as its attributes. -```python hl_lines="7-12" -# import required libraries -from vidgear.gears import PiGear -import cv2 + ???+ warning "Few Important points" + - These PiCamera2 parameters must be formatted as PiGear API's [`options`](../params/#options) dictionary parameter keys, and their values **MUST** strictly adhere to the specified data types _(see table below)_. If the values do not follow the specified data types, they will be discarded. + - PiGear API only defines the default `main` stream configuration that is delivered to the PiCamera2 API. You **CANNOT** define other streams _(such as `lores`, `raw`)_ manually. + - The `FrameDuration` and `FrameDurationLimits` properties of [`control` configurational parameter](../params/#a-configurational-camera-parameters) are **NOT** supported and will be discarded, since camera FPS is handled by [`framerate`](../params/#framerate) parameter in PiGear API. + - The [`resolution`](../params/#resolution) parameter will be **OVERRIDDEN**, if the user explicitly defines the `output_size` property of the [`sensor` configurational parameter](../params/#a-configurational-camera-parameters) in PiGear API. 
-# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} + | Parameters | Datatype | Description | Supported | Supported on USB Cameras| Remarks | + |:----------:|:-----:|:-------------:|:-----------:|:--------:|:-------:| + | `buffer_count` | `int`, `>=1` | number of sets of buffers to allocate for the camera system | :white_check_mark: | :x: | Read Docs [here ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) | + | `queue` | `bool` | whether the system is allowed to queue up a frame ready for a capture request | :white_check_mark: | :x: | Read Docs [here ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf)| + | `controls` | `dict` | specify a set of runtime controls that can be regarded as part of the camera configuration | :white_check_mark: | :x: | Read Docs [here ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) | + | `sensor` | `dict` | allow to select a particular mode of operation for the sensor | :white_check_mark: | :white_check_mark: | Read Docs [here ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) | + | `format` | `str` | Pixel formats | :white_check_mark: | :white_check_mark: | Read Docs [here ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) and see [Bonus example ➶](../../../help/pigear_ex/#changing-output-pixel-format-in-pigear-api-with-picamera2-backend) | + | `transform` | `Transform`[^1] | The 2D plane transform that is applied to all images from all the configured streams. 
| :white_check_mark: | :x: | Read Docs [here ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) | + | `colour_space` | :octicons-dash-16: | colour space of the output images | :x: | :no_entry_sign: | Handled by [`colorspace`](../params/#colorspace) parameter of PiGear API | + | `size` | :octicons-dash-16: | A tuple of two values giving the width and height of the output image. _(Both numbers should be no less than 64)_ | :x: | :no_entry_sign: | Handled by [`resolution`](../params/#resolution) parameter of PiGear API | + | `display` | :octicons-dash-16: | name of the stream that will be displayed in the preview window. | :x: | :no_entry_sign: | Not-Required | + | `encode` | :octicons-dash-16: | name of the stream that will be used for video recording. | :x: | :no_entry_sign: | Not-Required | -# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + ??? failure "Limited support for USB Cameras" -# loop over -while True: + This example also works with USB Cameras, However: - # read frames from stream - frame = stream.read() + - Users should assume that features such as: **Camera controls** (`"controls"`), **Transformations** (`"transform"`), **Queue** (`"queue"`) , and **Buffer Count** (`"buffer_count"`) that are supported on Raspberry Pi cameras, and so forth, are not available on USB Cameras. + - Hot-plugging of USB cameras is also **NOT** supported - PiGear API should be completely shut down and restarted when cameras are added or removed. - # check for frame if Nonetype - if frame is None: - break + ??? tip "Enabling verbose logs for backend PiCamera2 Library" + The PiGear API allows you to enable more detailed logging from the `picamera2` backend library using the [`enable_verbose_logs`](../params/#b-user-defined-parameters) user-defined optional parameter attribute. 
This can be used in conjunction with enabling general logging (`logging=True`) in the PiGear API for even more granular control over logging output. - # {do something with the frame here} + !!! example "PiGear also support changing parameter at runtime. Checkout this bonus example [here ➶](../../../help/pigear_ex/#dynamically-adjusting-raspberry-pi-camera-parameters-at-runtime-in-pigear-api)" - # Show output window - cv2.imshow("Output Frame", frame) + ```python linenums="1" hl_lines="3 9-14" + # import required libraries + from vidgear.gears import PiGear + from libcamera import Transform + import cv2 - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break + # formulate various Picamera2 API + # configurational parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "sensor": {"output_size": (480, 320)}, # !!! will override `resolution` !!! + "auto_align_output_size": True, # auto-align output size + } -# close output window -cv2.destroyAllWindows() + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# safely close video stream -stream.stop() -``` + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` + +=== "Legacy Picamera backend" + + !!! danger "PiGear API switches to the legacy `picamera`backend if the `picamera2` library is unavailable." + + It is advised to enable logging(`logging=True`) to see which backend is being used. + + !!! 
failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params/#b-user-defined-parameters) user-defined optional parameter boolean attribute." + + PiGear also supports almost every parameter available within [`picamera`](https://picamera.readthedocs.io/en/release-1.13/api_camera.html) python library. These parameters can be easily applied to the source stream in PiGear API through its [`options`](../params/#options) dictionary parameter by formatting them as its attributes. The complete usage example is as follows: + + !!! tip "All supported parameters are listed in [PiCamera Docs ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" + + !!! example "PiGear also support changing parameter at runtime. Checkout this bonus example [here ➶](../../../help/pigear_ex/#dynamically-adjusting-raspberry-pi-camera-parameters-at-runtime-in-pigear-api)" + + ```python linenums="1" hl_lines="8-13" + # import required libraries + from vidgear.gears import PiGear + import cv2 + + # formulate various Picamera API + # configurational parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ```   
@@ -133,35 +267,23 @@ PiGear API also supports **Direct Colorspace Manipulation**, which is ideal for !!! info "A more detailed information on colorspace manipulation can be found [here ➶](../../../bonus/colorspace_manipulation/)" -In following example code, we will start with [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) as source colorspace, and then we will switch to [**GRAY**](https://en.wikipedia.org/wiki/Grayscale) colorspace when `w` key is pressed, and then [**LAB**](https://en.wikipedia.org/wiki/CIELAB_color_space) colorspace when `e` key is pressed, finally default colorspace _(i.e. **BGR**)_ when `s` key is pressed. Also, quit when `q` key is pressed: +In following example code, we will start with [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) as source colorspace, and then we will switch to [**GRAY**](https://en.wikipedia.org/wiki/Grayscale) colorspace when ++"W"++ key is pressed, and then [**LAB**](https://en.wikipedia.org/wiki/CIELAB_color_space) colorspace when ++"E"++ key is pressed, finally default colorspace _(i.e. **BGR**)_ when ++"S"++ key is pressed. Also, quit when ++"Q"++ key is pressed: !!! warning "Any incorrect or None-Type value will immediately revert the colorspace to default _(i.e. `BGR`)_." -```python hl_lines="20 47 51 55" +```python linenums="1" hl_lines="9 35 39 43" # import required libraries from vidgear.gears import PiGear import cv2 - -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} - # open pi video stream with defined parameters and change colorspace to `HSV` stream = PiGear( resolution=(640, 480), framerate=60, colorspace="COLOR_BGR2HSV", - logging=True, - **options + logging=True ).start() @@ -213,64 +335,141 @@ stream.stop() PiGear can be easily used with WriteGear API directly without any compatibility issues. 
The suitable example is as follows: -```python -# import required libraries -from vidgear.gears import PiGear -from vidgear.gears import WriteGear -import cv2 +=== "New Picamera2 backend" -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} + ```python linenums="1" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import WriteGear + from libcamera import Transform + import cv2 -# define suitable (Codec,CRF,preset) FFmpeg parameters for writer -output_params = {"-vcodec": "libx264", "-crf": 0, "-preset": "fast"} + # formulate various Picamera2 API + # configurational parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "sensor": {"output_size": (480, 320)}, # will override `resolution` + "auto_align_output_config": True, # auto-align camera configuration + } -# open pi video stream with defined parameters -stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() -# Define writer with defined parameters and suitable output filename for e.g. `Output.mp4` -writer = WriteGear(output="Output.mp4", logging=True, **output_params) + # define suitable (Codec,CRF,preset) FFmpeg parameters for writer + output_params = {"-vcodec": "libx264", "-crf": 0, "-preset": "fast"} -# loop over -while True: + # Define writer with defined parameters and suitable output filename for e.g. 
`Output.mp4` + writer = WriteGear(output="Output.mp4", logging=True, **output_params) - # read frames from stream - frame = stream.read() + # loop over + while True: - # check for frame if Nonetype - if frame is None: - break + # read frames from stream + frame = stream.read() - # {do something with the frame here} - # lets convert frame to gray for this example - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + # check for frame if Nonetype + if frame is None: + break - # write gray frame to writer - writer.write(gray) + # {do something with the frame here} + # lets convert frame to gray for this example + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - # Show output window - cv2.imshow("Output Frame", frame) + # write gray frame to writer + writer.write(gray) - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break + # Show output window + cv2.imshow("Output Gray Frame", gray) -# close output window -cv2.destroyAllWindows() + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break -# safely close video stream -stream.stop() + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + + # safely close writer + writer.close() + ``` + +=== "Legacy Picamera backend" + + ??? danger "PiGear API switches to the legacy `picamera`backend if the `picamera2` library is unavailable." + + It is advised to enable logging(`logging=True`) to see which backend is being used. + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params/#b-user-defined-parameters) user-defined optional parameter boolean attribute." 
+ + ```python linenums="1" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import WriteGear + import cv2 + + # formulate various Picamera API + # configurational parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # define suitable (Codec,CRF,preset) FFmpeg parameters for writer + output_params = {"-vcodec": "libx264", "-crf": 0, "-preset": "fast"} -# safely close writer -writer.close() -``` + # Define writer with defined parameters and suitable output filename for e.g. `Output.mp4` + writer = WriteGear(output="Output.mp4", logging=True, **output_params) + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + # lets convert frame to gray for this example + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + + # write gray frame to writer + writer.write(gray) + + # Show output window + cv2.imshow("Output Gray Frame", gray) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + + # safely close writer + writer.close() + ``` + +  -  \ No newline at end of file +[^1]: A custom `libcamera` API class. Must be imported as `from libcamera import Transform`. 
diff --git a/docs/gears/screengear/overview.md b/docs/gears/screengear/overview.md index a64d71985..328679693 100644 --- a/docs/gears/screengear/overview.md +++ b/docs/gears/screengear/overview.md @@ -43,16 +43,6 @@ ScreenGear API implements a multi-threaded wrapper around [**dxcam**](https://gi   -## Importing - -You can import ScreenGear API in your program as follows: - -```python -from vidgear.gears import ScreenGear -``` - -  - ## Usage Examples
@@ -61,8 +51,6 @@ from vidgear.gears import ScreenGear !!! experiment "After going through ScreenGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/screengear_ex/)" - - ## Parameters
diff --git a/docs/gears/screengear/params.md b/docs/gears/screengear/params.md index c12f96ad5..350c203cb 100644 --- a/docs/gears/screengear/params.md +++ b/docs/gears/screengear/params.md @@ -90,7 +90,7 @@ This parameter enables [`pyscreenshot`](https://github.com/BoboTiG/python-mss) u !!! warning "Remember to install backend library and all of its dependencies you're planning to use with ScreenGear API." -!!! error "Any value on [`monitor`](#monitor) parameter will disable the `backend` parameter. You cannot use both parameters at same time." +!!! failure "Any value on [`monitor`](#monitor) parameter will disable the `backend` parameter. You cannot use both parameters at same time." !!! info "Backend defaults to `dxcam` library on Windows _(if installed)_, and `pyscreenshot` otherwise." diff --git a/docs/gears/screengear/usage.md b/docs/gears/screengear/usage.md index 52bb5f8a8..9587f10dd 100644 --- a/docs/gears/screengear/usage.md +++ b/docs/gears/screengear/usage.md @@ -20,7 +20,7 @@ limitations under the License. # ScreenGear API Usage Examples: -!!! experiment "After going through ScreenGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/screengear_ex/)" +!!! example "After going through ScreenGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/screengear_ex/)" !!! success "Recommended: Install DXcam library on Windows :fontawesome-brands-windows: Machines" @@ -36,7 +36,7 @@ limitations under the License. 
Following is the bare-minimum code you need to get started with ScreenGear API: -```python +```python linenums="1" # import required libraries from vidgear.gears import ScreenGear import cv2 @@ -94,7 +94,7 @@ ScreenGear API provides us the flexibility to directly set the dimensions of cap The complete usage example is as follows: -```python hl_lines="6" +```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import ScreenGear import cv2 @@ -150,7 +150,7 @@ ScreenGear API provides us the flexibility to select any connected display for f ??? tip "Using GPU acceleration on Windows :fontawesome-brands-windows:" With `dxcam` library backend, you can also assign which GPU devices ids to use along with monitor device ids as tuple `(monitor_idx, gpu_idx)`, as follows: - ```python + ```python linenums="1" # open video stream with defined parameters with # monitor at index `1` and GPU at index `0`. stream = ScreenGear(monitor=(1,0), logging=True).start() @@ -167,7 +167,7 @@ ScreenGear API provides us the flexibility to select any connected display for f 'Device[0] Output[0]: Res:(1920, 1080) Rot:0 Primary:True\nDevice[0] Output[1]: Res:(1920, 1080) Rot:0 Primary:False\n' ``` - ```python hl_lines="6" + ```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import ScreenGear import cv2 @@ -209,7 +209,7 @@ ScreenGear API provides us the flexibility to select any connected display for f !!! danger "With `mss` library backend, API will output [`BGRA`](https://en.wikipedia.org/wiki/RGBA_color_model) colorspace frames instead of default `BGR`." - ```python hl_lines="6" + ```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import ScreenGear import cv2 @@ -258,9 +258,9 @@ With ScreenGear API, you can select from many different backends that generates !!! note "Backend defaults to `dxcam` library on Windows _(if installed)_, and `pyscreenshot` otherwise." -!!! 
error "Any value on `monitor` parameter will disable the `backend` parameter. You cannot use them simultaneously." +!!! failure "Any value on `monitor` parameter will disable the `backend` parameter. You cannot use them simultaneously." -```python hl_lines="7" +```python linenums="1" hl_lines="7" # import required libraries from vidgear.gears import ScreenGear import cv2 @@ -304,13 +304,13 @@ ScreenGear API also supports **Direct Colorspace Manipulation**, which is ideal !!! info "A more detailed information on colorspace manipulation can be found [here ➶](../../../bonus/colorspace_manipulation/)" -In following example code, we will start with [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) as source colorspace, and then we will switch to [**GRAY**](https://en.wikipedia.org/wiki/Grayscale) colorspace when `w` key is pressed, and then [**LAB**](https://en.wikipedia.org/wiki/CIELAB_color_space) colorspace when `e` key is pressed, finally default colorspace _(i.e. **BGR**)_ when `s` key is pressed. Also, quit when `q` key is pressed: +In following example code, we will start with [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) as source colorspace, and then we will switch to [**GRAY**](https://en.wikipedia.org/wiki/Grayscale) colorspace when ++"W"++ key is pressed, and then [**LAB**](https://en.wikipedia.org/wiki/CIELAB_color_space) colorspace when ++"E"++ key is pressed, finally default colorspace _(i.e. **BGR**)_ when ++"S"++ key is pressed. Also, quit when ++"Q"++ key is pressed: !!! warning "Any incorrect or None-type value, will immediately revert the colorspace to default i.e. `BGR`." -```python hl_lines="6 29 33 37" +```python linenums="1" hl_lines="6 29 33 37" # import required libraries from vidgear.gears import ScreenGear import cv2 @@ -367,7 +367,7 @@ stream.stop() ScreenGear can be used in conjunction with WriteGear API directly without any compatibility issues. 
The suitable example is as follows: -```python +```python linenums="1" # import required libraries from vidgear.gears import ScreenGear from vidgear.gears import WriteGear diff --git a/docs/gears/stabilizer/overview.md b/docs/gears/stabilizer/overview.md index e90cf060a..b1c468948 100644 --- a/docs/gears/stabilizer/overview.md +++ b/docs/gears/stabilizer/overview.md @@ -67,23 +67,13 @@ The basic idea behind it is to tracks and save the salient feature array for the   -## Importing - -You can import Stabilizer Class in your program as follows: - -```python -from vidgear.gears.stabilizer import Stabilizer -``` - -  - ## Usage Examples -!!! experiment "After going through Stabilizer Class Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/stabilizer_ex/)" +!!! example "After going through Stabilizer Class Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/stabilizer_ex/)" ## Parameters diff --git a/docs/gears/stabilizer/usage.md b/docs/gears/stabilizer/usage.md index c11233a53..a508b0453 100644 --- a/docs/gears/stabilizer/usage.md +++ b/docs/gears/stabilizer/usage.md @@ -22,13 +22,13 @@ limitations under the License.   -!!! fail "The stabilizer may not perform well against High-frequency jitter in video. Use at your own risk!" +!!! failure "The stabilizer may not perform well against High-frequency jitter in video. Use at your own risk!" !!! warning "The stabilizer might be slower :snail: for High-Quality/Resolution :material-high-definition-box: videos-frames." !!! tip "It is advised to enable logging on the first run for easily identifying any runtime errors." -!!! experiment "After going through Stabilizer Class Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/stabilizer_ex/)" +!!! 
example "After going through Stabilizer Class Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/stabilizer_ex/)"   @@ -41,7 +41,7 @@ Following is the bare-minimum code you need to get started with Stabilizer Class !!! tip "You can use any VideoCapture Gear instead of CamGear in the similar manner, as shown in this usage example." -```python +```python linenums="1" # import required libraries from vidgear.gears.stabilizer import Stabilizer from vidgear.gears import CamGear @@ -96,7 +96,7 @@ stream.stop() The VidGear's stabilizer class can also work standalone easily with any Computer Vision library such as OpenCV itself. Following is the bare-minimum code you need to get started with Stabilizer Class and OpenCV: -```python +```python linenums="1" # import required libraries from vidgear.gears.stabilizer import Stabilizer import cv2 @@ -150,7 +150,7 @@ stream.release() Stabilizer class provide certain [parameters](../params/) which you can use to tweak its internal properties. The complete usage example is as follows: -```python hl_lines="10" +```python linenums="1" hl_lines="10" # import required libraries from vidgear.gears.stabilizer import Stabilizer from vidgear.gears import CamGear @@ -206,9 +206,9 @@ stream.stop() VideoGear's stabilizer can be used in conjunction with WriteGear API directly without any compatibility issues. The complete usage example is as follows: -!!! tip "You can also add live audio input to WriteGear pipeline. See this [bonus example](../../../help)" +!!! example "You can also add live audio input to WriteGear pipeline. 
See this [bonus example ➶](../../../help)" -```python +```python linenums="1" # import required libraries from vidgear.gears.stabilizer import Stabilizer from vidgear.gears import CamGear diff --git a/docs/gears/streamgear/introduction.md b/docs/gears/streamgear/introduction.md index 21ebdbcf0..9a27fc9c1 100644 --- a/docs/gears/streamgear/introduction.md +++ b/docs/gears/streamgear/introduction.md @@ -18,6 +18,7 @@ limitations under the License. =============================================== --> + # StreamGear API @@ -29,15 +30,15 @@ limitations under the License. ## Overview -> StreamGear automates transcoding workflow for generating _Ultra-Low Latency, High-Quality, Dynamic & Adaptive Streaming Formats (such as MPEG-DASH and Apple HLS)_ in just few lines of python code. +> StreamGear streamlines and simplifies the transcoding workflow to generate _Ultra-Low Latency, High-Quality, Dynamic & Adaptive Streaming Formats like MPEG-DASH and Apple HLS_ with just a few lines of Python code, allowing developers to focus on their application logic rather than dealing with the complexities of transcoding and chunking media files. -StreamGear provides a standalone, highly extensible, and flexible wrapper around [**FFmpeg**](https://ffmpeg.org/) multimedia framework for generating chunked-encoded media segments of the content. +StreamGear API provides a standalone, highly extensible, and flexible wrapper around the [**FFmpeg**](https://ffmpeg.org/) multimedia framework for generating chunk-encoded media segments from your multimedia content effortlessly. -SteamGear is an out-of-the-box solution for transcoding source videos/audio files & real-time video frames and breaking them into a sequence of multiple smaller chunks/segments of suitable lengths. 
These segments make it possible to stream videos at different quality levels _(different bitrates or spatial resolutions)_ and can be switched in the middle of a video from one quality level to another – if bandwidth permits – on a per-segment basis. A user can serve these segments on a web server that makes it easier to download them through HTTP standard-compliant GET requests. +With StreamGear, you can transcode source video/audio files and real-time video frames into a sequence of multiple smaller chunks/segments of suitable lengths. These segments facilitate streaming at different quality levels _(bitrates or spatial resolutions)_ and allow for seamless switching between quality levels during playback based on available bandwidth. You can serve these segments on a web server, making them easily accessible via standard **HTTP GET** requests. -SteamGear currently supports [**MPEG-DASH**](https://www.encoding.com/mpeg-dash/) _(Dynamic Adaptive Streaming over HTTP, ISO/IEC 23009-1)_ and [**Apple HLS**](https://developer.apple.com/documentation/http_live_streaming) _(HTTP Live Streaming)_. +StreamGear currently supports both [**MPEG-DASH**](https://www.encoding.com/mpeg-dash/) _(Dynamic Adaptive Streaming over HTTP, ISO/IEC 23009-1)_ and [**Apple HLS**](https://developer.apple.com/documentation/http_live_streaming) _(HTTP Live Streaming)_. -SteamGear also creates a Manifest file _(such as MPD in-case of DASH)_ or a Master Playlist _(such as M3U8 in-case of Apple HLS)_ besides segments that describe these segment information _(timing, URL, media characteristics like video resolution and adaptive bit rates)_ and is provided to the client before the streaming session. +Additionally, StreamGear generates a manifest file _(such as MPD for DASH)_ or a master playlist _(such as M3U8 for Apple HLS)_ alongside the segments. These files contain essential segment information, _including timing, URLs, and media characteristics like video resolution and adaptive bitrate_. 
They are provided to the client before the streaming session begins. !!! alert "For streaming with older traditional protocols such as RTMP, RTSP/RTP you could use [WriteGear](../../writegear/introduction/) API instead." @@ -58,9 +59,9 @@ SteamGear also creates a Manifest file _(such as MPD in-case of DASH)_ or a Mast !!! tip "Useful Links" - - Checkout [this detailed blogpost](https://ottverse.com/mpeg-dash-video-streaming-the-complete-guide/) on how MPEG-DASH works. - - Checkout [this detailed blogpost](https://ottverse.com/hls-http-live-streaming-how-does-it-work/) on how HLS works. - - Checkout [this detailed blogpost](https://ottverse.com/hls-http-live-streaming-how-does-it-work/) for HLS vs. MPEG-DASH comparison. + - Checkout [this detailed blogpost ➶](https://ottverse.com/mpeg-dash-video-streaming-the-complete-guide/) on how MPEG-DASH works. + - Checkout [this detailed blogpost ➶](https://ottverse.com/hls-http-live-streaming-how-does-it-work/) on how HLS works. + - Checkout [this detailed blogpost ➶](https://imagekit.io/blog/hls-vs-dash/) for HLS vs. MPEG-DASH comparison.   @@ -70,28 +71,15 @@ SteamGear also creates a Manifest file _(such as MPD in-case of DASH)_ or a Mast StreamGear primarily operates in following independent modes for transcoding: -??? warning "Real-time Frames Mode is NOT Live-Streaming." - - Rather, you can enable live-streaming in Real-time Frames Mode by using the exclusive [`-livestream`](../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter in StreamGear API. Checkout [this usage example](../rtfm/usage/#bare-minimum-usage-with-live-streaming) for more information. - +???+ alert "Real-time Frames Mode itself is NOT Live-Streaming :material-video-wireless-outline:" + To enable live-streaming in Real-time Frames Mode, use the exclusive [`-livestream`](../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter in the StreamGear API. 
Checkout [this usage example ➶](../rtfm/usage/#bare-minimum-usage-with-live-streaming) for more information. -- [**Single-Source Mode**](../ssm/overview): In this mode, StreamGear **transcodes entire video file** _(as opposed to frame-by-frame)_ into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you're transcoding long-duration lossless videos(with audio) for streaming that required no interruptions. But on the downside, the provided source cannot be flexibly manipulated or transformed before sending onto FFmpeg Pipeline for processing. +- [**Single-Source Mode :material-file-video-outline:**](../ssm/overview) : In this mode, StreamGear **transcodes entire video file** _(as opposed to frame-by-frame)_ into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you're transcoding long-duration lossless videos(with audio) for streaming that required no interruptions. But on the downside, the provided source cannot be flexibly manipulated or transformed before sending onto FFmpeg Pipeline for processing. -- [**Real-time Frames Mode**](../rtfm/overview): In this mode, StreamGear directly **transcodes frame-by-frame** _(as opposed to a entire video file)_, into a sequence of multiple smaller chunks/segments for streaming. This mode works exceptionally well when you desire to flexibility manipulate or transform [`numpy.ndarray`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) frames in real-time before sending them onto FFmpeg Pipeline for processing. But on the downside, audio has to added manually _(as separate source)_ for streams. +- [**Real-time Frames Mode :material-camera-burst:**](../rtfm/overview) : In this mode, StreamGear directly **transcodes frame-by-frame** _(as opposed to a entire video file)_, into a sequence of multiple smaller chunks/segments for streaming. 
This mode works exceptionally well when you desire to flexibly manipulate or transform [`numpy.ndarray`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) frames in real-time before sending them onto FFmpeg Pipeline for processing. But on the downside, audio has to be added manually _(as separate source)_ for streams.   -## Importing - -You can import StreamGear API in your program as follows: - -```python -from vidgear.gears import StreamGear -``` - -  - - ## Watch Demo === "Watch MPEG-DASH Stream" diff --git a/docs/gears/streamgear/params.md b/docs/gears/streamgear/params.md index f5c7f1afb..5e272a386 100644 --- a/docs/gears/streamgear/params.md +++ b/docs/gears/streamgear/params.md @@ -24,15 +24,13 @@ limitations under the License. ## **`output`** -This parameter sets the valid filename/path for storing the StreamGear assets _(Manifest file (such as MPD in-case of DASH) or a Master Playlist (such as M3U8 in-case of Apple HLS) & Transcoded sequence of segments)_. +This parameter sets the valid filename/path for storing the StreamGear assets, including Manifest file _(such as MPD in case of DASH)_ or a Master Playlist _(such as M3U8 in case of Apple HLS)_ and generated sequence of chunks/segments. -!!! warning "StreamGear API will throw `ValueError` if `output` provided is empty or invalid." +!!! warning "StreamGear API will throw `ValueError` if the provided `output` is empty or invalid." -!!! error "Make sure to provide _valid filename with valid file-extension_ for selected [`format`](#format) value _(such as `.mpd` in case of MPEG-DASH and `.m3u8` in case of APPLE-HLS)_, otherwise StreamGear will throw `AssertionError`." +!!! failure "Make sure to provide a valid filename with a valid file extension for the selected `format` value _(such as `.mpd` for MPEG-DASH and `.m3u8` for APPLE-HLS)_, otherwise StreamGear will throw `AssertionError`." -!!! 
note "StreamGear generated sequence of multiple chunks/segments are also stored in the same directory." - -!!! tip "You can easily delete all previous assets at `output` location, by using [`-clear_prev_assets`](#a-exclusive-parameters) attribute of [`stream_params`](#stream_params) dictionary parameter." +!!! tip "You can easily delete all previous assets at the `output` location by using the [`-clear_prev_assets`](#a-exclusive-parameters) attribute of the [`stream_params`](#stream_params) dictionary parameter." **Data-Type:** String @@ -40,60 +38,63 @@ This parameter sets the valid filename/path for storing the StreamGear assets _( Its valid input can be one of the following: -* **Path to directory**: Valid path of the directory. In this case, StreamGear API will automatically assign a unique filename for Manifest file. This can be defined as follows: +* **Path to directory**: Valid path of the directory. In this case, StreamGear API will automatically assign a unique filename for the Manifest file. This can be defined as follows: === "DASH" ```python - streamer = StreamGear(output = "/home/foo/foo1") # Define streamer with manifest saving directory path + # Define streamer with output directory path for saving DASH assets + streamer = StreamGear(output = "/home/foo/bar") ``` === "HLS" ```python - streamer = StreamGear(output = "/home/foo/foo1", format="hls") # Define streamer with playlist saving directory path + # Define streamer with output directory path for saving HLS assets + streamer = StreamGear(output = "/home/foo/bar", format="hls") ``` -* **Filename** _(with/without path)_: Valid filename(_with valid extension_) of the output Manifest file. In case filename is provided without path, then current working directory will be used. +* **Filename** _(with/without path)_: Valid filename _(with a valid extension)_ of the output Manifest or Playlist file. If the filename is provided without a path, the current working directory will be used. 
This can be defined as follows: === "DASH" ```python - streamer = StreamGear(output = "output_foo.mpd") # Define streamer with manifest file name + # Define streamer with output manifest filename + streamer = StreamGear(output = "output_dash.mpd") ``` === "HLS" ```python - streamer = StreamGear(output = "output_foo.m3u8", format="hls") # Define streamer with playlist file name + # Define streamer with output playlist filename + streamer = StreamGear(output = "output_hls.m3u8", format="hls") ``` -* **URL**: Valid URL of a network stream with a protocol supported by installed FFmpeg _(verify with command `ffmpeg -protocols`)_ only. This is useful for directly storing assets to a network server. For example, you can use a `http` protocol URL as follows: - +* **URL**: Valid URL of a network stream with a protocol supported by the installed FFmpeg _(verify with the `ffmpeg -protocols` command)_. This is useful for directly storing assets to a network server. For example, you can use an `HTTP` protocol URL as follows: === "DASH" ```python - streamer = StreamGear(output = "http://195.167.1.101/live/test.mpd") #Define streamer + # Define streamer with output manifest URL + streamer = StreamGear(output = "http://some_dummy_serverip/live/output_dash.mpd") ``` === "HLS" ```python - streamer = StreamGear(output = "http://195.167.1.101/live/test.m3u8", format="hls") #Define streamer + # Define streamer with output playlist URL + streamer = StreamGear(output = "http://some_dummy_serverip/live/output_hls.m3u8", format="hls") ```   ## **`format`** +This parameter enables the adaptive HTTP streaming format. This parameter currently supported these formats: `dash` _(i.e [**MPEG-DASH**](https://www.encoding.com/mpeg-dash/))_ and `hls` _(i.e [**Apple HLS**](https://developer.apple.com/documentation/http_live_streaming))_. -This parameter select the adaptive HTTP streaming formats. 
For now, the supported format are: `dash` _(i.e [**MPEG-DASH**](https://www.encoding.com/mpeg-dash/))_ and `hls` _(i.e [**Apple HLS**](https://developer.apple.com/documentation/http_live_streaming))_. - -!!! warning "Any invalid value to `format` parameter will result in ValueError!" - -!!! error "Make sure to provide _valid filename with valid file-extension_ in [`output`](#output) for selected `format` value _(such as `.mpd` in case of MPEG-DASH and `.m3u8` in case of APPLE-HLS)_, otherwise StreamGear will throw `AssertionError`." +!!! danger "Make sure to provide a valid filename with a valid file extension in the [`output`](#output) parameter for the selected `format` value _(i.e., `.mpd` for MPEG-DASH and `.m3u8` for APPLE-HLS)_, otherwise StreamGear will throw an `AssertionError`." +!!! warning "Any improper value assigned to `format` parameter will result in a `ValueError`!" **Data-Type:** String @@ -104,16 +105,17 @@ This parameter select the adaptive HTTP streaming formats. For now, the supporte === "DASH" ```python - StreamGear(output = "output_foo.mpd", format="dash") + # Define streamer with DASH format + StreamGear(output = "output_dash.mpd", format="dash") ``` === "HLS" ```python - StreamGear(output = "output_foo.m3u8", format="hls") + # Define streamer with HLS format + StreamGear(output = "output_hls.m3u8", format="hls") ``` -   @@ -121,9 +123,10 @@ This parameter select the adaptive HTTP streaming formats. For now, the supporte This parameter assigns the custom _path/directory_ where the custom/downloaded FFmpeg executables are located. -!!! info "Behavior on Windows" +!!! info "Behavior on :fontawesome-brands-windows: Windows Systems" + + On Windows, if a custom FFmpeg executable's path/directory is not provided through this `custom_ffmpeg` parameter, the StreamGear API will automatically attempt to download and extract suitable Static FFmpeg binaries at a suitable location on your Windows machine. 
More information can be found [here ➶](../ffmpeg_install/#a-auto-installation). - If a custom FFmpeg executable's path | directory is not provided through `custom_ffmpeg` parameter on Windows machine, then StreamGear API will ==automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine==. More information can be found [here ➶](../ffmpeg_install/#a-auto-installation). **Data-Type:** String @@ -132,8 +135,8 @@ This parameter assigns the custom _path/directory_ where the custom/downloaded F **Usage:** ```python -# If ffmpeg executables are located at "/foo/foo1/ffmpeg" -StreamGear(output = 'output_foo.mpd', custom_ffmpeg="/foo/foo1/ffmpeg") +# Define streamer with custom ffmpeg binary +StreamGear(output = 'output_foo.mpd', custom_ffmpeg="C://foo//bar//ffmpeg.exe") ```   @@ -141,11 +144,9 @@ StreamGear(output = 'output_foo.mpd', custom_ffmpeg="/foo/foo1/ffmpeg") ## **`stream_params`** -This parameter allows us to exploit almost all FFmpeg supported parameters effortlessly and flexibly change its internal settings for transcoding and seamlessly generating high-quality streams. All [supported parameters](#supported-parameters) can formatting as attributes for this dictionary parameter: - - -!!! danger "Kindly read [**FFmpeg Docs**](https://ffmpeg.org/documentation.html) carefully, before passing any additional values to `stream_params` parameter. Wrong values may result in undesired errors or no output at all." +This parameter allows developers to leverage nearly all FFmpeg options, providing effortless and flexible control over its internal settings for transcoding and generating high-quality streams. All [supported parameters](#supported-parameters) can be formatted as attributes within this dictionary parameter. +!!! danger "Please read the [**FFmpeg Documentation**](https://ffmpeg.org/documentation.html) carefully before passing any additional values to the `stream_params` parameter. 
Incorrect values may cause errors or result in no output." **Data-Type:** Dictionary @@ -158,183 +159,220 @@ This parameter allows us to exploit almost all FFmpeg supported parameters effor StreamGear API provides some exclusive internal parameters to easily generate Streaming Assets and effortlessly tweak its internal properties. These parameters are discussed below: -* **`-streams`** _(list of dicts)_: This important attribute makes it simple and pretty straight-forward to define additional multiple streams as _list of dictionaries_ of different quality levels _(i.e. different bitrates or spatial resolutions)_ for streaming. +* **`-streams`** _(list of dicts)_: This important attribute makes it simple and pretty straight-forward to define additional multiple streams as _list of dictionaries_ of different quality levels _(i.e. different bitrate or spatial resolutions)_ for streaming. + + ???+ danger "Important Information about `-streams` attribute :material-file-document-alert-outline:" - !!! danger "Important `-streams` attribute facts" - * ==On top of these additional streams, StreamGear by default, generates a primary stream of same resolution and framerate[^1] as the input Video, at the index `0`.== - * You **MUST** need to define `-resolution` value for your stream, otherwise stream will be discarded! - * You only need either of `-video_bitrate` or `-framerate` for defining a valid stream. Since with `-framerate` value defined, video-bitrate is calculated automatically using `-bpps` and `-resolution` values. - * If you define both `-video_bitrate` and `-framerate` values at the same time, StreamGear will discard the `-framerate` value automatically. 
+ * In addition to the user-defined Secondary Streams, ==StreamGear automatically generates a Primary Stream _(at index `0`)_ with the same resolution as the input frames and at default framerate[^1].== + * You **MUST** define the `-resolution` value for each stream; otherwise, the stream will be discarded. + * You only need to define either the `-video_bitrate` or the `-framerate` for a valid stream. + * If you specify the `-framerate`, the video bitrate will be calculated automatically. + * If you define both the `-video_bitrate` and the `-framerate`, the `-framerate` will be discarded automatically. - **To construct the additional stream dictionaries, you'll will need following sub-attributes:** + **To construct the additional stream dictionaries, you will need the following sub-attributes:** - * `-resolution` _(string)_: It is **compulsory** to define the required resolution/dimension/size for the stream, otherwise given stream will be rejected. Its value can be a `"{width}x{height}"` as follows: + * `-resolution` _(string)_: It is **compulsory** to define the required resolution/dimension/size for the stream, otherwise, the given stream will be rejected. Its value should be in the format `"{width}x{height}"`, as shown below: ```python - "-streams" = [{"-resolution": "1280x720"}] # to produce a 1280x720 resolution/scale + # produce a 1280x720 resolution/scale stream + "-streams" = [{"-resolution": "1280x720"}] ``` - * `-video_bitrate` _(string)_: It is an **optional** _(can be ignored if `-framerate` parameter is defined)_ sub-attribute that generally determines the bandwidth and quality of stream, i.e. the higher the bitrate, the better the quality and the larger will be bandwidth and more will be strain on network. It value is generally in `kbps` _(kilobits per second)_ for OBS (Open Broadcasting Softwares). 
You can easily define this attribute as follows: + * `-video_bitrate` _(string)_: This is an **optional** sub-attribute _(can be ignored if the `-framerate` parameter is defined)_ that generally determines the bandwidth and quality of the stream. The higher the bitrate, the better the quality and the larger the bandwidth, which can place more strain on the network. Its value is typically in `k` _(kilobits per second)_ or `M` _(Megabits per second)_. Define this attribute as follows: ```python - "-streams" : [{"-resolution": "1280x720", "-video_bitrate": "2000k"}] # to produce a 1280x720 resolution and 2000kbps bitrate stream + # produce a 1280x720 resolution and 2000 kbps bitrate stream + "-streams" : [{"-resolution": "1280x720", "-video_bitrate": "2000k"}] ``` - * `-framerate` _(float/int)_: It is another **optional** _(can be ignored if `-video_bitrate` parameter is defined)_ sub-attribute that defines the assumed framerate for the stream. It's value can be float/integer as follows: + * `-framerate` _(float/int)_: This is another **optional** sub-attribute _(can be ignored if the `-video_bitrate` parameter is defined)_ that defines the assumed framerate for the stream. Its value can be a float or integer, as shown below: ```python - "-streams" : [{"-resolution": "1280x720", "-framerate": "60.0"}] # to produce a 1280x720 resolution and 60fps framerate stream + # produce a 1280x720 resolution and 60fps framerate stream + "-streams" : [{"-resolution": "1280x720", "-framerate": "60.0"}] ``` **Usage:** You can easily define any number of streams using `-streams` attribute as follows: - !!! 
tip "Usage example can be found [here ➶](../ssm/usage/#usage-with-additional-streams)" - ```python stream_params = {"-streams": - [{"-resolution": "1920x1080", "-video_bitrate": "4000k"}, # Stream1: 1920x1080 at 4000kbs bitrate - {"-resolution": "1280x720", "-framerate": "30.0"}, # Stream2: 1280x720 at 30fps - {"-resolution": "640x360", "-framerate": "60.0"}, # Stream3: 640x360 at 60fps - ]} + [ + {"-resolution": "1920x1080", "-video_bitrate": "4000k"}, # Stream1: 1920x1080 at 4000kbs bitrate + {"-resolution": "1280x720", "-framerate": 30}, # Stream2: 1280x720 at 30fps + {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 at 60fps + ] + } ``` + !!! example "Its usage example can be found [here ➶](../ssm/usage/#usage-with-additional-streams)" +   -* **`-video_source`** _(string)_: This attribute takes valid Video path as input and activates [**Single-Source Mode**](../ssm/overview), for transcoding it into multiple smaller chunks/segments for streaming after successful validation. Its value be one of the following: +* **`-video_source`** _(string)_: This attribute takes a valid video path as input and activates [**Single-Source Mode**](../ssm/overview), for transcoding it into multiple smaller chunks/segments for streaming after successful validation. Its value can be one of the following: - !!! tip "Usage example can be found [here ➶](../ssm/usage/#bare-minimum-usage)" + * **Video Filename**: Valid path to a video file as follows: - * **Video Filename**: Valid path to Video file as follows: ```python - stream_params = {"-video_source": "/home/foo/foo1.mp4"} # set input video source: /home/foo/foo1.mp4 + # set video source as `/home/foo/bar.mp4` + stream_params = {"-video_source": "/home/foo/bar.mp4"} ``` + * **Video URL**: Valid URL of a network video stream as follows: - !!! danger "Make sure given Video URL has protocol that is supported by installed FFmpeg. _(verify with `ffmpeg -protocols` terminal command)_" + !!! 
danger "Ensure the given video URL uses a protocol supported by the installed FFmpeg _(verify with `ffmpeg -protocols` terminal command)_." ```python - stream_params = {"-video_source": "http://livefeed.com:5050"} # set input video source: http://livefeed.com:5050 + # set video source as `http://livefeed.com:5050` + stream_params = {"-video_source": "http://livefeed.com:5050"} ``` + !!! example "Its usage example can be found [here ➶](../ssm/usage/#bare-minimum-usage)" +   +* **`-audio`** _(string/list)_: This attribute takes an external custom audio path _(as a string)_ or an audio device name followed by a suitable demuxer _(as a list)_ as the audio source input for all StreamGear streams. Its value can be one of the following: -* **`-audio`** _(string/list)_: This attribute takes external custom audio path _(as `string`)_ or audio device name followed by suitable demuxer _(as `list`)_ as audio source input for all StreamGear streams. Its value be one of the following: + !!! failure "Ensure the provided `-audio` audio source is compatible with the input video source. Incompatibility can cause multiple errors or result in no output at all." - !!! failure "Make sure this audio-source is compatible with provided video -source, otherwise you could encounter multiple errors, or even no output at all!" + * **Audio Filename** _(string)_: Valid path to an audio file as follows: - * **Audio Filename** _(string)_: Valid path to Audio file as follows: ```python - stream_params = {"-audio": "/home/foo/foo1.aac"} # set input audio source: /home/foo/foo1.aac + # set audio source as `/home/foo/foo1.aac` + stream_params = {"-audio": "/home/foo/foo1.aac"} ``` - !!! tip "Usage example can be found [here ➶](../ssm/usage/#usage-with-custom-audio)" + + !!! example "Its usage examples can be found [here ➶](../ssm/usage/#usage-with-custom-audio) and [here ➶](../ssm/usage/#usage-with-file-audio-input)" * **Audio URL** _(string)_: Valid URL of a network audio stream as follows: - !!! 
danger "Make sure given Video URL has protocol that is supported by installed FFmpeg. _(verify with `ffmpeg -protocols` terminal command)_" + !!! danger "Ensure the given audio URL uses a protocol supported by the installed FFmpeg _(verify with `ffmpeg -protocols` terminal command)_." ```python - stream_params = {"-audio": "https://exampleaudio.org/example-160.mp3"} # set input audio source: https://exampleaudio.org/example-160.mp3 + # set input audio source as `https://exampleaudio.org/example-160.mp3` + stream_params = {"-audio": "https://exampleaudio.org/example-160.mp3"} ``` - * **Device name and Demuxer** _(list)_: Valid audio device name followed by suitable demuxer as follows: + * **Device name and Demuxer** _(list)_: Valid audio device name followed by a suitable demuxer as follows: ```python - stream_params = {"-audio": "https://exampleaudio.org/example-160.mp3"} # set input audio source: https://exampleaudio.org/example-160.mp3 + # Assign appropriate input audio-source device (compatible with video source) and its demuxer + stream_params = {"-audio": [ + "-f", + "dshow", + "-i", + "audio=Microphone (USB2.0 Camera)", + ]} ``` - !!! tip "Usage example can be found [here ➶](../rtfm/usage/#usage-with-device-audio--input)" - + !!! example "Its usage example can be found [here ➶](../rtfm/usage/#usage-with-device-audio--input)"   -* **`-livestream`** _(bool)_: ***(optional)*** specifies whether to enable **Livestream Support**_(chunks will contain information for new frames only)_ for the selected mode, or not. You can easily set it to `True` to enable this feature, and default value is `False`. It can be used as follows: +* **`-livestream`** _(bool)_: ***(optional)*** specifies whether to enable **Low-latency Live-Streaming :material-video-wireless-outline:** in [**Real-time Frames Mode**](../rtfm/overview) only, where chunks will contain information for new frames only and forget previous ones, or not. The default value is `False`. 
It can be used as follows: - !!! tip "Use `window_size` & `extra_window_size` FFmpeg parameters for controlling number of frames to be kept in New Chunks." + !!! warning "The `-livestream` optional parameter is **NOT** supported in [Single-Source mode](../ssm/overview)." ```python - stream_params = {"-livestream": True} # enable livestreaming + stream_params = {"-livestream": True} # enable live-streaming ``` -  + !!! example "Its usage example can be found [here ➶](../rtfm/usage/#bare-minimum-usage-with-live-streaming)" -* **`-input_framerate`** _(float/int)_ : ***(optional)*** specifies the assumed input video source framerate, and only works in [Real-time Frames Mode](../usage/#b-real-time-frames-mode). It can be used as follows: +  - !!! tip "Usage example can be found [here ➶](../rtfm/usage/#bare-minimum-usage-with-controlled-input-framerate)" +* **`-input_framerate`** _(float/int)_ : ***(optional)*** This parameter specifies the assumed input video source framerate and only works in [Real-time Frames Mode](../usage/#b-real-time-frames-mode). Its default value is `25.0` fps. Its value can be a float or integer, as shown below: ```python - stream_params = {"-input_framerate": 60.0} # set input video source framerate to 60fps + # set input video source framerate to 60fps + stream_params = {"-input_framerate": 60.0} ``` + !!! example "Its usage example can be found [here ➶](../rtfm/usage/#bare-minimum-usage-with-controlled-input-framerate)" +   -* **`-bpp`** _(float/int)_: ***(optional)*** This attribute controls constant _Bits-Per-Pixel_(BPP) value, which is kind of a constant value to ensure good quality of high motion scenes ,and thereby used in calculating desired video-bitrate for streams. Higher the BPP, better will be motion quality. Its default value is `0.1`. Going over `0.1`helps to fill gaps between current bitrate and upload limit/ingest cap. 
Its value can be anything above `0.001`, can be used as follows: +* **`-bpp`** _(float/int)_: ***(optional)*** This attribute controls the constant **BPP** _(Bits-Per-Pixel)_ value, which helps ensure good quality in high motion scenes by determining the desired video bitrate for streams. A higher BPP value improves motion quality. The default value is `0.1`. Increasing the BPP value helps fill the gaps between the current bitrate and the upload limit/ingest cap. Its value can be anything above `0.001` and can be used as follows: - !!! tip "Important BPP tips for streaming" - * `-bpp` a sensitive value, try 0.001, and then make increments in 0.0001 to fine tune - * If your desired resolution/fps/audio combination is below maximum service bitrate, raise BPP to match it for extra quality. - * It is generally better to lower resolution (and/or fps) and raise BPP than raise resolution and loose on BPP. + !!! tip "Important points while tweaking BPP" + * BPP is a sensitive value; start with `0.001` and make small increments (`0.0001`) to fine-tune. + * If your desired resolution/fps/audio combination is below the maximum service bitrate, raise BPP to match it for extra quality. + * It is generally better to lower resolution _(and/or `fps`)_ and raise BPP than to raise resolution and lose BPP. ```python - stream_params = {"-bpp": 0.05} # sets BPP to 0.05 + # sets BPP to 0.05 + stream_params = {"-bpp": 0.05} ```   -* **`-gop`** _(float/int)_ : ***(optional)*** specifies the number of frames between two I-frames for accurate GOP length. By increasing the length of the GOP, there will be fewer I-frames per time frame, which minimizes bandwidth consumption. So, for example, with extremely complex subjects such as water sports or action mode, you’ll want to use a shorter GOP length such as 15 or below that results in excellent video quality. For more static video such as talking heads, then much longer GOP sizes are not only sufficient but also more efficient. 
It can be used as follows: +* **`-gop`** _(float/int)_ : ***(optional)*** This parameter specifies the number of frames between two I-frames for accurate **GOP** _(Group of Pictures)_ length. Increasing the GOP length reduces the number of I-frames per time frame, minimizing bandwidth consumption. For example, with complex subjects such as water sports or action scenes, a shorter GOP length _(e.g., `15` or below)_ results in excellent video quality. For more static video, such as talking heads, much longer GOP sizes are not only sufficient but also more efficient. It can be used as follows: - !!! tip "The larger the GOP size, the more efficient the compression and the less bandwidth you will need" + !!! tip "The larger the GOP size, the more efficient the compression and the less bandwidth you will need." - !!! info "By default, StreamGear automatically sets recommended fixed GOP value _(i.e. every two seconds)_ w.r.t input framerate and selected encoder." + !!! info "By default, StreamGear automatically sets a recommended fixed GOP value _(i.e., every two seconds)_ based on the input framerate and selected encoder." ```python - stream_params = {"-gop": 70} # set GOP length to 70 + # set GOP length to 70 + stream_params = {"-gop": 70} ```   -* **`-clones`** _(list)_: ***(optional)*** sets the special FFmpeg parameters that are repeated more than once in the command _(For more info., see [this issue](https://github.com/abhiTronix/vidgear/issues/141))_ as **list** only. Usage is as follows: +* **`-clones`** _(list)_: ***(optional)*** This parameter sets special FFmpeg options that need to be repeated more than once in the command. For more information, see [this issue](https://github.com/abhiTronix/vidgear/issues/141). It accepts values as a **list** only. 
Usage is as follows: ```python + # sets special FFmpeg options repeated multiple times stream_params = {"-clones": ['-map', '0:v:0', '-map', '1:a?']} ```   -* **`-ffmpeg_download_path`** _(string)_: ***(optional)*** sets the custom directory for downloading FFmpeg Static Binaries in Compression Mode, during the [Auto-Installation](../ffmpeg_install/#a-auto-installation) on Windows Machines Only. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. `C:/User/temp`) on your windows machine. It can be used as follows: +* **`-ffmpeg_download_path`** _(string)_: ***(optional)*** This parameter sets a custom directory for downloading FFmpeg static binaries in Compression Mode during the [**Auto-Installation**](../ffmpeg_install/#a-auto-installation) step on Windows machines only. If this parameter is not altered, the binaries will be saved to the default temporary directory _(e.g., `C:/User/foo/temp`)_ on your Windows machine. It can be used as follows: ```python - stream_params = {"-ffmpeg_download_path": "C:/User/foo/foo1"} # will be saved to "C:/User/foo/foo1" + # download FFmpeg static binaries to `C:/User/foo/bar` + stream_params = {"-ffmpeg_download_path": "C:/User/foo/bar"} ```   -* **`-clear_prev_assets`** _(bool)_: ***(optional)*** specify whether to force-delete any previous copies of StreamGear Assets _(i.e. Manifest files(.mpd) & streaming chunks(.m4s) etc.)_ present at path specified by [`output`](#output) parameter. You can easily set it to `True` to enable this feature, and default value is `False`. It can be used as follows: +* **`-clear_prev_assets`** _(bool)_: ***(optional)*** This parameter specifies whether to remove/delete all previous copies of StreamGear assets files for selected [`format`](#format) _(i.e., manifest (`mpd`) in DASH, playlist (`m3u8`) in HLS, and respective streaming chunks (`.ts`,`.m4s`), etc.)_ present at the path specified by the [`output`](#output) parameter.
The default value is `False`. It can be enabled as follows: - !!! info "In Single-Source Mode, additional segments _(such as `.webm`, `.mp4` chunks)_ are also cleared automatically." + !!! info "Additional segments _(such as `.webm`, `.mp4` chunks)_ are also removed automatically." ```python - stream_params = {"-clear_prev_assets": True} # will delete all previous assets + # delete all previous assets + stream_params = {"-clear_prev_assets": True} ```   -#### B. FFmpeg Parameters +* **`-enable_force_termination`** _(bool)_: sets a special flag to enable the forced termination of the FFmpeg process, required only if StreamGear is getting frozen when terminated. Its usage is as follows: + + !!! warning "The `-enable_force_termination` flag can potentially cause unexpected behavior or corrupted output in certain scenarios. It is recommended to use this flag with caution." + + ```python + # enables forced termination of FFmpeg process + stream_params = {"-enable_force_termination": True} + ``` -Almost all FFmpeg parameter can be passed as dictionary attributes in `stream_params`. For example, for using `libx264 encoder` to produce a lossless output video, we can pass required FFmpeg parameters as dictionary attributes, as follows: +  -!!! tip "Kindly check [H.264 docs ➶](https://trac.ffmpeg.org/wiki/Encode/H.264) and other [FFmpeg Docs ➶](https://ffmpeg.org/documentation.html) for more information on these parameters" +#### B. FFmpeg Parameters +Almost all FFmpeg parameters can be passed as dictionary attributes in `stream_params`. For example, to use the `libx264` encoder to produce a lossless output video, you can pass the required FFmpeg parameters as dictionary attributes as follows: -!!! error "All ffmpeg parameters are case-sensitive. Remember to double check every parameter if any error occurs." +!!! 
tip "Please check the [H.264 documentation ➶](https://trac.ffmpeg.org/wiki/Encode/H.264) and [FFmpeg Documentation ➶](https://ffmpeg.org/documentation.html) for more information on following parameters." +!!! failure "All FFmpeg parameters are case-sensitive. Double-check each parameter if any errors occur." -!!! note "In addition to these parameters, almost any FFmpeg parameter _(supported by installed FFmpeg)_ is also supported. But make sure to read [**FFmpeg Docs**](https://ffmpeg.org/documentation.html) carefully first." +!!! note "In addition to these parameters, almost any FFmpeg parameter _(supported by the installed FFmpeg)_ is also supported. Be sure to read the [**FFmpeg Documentation**](https://ffmpeg.org/documentation.html) carefully first." ```python +# libx264 encoder and its supported parameters stream_params = {"-vcodec":"libx264", "-crf": 0, "-preset": "fast", "-tune": "zerolatency"} ``` @@ -342,9 +380,15 @@ stream_params = {"-vcodec":"libx264", "-crf": 0, "-preset": "fast", "-tune": "ze ### Supported Encoders and Decoders -All the encoders and decoders that are compiled with FFmpeg in use, are supported by WriteGear API. You can easily check the compiled encoders by running following command in your terminal: +All encoders and decoders compiled with the FFmpeg in use are supported by the StreamGear API. You can check the compiled encoders by running the following command in your terminal: -!!! info "Similarily, supported demuxers and filters depends upons compiled FFmpeg in use." +???+ tip "Faster Transcoding with Stream Copy in Single Source Mode" + + For faster transcoding of input video, utilize Stream copy (`-vcodec copy`) as the input video encoder in the [**Single-Source Mode**](../ssm/overview) for creating HLS/DASH chunks of the primary stream efficiently. 
However, consider the following points: + + - :warning: Stream copy is **NOT** compatible with [**Real-time Frames Mode**](../rtfm/overview), as this mode necessitates re-encoding of incoming frames. Therefore, the `-vcodec copy` parameter will be ignored. + - :warning: Stream copying is **NOT** compatible with Custom Streams ([`-streams`](#a-exclusive-parameters)), which also require re-encoding for each additional stream. Consequently, the `-vcodec copy` parameter will be ignored. + - When using the audio stream from the input video, the Audio Stream copy (`-acodec copy`) encoder will be automatically applied. ```sh # for checking encoder @@ -353,6 +397,8 @@ ffmpeg -encoders # use `ffmpeg.exe -encoders` on windows ffmpeg -decoders # use `ffmpeg.exe -decoders` on windows ``` +!!! info "Similarly, supported audio/video demuxers and filters depend on the FFmpeg binaries in use." +   ## **`logging`** diff --git a/docs/gears/streamgear/rtfm/overview.md b/docs/gears/streamgear/rtfm/overview.md index 1f892d594..f213821cb 100644 --- a/docs/gears/streamgear/rtfm/overview.md +++ b/docs/gears/streamgear/rtfm/overview.md @@ -18,7 +18,7 @@ limitations under the License. =============================================== --> -# StreamGear API: Real-time Frames Mode +# StreamGear API: Real-time Frames Mode :material-camera-burst:
@@ -44,18 +44,15 @@ For this mode, StreamGear API provides exclusive [`stream()`](../../../../bonus/ Apple HLS support was added in `v0.2.2`. -!!! alert "Real-time Frames Mode is NOT Live-Streaming." +!!! alert "Real-time Frames Mode itself is NOT Live-Streaming :material-video-wireless-outline:" + To enable live-streaming in Real-time Frames Mode, use the exclusive [`-livestream`](../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter in the StreamGear API. Checkout [this usage example ➶](../usage/#bare-minimum-usage-with-live-streaming) for more information. - Rather, you can easily enable live-streaming in Real-time Frames Mode by using StreamGear API's exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. Checkout its [usage example here](../usage/#bare-minimum-usage-with-live-streaming). +!!! danger "Please Remember :material-police-badge-outline:" -!!! danger + * Using [`transcode_source()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.transcode_source) function instead of [`stream()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) in Real-time Frames Mode will immediately result in **`RuntimeError`**! - * Using [`transcode_source()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.transcode_source) function instead of [`stream()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) in Real-time Frames Mode will instantly result in **`RuntimeError`**! 
- - * **NEVER** assign anything to [`-video_source`](../../params/#a-exclusive-parameters) attribute of [`stream_params`](../../params/#supported-parameters) dictionary parameter, otherwise [Single-Source Mode](../#a-single-source-mode) may get activated, and as a result, using [`stream()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) function will throw **`RuntimeError`**! - - * You **MUST** use [`-input_framerate`](../../params/#a-exclusive-parameters) attribute to set exact value of input framerate when using external audio in this mode, otherwise audio delay will occur in output streams. + * **NEVER** assign anything to [`-video_source`](../../params/#a-exclusive-parameters) attribute of [`stream_params`](../../params/#supported-parameters) dictionary parameter, otherwise [Single-Source Mode](../#a-single-source-mode) gets activated, and as a result, using [`stream()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) function will throw **`RuntimeError`**! * Input framerate defaults to `25.0` fps if [`-input_framerate`](../../params/#a-exclusive-parameters) attribute value not defined. @@ -68,7 +65,7 @@ For this mode, StreamGear API provides exclusive [`stream()`](../../../../bonus/ See here 🚀
-!!! experiment "After going through StreamGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)" +!!! example "After going through StreamGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)" ## Parameters diff --git a/docs/gears/streamgear/rtfm/usage.md b/docs/gears/streamgear/rtfm/usage.md index a2e280ab9..5833545bb 100644 --- a/docs/gears/streamgear/rtfm/usage.md +++ b/docs/gears/streamgear/rtfm/usage.md @@ -18,24 +18,27 @@ limitations under the License. =============================================== --> -# StreamGear API Usage Examples: Real-time Frames Mode +# StreamGear API Usage Examples: Real-time Frames Mode :material-camera-burst: -!!! alert "Real-time Frames Mode is NOT Live-Streaming." +!!! alert "Real-time Frames Mode itself is NOT Live-Streaming :material-video-wireless-outline:" - Rather you can easily enable live-streaming in Real-time Frames Mode by using StreamGear API's exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. Checkout following [usage example](#bare-minimum-usage-with-live-streaming). + To enable live-streaming in Real-time Frames Mode, use the exclusive [`-livestream`](../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter in the StreamGear API. Checkout following [usage example ➶](#bare-minimum-usage-with-live-streaming) for more information. -!!! warning "Important Information" +!!! warning "Important Information :fontawesome-solid-person-military-pointing:" - * StreamGear **MUST** requires FFmpeg executables for its core operations. Follow these dedicated [Platform specific Installation Instructions ➶](../../ffmpeg_install/) for its installation. + - [x] StreamGear API **MUST** requires FFmpeg executables for its core operations. 
Follow these dedicated [Platform specific Installation Instructions ➶](../../ffmpeg_install/) for its installation. API will throw **RuntimeError**, if it fails to detect valid FFmpeg executables on your system. + - [x] In this mode, ==API by default generates a primary stream _(at the index `0`)_ of same resolution as the input frames and at default framerate[^1].== + - [x] In this mode, API **DOES NOT** automatically maps video-source audio to generated streams. You need to manually assign separate audio-source through [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. + - [x] In this mode, Stream copy (`-vcodec copy`) encoder is unsupported as it requires re-encoding of incoming frames. + - [x] Always use `close()` function at the very end of the main code. - * StreamGear API will throw **RuntimeError**, if it fails to detect valid FFmpeg executables on your system. - - * By default, ==StreamGear generates a primary stream of same resolution and framerate[^1] as the input video _(at the index `0`)_.== - - * Always use `terminate()` function at the very end of the main code. +??? danger "DEPRECATION NOTICES for `v0.3.3` and above" + + - [ ] The `terminate()` method in StreamGear is now deprecated and will be removed in a future release. Developers should use the new [`close()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.close) method instead, as it offers a more descriptive name, similar to the WriteGear API, for safely terminating StreamGear processes. + - [ ] The `rgb_mode` parameter in [`stream()`](../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) method, which earlier used to support RGB frames in Real-time Frames Mode is now deprecated, and will be removed in a future version. Only BGR format frames will be supported going forward. Please update your code to handle BGR format frames. -!!! 
experiment "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)" +!!! example "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)"   @@ -47,10 +50,11 @@ Following is the bare-minimum code you need to get started with StreamGear API i !!! note "We are using [CamGear](../../../camgear/overview/) in this Bare-Minimum example, but any [VideoCapture Gear](../../../#a-videocapture-gears) will work in the similar manner." +!!! danger "In this mode, StreamGear **DOES NOT** automatically maps video-source audio to generated streams. You need to manually assign separate audio-source through [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." === "DASH" - ```python + ```python linenums="1" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear @@ -94,12 +98,12 @@ Following is the bare-minimum code you need to get started with StreamGear API i stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python + ```python linenums="1" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear @@ -143,41 +147,36 @@ Following is the bare-minimum code you need to get started with StreamGear API i stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` !!! success "After running this bare-minimum example, StreamGear will produce a Manifest file _(`dash.mpd`)_ with streamable chunks that contains information about a Primary Stream of same resolution and framerate[^1] as input _(without any audio)_." 
-   -## Bare-Minimum Usage with Live-Streaming - -You can easily activate ==Low-latency Livestreaming in Real-time Frames Mode==, where chunks will contain information for few new frames only and forgets all previous ones), using exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter as follows: +## Bare-Minimum Usage with controlled Input-framerate -!!! note "In this mode, StreamGear **DOES NOT** automatically maps video-source audio to generated streams. You need to manually assign separate audio-source through [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." +> In Real-time Frames Mode, StreamGear API provides the exclusive [`-input_framerate`](../../params/#a-exclusive-parameters) attribute for the `stream_params` dictionary parameter, which allows you to set the assumed constant framerate for incoming frames. -=== "DASH" +In this example, we will retrieve the framerate from a webcam video stream and set it as the value for the `-input_framerate` attribute in StreamGear. - !!! tip "Chunk size in DASH" - Use `-window_size` & `-extra_window_size` FFmpeg parameters for controlling number of frames to be kept in Chunks in DASH stream. Less these value, less will be latency. +!!! danger "Remember, the input framerate defaults to 25.0 fps if the `-input_framerate` attribute value is not defined in Real-time Frames mode." - !!! alert "After every few chunks _(equal to the sum of `-window_size` & `-extra_window_size` values)_, all chunks will be overwritten in Live-Streaming. Thereby, since newer chunks in manifest will contain NO information of any older ones, and therefore resultant DASH stream will play only the most recent frames." 
+=== "DASH" - ```python hl_lines="11" + ```python linenums="1" hl_lines="10" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear import cv2 - # open any valid video stream(from web-camera attached at index `0`) + # Open live video stream on webcam at first index(i.e. 0) device stream = CamGear(source=0).start() - # enable livestreaming and retrieve framerate from CamGear Stream and - # pass it as `-input_framerate` parameter for controlled framerate - stream_params = {"-input_framerate": stream.framerate, "-livestream": True} + # retrieve framerate from CamGear Stream and pass it as `-input_framerate` value + stream_params = {"-input_framerate":stream.framerate} - # describe a suitable manifest-file location/name + # describe a suitable manifest-file location/name and assign params streamer = StreamGear(output="dash_out.mpd", **stream_params) # loop over @@ -210,31 +209,24 @@ You can easily activate ==Low-latency Livestreaming in Real-time Frames Mode==, stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - !!! tip "Chunk size in HLS" - - Use `-hls_init_time` & `-hls_time` FFmpeg parameters for controlling number of frames to be kept in Chunks in HLS stream. Less these value, less will be latency. - - !!! alert "After every few chunks _(equal to the sum of `-hls_init_time` & `-hls_time` values)_, all chunks will be overwritten in Live-Streaming. Thereby, since newer chunks in playlist will contain NO information of any older ones, and therefore resultant HLS stream will play only the most recent frames." - - ```python hl_lines="11" + ```python linenums="1" hl_lines="10" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear import cv2 - # open any valid video stream(from web-camera attached at index `0`) + # Open live video stream on webcam at first index(i.e. 
0) device stream = CamGear(source=0).start() - # enable livestreaming and retrieve framerate from CamGear Stream and - # pass it as `-input_framerate` parameter for controlled framerate - stream_params = {"-input_framerate": stream.framerate, "-livestream": True} + # retrieve framerate from CamGear Stream and pass it as `-input_framerate` value + stream_params = {"-input_framerate":stream.framerate} - # describe a suitable manifest-file location/name + # describe a suitable manifest-file location/name and assign params streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) # loop over @@ -267,145 +259,39 @@ You can easily activate ==Low-latency Livestreaming in Real-time Frames Mode==, stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` -   -## Bare-Minimum Usage with RGB Mode +## Bare-Minimum Usage with Live-Streaming -In Real-time Frames Mode, StreamGear API provide [`rgb_mode`](../../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) boolean parameter with its `stream()` function, which if enabled _(i.e. `rgb_mode=True`)_, specifies that incoming frames are of RGB format _(instead of default BGR format)_, thereby also known as ==RGB Mode==. +You can easily activate **Low-latency Live-Streaming :material-video-wireless-outline:** in Real-time Frames Mode, where chunks will contain information for new frames only and forget previous ones, using the exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter. +The complete example is as follows: -The complete usage example is as follows: +!!! danger "In this mode, StreamGear **DOES NOT** automatically maps video-source audio to generated streams. You need to manually assign separate audio-source through [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." 
=== "DASH" - ```python hl_lines="28" - # import required libraries - from vidgear.gears import CamGear - from vidgear.gears import StreamGear - import cv2 - - # open any valid video stream(for e.g `foo1.mp4` file) - stream = CamGear(source='foo1.mp4').start() - - # describe a suitable manifest-file location/name - streamer = StreamGear(output="dash_out.mpd") - - # loop over - while True: - - # read frames from stream - frame = stream.read() - - # check for frame if Nonetype - if frame is None: - break - - - # {simulating RGB frame for this example} - frame_rgb = frame[:,:,::-1] - - - # send frame to streamer - streamer.stream(frame_rgb, rgb_mode = True) #activate RGB Mode - - # Show output window - cv2.imshow("Output Frame", frame) - - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break - - # close output window - cv2.destroyAllWindows() - - # safely close video stream - stream.stop() - - # safely close streamer - streamer.terminate() - ``` - -=== "HLS" - - ```python hl_lines="28" - # import required libraries - from vidgear.gears import CamGear - from vidgear.gears import StreamGear - import cv2 - - # open any valid video stream(for e.g `foo1.mp4` file) - stream = CamGear(source='foo1.mp4').start() - - # describe a suitable manifest-file location/name - streamer = StreamGear(output="hls_out.m3u8", format = "hls") - - # loop over - while True: - - # read frames from stream - frame = stream.read() - - # check for frame if Nonetype - if frame is None: - break - - - # {simulating RGB frame for this example} - frame_rgb = frame[:,:,::-1] - - - # send frame to streamer - streamer.stream(frame_rgb, rgb_mode = True) #activate RGB Mode - - # Show output window - cv2.imshow("Output Frame", frame) - - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break - - # close output window - cv2.destroyAllWindows() - - # safely close video stream - stream.stop() - - # safely close streamer - 
streamer.terminate() - ``` - + !!! tip "Controlling chunk size in DASH" + To control the number of frames kept in Chunks for the DASH stream _(controlling latency)_, you can use the `-window_size` and `-extra_window_size` FFmpeg parameters. Lower values for these parameters will result in lower latency. -  - -## Bare-Minimum Usage with controlled Input-framerate - -In Real-time Frames Mode, StreamGear API provides exclusive [`-input_framerate`](../../params/#a-exclusive-parameters) attribute for its `stream_params` dictionary parameter, that allow us to set the assumed constant framerate for incoming frames. - -In this example, we will retrieve framerate from webcam video-stream, and set it as value for `-input_framerate` attribute in StreamGear: - -!!! danger "Remember, Input framerate default to `25.0` fps if [`-input_framerate`](../../params/#a-exclusive-parameters) attribute value not defined in Real-time Frames mode." + !!! alert "After every few chunks _(equal to the sum of `-window_size` and `-extra_window_size` values)_, all chunks will be overwritten while Live-Streaming. This means that newer chunks in the manifest will contain NO information from older chunks, and the resulting DASH stream will only play the most recent frames, reducing latency." - -=== "DASH" - - ```python hl_lines="10" + ```python linenums="1" hl_lines="11" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear import cv2 - # Open live video stream on webcam at first index(i.e. 
0) device + # open any valid video stream(from web-camera attached at index `0`) stream = CamGear(source=0).start() - # retrieve framerate from CamGear Stream and pass it as `-input_framerate` value - stream_params = {"-input_framerate":stream.framerate} + # enable livestreaming and retrieve framerate from CamGear Stream and + # pass it as `-input_framerate` parameter for controlled framerate + stream_params = {"-input_framerate": stream.framerate, "-livestream": True} - # describe a suitable manifest-file location/name and assign params + # describe a suitable manifest-file location/name streamer = StreamGear(output="dash_out.mpd", **stream_params) # loop over @@ -418,10 +304,8 @@ In this example, we will retrieve framerate from webcam video-stream, and set it if frame is None: break - # {do something with the frame here} - # send frame to streamer streamer.stream(frame) @@ -440,24 +324,30 @@ In this example, we will retrieve framerate from webcam video-stream, and set it stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python hl_lines="10" + !!! tip "Controlling chunk size in HLS" + To control the number of frames kept in Chunks for the HLS stream _(controlling latency)_, you can use the `-hls_init_time` & `-hls_time` FFmpeg parameters. Lower values for these parameters will result in lower latency. + + !!! alert "After every few chunks _(equal to the sum of `-hls_init_time` & `-hls_time` values)_, all chunks will be overwritten while Live-Streaming. This means that newer chunks in the master playlist will contain NO information from older chunks, and the resulting HLS stream will only play the most recent frames, reducing latency." + + ```python linenums="1" hl_lines="11" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear import cv2 - # Open live video stream on webcam at first index(i.e. 
0) device + # open any valid video stream(from web-camera attached at index `0`) stream = CamGear(source=0).start() - # retrieve framerate from CamGear Stream and pass it as `-input_framerate` value - stream_params = {"-input_framerate":stream.framerate} + # enable livestreaming and retrieve framerate from CamGear Stream and + # pass it as `-input_framerate` parameter for controlled framerate + stream_params = {"-input_framerate": stream.framerate, "-livestream": True} - # describe a suitable manifest-file location/name and assign params + # describe a suitable manifest-file location/name streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) # loop over @@ -470,10 +360,8 @@ In this example, we will retrieve framerate from webcam video-stream, and set it if frame is None: break - # {do something with the frame here} - # send frame to streamer streamer.stream(frame) @@ -492,22 +380,23 @@ In this example, we will retrieve framerate from webcam video-stream, and set it stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` +   ## Bare-Minimum Usage with OpenCV -You can easily use StreamGear API directly with any other Video Processing library(_For e.g. [OpenCV](https://github.com/opencv/opencv) itself_) in Real-time Frames Mode. +> You can easily use the StreamGear API directly with any other Video Processing library _(for e.g. [OpenCV](https://github.com/opencv/opencv))_ in Real-time Frames Mode. -The complete usage example is as follows: +The following is a complete StreamGear API usage example with OpenCV: -!!! tip "This just a bare-minimum example with OpenCV, but any other Real-time Frames Mode feature/example will work in the similar manner." +!!! note "This is a bare-minimum example with OpenCV, but any other Real-time Frames Mode feature or example will work in a similar manner." 
=== "DASH" - ```python + ```python linenums="1" # import required libraries from vidgear.gears import StreamGear import cv2 @@ -532,7 +421,6 @@ The complete usage example is as follows: # lets convert frame to gray for this example gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - # send frame to streamer streamer.stream(gray) @@ -551,12 +439,12 @@ The complete usage example is as follows: stream.release() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python + ```python linenums="1" # import required libraries from vidgear.gears import StreamGear import cv2 @@ -581,7 +469,6 @@ The complete usage example is as follows: # lets convert frame to gray for this example gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - # send frame to streamer streamer.stream(gray) @@ -600,33 +487,35 @@ The complete usage example is as follows: stream.release() # safely close streamer - streamer.terminate() + streamer.close() ``` -   ## Usage with Additional Streams -Similar to Single-Source Mode, you can easily generate any number of additional Secondary Streams of variable bitrates or spatial resolutions, using exclusive [`-streams`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. You just need to add each resolution and bitrate/framerate as list of dictionaries to this attribute, and rest is done automatically. +> Similar to Single-Source Mode, in addition to the Primary Stream, you can easily generate any number of additional Secondary Streams with variable bitrate or spatial resolution, using the exclusive [`-streams`](../../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter. + +To generate Secondary Streams, add each desired resolution and bitrate/framerate as a list of dictionaries to the `-streams` attribute. StreamGear will handle the rest automatically. The complete example is as follows: !!! 
info "A more detailed information on `-streams` attribute can be found [here ➶](../../params/#a-exclusive-parameters)" -The complete example is as follows: +!!! alert "In this mode, StreamGear DOES NOT automatically maps video-source audio to generated streams. You need to manually assign separate audio-source through [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." -??? danger "Important `-streams` attribute Information" - * On top of these additional streams, StreamGear by default, generates a primary stream of same resolution and framerate[^1] as the input, at the index `0`. - * :warning: Make sure your System/Machine/Server/Network is able to handle these additional streams, discretion is advised! - * You **MUST** need to define `-resolution` value for your stream, otherwise stream will be discarded! - * You only need either of `-video_bitrate` or `-framerate` for defining a valid stream. Since with `-framerate` value defined, video-bitrate is calculated automatically. - * If you define both `-video_bitrate` and `-framerate` values at the same time, StreamGear will discard the `-framerate` value automatically. +???+ danger "Important Information about `-streams` attribute :material-file-document-alert-outline:" -!!! fail "Always use `-stream` attribute to define additional streams safely, any duplicate or incorrect definition can break things!" + * In addition to the user-defined Secondary Streams, StreamGear automatically generates a Primary Stream _(at index `0`)_ with the same resolution as the input frames and at default framerate[^1]. + * :warning: Ensure that your system, machine, server, or network can handle the additional resource requirements of the Secondary Streams. Exercise discretion when configuring multiple streams. + * You **MUST** define the `-resolution` value for each stream; otherwise, the stream will be discarded. 
+ * You only need to define either the `-video_bitrate` or the `-framerate` for a valid stream. + * If you specify the `-framerate`, the video bitrate will be calculated automatically. + * If you define both the `-video_bitrate` and the `-framerate`, the `-framerate` will get discard automatically. +!!! failure "Always use the `-streams` attribute to define additional streams safely. Duplicate or incorrect definitions can break the transcoding pipeline and corrupt the output chunks." === "DASH" - ```python hl_lines="11-15" + ```python linenums="1" hl_lines="11-15" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear @@ -638,8 +527,8 @@ The complete example is as follows: # define various streams stream_params = { "-streams": [ - {"-resolution": "1280x720", "-framerate": 30.0}, # Stream2: 1280x720 at 30fps framerate - {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 at 60fps framerate + {"-resolution": "1280x720", "-framerate": 30.0}, # Stream1: 1280x720 at 30fps framerate + {"-resolution": "640x360", "-framerate": 60.0}, # Stream2: 640x360 at 60fps framerate {"-resolution": "320x240", "-video_bitrate": "500k"}, # Stream3: 320x240 at 500kbs bitrate ], } @@ -657,10 +546,8 @@ The complete example is as follows: if frame is None: break - # {do something with the frame here} - # send frame to streamer streamer.stream(frame) @@ -679,12 +566,12 @@ The complete example is as follows: stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python hl_lines="11-15" + ```python linenums="1" hl_lines="11-15" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear @@ -696,8 +583,8 @@ The complete example is as follows: # define various streams stream_params = { "-streams": [ - {"-resolution": "1280x720", "-framerate": 30.0}, # Stream2: 1280x720 at 30fps framerate - {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 
at 60fps framerate + {"-resolution": "1280x720", "-framerate": 30.0}, # Stream1: 1280x720 at 30fps framerate + {"-resolution": "640x360", "-framerate": 60.0}, # Stream2: 640x360 at 60fps framerate {"-resolution": "320x240", "-video_bitrate": "500k"}, # Stream3: 320x240 at 500kbs bitrate ], } @@ -715,10 +602,8 @@ The complete example is as follows: if frame is None: break - # {do something with the frame here} - # send frame to streamer streamer.stream(frame) @@ -737,27 +622,26 @@ The complete example is as follows: stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ```   ## Usage with File Audio-Input -In Real-time Frames Mode, if you want to add audio to your streams, you've to use exclusive [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. You just need to input the path of your audio file to this attribute as `string` value, and the API will automatically validate as well as maps it to all generated streams. +> In Real-time Frames Mode, if you want to add audio to your streams, you need to use the exclusive [`-audio`](../../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter. -The complete example is as follows: +To add a audio source, provide the path to your audio file as a string to the `-audio` attribute. The API will automatically validate and map the audio to all generated streams. The complete example is as follows: -!!! failure "Make sure this `-audio` audio-source it compatible with provided video-source, otherwise you could encounter multiple errors or no output at all." +!!! failure "Ensure the provided `-audio` audio source is compatible with the input video source. Incompatibility can cause multiple errors or result in no output at all." !!! 
warning "You **MUST** use [`-input_framerate`](../../params/#a-exclusive-parameters) attribute to set exact value of input framerate when using external audio in Real-time Frames mode, otherwise audio delay will occur in output streams." -!!! tip "You can also assign a valid Audio URL as input, rather than filepath. More details can be found [here ➶](../../params/#a-exclusive-parameters)" - +!!! tip "You can also assign a valid audio URL as input instead of a file path. More details can be found [here ➶](../../params/#a-exclusive-parameters)" === "DASH" - ```python hl_lines="16-17" + ```python linenums="1" hl_lines="16-17" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear @@ -774,7 +658,7 @@ The complete example is as follows: {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 at 60fps ], "-input_framerate": stream.framerate, # controlled framerate for audio-video sync !!! don't forget this line !!! - "-audio": "/home/foo/foo1.aac" # assigns input audio-source: "/home/foo/foo1.aac" + "-audio": "/home/foo/foo1.aac" # assign external audio-source } # describe a suitable manifest-file location/name and assign params @@ -812,12 +696,12 @@ The complete example is as follows: stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python hl_lines="16-17" + ```python linenums="1" hl_lines="16-17" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear @@ -834,7 +718,7 @@ The complete example is as follows: {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 at 60fps ], "-input_framerate": stream.framerate, # controlled framerate for audio-video sync !!! don't forget this line !!! 
- "-audio": "/home/foo/foo1.aac" # assigns input audio-source: "/home/foo/foo1.aac" + "-audio": "/home/foo/foo1.aac" # assign external audio-source } # describe a suitable manifest-file location/name and assign params @@ -872,25 +756,26 @@ The complete example is as follows: stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ```   ## Usage with Device Audio-Input -In Real-time Frames Mode, you've can also use exclusive [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter for streaming live audio from external device. You just need to format your audio device name followed by suitable demuxer as `list` and assign to this attribute, and the API will automatically validate as well as map it to all generated streams. +> In Real-time Frames Mode, you can also use the exclusive [`-audio`](../../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter for streaming live audio from an external device. -The complete example is as follows: +To stream live audio, format your audio device name followed by a suitable demuxer as a list, and assign it to the `-audio` attribute. The API will automatically validate and map the audio to all generated streams. The complete example is as follows: +!!! alert "Example Assumptions :octicons-checklist-24:" -!!! alert "Example Assumptions" + - [x] You're running a Windows machine with all necessary audio drivers and software installed. + - [x] There's an audio device named "Microphone (USB2.0 Camera)" connected to your Windows machine. Check instructions below to use device sources with the `-audio` attribute on different OS platforms. - * You're running are Windows machine with all neccessary audio drivers and software installed. - * There's a audio device with named `"Microphone (USB2.0 Camera)"` connected to your windows machine. +??? info "Using devices sources with `-audio` attribute on different OS platforms" -??? 
tip "Using devices with `-audio` attribute on different OS platforms" + To use device sources with the `-audio` attribute on different OS platforms, follow these instructions: === ":fontawesome-brands-windows: Windows" @@ -922,12 +807,12 @@ The complete example is as follows: - [x] **Specify Sound Card:** Then, you can specify your located soundcard in StreamGear as follows: - ```python + ```python linenums="1" # assign appropriate input audio-source device and demuxer device and demuxer stream_params = {"-audio": ["-f","dshow", "-i", "audio=Microphone (USB2.0 Camera)"]} ``` - !!! fail "If audio still doesn't work then [checkout this troubleshooting guide ➶](https://www.maketecheasier.com/fix-microphone-not-working-windows10/) or reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then [checkout this troubleshooting guide ➶](https://www.maketecheasier.com/fix-microphone-not-working-windows10/) or reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" === ":material-linux: Linux" @@ -962,12 +847,12 @@ The complete example is as follows: !!! info "The easiest thing to do is to reference sound card directly, namely "card 0" (Intel ICH5) and "card 1" (Microphone on the USB web cam), as `hw:0` or `hw:1`" - ```python + ```python linenums="1" # assign appropriate input audio-source device and demuxer device and demuxer stream_params = {"-audio": ["-f","alsa", "-i", "hw:1"]} ``` - !!! fail "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! 
failure "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" === ":material-apple: MacOS" @@ -998,48 +883,47 @@ The complete example is as follows: - [x] **Specify Sound Card:** Then, you can specify your located soundcard in StreamGear as follows: - ```python + ```python linenums="1" # assign appropriate input audio-source device and demuxer stream_params = {"-audio": ["-f","avfoundation", "-audio_device_index", "0"]} ``` - !!! fail "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" +!!! tip "It is advised to use this example with live-streaming enabled(`True`) by using StreamGear API's exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." -!!! danger "Make sure this `-audio` audio-source it compatible with provided video-source, otherwise you could encounter multiple errors or no output at all." +!!! failure "Ensure the provided `-audio` audio source is compatible with the video source device. Incompatibility can cause multiple errors or result in no output at all." !!! warning "You **MUST** use [`-input_framerate`](../../params/#a-exclusive-parameters) attribute to set exact value of input framerate when using external audio in Real-time Frames mode, otherwise audio delay will occur in output streams." -!!! note "It is advised to use this example with live-streaming enabled(True) by using StreamGear API's exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." 
- - === "DASH" - ```python hl_lines="18-24" + ```python linenums="1" hl_lines="18-25" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear import cv2 - # open any valid video stream(for e.g `foo1.mp4` file) - stream = CamGear(source="foo1.mp4").start() + # open any valid DEVICE video stream + stream = CamGear(source=0).start() # add various streams, along with custom audio stream_params = { "-streams": [ { - "-resolution": "1280x720", + "-resolution": "640x360", "-video_bitrate": "4000k", - }, # Stream1: 1280x720 at 4000kbs bitrate - {"-resolution": "640x360", "-framerate": 30.0}, # Stream2: 640x360 at 30fps + }, # Stream1: 640x360 at 4000kbs bitrate + {"-resolution": "320x240", "-framerate": 30.0}, # Stream2: 320x240 at 30fps ], "-input_framerate": stream.framerate, # controlled framerate for audio-video sync !!! don't forget this line !!! + "-livestream": True, "-audio": [ "-f", "dshow", "-i", "audio=Microphone (USB2.0 Camera)", - ], # assign appropriate input audio-source device and demuxer + ], # assign appropriate input audio-source device(compatible with video source) and its demuxer } # describe a suitable manifest-file location/name and assign params @@ -1075,40 +959,41 @@ The complete example is as follows: stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python hl_lines="18-24" + ```python linenums="1" hl_lines="18-25" # import required libraries from vidgear.gears import CamGear from vidgear.gears import StreamGear import cv2 - # open any valid video stream(for e.g `foo1.mp4` file) - stream = CamGear(source="foo1.mp4").start() + # open any valid DEVICE video stream + stream = CamGear(source=0).start() # add various streams, along with custom audio stream_params = { "-streams": [ { - "-resolution": "1280x720", + "-resolution": "640x360", "-video_bitrate": "4000k", - }, # Stream1: 1280x720 at 4000kbs bitrate - {"-resolution": "640x360", "-framerate": 30.0}, # 
Stream2: 640x360 at 30fps + }, # Stream1: 640x360 at 4000kbs bitrate + {"-resolution": "320x240", "-framerate": 30.0}, # Stream2: 320x240 at 30fps ], "-input_framerate": stream.framerate, # controlled framerate for audio-video sync !!! don't forget this line !!! + "-livestream": True, "-audio": [ "-f", "dshow", "-i", "audio=Microphone (USB2.0 Camera)", - ], # assign appropriate input audio-source device and demuxer + ], # assign appropriate input audio-source device(compatible with video source) and its demuxer } # describe a suitable manifest-file location/name and assign params - streamer = StreamGear(output="dash_out.m3u8", format="hls", **stream_params) + streamer = StreamGear(output="hls_out.m3u8", format="hls", **stream_params) # loop over while True: @@ -1140,23 +1025,22 @@ The complete example is as follows: stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ```   ## Usage with Hardware Video-Encoder +> In Real-time Frames Mode, you can easily change the video encoder according to your requirements by passing the `-vcodec` FFmpeg parameter as an attribute in the `stream_params` dictionary parameter. Additionally, you can specify additional properties, features, and optimizations for your system's GPU. -In Real-time Frames Mode, you can also easily change encoder as per your requirement just by passing `-vcodec` FFmpeg parameter as an attribute in `stream_params` dictionary parameter. In addition to this, you can also specify the additional properties/features/optimizations for your system's GPU similarly. - -In this example, we will be using `h264_vaapi` as our hardware encoder and also optionally be specifying our device hardware's location (i.e. 
`'-vaapi_device':'/dev/dri/renderD128'`) and other features such as `'-vf':'format=nv12,hwupload'` like properties by formatting them as `option` dictionary parameter's attributes, as follows: +In this example, we will be using `h264_vaapi` as our Hardware Encoder and specifying the device hardware's location and compatible video filters by formatting them as attributes in the `stream_params` dictionary parameter. -!!! warning "Check VAAPI support" +!!! danger "This example is just conveying the idea of how to use FFmpeg's hardware encoders with the StreamGear API in Real-time Frames Mode, which MAY OR MAY NOT suit your system. Please use suitable parameters based on your supported system and FFmpeg configurations only." - **This example is just conveying the idea on how to use FFmpeg's hardware encoders with WriteGear API in Compression mode, which MAY/MAY-NOT suit your system. Kindly use suitable parameters based your supported system and FFmpeg configurations only.** +???+ info "Checking VAAPI Support for Hardware Encoding" - To use `h264_vaapi` encoder, remember to check if its available and your FFmpeg compiled with VAAPI support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows: + To use **VAAPI** (Video Acceleration API) as a hardware encoder in this example, follow these steps to ensure your FFmpeg supports VAAPI: ```sh ffmpeg -hide_banner -encoders | grep vaapi @@ -1168,10 +1052,12 @@ In this example, we will be using `h264_vaapi` as our hardware encoder and also V..... vp8_vaapi VP8 (VAAPI) (codec vp8) ``` +!!! failure "Please read the [**FFmpeg Documentation**](https://ffmpeg.org/documentation.html) carefully before passing any additional values to the `stream_params` parameter. Incorrect values may cause errors or result in no output." 
+ === "DASH" - ```python hl_lines="16-18" + ```python linenums="1" hl_lines="16-18" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import StreamGear @@ -1189,7 +1075,7 @@ In this example, we will be using `h264_vaapi` as our hardware encoder and also ], "-vcodec": "h264_vaapi", # define custom Video encoder "-vaapi_device": "/dev/dri/renderD128", # define device location - "-vf": "format=nv12,hwupload", # define video pixformat + "-vf": "format=nv12,hwupload", # define video filters } # describe a suitable manifest-file location/name and assign params @@ -1227,12 +1113,12 @@ In this example, we will be using `h264_vaapi` as our hardware encoder and also stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ``` === "HLS" - ```python hl_lines="16-18" + ```python linenums="1" hl_lines="16-18" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import StreamGear @@ -1288,10 +1174,10 @@ In this example, we will be using `h264_vaapi` as our hardware encoder and also stream.stop() # safely close streamer - streamer.terminate() + streamer.close() ```   [^1]: - :bulb: In Real-time Frames Mode, the Primary Stream's framerate defaults to [`-input_framerate`](../../params/#a-exclusive-parameters) attribute value, if defined, else it will be 25fps. \ No newline at end of file + :bulb: In Real-time Frames Mode, the Primary Stream's framerate defaults to the value of the [`-input_framerate`](../../params/#a-exclusive-parameters) attribute, if defined. Otherwise, it will be set to 25 fps. \ No newline at end of file diff --git a/docs/gears/streamgear/ssm/overview.md b/docs/gears/streamgear/ssm/overview.md index 0fb479499..bbcdc32ce 100644 --- a/docs/gears/streamgear/ssm/overview.md +++ b/docs/gears/streamgear/ssm/overview.md @@ -18,7 +18,7 @@ limitations under the License. 
=============================================== --> -# StreamGear API: Single-Source Mode +# StreamGear API: Single-Source Mode :material-file-video-outline:
Single-Source Mode Flow Diagram @@ -45,7 +45,7 @@ This mode can be easily activated by assigning suitable video path as input to [ Apple HLS support was added in `v0.2.2`. -!!! warning +!!! danger "Please Remember :material-police-badge-outline:" * Using [`stream()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.stream) function instead of [`transcode_source()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.transcode_source) in Single-Source Mode will instantly result in **`RuntimeError`**! * Any invalid value to the [`-video_source`](../../params/#a-exclusive-parameters) attribute will result in **`AssertionError`**! @@ -58,7 +58,7 @@ This mode can be easily activated by assigning suitable video path as input to [ See here 🚀
-!!! experiment "After going through StreamGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)" +!!! example "After going through StreamGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)" ## Parameters diff --git a/docs/gears/streamgear/ssm/usage.md b/docs/gears/streamgear/ssm/usage.md index b9835611b..2f881ef49 100644 --- a/docs/gears/streamgear/ssm/usage.md +++ b/docs/gears/streamgear/ssm/usage.md @@ -18,20 +18,28 @@ limitations under the License. =============================================== --> -# StreamGear API Usage Examples: Single-Source Mode +# StreamGear API Usage Examples: Single-Source Mode :material-file-video-outline: -!!! warning "Important Information" +!!! warning "Important Information :fontawesome-solid-person-military-pointing:" - * StreamGear **MUST** requires FFmpeg executables for its core operations. Follow these dedicated [Platform specific Installation Instructions ➶](../../ffmpeg_install/) for its installation. + - [x] StreamGear **MUST** requires FFmpeg executables for its core operations. Follow these dedicated [Platform specific Installation Instructions ➶](../../ffmpeg_install/) for its installation. API will throw **RuntimeError**, if it fails to detect valid FFmpeg executables on your system. + - [x] In this mode, ==API auto generates a primary stream of same resolution and framerate[^1] as the input video _(at the index `0`)_.== + - [x] In this mode, if input video-source _(i.e. `-video_source`)_ contains any audio stream/channel, then it automatically gets mapped to all generated streams. + - [x] Always use `close()` function at the very end of the main code. - * StreamGear API will throw **RuntimeError**, if it fails to detect valid FFmpeg executables on your system. 
- - * By default, ==StreamGear generates a primary stream of same resolution and framerate[^1] as the input video _(at the index `0`)_.== - - * Always use `terminate()` function at the very end of the main code. +??? danger "DEPRECATION NOTICES for `v0.3.3` and above" + + - [ ] The `terminate()` method in StreamGear is now deprecated and will be removed in a future release. Developers should use the new [`close()`](../../../../bonus/reference/streamgear/#vidgear.gears.streamgear.StreamGear.close) method instead, as it offers a more descriptive name, similar to the WriteGear API, for safely terminating StreamGear processes. + - [ ] The [`-livestream`](../../params/#a-exclusive-parameters) optional parameter is NOT supported in this Single-Source Mode. +??? tip "Faster Transcoding of Primary Stream with Stream Copy in Single Source Mode" + + For faster transcoding of input video in this mode, utilize Stream copy (`-vcodec copy`) as the input video encoder for creating HLS/DASH chunks of the primary stream efficiently. However, consider the following points: + + - :warning: Stream copying **NOT** compatible with Custom Streams ([`-streams`](../../params/#a-exclusive-parameters)), which require re-encoding for each additional stream. Therefore, the `-vcodec copy` parameter will be ignored. + - When using the audio stream from the input video, the Audio Stream copy (`-acodec copy`) encoder will be automatically applied. -!!! experiment "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)" +!!! example "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/streamgear_ex/)"   @@ -40,11 +48,11 @@ limitations under the License. Following is the bare-minimum code you need to get started with StreamGear API in Single-Source Mode: -!!! note "If input video-source _(i.e. 
`-video_source`)_ contains any audio stream/channel, then it automatically gets mapped to all generated streams without any extra efforts." +!!! note "If input video-source _(i.e. `-video_source`)_ contains any audio stream/channel, then it automatically gets mapped to all generated streams." === "DASH" - ```python + ```python linenums="1" # import required libraries from vidgear.gears import StreamGear @@ -52,15 +60,17 @@ Following is the bare-minimum code you need to get started with StreamGear API i stream_params = {"-video_source": "foo.mp4"} # describe a suitable manifest-file location/name and assign params streamer = StreamGear(output="dash_out.mpd", **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ``` + !!! success "After running this bare-minimum example, StreamGear will produce a Manifest file (`dash_out.mpd`) with streamable chunks, containing information about a Primary Stream with the same resolution and framerate as the input." + === "HLS" - ```python + ```python linenums="1" # import required libraries from vidgear.gears import StreamGear @@ -68,92 +78,41 @@ Following is the bare-minimum code you need to get started with StreamGear API i stream_params = {"-video_source": "foo.mp4"} # describe a suitable master playlist location/name and assign params streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ``` + !!! success "After running this bare-minimum example, StreamGear will produce a Master Playlist file (`hls_out.m3u8`) with streamable chunks, containing information about a Primary Stream with the same resolution and framerate as the input." -!!! 
success "After running this bare-minimum example, StreamGear will produce a Manifest file _(`dash.mpd`)_ with streamable chunks that contains information about a Primary Stream of same resolution and framerate as the input." - -  - -## Bare-Minimum Usage with Live-Streaming - -You can easily activate ==Low-latency Livestreaming in Single-Source Mode==, where chunks will contain information for few new frames only and forgets all previous ones), using exclusive [`-livestream`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter as follows: - -!!! note "If input video-source _(i.e. `-video_source`)_ contains any audio stream/channel, then it automatically gets mapped to all generated streams without any extra efforts." - -=== "DASH" - - !!! tip "Chunk size in DASH" - Use `-window_size` & `-extra_window_size` FFmpeg parameters for controlling number of frames to be kept in Chunks in DASH stream. Less these value, less will be latency. - - !!! alert "After every few chunks _(equal to the sum of `-window_size` & `-extra_window_size` values)_, all chunks will be overwritten in Live-Streaming. Thereby, since newer chunks in manifest will contain NO information of any older ones, and therefore resultant DASH stream will play only the most recent frames." - - ```python hl_lines="5" - # import required libraries - from vidgear.gears import StreamGear - - # activate Single-Source Mode with valid video input and enable livestreaming - stream_params = {"-video_source": 0, "-livestream": True} - # describe a suitable manifest-file location/name and assign params - streamer = StreamGear(output="dash_out.mpd", **stream_params) - # trancode source - streamer.transcode_source() - # terminate - streamer.terminate() - ``` - -=== "HLS" - - !!! tip "Chunk size in HLS" - - Use `-hls_init_time` & `-hls_time` FFmpeg parameters for controlling number of frames to be kept in Chunks in HLS stream. Less these value, less will be latency. - - !!! 
alert "After every few chunks _(equal to the sum of `-hls_init_time` & `-hls_time` values)_, all chunks will be overwritten in Live-Streaming. Thereby, since newer chunks in playlist will contain NO information of any older ones, and therefore resultant HLS stream will play only the most recent frames." - - ```python hl_lines="5" - # import required libraries - from vidgear.gears import StreamGear - - # activate Single-Source Mode with valid video input and enable livestreaming - stream_params = {"-video_source": 0, "-livestream": True} - # describe a suitable master playlist location/name and assign params - streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) - # trancode source - streamer.transcode_source() - # terminate - streamer.terminate() - ```   ## Usage with Additional Streams -In addition to Primary Stream, you can easily generate any number of additional Secondary Streams of variable bitrates or spatial resolutions, using exclusive [`-streams`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. You just need to add each resolution and bitrate/framerate as list of dictionaries to this attribute, and rest is done automatically. +> In addition to the Primary Stream, you can easily generate any number of additional Secondary Streams with variable bitrate or spatial resolutions, using the exclusive [`-streams`](../../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter. -!!! info "A more detailed information on `-streams` attribute can be found [here ➶](../../params/#a-exclusive-parameters)" +To generate Secondary Streams, add each desired resolution and bitrate/framerate as a list of dictionaries to the `-streams` attribute. StreamGear will handle the rest automatically. The complete example is as follows: -The complete example is as follows: +!!! 
info "A more detailed information on `-streams` attribute can be found [here ➶](../../params/#a-exclusive-parameters)" -!!! note "If input video-source contains any audio stream/channel, then it automatically gets assigned to all generated streams without any extra efforts." +!!! note "If input video-source _(i.e. `-video_source`)_ contains any audio stream/channel, then it automatically gets mapped to all generated streams without any extra efforts." -??? danger "Important `-streams` attribute Information" - - * On top of these additional streams, StreamGear by default, generates a primary stream of same resolution and framerate as the input, at the index `0`. - * :warning: Make sure your System/Machine/Server/Network is able to handle these additional streams, discretion is advised! - * You **MUST** need to define `-resolution` value for your stream, otherwise stream will be discarded! - * You only need either of `-video_bitrate` or `-framerate` for defining a valid stream. Since with `-framerate` value defined, video-bitrate is calculated automatically. - * If you define both `-video_bitrate` and `-framerate` values at the same time, StreamGear will discard the `-framerate` value automatically. +???+ danger "Important Information about `-streams` attribute :material-file-document-alert-outline:" -!!! fail "Always use `-stream` attribute to define additional streams safely, any duplicate or incorrect definition can break things!" + * In addition to the user-defined Secondary Streams, StreamGear automatically generates a Primary Stream _(at index `0`)_ with the same resolution and framerate as the input video-source _(i.e. `-video_source`)_. + * :warning: Ensure that your system, machine, server, or network can handle the additional resource requirements of the Secondary Streams. Exercise discretion when configuring multiple streams. + * You **MUST** define the `-resolution` value for each stream; otherwise, the stream will be discarded. 
+ * You only need to define either the `-video_bitrate` or the `-framerate` for a valid stream. + * If you specify the `-framerate`, the video bitrate will be calculated automatically. + * If you define both the `-video_bitrate` and the `-framerate`, the `-framerate` will get discarded automatically. +!!! failure "Always use the `-streams` attribute to define additional streams safely. Duplicate or incorrect definitions can break the transcoding pipeline and corrupt the output chunks." === "DASH" - ```python hl_lines="6-12" + ```python linenums="1" hl_lines="7-12" # import required libraries from vidgear.gears import StreamGear @@ -169,15 +128,15 @@ The complete example is as follows: } # describe a suitable manifest-file location/name and assign params streamer = StreamGear(output="dash_out.mpd", **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ``` === "HLS" - ```python hl_lines="6-12" + ```python linenums="1" hl_lines="7-12" # import required libraries from vidgear.gears import StreamGear @@ -193,28 +152,28 @@ The complete example is as follows: } # describe a suitable master playlist location/name and assign params streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ```   -## Usage with Custom Audio +## Usage with Custom Audio-Input -By default, if input video-source _(i.e. `-video_source`)_ contains any audio, then it gets automatically mapped to all generated streams. But, if you want to add any custom audio, you can easily do it by using exclusive [`-audio`](../../params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. 
You just need to input the path of your audio file to this attribute as `string`, and the API will automatically validate as well as map it to all generated streams. +> In single source mode, by default, if the input video source (i.e., `-video_source`) contains audio, it gets automatically mapped to all generated streams. However, if you want to add a custom audio source, you can use the exclusive [`-audio`](../../params/#a-exclusive-parameters) attribute of the `stream_params` dictionary parameter. -The complete example is as follows: +To add a custom audio source, provide the path to your audio file as a string to the `-audio` attribute. The API will automatically validate and map the audio to all generated streams. The complete example is as follows: -!!! failure "Make sure this `-audio` audio-source it compatible with provided video-source, otherwise you could encounter multiple errors or no output at all." +!!! failure "Ensure the provided `-audio` audio source is compatible with the input video source (`-video_source`). Incompatibility can cause multiple errors or result in no output at all." -!!! tip "You can also assign a valid Audio URL as input, rather than filepath. More details can be found [here ➶](../../params/#a-exclusive-parameters)" +!!! tip "You can also assign a valid audio URL as input instead of a file path. 
More details can be found [here ➶](../../params/#a-exclusive-parameters)" === "DASH" - ```python hl_lines="12" + ```python linenums="1" hl_lines="11-12" # import required libraries from vidgear.gears import StreamGear @@ -222,23 +181,23 @@ The complete example is as follows: stream_params = { "-video_source": "foo.mp4", "-streams": [ - {"-resolution": "1920x1080", "-video_bitrate": "4000k"}, # Stream1: 1920x1080 at 4000kbs bitrate - {"-resolution": "1280x720", "-framerate": 30.0}, # Stream2: 1280x720 at 30fps - {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 at 60fps + {"-resolution": "1280x720", "-video_bitrate": "4000k"}, # Stream1: 1280x720 at 4000kbs bitrate + {"-resolution": "640x360", "-framerate": 60.0}, # Stream2: 640x360 at 60fps ], - "-audio": "/home/foo/foo1.aac" # assigns input audio-source: "/home/foo/foo1.aac" + "-audio": "/home/foo/foo1.aac", # define custom audio-source + "-acodec": "copy", # define copy audio encoder } # describe a suitable manifest-file location/name and assign params streamer = StreamGear(output="dash_out.mpd", **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ``` === "HLS" - ```python hl_lines="12" + ```python linenums="1" hl_lines="11-12" # import required libraries from vidgear.gears import StreamGear @@ -246,18 +205,18 @@ The complete example is as follows: stream_params = { "-video_source": "foo.mp4", "-streams": [ - {"-resolution": "1920x1080", "-video_bitrate": "4000k"}, # Stream1: 1920x1080 at 4000kbs bitrate - {"-resolution": "1280x720", "-framerate": 30.0}, # Stream2: 1280x720 at 30fps - {"-resolution": "640x360", "-framerate": 60.0}, # Stream3: 640x360 at 60fps + {"-resolution": "1280x720", "-video_bitrate": "4000k"}, # Stream1: 1280x720 at 4000kbs bitrate + {"-resolution": "640x360", "-framerate": 60.0}, # Stream2: 640x360 at 60fps ], - "-audio": "/home/foo/foo1.aac" # assigns input audio-source: 
"/home/foo/foo1.aac" + "-audio": "/home/foo/foo1.aac", # define custom audio-source + "-acodec": "copy", # define copy audio encoder } # describe a suitable master playlist location/name and assign params streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ``` @@ -266,75 +225,68 @@ The complete example is as follows: ## Usage with Variable FFmpeg Parameters -For seamlessly generating these streaming assets, StreamGear provides a highly extensible and flexible wrapper around [**FFmpeg**](https://ffmpeg.org/) and access to almost all of its parameter. Thereby, you can access almost any parameter available with FFmpeg itself as dictionary attributes in [`stream_params` dictionary parameter](../../params/#stream_params), and use it to manipulate transcoding as you like. +> For fine-grained control over the transcoding process, StreamGear provides a highly extensible and flexible wrapper around [**FFmpeg**](https://ffmpeg.org/) library and access to almost all of its configurational parameter. -For this example, let us use our own [H.265/HEVC](https://trac.ffmpeg.org/wiki/Encode/H.265) video and [AAC](https://trac.ffmpeg.org/wiki/Encode/AAC) audio encoder, and set custom audio bitrate, and various other optimizations: +In this example, we'll use the [H.265/HEVC](https://trac.ffmpeg.org/wiki/Encode/H.265) video encoder and [AAC](https://trac.ffmpeg.org/wiki/Encode/AAC) audio encoder, apply various optimal FFmpeg configurational parameters. +!!! warning "This example assumes that the given input video source (`-video_source`) contains at least one audio stream." -!!! tip "This example is just conveying the idea on how to use FFmpeg's encoders/parameters with StreamGear API. You can use any FFmpeg parameter in the similar manner." +!!! 
info "This example is just conveying the idea on how to use FFmpeg's internal encoders/parameters with StreamGear API. You can use any FFmpeg parameter in the similar manner." -!!! danger "Kindly read [**FFmpeg Docs**](https://ffmpeg.org/documentation.html) carefully, before passing any FFmpeg values to `stream_params` parameter. Wrong values may result in undesired errors or no output at all." +!!! danger "Please read the [**FFmpeg Documentation**](https://ffmpeg.org/documentation.html) carefully before passing any additional values to the `stream_params` parameter. Incorrect values may cause errors or result in no output." -!!! fail "Always use `-streams` attribute to define additional streams safely, any duplicate or incorrect stream definition can break things!" === "DASH" - ```python hl_lines="6-10 15-17" + ```python linenums="1" hl_lines="6-9 14" # import required libraries from vidgear.gears import StreamGear # activate Single-Source Mode and various other parameters stream_params = { "-video_source": "foo.mp4", # define Video-Source - "-vcodec": "libx265", # assigns H.265/HEVC video encoder + "-vcodec": "libx265", # specify H.265/HEVC video encoder "-x265-params": "lossless=1", # enables Lossless encoding - "-crf": 25, # Constant Rate Factor: 25 - "-bpp": "0.15", # Bits-Per-Pixel(BPP), an Internal StreamGear parameter to ensure good quality of high motion scenes + "-bpp": 0.15, # Bits-Per-Pixel(BPP), an Internal StreamGear parameter to ensure good quality of high motion scenes "-streams": [ - {"-resolution": "1280x720", "-video_bitrate": "4000k"}, # Stream1: 1280x720 at 4000kbs bitrate - {"-resolution": "640x360", "-framerate": 60.0}, # Stream2: 640x360 at 60fps + {"-resolution": "640x360", "-video_bitrate": "4000k"}, # Stream1: 1280x720 at 4000kbs bitrate + {"-resolution": "320x240", "-framerate": 60.0}, # Stream2: 640x360 at 60fps ], - "-audio": "/home/foo/foo1.aac", # define input audio-source: "/home/foo/foo1.aac", - "-acodec": "libfdk_aac", # assign 
lossless AAC audio encoder - "-vbr": 4, # Variable Bit Rate: `4` + "-acodec": "aac", # specify AAC audio encoder } # describe a suitable manifest-file location/name and assign params streamer = StreamGear(output="dash_out.mpd", logging=True, **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - streamer.terminate() + # close + streamer.close() ``` === "HLS" - ```python hl_lines="6-10 15-17" + ```python linenums="1" hl_lines="6-9 14" # import required libraries from vidgear.gears import StreamGear - # activate Single-Source Mode and various other parameters stream_params = { "-video_source": "foo.mp4", # define Video-Source - "-vcodec": "libx265", # assigns H.265/HEVC video encoder + "-vcodec": "libx265", # specify H.265/HEVC video encoder "-x265-params": "lossless=1", # enables Lossless encoding - "-crf": 25, # Constant Rate Factor: 25 - "-bpp": "0.15", # Bits-Per-Pixel(BPP), an Internal StreamGear parameter to ensure good quality of high motion scenes + "-bpp": 0.15, # Bits-Per-Pixel(BPP), an Internal StreamGear parameter to ensure good quality of high motion scenes "-streams": [ - {"-resolution": "1280x720", "-video_bitrate": "4000k"}, # Stream1: 1280x720 at 4000kbs bitrate - {"-resolution": "640x360", "-framerate": 60.0}, # Stream2: 640x360 at 60fps + {"-resolution": "640x360", "-video_bitrate": "4000k"}, # Stream1: 1280x720 at 4000kbs bitrate + {"-resolution": "320x240", "-framerate": 60.0}, # Stream2: 640x360 at 60fps ], - "-audio": "/home/foo/foo1.aac", # define input audio-source: "/home/foo/foo1.aac", - "-acodec": "libfdk_aac", # assign lossless AAC audio encoder - "-vbr": 4, # Variable Bit Rate: `4` + "-acodec": "aac", # specify AAC audio encoder } # describe a suitable master playlist file location/name and assign params streamer = StreamGear(output="hls_out.m3u8", format = "hls", logging=True, **stream_params) - # trancode source + # transcode source streamer.transcode_source() - # terminate - 
streamer.terminate() + # close + streamer.close() ```   diff --git a/docs/gears/videogear/overview.md b/docs/gears/videogear/overview.md index b37518dd9..418ee25fa 100644 --- a/docs/gears/videogear/overview.md +++ b/docs/gears/videogear/overview.md @@ -43,23 +43,13 @@ VideoGear is ideal when you need to switch to different video sources without ch   -## Importing - -You can import VideoGear API in your program as follows: - -```python -from vidgear.gears import VideoGear -``` - -  - ## Usage Examples
See here 🚀
-!!! experiment "After going through VideoGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/videogear_ex/)" +!!! example "After going through VideoGear Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/videogear_ex/)" ## Parameters diff --git a/docs/gears/videogear/params.md b/docs/gears/videogear/params.md index fc5b3a5f0..ea4817a96 100644 --- a/docs/gears/videogear/params.md +++ b/docs/gears/videogear/params.md @@ -190,7 +190,7 @@ This parameter controls the Stream Mode, .i.e if enabled(`stream_mode=True`), th !!! warning "VideoGear automatically enforce GStreamer backend _(backend=`cv2.CAP_GSTREAMER`)_ for YouTube-livestreams!" -!!! error "VideoGear will exit with `RuntimeError` for YouTube livestreams, if OpenCV is not compiled with GStreamer(`>=v1.0.0`) support. Checkout [this FAQ](../../../help/camgear_faqs/#how-to-compile-opencv-with-gstreamer-support) for compiling OpenCV with GStreamer support." +!!! failure "VideoGear will exit with `RuntimeError` for YouTube livestreams, if OpenCV is not compiled with GStreamer(`>=v1.0.0`) support. Checkout [this FAQ](../../../help/camgear_faqs/#how-to-compile-opencv-with-gstreamer-support) for compiling OpenCV with GStreamer support." **Data-Type:** Boolean @@ -260,9 +260,7 @@ VideoGear(source=0, **options) ### **`camera_num`** -This parameter selects the camera module index which will be used as source, if you're having multiple camera modules connected. Its value can only be greater than zero, otherwise, it will throw `ValueError` for any negative value. - -!!! warning "This parameter shouldn't be altered, until unless you using [Raspberry Pi 3/3+ Compute Module IO Board](https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md)."" +This parameter selects the camera index to be used as the source, allowing you to drive these multiple cameras simultaneously from within a single Python session. 
Its value can only be zero or greater, otherwise, VideoGear API will throw `ValueError` for any negative value. **Data-Type:** Integer @@ -271,18 +269,23 @@ This parameter selects the camera module index which will be used as source, if **Usage:** ```python -VideoGear(enablePiCamera=True, camera_num=0) +# select Camera Module at index `1` +VideoGear(enablePiCamera=True, camera_num=1) ``` + +!!! example "The complete usage example demonstrating the usage of the `camera_num` parameter is available [here ➶](../../../help/pigear_ex/#accessing-multiple-camera-through-its-index-in-pigear-api)." +   ### **`resolution`** -This parameter sets the resolution (i.e. `(width,height)`) of the source. +This parameter controls the **resolution** - a tuple _(i.e. `(width,height)`)_ of two values giving the width and height of the output frames. -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.resolution)" +!!! warning "Make sure both width and height values should be at least `64`." +!!! danger "When using the Picamera2 backend, the `resolution` parameter will be **OVERRIDDEN**, if the user explicitly defines the `output_size` property of the [`sensor`](#a-configurational-camera-parameters) configurational parameter." **Data-Type:** Tuple @@ -298,11 +301,8 @@ VideoGear(enablePiCamera=True, resolution=(1280,720)) # sets 1280x720 resolution ### **`framerate`** -This parameter sets the framerate of the source. - - -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.framerate)" +This parameter controls the framerate of the source. 
**Data-Type:** integer/float @@ -319,7 +319,18 @@ VideoGear(enablePiCamera=True, framerate=60) # sets 60fps framerate ### **`options`** -This parameter provides the ability to alter various **Tweak Parameters** `like brightness, saturation, senor_mode, resolution, etc.` available within [**Picamera library**](https://picamera.readthedocs.io/en/release-1.13/api_camera.html). +This dictionary parameter in the internal PiGear API backend allows you to control various camera settings for both the `picamera2` and legacy `picamera` backends and some internal API tasks. These settings include: + +#### A. Configurational Camera Parameters +- [x] These parameters are provided by the underlying backend library _(depending upon backend in use)_, and must be applied to the camera system before the camera can be started. +- [x] **These parameter include:** _Brightness, Contrast, Saturation, Exposure, Colour Temperature, Colour Gains, etc._ +- [x] All supported parameters are listed in this [Usage example ➶](../../pigear/usage/#using-pigear-with-variable-camera-properties) + + +#### B. User-defined Parameters +- [x] These user-defined parameters control specific internal behaviors of the API and perform certain tasks on the camera objects. +- [x] All supported User-defined Parameters are listed [here ➶](../../pigear/params/#b-user-defined-parameters) + **Data-Type:** Dictionary @@ -327,32 +338,41 @@ This parameter provides the ability to alter various **Tweak Parameters** `like **Usage:** -!!! tip "All supported parameters are listed in [PiCamera Docs](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" +!!! example "The complete usage example demonstrating the usage of the `options` parameter is available [here ➶](../usage/#using-pigear-with-variable-camera-properties)." 
-The desired parameters can be passed to VideoGear API by formatting them as this parameter's attributes, as follows: - -```python -# formatting parameters as dictionary attributes -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} -# assigning it -VideoGear(enablePiCamera=True, logging=True, **options) -``` +You can format these user-defined and configurational parameters as attributes of this `options` dictionary parameter as follows: -**User-specific attributes:** +=== "New Picamera2 backend" -Additionally, `options` parameter also support some User-specific attributes, which are as follows: + ```python + # formulate various Picamera2 API parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "exposure_compensation": 15, + "sensor": {"output_size": (480, 320)}, # !!! will override `resolution` !!! + } + + # open pi video stream with defined parameters + stream = VideoGear(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() + ``` -* **`HWFAILURE_TIMEOUT`** (float): PiGear contains ==Threaded Internal Timer== - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur at a timeout value. This parameter can be used to control that timeout value i.e. the maximum waiting time _(in seconds)_ after which PiGear exits with a `SystemError` to save resources. Its value can only be between `1.0` _(min)_ and `10.0` _(max)_ and its default value is `2.0`. 
Its usage is as follows: +=== "Legacy Picamera backend" ```python - options = {"HWFAILURE_TIMEOUT": 2.5} # sets timeout to 2.5 seconds + # formulate various Picamera API parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = VideoGear(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() ```   diff --git a/docs/gears/videogear/usage.md b/docs/gears/videogear/usage.md index 17eb8a345..6ddfb1616 100644 --- a/docs/gears/videogear/usage.md +++ b/docs/gears/videogear/usage.md @@ -20,7 +20,7 @@ limitations under the License. # VideoGear API Usage Examples: -!!! experiment "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/videogear_ex/)" +!!! example "After going through following Usage Examples, Checkout more of its advanced configurations [here ➶](../../../help/videogear_ex/)"   @@ -31,7 +31,7 @@ limitations under the License. Following is the bare-minimum code you need to access CamGear API with VideoGear: -```python +```python linenums="1" # import required libraries from vidgear.gears import VideoGear import cv2 @@ -76,9 +76,19 @@ stream.stop() Following is the bare-minimum code you need to access PiGear API with VideoGear: -!!! warning "Make sure to [enable Raspberry Pi hardware-specific settings](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) prior using PiGear Backend, otherwise nothing will work." +??? info "Under the hood, PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." -```python hl_lines="6" + However, PiGear API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! 
tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params) user-defined optional parameter boolean attribute." + +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this API, otherwise nothing will work." + +```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import VideoGear import cv2 @@ -125,7 +135,7 @@ The usage example is as follows: !!! warning "The stabilizer might be slower for High-Quality/Resolution videos-frames." -```python hl_lines="7" +```python linenums="1" hl_lines="7" # import required libraries from vidgear.gears import VideoGear import numpy as np @@ -169,12 +179,11 @@ stream_stab.stop() The usage example of VideoGear API with Variable Camera Properties is as follows: -???+ info - This example is basically a VideoGear API implementation of this [CamGear usage example](../../camgear/usage/#using-camgear-with-variable-camera-properties) for controlling its properties _(such as its brightness, saturation, resolution, framerate, gain etc.)_. Thereby, any [CamGear](../../camgear/usage/) or [PiGear](../../pigear/usage/) usage examples can be implemented with VideoGear API in the similar manner. +!!! info "This example demonstrates how to use the VideoGear API in a similar manner to the CamGear's [example](../../camgear/usage/#using-camgear-with-variable-camera-properties) for controlling variable source properties. Any [CamGear usage example](../../camgear/usage/) can be implemented using the VideoGear API in a similar way." !!! 
tip "All the supported Source Tweak Parameters can be found [here ➶](../../camgear/advanced/source_params/#source-tweak-parameters-for-camgear-api)" -```python hl_lines="15" +```python linenums="1" hl_lines="15" # import required libraries from vidgear.gears import VideoGear import cv2 @@ -224,59 +233,120 @@ stream.stop() !!! abstract "VideoGear provides internal access to both CamGear and PiGear APIs, and thereby all additional parameters of [PiGear API](../params/#parameters-with-pigear-backend) or [CamGear API](../params/#parameters-with-camgear-backend) are also easily accessible within VideoGear API." -The usage example of VideoGear API with Variable PiCamera Properties is as follows: +The usage example of VideoGear API with Variable Camera Properties is as follows: -???+ info - This example is basically a VideoGear API implementation of this [PiGear usage example](../../pigear/usage/#using-pigear-with-variable-camera-properties). Thereby, any [CamGear](../../camgear/usage/) or [PiGear](../../pigear/usage/) usage examples can be implemented with VideoGear API in the similar manner. +!!! info "This example demonstrates how to use the VideoGear API in a similar manner to the PiGear's [example](../../pigear/usage/#using-pigear-with-variable-camera-properties) for using variable camera properties. Any [PiGear usage example](../../pigear/usage/) can be implemented using the VideoGear API in a similar way." -!!! warning "Make sure to [enable Raspberry Pi hardware-specific settings](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) prior using PiGear Backend, otherwise nothing will work." +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../../installation/pip_install/#picamera2) for its installation." 
-```python hl_lines="16-18" -# import required libraries -from vidgear.gears import VideoGear -import cv2 +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# add various Picamera tweak parameters to dictionary -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} -# activate enablePiCamera and open pi video stream with defined parameters -stream = VideoGear( - enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options -).start() +=== "New Picamera2 backend" -# loop over -while True: + ```python linenums="1" hl_lines="3 9-13" + # import required libraries + from vidgear.gears import VideoGear + from libcamera import Transform + import cv2 - # read frames from stream - frame = stream.read() + # formulate various Picamera2 API + # configurational parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } - # check for frame if Nonetype - if frame is None: - break + # open pi video stream with defined parameters + stream = VideoGear(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() - # {do something with the frame here} + # loop over + while True: - # Show output window - cv2.imshow("Output Frame", frame) + # read frames from stream + frame = stream.read() - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break + # check for frame if Nonetype + if frame is None: + break -# close output window -cv2.destroyAllWindows() + # {do something with the frame here} -# safely close video stream -stream.stop() -``` + # Show output 
window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` + +=== "Legacy Picamera backend" + + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." + + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params) user-defined optional parameter boolean attribute." 
+ + ```python linenums="1" hl_lines="8-13" + # import required libraries + from vidgear.gears import VideoGear + import cv2 + + # formulate various Picamera API + # configurational parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = VideoGear(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ```   @@ -285,17 +355,16 @@ stream.stop() VideoGear API also supports **Colorspace Manipulation** but **NOT Direct** like other VideoCapture Gears. -!!! danger "Important" +!!! failure "Important: `color_space` global variable is NOT Supported in VideoGear API" * `color_space` global variable is **NOT Supported** in VideoGear API, calling it will result in `AttribueError`. More details can be found [here ➶](../../../bonus/colorspace_manipulation/#source-colorspace-manipulation) - * Any incorrect or None-type value on [`colorspace`](../params/#colorspace) parameter will be skipped automatically. 
In following example code, we will convert source colorspace to [**HSV**](https://en.wikipedia.org/wiki/HSL_and_HSV) on initialization: -```python hl_lines="6" +```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import VideoGear import cv2 diff --git a/docs/gears/webgear/advanced.md b/docs/gears/webgear/advanced.md index b8a0fdbf7..35105f53a 100644 --- a/docs/gears/webgear/advanced.md +++ b/docs/gears/webgear/advanced.md @@ -41,7 +41,7 @@ Let's implement a bare-minimum example using WebGear, where we will be sending [ !!! info "Supported `jpeg_compression_colorspace` colorspace values are `RGB`, `BGR`, `RGBX`, `BGRX`, `XBGR`, `XRGB`, `GRAY`, `RGBA`, `BGRA`, `ABGR`, `ARGB`, `CMYK`. More information can be found [here ➶](https://gitlab.com/jfolz/simplejpeg)" -```python hl_lines="8" +```python linenums="1" hl_lines="8" # import required libraries import uvicorn from vidgear.gears.asyncio import WebGear @@ -83,7 +83,7 @@ WebGear allows you to easily define your own custom Source that you want to use Let's implement a bare-minimum example with a Custom Source using WebGear API and OpenCV: -```python hl_lines="10-34 38" +```python linenums="1" hl_lines="10-34 38" # import necessary libs import uvicorn, asyncio, cv2 from vidgear.gears.asyncio import WebGear @@ -139,7 +139,7 @@ web.shutdown() With our highly extensible WebGear API, you can add your own mounting points, where additional files located, as follows: -```python hl_lines="21-23" +```python linenums="1" hl_lines="21-23" # import libs import uvicorn from starlette.routing import Mount @@ -200,7 +200,7 @@ Suppose we want to add a simple **`hello world` webpage** to our WebGear server. 
Then in our application code, we can integrate this webpage route, as follows: -```python hl_lines="11-14 31" +```python linenums="1" hl_lines="11-14 31" # import libs import uvicorn, asyncio from starlette.templating import Jinja2Templates @@ -258,7 +258,7 @@ For this example, let's use [`CORSMiddleware`](https://www.starlette.io/middlewa !!! tip "Starlette provides several arguments for enabling origins, methods, or headers for CORSMiddleware API. More information can be found [here ➶](https://www.starlette.io/middleware/#corsmiddleware)" -```python hl_lines="21-29" +```python linenums="1" hl_lines="21-29" # import libs import uvicorn, asyncio from starlette.middleware import Middleware diff --git a/docs/gears/webgear/overview.md b/docs/gears/webgear/overview.md index 347acdfb1..e107ac650 100644 --- a/docs/gears/webgear/overview.md +++ b/docs/gears/webgear/overview.md @@ -48,7 +48,7 @@ In layman's terms, WebGear acts as a powerful ==**Video Broadcaster**== that tra ??? note "Customizing default video endpoint path" Starting with vidgear `v0.3.1`, you can change default `/video` video endpoint path to any alphanumeric string value, using [`custom_video_endpoint`](../params/#webgear-specific-attributes) optional string attribute. For example: - !!! error "Only alphanumeric string with no space in between are allowed as `custom_video_endpoint` value. Any other value will be discarded." + !!! failure "Only alphanumeric string with no space in between are allowed as `custom_video_endpoint` value. Any other value will be discarded." !!! warning "WebGear's Default Theme which expects only default `/video` video endpoint path, will fail to work, if it is customized to any other value using this `custom_video_endpoint` attribute." @@ -69,15 +69,16 @@ On initializing WebGear API, it automatically checks for three critical **data f ### Default Location - A _default location_ is the path of the directory where data files/folders are downloaded/generated/saved. 
-- By default, the `.vidgear` the folder at the home directory of your machine _(for e.g `/home/foo/.vidgear` on Linux)_ serves as the _default location_. +- By default, the `.vidgear` the folder at the home directory of your machine _(for e.g `/home/foo/.vidgear` on Linux :material-linux:)_ serves as the _default location_. - But you can also use WebGear's [`custom_data_location`](../params/#webgear-specific-attributes) dictionary attribute to change/alter _default location_ path to somewhere else. - !!! tip - You can set [`logging=True`](../params/#logging) during initialization, for easily identifying the selected _default location_, which will be something like this _(on a Linux machine)_ +!!! tip "Identifying Default location" + You can set [`logging=True`](../params/#logging) during initialization, for easily identifying the selected _default location_, which will be something like this on a Linux :material-linux: machine: + + ```sh + WebGear :: DEBUG :: `/home/foo/.vidgear` is the default location for saving WebGear data-files. + ``` - ```sh - WebGear :: DEBUG :: `/home/foo/.vidgear` is the default location for saving WebGear data-files. - ``` ### Auto-Generation process @@ -85,7 +86,7 @@ On initializing WebGear API, it automatically checks for three critical **data f * You can also force trigger the Auto-generation process to overwrite existing data-files using [`overwrite_default_files`](../params/#webgear-specific-attributes) dictionary attribute. Remember, only downloaded default data files(given above) will be overwritten in this process but any other file/folder will NOT be affected. - * It is advised to enable logging(`logging=True`) on the first run for easily identifying any runtime errors + * It is advised to enable logging(`logging=True`) on the first run for easily identifying any runtime errors. 
- On triggering this process, WebGear API creates `webgear` directory, and `templates` and `static` folders inside along with `js`, `css`, `img` sub-folders at the assigned [_default location_](#default-location). - Thereby at this [_default location_](#default-location), the necessary default data files will be downloaded from a dedicated [**Github Server**](https://github.com/abhiTronix/vidgear-vitals) inside respective folders in the following order: @@ -110,26 +111,12 @@ On initializing WebGear API, it automatically checks for three critical **data f - Finally these downloaded files thereby are verified for errors and API proceeds for instantiating the Starlette application normally. -  - -  - -## Importing - -You can import WebGear API in your program as follows: - -```python -from vidgear.gears.asyncio import WebGear -``` -   -  - ## WebGear's Default Template ??? new "New in v0.2.1" -New Standalone **WebGear's Default Theme** was added in `v0.2.1`. + New Standalone **WebGear's Default Theme** was added in `v0.2.1`. The WebGear API by default uses simple & elegant [**WebGear's Default Theme**](https://github.com/abhiTronix/vidgear-vitals#webgear-default-theme) which looks like something as follows: @@ -159,7 +146,7 @@ _Appears when an API Error is encountered:_ WebGear default 500 page -  +  ## Usage Examples diff --git a/docs/gears/webgear/params.md b/docs/gears/webgear/params.md index 087a5cf31..b20e2330d 100644 --- a/docs/gears/webgear/params.md +++ b/docs/gears/webgear/params.md @@ -69,7 +69,7 @@ This parameter can be used to pass user-defined parameter to WebGear API by form ??? new "New in v0.3.1" `custom_video_endpoint` attribute was added in `v0.3.1`. - !!! error "Only alphanumeric string with no space in between are allowed as `custom_video_endpoint` value. Any other value will be discarded." + !!! failure "Only alphanumeric string with no space in between are allowed as `custom_video_endpoint` value. Any other value will be discarded." !!! 
warning "WebGear's Default Theme which expects only default `/video` video endpoint path, will fail to work, if it is customized to any other value using this `custom_video_endpoint` attribute." @@ -393,9 +393,7 @@ WebGear(source=0, **options) ### **`camera_num`** -This parameter selects the camera module index which will be used as source, if you're having multiple camera modules connected. Its value can only be greater than zero, otherwise, it will throw `ValueError` for any negative value. - -!!! warning "This parameter shouldn't be altered, until unless you using [Raspberry Pi 3/3+ Compute Module IO Board](https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md)."" +This parameter selects the camera index to be used as the source, allowing you to drive multiple cameras simultaneously from within a single Python session. Its value can only be zero or greater, otherwise, WebGear API will throw `ValueError` for any negative value. **Data-Type:** Integer @@ -404,18 +402,23 @@ This parameter selects the camera module index which will be used as source, if **Usage:** ```python -WebGear(enablePiCamera=True, camera_num=0) +# select Camera Module at index `1` +WebGear(enablePiCamera=True, camera_num=1) ``` + +!!! example "The complete usage example demonstrating the usage of the `camera_num` parameter is available [here ➶](../../../help/pigear_ex/#accessing-multiple-camera-through-its-index-in-pigear-api)." +   ### **`resolution`** -This parameter sets the resolution (i.e. `(width,height)`) of the source. +This parameter controls the **resolution** - a tuple _(i.e. `(width,height)`)_ of two values giving the width and height of the output frames. -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.resolution)" +!!! warning "Make sure both width and height values are at least `64`." +!!! 
danger "When using the Picamera2 backend, the `resolution` parameter will be **OVERRIDDEN**, if the user explicitly defines the `output_size` property of the [`sensor`](#a-configurational-camera-parameters) configurational parameter." **Data-Type:** Tuple @@ -431,11 +434,8 @@ WebGear(enablePiCamera=True, resolution=(1280,720)) # sets 1280x720 resolution ### **`framerate`** -This parameter sets the framerate of the source. - - -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.framerate)" +This parameter controls the framerate of the source. **Data-Type:** integer/float @@ -452,7 +452,18 @@ WebGear(enablePiCamera=True, framerate=60) # sets 60fps framerate ### **`options`** -This parameter provides the ability to alter various **Tweak Parameters** `like brightness, saturation, senor_mode, resolution, etc.` available within [**Picamera library**](https://picamera.readthedocs.io/en/release-1.13/api_camera.html). +This dictionary parameter in the internal PiGear API backend allows you to control various camera settings for both the `picamera2` and legacy `picamera` backends and some internal API tasks. These settings include: + +#### A. Configurational Camera Parameters +- [x] These parameters are provided by the underlying backend library _(depending upon backend in use)_, and must be applied to the camera system before the camera can be started. +- [x] **These parameters include:** _Brightness, Contrast, Saturation, Exposure, Colour Temperature, Colour Gains, etc._ +- [x] All supported parameters are listed in this [Usage example ➶](../../pigear/usage/#using-pigear-with-variable-camera-properties) + + +#### B. User-defined Parameters +- [x] These user-defined parameters control specific internal behaviors of the API and perform certain tasks on the camera objects. 
+- [x] All supported User-defined Parameters are listed [here ➶](../../pigear/params/#b-user-defined-parameters) + **Data-Type:** Dictionary @@ -460,32 +471,41 @@ This parameter provides the ability to alter various **Tweak Parameters** `like **Usage:** -!!! tip "All supported parameters are listed in [PiCamera Docs](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" +!!! example "The complete usage example demonstrating the usage of the `options` parameter is available [here ➶](../usage/#using-pigear-with-variable-camera-properties)." -The desired parameters can be passed to WebGear API by formatting them as this parameter's attributes, as follows: - -```python -# formatting parameters as dictionary attributes -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} -# assigning it -WebGear(enablePiCamera=True, logging=True, **options) -``` +You can format these user-defined and configurational parameters as attributes of this `options` dictionary parameter as follows: -**User-specific attributes:** +=== "New Picamera2 backend" -Additionally, `options` parameter also support some User-specific attributes, which are as follows: + ```python + # formulate various Picamera2 API parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "exposure_compensation": 15, + "sensor": {"output_size": (480, 320)}, # !!! will override `resolution` !!! + } + + # open pi video stream with defined parameters + stream = WebGear(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() + ``` -* **`HWFAILURE_TIMEOUT`** (float): PiGear contains ==Threaded Internal Timer== - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur at a timeout value. This parameter can be used to control that timeout value i.e. 
the maximum waiting time _(in seconds)_ after which PiGear exits with a `SystemError` to save resources. Its value can only be between `1.0` _(min)_ and `10.0` _(max)_ and its default value is `2.0`. Its usage is as follows: +=== "Legacy Picamera backend" ```python - options = {"HWFAILURE_TIMEOUT": 2.5} # sets timeout to 2.5 seconds + # formulate various Picamera API parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = WebGear(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() ```   diff --git a/docs/gears/webgear/usage.md b/docs/gears/webgear/usage.md index 7673f739b..cb7267e25 100644 --- a/docs/gears/webgear/usage.md +++ b/docs/gears/webgear/usage.md @@ -91,7 +91,7 @@ You can access and run WebGear VideoStreamer Server programmatically in your pyt !!! tip "For accessing WebGear on different Client Devices on the network, use `"0.0.0.0"` as host value instead of `"localhost"` on Host Machine. More information can be found [here ➶](../../../help/webgear_faqs/#is-it-possible-to-stream-on-a-different-device-on-the-network-with-webgear)" -```python hl_lines="7-10" +```python linenums="1" hl_lines="7-10" # import required libraries import uvicorn from vidgear.gears.asyncio import WebGear diff --git a/docs/gears/webgear_rtc/advanced.md b/docs/gears/webgear_rtc/advanced.md index 28bdbbbc4..08b313105 100644 --- a/docs/gears/webgear_rtc/advanced.md +++ b/docs/gears/webgear_rtc/advanced.md @@ -38,7 +38,7 @@ Let's implement a bare-minimum example using WebGear_RTC as Real-time Broadcaste !!! tip "For accessing WebGear_RTC on different Client Devices on the network, we use `"0.0.0.0"` as host value instead of `"localhost"` on Host Machine. 
More information can be found [here ➶](../../../help/webgear_rtc_faqs/#is-it-possible-to-stream-on-a-different-device-on-the-network-with-webgear_rtc)" -```python hl_lines="8" +```python linenums="1" hl_lines="8" # import required libraries import uvicorn from vidgear.gears.asyncio import WebGear_RTC @@ -83,7 +83,7 @@ Let's implement a bare-minimum example with a Custom Source using WebGear_RTC AP See this [example ➶](../../../help/screengear_ex/#using-screengear-with-webgear_rtc) for more information. -```python hl_lines="6-54 58" +```python linenums="1" hl_lines="6-54 58" # import necessary libs import uvicorn, cv2 from vidgear.gears.asyncio import WebGear_RTC @@ -163,7 +163,7 @@ web.shutdown() With our highly extensible WebGear_RTC API, you can add your own mounting points, where additional files located, as follows: -```python hl_lines="18-20" +```python linenums="1" hl_lines="18-20" # import libs import uvicorn from starlette.routing import Mount @@ -221,7 +221,7 @@ Suppose we want to add a simple **`hello world` webpage** to our WebGear_RTC ser Then in our application code, we can integrate this webpage route, as follows: -```python hl_lines="11-14 28" +```python linenums="1" hl_lines="11-14 28" # import libs import uvicorn, asyncio from starlette.templating import Jinja2Templates @@ -276,7 +276,7 @@ For this example, let's use [`CORSMiddleware`](https://www.starlette.io/middlewa !!! tip "Starlette provides several arguments for enabling origins, methods, or headers for CORSMiddleware API. 
More information can be found [here ➶](https://www.starlette.io/middleware/#corsmiddleware)" -```python hl_lines="18-26" +```python linenums="1" hl_lines="18-26" # import libs import uvicorn, asyncio from starlette.middleware import Middleware diff --git a/docs/gears/webgear_rtc/overview.md b/docs/gears/webgear_rtc/overview.md index 35ff7b49e..7719d6098 100644 --- a/docs/gears/webgear_rtc/overview.md +++ b/docs/gears/webgear_rtc/overview.md @@ -52,15 +52,16 @@ Same as [WebGear](../../webgear_rtc/overview/), WebGear_RTC API automatically ch ### Default Location * A _default location_ is the path of the directory where data files/folders are downloaded/generated/saved. -* By default, the `.vidgear` the folder at the home directory of your machine _(for e.g `/home/foo/.vidgear` on Linux)_ serves as the _default location_. +* By default, the `.vidgear` the folder at the home directory of your machine _(for e.g `/home/foo/.vidgear` on Linux :material-linux:)_ serves as the _default location_. * But you can also use WebGear_RTC's [`custom_data_location`](../params/#webgear_rtc-specific-attributes) dictionary attribute to change/alter *default location* path to somewhere else. - !!! tip - You can set [`logging=True`](../params/#logging) during initialization, for easily identifying the selected _default location_, which will be something like this _(on a Linux machine)_: +!!! tip "Identifying Default location" + You can set [`logging=True`](../params/#logging) during initialization, for easily identifying the selected _default location_, which will be something like this on a Linux :material-linux: machine: + + ```sh + WebGear_RTC :: DEBUG :: `/home/foo/.vidgear` is the default location for saving WebGear_RTC data-files. + ``` - ```sh - WebGear_RTC :: DEBUG :: `/home/foo/.vidgear` is the default location for saving WebGear_RTC data-files. 
- ``` ### Auto-Generation process @@ -68,7 +69,7 @@ Same as [WebGear](../../webgear_rtc/overview/), WebGear_RTC API automatically ch * You can also force trigger the Auto-generation process to overwrite existing data-files using [`overwrite_default_files`](../params/#webgear_rtc-specific-attributes) dictionary attribute. Remember, only downloaded default data files(given above) will be overwritten in this process but any other file/folder will NOT be affected. - * It is advised to enable logging(`logging=True`) on the first run for easily identifying any runtime errors + * It is advised to enable logging(`logging=True`) on the first run for easily identifying any runtime errors. * On triggering this process, WebGear_RTC API creates `webgear_rtc` directory, and `templates` and `static` folders inside along with `js`, `css`, `img` sub-folders at the assigned [*default location*](#default-location). @@ -94,23 +95,8 @@ Same as [WebGear](../../webgear_rtc/overview/), WebGear_RTC API automatically ch * Finally these downloaded files thereby are verified for errors and API proceeds for instantiating the Starlette application normally. - -  - -  - -## Importing - -You can import WebGear_RTC API in your program as follows: - -```python -from vidgear.gears.asyncio import WebGear_RTC -``` -   -  - ## WebGear_RTC's Default Template The WebGear_RTC API by default uses simple & elegant [**WebGear_RTC's Default Theme**](https://github.com/abhiTronix/vidgear-vitals#webgear_rtc-default-theme) which looks like something as follows: @@ -143,7 +129,7 @@ The WebGear_RTC API by default uses simple & elegant [**WebGear_RTC's Default Th WebGear_RTC default 500 page -  +  ## Usage Examples @@ -165,7 +151,6 @@ The WebGear_RTC API by default uses simple & elegant [**WebGear_RTC's Default Th See here 🚀
- ## FAQs
diff --git a/docs/gears/webgear_rtc/params.md b/docs/gears/webgear_rtc/params.md index 8abfaa666..324999835 100644 --- a/docs/gears/webgear_rtc/params.md +++ b/docs/gears/webgear_rtc/params.md @@ -358,9 +358,7 @@ WebGear_RTC(source=0, **options) ### **`camera_num`** -This parameter selects the camera module index which will be used as source, if you're having multiple camera modules connected. Its value can only be greater than zero, otherwise, it will throw `ValueError` for any negative value. - -!!! warning "This parameter shouldn't be altered, until unless you using [Raspberry Pi 3/3+ Compute Module IO Board](https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md)."" +This parameter selects the camera index to be used as the source, allowing you to drive multiple cameras simultaneously from within a single Python session. Its value can only be zero or greater, otherwise, WebGear_RTC API will throw `ValueError` for any negative value. **Data-Type:** Integer @@ -369,18 +367,23 @@ This parameter selects the camera module index which will be used as source, if **Usage:** ```python -WebGear_RTC(enablePiCamera=True, camera_num=0) +# select Camera Module at index `1` +WebGear_RTC(enablePiCamera=True, camera_num=1) ``` + +!!! example "The complete usage example demonstrating the usage of the `camera_num` parameter is available [here ➶](../../../help/pigear_ex/#accessing-multiple-camera-through-its-index-in-pigear-api)." +   ### **`resolution`** -This parameter sets the resolution (i.e. `(width,height)`) of the source. +This parameter controls the **resolution** - a tuple _(i.e. `(width,height)`)_ of two values giving the width and height of the output frames. -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.resolution)" +!!! warning "Make sure both width and height values are at least `64`." +!!! 
danger "When using the Picamera2 backend, the `resolution` parameter will be **OVERRIDDEN**, if the user explicitly defines the `output_size` property of the [`sensor`](#a-configurational-camera-parameters) configurational parameter." **Data-Type:** Tuple @@ -396,11 +399,8 @@ WebGear_RTC(enablePiCamera=True, resolution=(1280,720)) # sets 1280x720 resoluti ### **`framerate`** -This parameter sets the framerate of the source. - - -!!! info "For more information read [here ➶](https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera.PiCamera.framerate)" +This parameter controls the framerate of the source. **Data-Type:** integer/float @@ -417,7 +417,17 @@ WebGear_RTC(enablePiCamera=True, framerate=60) # sets 60fps framerate ### **`options`** -This parameter provides the ability to alter various **Tweak Parameters** `like brightness, saturation, senor_mode, resolution, etc.` available within [**Picamera library**](https://picamera.readthedocs.io/en/release-1.13/api_camera.html). +This dictionary parameter in the internal PiGear API backend allows you to control various camera settings for both the `picamera2` and legacy `picamera` backends and some internal API tasks. These settings include: + +#### A. Configurational Camera Parameters +- [x] These parameters are provided by the underlying backend library _(depending upon backend in use)_, and must be applied to the camera system before the camera can be started. +- [x] **These parameters include:** _Brightness, Contrast, Saturation, Exposure, Colour Temperature, Colour Gains, etc._ +- [x] All supported parameters are listed in this [Usage example ➶](../../pigear/usage/#using-pigear-with-variable-camera-properties) + + +#### B. User-defined Parameters +- [x] These user-defined parameters control specific internal behaviors of the API and perform certain tasks on the camera objects. 
+- [x] All supported User-defined Parameters are listed [here ➶](../../pigear/params/#b-user-defined-parameters) **Data-Type:** Dictionary @@ -425,32 +435,41 @@ This parameter provides the ability to alter various **Tweak Parameters** `like **Usage:** -!!! tip "All supported parameters are listed in [PiCamera Docs](https://picamera.readthedocs.io/en/release-1.13/api_camera.html)" +!!! example "The complete usage example demonstrating the usage of the `options` parameter is available [here ➶](../usage/#using-pigear-with-variable-camera-properties)." -The desired parameters can be passed to WebGear_RTC API by formatting them as this parameter's attributes, as follows: +You can format these user-defined and configurational parameters as attributes of this `options` dictionary parameter as follows: -```python -# formatting parameters as dictionary attributes -options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} -# assigning it -WebGear_RTC(enablePiCamera=True, logging=True, **options) -``` +=== "New Picamera2 backend" -**User-specific attributes:** - -Additionally, `options` parameter also support some User-specific attributes, which are as follows: + ```python + # formulate various Picamera2 API parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "exposure_compensation": 15, + "sensor": {"output_size": (480, 320)}, # !!! will override `resolution` !!! + } + + # open pi video stream with defined parameters + stream = WebGear_RTC(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() + ``` -* **`HWFAILURE_TIMEOUT`** (float): PiGear contains ==Threaded Internal Timer== - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur at a timeout value. This parameter can be used to control that timeout value i.e. 
the maximum waiting time _(in seconds)_ after which PiGear exits with a `SystemError` to save resources. Its value can only be between `1.0` _(min)_ and `10.0` _(max)_ and its default value is `2.0`. Its usage is as follows: +=== "Legacy Picamera backend" ```python - options = {"HWFAILURE_TIMEOUT": 2.5} # sets timeout to 2.5 seconds + # formulate various Picamera API parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # open pi video stream with defined parameters + stream = WebGear_RTC(enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options).start() ```   diff --git a/docs/gears/webgear_rtc/usage.md b/docs/gears/webgear_rtc/usage.md index 747074a03..6a7d1a147 100644 --- a/docs/gears/webgear_rtc/usage.md +++ b/docs/gears/webgear_rtc/usage.md @@ -35,7 +35,7 @@ WebGear_RTC API is the part of `asyncio` package of VidGear, thereby you need to Must Required with WebGear_RTC API. You can easily install it via pip: -??? error "Microsoft Visual C++ 14.0 is required." +??? failure "Microsoft Visual C++ 14.0 is required." Installing `aiortc` on windows requires Microsoft Build Tools for Visual C++ libraries installed. You can easily fix this error by installing any **ONE** of these choices: @@ -76,7 +76,7 @@ You can access and run WebGear_RTC VideoStreamer Server programmatically in your !!! info "We are using `frame_size_reduction` attribute for frame size reduction _(in percentage)_ to be streamed with its [`options`](../params/#options) dictionary parameter to cope with performance-throttling in this example." 
-```python hl_lines="7" +```python linenums="1" hl_lines="7" # import required libraries import uvicorn from vidgear.gears.asyncio import WebGear_RTC diff --git a/docs/gears/writegear/compression/advanced/cciw.md b/docs/gears/writegear/compression/advanced/cciw.md index 9580e43da..88694495a 100644 --- a/docs/gears/writegear/compression/advanced/cciw.md +++ b/docs/gears/writegear/compression/advanced/cciw.md @@ -82,7 +82,7 @@ execute_ffmpeg_cmd(ffmpeg_command) In this example, we will extract and save audio from a URL stream: -```python hl_lines="13-18 21" +```python linenums="1" hl_lines="13-18 21" # import required libraries from vidgear.gears import WriteGear @@ -126,7 +126,7 @@ In this example, we will merge audio with video: * Both these Audio and Video files are compatible. -```python hl_lines="59-75 78" +```python linenums="1" hl_lines="59-75 78" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import WriteGear diff --git a/docs/gears/writegear/compression/advanced/ffmpeg_install.md b/docs/gears/writegear/compression/advanced/ffmpeg_install.md index be96e4db5..9b93165e1 100644 --- a/docs/gears/writegear/compression/advanced/ffmpeg_install.md +++ b/docs/gears/writegear/compression/advanced/ffmpeg_install.md @@ -27,7 +27,7 @@ limitations under the License. WriteGear must requires FFmpeg executables for its Compression capabilities in Compression Mode. You can following machine-specific instructions for its installation: -!!! error "In case WriteGear API fails to detect valid FFmpeg executables on your system _(even if Compression Mode is enabled)_, it automatically fallbacks to [Non-Compression Mode](../../../non_compression/overview/)." +!!! failure "In case WriteGear API fails to detect valid FFmpeg executables on your system _(even if Compression Mode is enabled)_, it automatically fallbacks to [Non-Compression Mode](../../../non_compression/overview/)."   
diff --git a/docs/gears/writegear/compression/params.md b/docs/gears/writegear/compression/params.md index d2949d451..b5724db2e 100644 --- a/docs/gears/writegear/compression/params.md +++ b/docs/gears/writegear/compression/params.md @@ -118,7 +118,7 @@ This parameter allows us to exploit almost all FFmpeg supported parameters effor !!! warning "While providing additional av-source with `-i` FFmpeg parameter in `output_params` make sure it don't interfere with WriteGear's frame pipeline otherwise it will break things!" - !!! error "All ffmpeg parameters are case-sensitive. Remember to double check every parameter if any error occurs." + !!! failure "All ffmpeg parameters are case-sensitive. Remember to double check every parameter if any error occurs." !!! tip "Kindly check [H.264 docs ➶](https://trac.ffmpeg.org/wiki/Encode/H.264) and other [FFmpeg Docs ➶](https://ffmpeg.org/documentation.html) for more information on these parameters" @@ -188,9 +188,9 @@ This parameter allows us to exploit almost all FFmpeg supported parameters effor output_params = {"-disable_ffmpeg_window": True} # disables FFmpeg creation window ``` - * **`-disable_force_termination`** _(bool)_: sets a special flag to manually disable the default forced-termination behaviour in WriteGear API when `-i` FFmpeg parameter is used _(For more details, see issue: #149)_. Its usage is as follows: + * **`-disable_force_termination`** _(bool)_: sets a special flag to manually disable the default forced termination of FFmpeg process in WriteGear API when `-i` FFmpeg parameter is used _(For more details, see issue: #149)_. Its usage is as follows: - !!! warning "`-disable_force_termination` flag is a absolute necessity when video duration is too short(<60sec), otherwise WriteGear will not produce any valid output." + !!! warning "The `-disable_force_termination` flag is an absolute necessity when video duration is too short(`< 60sec`), otherwise WriteGear may produce invalid or no output."
```python output_params = {"-disable_force_termination": True} # disable the default forced-termination behaviour diff --git a/docs/gears/writegear/compression/usage.md b/docs/gears/writegear/compression/usage.md index b263d06a0..d1c9256d8 100644 --- a/docs/gears/writegear/compression/usage.md +++ b/docs/gears/writegear/compression/usage.md @@ -29,11 +29,11 @@ limitations under the License. * **DO NOT** feed frames with different dimensions or channels to WriteGear, otherwise WriteGear will exit with `ValueError`. - * While providing additional av-source with `-i` FFmpeg parameter in `output_params` make sure it don't interfere with WriteGear's frame pipeline otherwise it will break things! + * When using the `-i` FFmpeg parameter in `output_params` to provide an additional audio or video source, ensure it **DOES NOT** interfere with WriteGear's internal frame pipeline. Interference can cause the pipeline to break. - * Use [`-disable_force_termination`](../params/#supported-parameters) flag when video duration is too short(<60sec), otherwise WriteGear will not produce any valid output. + * To ensure WriteGear produces valid output when using an additional stream `-i` parameter with videos shorter than `60` seconds, use the [`-disable_force_termination`](../params/#supported-parameters) flag. - * Heavy resolution multimedia files take time to render which can last up to _0.1-1 seconds_. Kindly wait till the WriteGear API terminates itself, and **DO NOT** try to kill the process instead. + * Encoding heavy resolution multimedia files can take up to _~0.2 to 2 seconds_. Please wait for the WriteGear API to terminate itself and **DO NOT** kill the process manually. * Always use `writer.close()` at the very end of the main code. **NEVER USE IT INBETWEEN CODE** to avoid undesired behavior. @@ -47,7 +47,7 @@ limitations under the License. 
Following is the bare-minimum code you need to get started with WriteGear API in Compression Mode: -```python +```python linenums="1" hl_lines="10 25 42" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -100,7 +100,7 @@ In Compression Mode, WriteGear API contains [`rgb_mode`](../../../../bonus/refer The complete usage example is as follows: -```python hl_lines="26" +```python linenums="1" hl_lines="26" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import WriteGear @@ -168,7 +168,7 @@ WriteGear API provides [`-input_framerate`](../params/#supported-parameters) at In this code we will retrieve framerate from video stream, and set it as `-input_framerate` attribute for `option` parameter in WriteGear API: -```python hl_lines="10" +```python linenums="1" hl_lines="10 13" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -233,7 +233,7 @@ In this example, we will stream live camera frames directly to Twitch :fontaweso !!! alert "Make sure to change [_Twitch Stream Key_](https://www.youtube.com/watch?v=xwOtOfPMIIk) with yours in following code before running!" -```python hl_lines="11-16 20 24" +```python linenums="1" hl_lines="11-16 20 24" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -324,7 +324,7 @@ In this example, we will be using `h264_vaapi` as our hardware encoder and also ``` -```python hl_lines="11-13" +```python linenums="1" hl_lines="11-13" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -382,7 +382,7 @@ writer.close() You can easily use WriterGear API directly with any Video Processing library(_For e.g OpenCV itself_) in Compression Mode. 
The complete usage example is as follows: -```python hl_lines="6" +```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import WriteGear import cv2 @@ -489,7 +489,7 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W } ``` - !!! fail "If audio still doesn't work then [checkout this troubleshooting guide ➶](https://www.maketecheasier.com/fix-microphone-not-working-windows10/) or reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then [checkout this troubleshooting guide ➶](https://www.maketecheasier.com/fix-microphone-not-working-windows10/) or reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" === ":material-linux: Linux" @@ -535,7 +535,7 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W } ``` - !!! fail "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" === ":material-apple: MacOS" @@ -577,14 +577,14 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W } ``` - !!! fail "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" !!! danger "Make sure this `-i` audio-source it compatible with provided video-source, otherwise you could encounter multiple errors or no output at all." !!! warning "You **MUST** use [`-input_framerate`](../params/#supported-parameters) attribute to set exact value of input framerate when using external audio in Real-time Frames mode, otherwise audio delay will occur in output streams." 
-```python hl_lines="11-15" +```python linenums="1" hl_lines="11-16" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import WriteGear diff --git a/docs/gears/writegear/introduction.md b/docs/gears/writegear/introduction.md index 3649ccc05..e1e353f80 100644 --- a/docs/gears/writegear/introduction.md +++ b/docs/gears/writegear/introduction.md @@ -59,16 +59,6 @@ WriteGear primarily operates in following modes:   -## Importing - -You can import WriteGear API in your program as follows: - -```python -from vidgear.gears import WriteGear -``` - -  - ## FAQs
diff --git a/docs/gears/writegear/non_compression/usage.md b/docs/gears/writegear/non_compression/usage.md index 71c4c5dce..d76c82457 100644 --- a/docs/gears/writegear/non_compression/usage.md +++ b/docs/gears/writegear/non_compression/usage.md @@ -40,7 +40,7 @@ limitations under the License. Following is the bare-minimum code you need to get started with WriteGear API in Non-Compression Mode: -```python +```python linenums="1" hl_lines="10" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -97,7 +97,7 @@ In Non-Compression mode, WriteGear API provides flexible control over [**OpenCV' The complete usage example is as follows: -```python +```python linenums="1" hl_lines="7 15" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import WriteGear @@ -109,12 +109,12 @@ output_params = {"-fourcc": "MJPG", "-fps": 30} # open live video stream on webcam at first index(i.e. 0) device stream = VideoGear(source=0, logging=True).start() -# Define writer with defined parameters and suitable output filename for e.g. `Output.mp4` +# Define writer with defined parameters and suitable output filename +# for e.g. `Output.mp4` writer = WriteGear( output="Output.mp4", compression_mode=False, logging=True, **output_params ) - # loop over while True: @@ -159,7 +159,7 @@ writer.close() You can easily use WriterGear API directly with any Video Processing library(_For e.g OpenCV itself_) in Non-Compression Mode. The complete usage example is as follows: -```python +```python linenums="1" hl_lines="9 21 46" # import required libraries from vidgear.gears import WriteGear import cv2 @@ -170,7 +170,8 @@ output_params = {"-fourcc": "MJPG", "-fps": 30} # Open suitable video stream, such as webcam on first index(i.e. 0) stream = cv2.VideoCapture(0) -# Define writer with defined parameters and suitable output filename for e.g. 
`Output.mp4` +# Define writer with defined parameters and suitable output filename +# for e.g. `Output.mp4` writer = WriteGear( output="Output.mp4", compression_mode=False, logging=True, **output_params ) @@ -227,7 +228,7 @@ writer.close() In this example we will be constructing GStreamer pipeline to write video-frames into a file(`foo.mp4`) at 1M video-bitrate. -```python +```python linenums="1" hl_lines="12-14" # import required libraries from vidgear.gears import WriteGear import cv2 diff --git a/docs/help.md b/docs/help.md index 08b1874fa..61618c220 100644 --- a/docs/help.md +++ b/docs/help.md @@ -61,25 +61,13 @@ You can try helping solving those issues, or give valuable feedback/review on ne   - -## :material-twitter: Tweet about VidGear - -Tweet about VidGear and Spread the word 🗣: - - - -Let others know how you are using VidGear and why you like it! - -  - - ## :fontawesome-solid-gift: Helping Author > Donations help keep VidGear's development alive and motivate me _(as author)_. :heart:{ .heart } It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference :slight_smile: - +Buy Me a Coffee at ko-fi.com Thanks a million! 
:blush: @@ -94,7 +82,6 @@ You can connect with me, the author 👋: ![Author Image](https://avatars.githubusercontent.com/u/34266896?v=4){ align=left width="160" loading=lazy } * Follow author on GitHub: [![GitHub follow](https://img.shields.io/github/followers/abhiTronix?label=Follow%20%40abhiTronix&logo=github&style=flat-square)](https://github.com/abhiTronix) -* Follow author on Twitter: * Get in touch with author on Linkedin: [![Linkedin follow](https://img.shields.io/badge/Follow-@Abhishek Thakur-orange.svg?logo=linkedin&style=flat-square)](https://in.linkedin.com/in/abhishek-abhitronix?trk=profile-badge) diff --git a/docs/help/camgear_ex.md b/docs/help/camgear_ex.md index af72f74d2..096653388 100644 --- a/docs/help/camgear_ex.md +++ b/docs/help/camgear_ex.md @@ -28,7 +28,7 @@ In this example both streams and corresponding frames will be processed synchron !!! danger "Using same source with more than one instances of CamGear can lead to [Global Interpreter Lock (GIL)](https://wiki.python.org/moin/GlobalInterpreterLock#:~:text=In%20CPython%2C%20the%20global%20interpreter,conditions%20and%20ensures%20thread%20safety.&text=The%20GIL%20can%20degrade%20performance%20even%20when%20it%20is%20not%20a%20bottleneck.) that degrades performance even when it is not a bottleneck." -```python +```python linenums="1" # import required libraries from vidgear.gears import CamGear import cv2 @@ -89,7 +89,7 @@ The complete usage example is as follows: !!! 
tip "More information on `STREAM_RESOLUTION` & `STREAM_PARAMS` attributes can be found [here ➶](../../gears/camgear/advanced/source_params/#exclusive-camgear-parameters)" -```python hl_lines="6" +```python linenums="1" hl_lines="6" # import required libraries from vidgear.gears import CamGear import cv2 @@ -147,7 +147,7 @@ Here's a high-level wrapper code around CamGear API to enable auto-reconnection You can easily enforce UDP for RTSP streams inplace of default TCP, by putting following lines of code on the top of your existing code: - ```python + ```python # import required libraries import os @@ -158,7 +158,7 @@ Here's a high-level wrapper code around CamGear API to enable auto-reconnection Finally, use [`backend`](../../gears/camgear/params/#backend) parameter value as `backend=cv2.CAP_FFMPEG` in CamGear. -```python +```python linenums="1" from vidgear.gears import CamGear import cv2 import datetime diff --git a/docs/help/camgear_faqs.md b/docs/help/camgear_faqs.md index 208094513..53ca2aabd 100644 --- a/docs/help/camgear_faqs.md +++ b/docs/help/camgear_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # CamGear FAQs -  +  ## What is CamGear API and what does it do? @@ -30,7 +30,7 @@ limitations under the License. ## I'm only familiar with OpenCV, how to get started with CamGear API? -**Answer:** First, see [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis), then go through [CamGear doc](../../gears/camgear/overview/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [CamGear documentation ➶](../../gears/camgear/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   @@ -53,15 +53,17 @@ limitations under the License.
=== ":material-linux: Linux" - - [x] **Follow [this tutorial ➶](https://medium.com/@galaktyk01/how-to-build-opencv-with-gstreamer-b11668fa09c)** + - [x] **Compile manually:** **Follow [this tutorial ➶](https://medium.com/@galaktyk01/how-to-build-opencv-with-gstreamer-b11668fa09c)** + + - [x] **Compile using Pip:** **Follow [this GitHub issue ➶](https://github.com/opencv/opencv-python/issues/530)** === ":fontawesome-brands-windows: Windows" - - [x] **Follow [this tutorial ➶](https://medium.com/@galaktyk01/how-to-build-opencv-with-gstreamer-b11668fa09c)** + - [x] **Compile manually:** **Follow [this tutorial ➶](https://medium.com/@galaktyk01/how-to-build-opencv-with-gstreamer-b11668fa09c)** === ":material-apple: MacOS" - - [x] **Follow [this tutorial ➶](https://medium.com/testinium-tech/how-to-install-opencv-with-java-and-gstreamer-support-on-macos-c3c7b28d2864)** + - [x] **Compile manually:** **Follow [this tutorial ➶](https://medium.com/testinium-tech/how-to-install-opencv-with-java-and-gstreamer-support-on-macos-c3c7b28d2864)**   diff --git a/docs/help/general_faqs.md b/docs/help/general_faqs.md index dbf4dd344..e1cefd142 100644 --- a/docs/help/general_faqs.md +++ b/docs/help/general_faqs.md @@ -20,9 +20,7 @@ limitations under the License. # General FAQs - -  - +  ## "I'm new to Python Programming or its usage in OpenCV Library", How to use vidgear in my projects? @@ -55,11 +53,11 @@ Once done, visit [Switching from OpenCV ➶](../../switch_from_cv/) to easily re ## How to log to a file in VidGear? -**Answer:** VidGear provides exclusive **`VIDGEAR_LOGFILE`** environment variable to enable logging to a file while logging is enabled _(i.e. `logging=True`)_ on respective Gear. You just have to set ==directory pathname _(automatically creates `vidgear.log` file)_== or a ==log file pathname== itself as value for this environment variable. 
This can be done on various platfroms/OSes as follows: +**Answer:** VidGear provides exclusive **`VIDGEAR_LOGFILE`** environment variable to enable logging to a file while logging is enabled _(i.e. `logging=True`)_ on respective Gear. You just have to set ==directory pathname _(automatically creates `vidgear.log` file)_== or a ==log file pathname== itself as value for this environment variable. This can be done on various Operating Systems as follows: !!! info "Remember enabling this logging to a file will completely disable any output on the terminal." -=== "Linux OS" +=== ":material-linux: Linux" ```sh # path to file @@ -73,7 +71,7 @@ Once done, visit [Switching from OpenCV ➶](../../switch_from_cv/) to easily re unset VIDGEAR_LOGFILE ``` -=== "Windows OS (Powershell)" +=== ":fontawesome-brands-windows: Windows (Powershell)" ```powershell # path to file @@ -87,7 +85,7 @@ Once done, visit [Switching from OpenCV ➶](../../switch_from_cv/) to easily re $Env:VIDGEAR_LOGFILE = "" ``` -=== "OSX/Mac OS" +=== ":material-apple: MacOS" ```sh # path to file diff --git a/docs/help/netgear_async_ex.md b/docs/help/netgear_async_ex.md index 1f759b84e..1b7110e2c 100644 --- a/docs/help/netgear_async_ex.md +++ b/docs/help/netgear_async_ex.md @@ -20,7 +20,7 @@ limitations under the License. # NetGear_Async Examples -  +  ## Using NetGear_Async with WebGear @@ -41,7 +41,7 @@ Open a terminal on Client System where you want to display the input frames _(an !!! note "Note down the IP-address of this system _(required at Server's end)_ by executing the `hostname -I` command and also replace it in the following code."" -```python +```python linenums="1" hl_lines="16-32 40 43 46" # import libraries from vidgear.gears.asyncio import NetGear_Async from vidgear.gears.asyncio import WebGear @@ -104,7 +104,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! note "Replace the IP address in the following code with Client's IP address you noted earlier." 
-```python +```python linenums="1" # import library from vidgear.gears.asyncio import NetGear_Async import cv2, asyncio diff --git a/docs/help/netgear_async_faqs.md b/docs/help/netgear_async_faqs.md index 289ca8d29..8fa72bcf7 100644 --- a/docs/help/netgear_async_faqs.md +++ b/docs/help/netgear_async_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # NetGear_Async FAQs -  +  ## What is NetGear_Async API and what does it do? @@ -30,7 +30,7 @@ limitations under the License. ## How to get started with NetGear_Async API? -**Answer:** See [NetGear_Async doc ➶](../../gears/netgear_async/overview/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [NetGear_Async documentation ➶](../../gears/netgear_async/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   diff --git a/docs/help/netgear_ex.md b/docs/help/netgear_ex.md index 67cff187a..375198689 100644 --- a/docs/help/netgear_ex.md +++ b/docs/help/netgear_ex.md @@ -20,7 +20,7 @@ limitations under the License. # NetGear Examples -  +  ## Using NetGear with WebGear @@ -41,7 +41,7 @@ Open a terminal on Client System where you want to display the input frames _(an !!! info "Note down the local IP-address of this system (required at Server's end) and also replace it in the following code. You can follow [this FAQ](../netgear_faqs/#how-to-find-local-ip-address-on-different-os-platforms) for this purpose."
-```python +```python linenums="1" # import necessary libs import uvicorn, asyncio, cv2 from vidgear.gears import NetGear @@ -119,7 +119,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! note "Replace the IP address in the following code with Client's IP address you noted earlier." -```python +```python linenums="1" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear @@ -194,9 +194,9 @@ Open a terminal on Client System where you want to display the input frames _(an !!! info "Note down the local IP-address of this system(required at Server's end) and also replace it in the following code. You can follow [this FAQ](../netgear_faqs/#how-to-find-local-ip-address-on-different-os-platforms) for this purpose." -!!! fail "For VideoCapture APIs you also need to implement `start()` in addition to `read()` and `stop()` methods in your Custom Streaming Class as shown in following example, otherwise WebGear_RTC will fail to work!" +!!! failure "For VideoCapture APIs you also need to implement `start()` in addition to `read()` and `stop()` methods in your Custom Streaming Class as shown in following example, otherwise WebGear_RTC will fail to work!" -```python hl_lines="8-79 92-101" +```python linenums="1" hl_lines="8-79 92-101" # import necessary libs import uvicorn, cv2 from vidgear.gears import NetGear @@ -317,7 +317,7 @@ Now, Open the terminal on another Server System _(with a webcam connected to it !!! note "Replace the IP address in the following code with Client's IP address you noted earlier." -```python +```python linenums="1" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import NetGear diff --git a/docs/help/netgear_faqs.md b/docs/help/netgear_faqs.md index 33c6952a9..76753321b 100644 --- a/docs/help/netgear_faqs.md +++ b/docs/help/netgear_faqs.md @@ -20,7 +20,7 @@ limitations under the License. 
# NetGear FAQs -  +  ## What is NetGear API and what does it do? @@ -30,7 +30,7 @@ limitations under the License. ## How to get started with NetGear API? -**Answer:** See [NetGear doc ➶](../../gears/netgear/overview/). Still in doubt, then discuss on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [NetGear documentation ➶](../../gears/netgear/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   diff --git a/docs/help/pigear_ex.md b/docs/help/pigear_ex.md index 66f161b61..e7b15900c 100644 --- a/docs/help/pigear_ex.md +++ b/docs/help/pigear_ex.md @@ -20,56 +20,452 @@ limitations under the License. # PiGear Examples +  + +## Changing Output Pixel Format in PiGear API with Picamera2 Backend + +> With the Picamera2 backend, you can also define a custom `format` _(format of output frame pixels)_ in PiGear API. + +??? info "Handling output frames with a custom pixel format correctly" + While defining custom `format` as an optional parameter, it is advised to also define the [`colorspace`](../params/#colorspace) parameter in the PiGear API. This is required only under **TWO** conditions: + + - If `format` value is not **MPEG** for USB cameras. + - If `format` value is not **BGR** _(i.e., `RGB888`)_ or **BGRA** _(i.e., `XRGB8888`)_ for Raspberry Pi camera modules. + + :warning: Otherwise, output frames might **NOT** be compatible with OpenCV functions, and you need to handle these frames manually! + +??? failure "Picamera2 library has an unconventional naming convention for its pixel formats." + Please note that, Picamera2 takes its pixel format naming from `libcamera`, which in turn takes them from certain underlying Linux components. The results are not always the most intuitive. 
For example, OpenCV users will typically want each pixel to be a (`B`, `G`, `R`) triple for which the `RGB888` format should be chosen, and not `BGR888`. Similarly, OpenCV users wanting an alpha channel should select `XRGB8888`. + + For more information, refer [Picamera2 docs ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf) + +=== "YUV420/YVU420" + + !!! abstract "For reducing the size of frames in memory it is advised to use the `YUV420` pixels format." + + In this example we will be defining custom `YUV420` _(or `YVU420`)_ pixels format of output frame, and converting it back to `BGR` to be able to display with OpenCV. + + !!! tip "You could also instead define [`colorspace="COLOR_YUV420p2RGB"`](../params/#colorspace) parameter in PiGear API for converting it back to `BGR` similarly." + + ```python linenums="1" hl_lines="8 27" + # import required libraries + from vidgear.gears import PiGear + import cv2 + + # formulate `format` Picamera2 API + # configurational parameters + options = { + "format": "YUV420" # or use `YVU420` + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + yuv420_frame = stream.read() + + # check for frame if Nonetype + if yuv420_frame is None: + break + + # {do something with the `YUV420` frame here} + + # convert `YUV420` to `BGR` + bgr = cv2.cvtColor(yuv420_frame, cv2.COLOR_YUV420p2BGR) + + # {do something with the `BGR` frame here} + + # Show output window + cv2.imshow("Output Frame", bgr) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` + +=== "YUYV" + + !!! abstract "`YUYV` is a one packed `4:2:2` YUV format that is popularly used by USB cameras." + + !!! 
alert "Make sure `YUYV` pixel format is supported by your USB camera." + + In this example we will be defining custom `YUYV` pixels format of output frame, and converting it back to `BGR` to be able to display with OpenCV. + + !!! tip "You could also instead define [`colorspace="COLOR_YUV2BGR_YUYV"`](../params/#colorspace) parameter in PiGear API for converting it back to `BGR` similarly." + + ```python linenums="1" hl_lines="8 27" + # import required libraries + from vidgear.gears import PiGear + import cv2 + + # formulate `format` Picamera2 API + # configurational parameters + options = { + "format": "YUYV" + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + yuyv_frame = stream.read() + + # check for frame if Nonetype + if yuyv_frame is None: + break + + # {do something with the `YUYV` frame here} + + # convert `YUYV` to `BGR` + bgr = cv2.cvtColor(yuyv_frame, cv2.COLOR_YUV2BGR_YUYV) + + # {do something with the `BGR` frame here} + + # Show output window + cv2.imshow("Output Frame", bgr) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` +   -## Setting variable `picamera` parameters for Camera Module at runtime +## Dynamically Adjusting Raspberry Pi Camera Parameters at Runtime in PiGear API + +=== "New Picamera2 backend" + + > With the `picamera2` backend, using `stream` global parameter in the PiGear API, you can change all camera **controls** _(except output **resolution** and **format**)_ at runtime after the camera has started. + + ???
tip "Accessing all available camera controls" + A complete list of all the available camera controls can be found in the [`picamera2` docs ➶](https://datasheets.raspberrypi.com/camera/picamera2-manual.pdf), and also by inspecting the `camera_controls` property of the Picamera2 object available with `stream` global parameter in PiGear API: + + ```python + # import required libraries + from vidgear.gears import PiGear + + # open any pi video stream + stream = PiGear() + + #display all available camera controls + print(stream.stream.camera_controls) + + # safely close video stream + stream.stop() + ``` + + This returns a dictionary with the control names as keys, and each value being a tuple of _(`min`, `max`, `default`)_ values for that control. :warning: _The default value should be interpreted with some caution as in many cases libcamera's default value will be overwritten by the camera tuning as soon as the camera is started._ + + + In this example, we will set the initial Camera Module's brightness value to `-0.5` _(dark)_, and will change it to `0.5` _(bright)_ when the ++"Z"++ key is pressed at runtime: + + !!! warning "Delay in setting runtime controls" + There will be a delay of several frames before the controls take effect. This is because there is perhaps quite a large number of requests for camera frames already in flight, and for some controls _(`exposure time` and `analogue gain` specifically)_, the camera may actually take several frames to apply the updates. + + ??? info "Using `with` construct for Guaranteed Camera Control Updates at Runtime" + + While directly modifying using `set_controls` method might seem convenient, it doesn't guarantee that all camera control settings are applied within the same frame at runtime. The `with` construct provides a structured approach to managing camera control updates in real-time. 
Here's how to use it: + + ```python + # import required libraries + from vidgear.gears import PiGear + + # formulate initial configurational parameters + options = {"controls": {"ExposureTime": 5000, "AnalogueGain": 0.5}} + + # open pi video stream with these parameters + stream = PiGear(logging=True, **options).start() + + # Enter context manager and set runtime controls + # Within this block, the controls are guaranteed to be applied atomically + with stream.stream.controls as controls: + controls.ExposureTime = 10000 # Set new exposure time + controls.AnalogueGain = 1.0 # Set new analogue gain + + # ...rest of code goes here... + + # safely close video stream + stream.stop() + ``` + + ```python linenums="1" hl_lines="7 37" + # import required libraries + from vidgear.gears import PiGear + import cv2 + + # formulate initial configurational parameters + # set brightness to -0.5 (dark) + options = {"controls": {"Brightness": -0.5}} + + # open pi video stream with these parameters + stream = PiGear(logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + + # {do something with the frame here} + + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # check for 'z' key if pressed + if key == ord("z"): + # change brightness to 0.5 (bright) + stream.stream.set_controls({"Brightness": 0.5}) + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` + +=== "Legacy Picamera backend" + + > You can also use the `stream` global parameter in PiGear with the `picamera` backend to feed any [`picamera`](https://picamera.readthedocs.io/en/release-1.10/api_camera.html) parameters at runtime after the camera has started.
+ + ???+ danger "PiGear API switches to the legacy `picamera` backend if the `picamera2` library is unavailable." + + It is advised to enable logging(`logging=True`) to see which backend is being used. + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params/#options) optional parameter boolean attribute." + + In this example we will set initial Camera Module's `brightness` value to `80` _(brighter)_, and will change it to `30` _(darker)_ when ++"Z"++ key is pressed at runtime: + + ```python linenums="1" hl_lines="7 37" + # import required libraries + from vidgear.gears import PiGear + import cv2 + + # formulate initial configurational parameters + # set brightness to `80` (bright) + options = {"brightness": 80} + + # open pi video stream with these parameters + stream = PiGear(logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + + # {do something with the frame here} + + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # check for 'z' key if pressed + if key == ord("z"): + # change brightness to `30` (darker) + stream.stream.brightness = 30 + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` + +## Accessing Multiple Camera through its Index in PiGear API + +> With the [`camera_num`](../params/#camera_num) parameter in the PiGear API, you can easily select the camera index to be used as the source, allowing you to drive these multiple cameras simultaneously from within a single Python session. + +!!! 
failure "The `camera_num` value can only be zero or greater, otherwise, PiGear API will throw `ValueError` for any negative value." + +=== "New Picamera2 backend" + + With the `picamera2` backend, you can use the `camera_num` parameter in PiGear to select the camera index to be used as the source if you have multiple **Raspberry Pi camera modules _(such as CM4)_** and/or **USB cameras** connected simultaneously to your Raspberry Pi. + + ??? tip "Accessing metadata about connected cameras." + You can call the `global_camera_info()` method of the Picamera2 object available with `stream` global parameter in PiGear API to find out what cameras are attached. This returns a list containing one dictionary for each camera, ordered according to the camera number you would pass to the `camera_num` parameter in PiGear API to open that device. The dictionary contains: + + - `Model` : the model name of the camera, as advertised by the camera driver. + - `Location` : a number reporting how the camera is mounted, as reported by `libcamera`. + - `Rotation` : how the camera is rotated for normal operation, as reported by `libcamera`. + - `Id` : an identifier string for the camera, indicating how the camera is connected. + + You should always check this list to discover which camera is which as the order can change when the system boots or USB cameras are re-connected as follows: + + ```python + # import required libraries + from vidgear.gears import PiGear + + # open any pi video stream + stream = PiGear() + + #display all available cameras metadata + print(stream.stream.global_camera_info()) + + # safely close video stream + stream.stop() + ``` + + !!! info "The PiGear API can accurately differentiate between USB and Raspberry Pi camera modules by utilizing the camera's metadata." + + In this example, we will select the USB Camera connected at index `1` on the Raspberry Pi as the primary source for extracting frames in PiGear API: + + ??? 
failure "Limited support for USB Cameras" + + This example also works with USB Cameras, However: + + - Users should assume that features such as: **Camera controls** (`"controls"`), **Transformations** (`"transform"`), **Queue** (`"queue"`) , and **Buffer Count** (`"buffer_count"`) that are supported on Raspberry Pi cameras, and so forth, are not available on USB Cameras. + - Hot-plugging of USB cameras is also **NOT** supported - PiGear API should be completely shut down and restarted when cameras are added or removed. + + !!! alert "This example assumes a USB Camera is connected at index `1`, and some other camera connected at index `0` on your Raspberry Pi." + + ```python linenums="1" hl_lines="15" + # import required libraries + from vidgear.gears import PiGear + from libcamera import Transform + import cv2 + + # formulate various Picamera2 API + # configurational parameters for USB camera + options = { + "sensor": {"output_size": (480, 320)}, # will override `resolution` + "format": "RGB888", # BGR format for this example + "auto_align_output_config": True, # auto-align camera configuration + } + + # open pi video stream at index `1` with defined parameters + stream = PiGear(camera_num=1, resolution=(640, 480), framerate=60, logging=True, **options).start() + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + ``` +=== "Legacy Picamera backend" + + !!! 
warning "With the Picamera backend, you should not change the `camera_num` parameter unless you are using the [**Raspberry Pi 3/3+/4 Compute Module IO Boards**](https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md) or third party [**Arducam Camarray Multiple Camera Solutions**](https://www.arducam.com/arducam-camarray-solutions/), which supports attaching multiple camera modules to the same Raspberry Pi board using appropriate I/O connections." + + You can use the `camera_num` parameter in PiGear with the `picamera` backend to select the camera index to be used as the source if you have multiple Raspberry Pi camera modules connected. + + ???+ danger "PiGear API switches to the legacy `picamera`backend if the `picamera2` library is unavailable." + + It is advised to enable logging(`logging=True`) to see which backend is being used. -You can use `stream` global parameter in PiGear to feed any [`picamera`](https://picamera.readthedocs.io/en/release-1.10/api_camera.html) parameters at runtime. + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." -In this example we will set initial Camera Module's `brightness` value `80`, and will change it `50` when **`z` key** is pressed at runtime: + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params/#options) optional parameter boolean attribute." -```python hl_lines="35" -# import required libraries -from vidgear.gears import PiGear -import cv2 + In this example, we will select the Camera Module connected at index `1` on the Raspberry Pi as the primary source for extracting frames in PiGear API: -# initial parameters -options = {"brightness": 80} # set brightness to 80 + !!! alert "This example assumes a Camera Module is connected at index `1` on your Raspberry Pi." 
-# open pi video stream with default parameters -stream = PiGear(logging=True, **options).start() + ```python linenums="1" hl_lines="17" + # import required libraries + from vidgear.gears import PiGear + import cv2 -# loop over -while True: + # formulate various Picamera API + # configurational parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } - # read frames from stream - frame = stream.read() + # open pi video stream at index `1` with defined parameters + stream = PiGear(camera_num=1, resolution=(640, 480), framerate=60, logging=True, **options).start() - # check for frame if Nonetype - if frame is None: - break + # loop over + while True: + # read frames from stream + frame = stream.read() - # {do something with the frame here} + # check for frame if Nonetype + if frame is None: + break + # {do something with the frame here} - # Show output window - cv2.imshow("Output Frame", frame) + # Show output window + cv2.imshow("Output Frame", frame) - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break - # check for 'z' key if pressed - if key == ord("z"): - # change brightness to 50 - stream.stream.brightness = 50 + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break -# close output window -cv2.destroyAllWindows() + # close output window + cv2.destroyAllWindows() -# safely close video stream -stream.stop() -``` + # safely close video stream + stream.stop() + ```   \ No newline at end of file diff --git a/docs/help/pigear_faqs.md b/docs/help/pigear_faqs.md index 3c24814da..c49abe46d 100644 --- a/docs/help/pigear_faqs.md +++ b/docs/help/pigear_faqs.md @@ -20,29 +20,29 @@ limitations under the License. # PiGear FAQs -  +  ## What is PiGear API and what does it do? 
-**Answer:** PiGear is similar to CamGear but exclusively made to support various Raspberry Pi Camera Modules (such as [OmniVision OV5647 Camera Module](https://github.com/techyian/MMALSharp/wiki/OmniVision-OV5647-Camera-Module) and [Sony IMX219 Camera Module](https://github.com/techyian/MMALSharp/wiki/Sony-IMX219-Camera-Module)). _For more info. see [PiGear doc ➶](../../gears/pigear/overview/)_ +**Answer:** PiGear is a specialized API similar to the [CamGear API](../../gears/camgear/overview/) but optimized for **Raspberry Pi Boards**, offering comprehensive **support for camera modules** _(e.g., OmniVision OV5647, Sony IMX219)_, along with **limited compatibility for USB cameras**. _For more info. see [PiGear doc ➶](../../gears/pigear/overview/)_   ## I'm only familiar with OpenCV, how to get started with PiGear API? -**Answer:** First, see [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis), then go through [PiGear doc](../../gears/pigear/overview/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [PiGear documentation](../../gears/pigear/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   ## Why my camera module is not detected by PiGear? -**Answer:** Make sure to [enable Raspberry Pi hardware-specific settings ➶](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) before using PiGear. Also, recheck/change your Camera Module's ribbon-cable and Camera Module itself, if it damaged or got broken somehow. +**Answer:** Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using PiGear API. 
Also, recheck/change your Camera Module's ribbon-cable and Camera Module itself, if it damaged or got broken somehow.   ## How to select camera index on Pi Compute IO board with two Cameras attached? -**Answer:** See [PiGear's `camera_num` parameter ➶](../../gears/pigear/params/#camera_num) +**Answer:** Refer [this bonus example ➶](../../help/pigear_ex/#accessing-multiple-camera-through-its-index-in-pigear-api)   @@ -52,7 +52,7 @@ limitations under the License.   -## How to assign `picamera` settings for Camera Module with PiGear? +## How to assign various configurational settings for Camera Module with PiGear? **Answer:** See [this usage example ➶](../../gears/pigear/usage/#using-pigear-with-variable-camera-properties) @@ -60,13 +60,21 @@ limitations under the License. ## "Video output is too dark with PiGear", Why? -**Answer:** Seems like the settings are wrong. Kindly see [picamera docs](https://picamera.readthedocs.io/en/release-1.13/api_camera.html) for available parameters, and look for parameters are `sensor_mode`, `shutter_speed` and `exposure_mode`, try changing those values. Also, maybe your `framerate` value is too high. Try lowering it. +**Answer:** The camera configuration settings might be incorrect. Check [this usage example ➶](../../gears/pigear/usage/#using-pigear-with-variable-camera-properties) and try tinkering parameters like `sensor_mode`, `shutter_speed`, and `exposure_mode`. Additionally, if your `framerate` parameter value is too high, try lowering it. + +  + + +## How to dynamically adjust Raspberry Pi Camera Parameters at runtime with PiGear? + +**Answer:** See [this bonus example ➶](../../help/pigear_ex/#dynamically-adjusting-raspberry-pi-camera-parameters-at-runtime-in-pigear-api)   -## How to change `picamera` settings for Camera Module at runtime? +## Is it possible to change output frames Pixel Format in PiGear API? -**Answer:** You can use `stream` global parameter in PiGear to feed any `picamera` setting at runtime. 
See [this bonus example ➶](../pigear_ex/#setting-variable-picamera-parameters-for-camera-module-at-runtime) +**Answer:** Yes it is possible with Picamera2 Backend. See [this bonus example ➶](../../help/pigear_ex/#changing-output-pixel-format-in-pigear-api-with-picamera2-backend) + +  -  \ No newline at end of file diff --git a/docs/help/screengear_ex.md b/docs/help/screengear_ex.md index ce0e1f26e..49a0374ed 100644 --- a/docs/help/screengear_ex.md +++ b/docs/help/screengear_ex.md @@ -20,7 +20,7 @@ limitations under the License. # ScreenGear Examples -  +  ## Using ScreenGear with NetGear and WriteGear @@ -37,14 +37,14 @@ Open a terminal on Client System _(where you want to save the input frames recei !!! tip "You can terminate client anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import NetGear from vidgear.gears import WriteGear import cv2 # define various tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Define Netgear Client at given IP address and define parameters # !!! change following IP address '192.168.x.xxx' with yours !!! @@ -94,7 +94,7 @@ Now, Open the terminal on another Server System _(with a montior/display attache !!! tip "You can terminate stream on both side anytime by pressing ++ctrl+"C"++ on your keyboard!" -```python +```python linenums="1" # import required libraries from vidgear.gears import ScreenGear from vidgear.gears import NetGear @@ -106,7 +106,7 @@ options = {"top": 40, "left": 0, "width": 100, "height": 100} stream = ScreenGear(logging=True, **options).start() # define various netgear tweak flags -options = {"flag": 0, "copy": False, "track": False} +options = {"flag": 0, "copy": True, "track": False} # Define Netgear server at given IP address and define parameters # !!! change following IP address '192.168.x.xxx' with client's IP address !!! 
@@ -156,7 +156,7 @@ The complete usage example is as follows: === "Bare-Minimum" - ```python hl_lines="8" + ```python linenums="1" hl_lines="8" # import necessary libs import uvicorn, cv2 from vidgear.gears import ScreenGear @@ -178,9 +178,9 @@ The complete usage example is as follows: === "Advanced" - !!! fail "For VideoCapture APIs you also need to implement `start()` in addition to `read()` and `stop()` methods in your Custom Streaming Class as shown in following example, otherwise WebGear_RTC will fail to work!" + !!! failure "For VideoCapture APIs you also need to implement `start()` in addition to `read()` and `stop()` methods in your Custom Streaming Class as shown in following example, otherwise WebGear_RTC will fail to work!" - ```python hl_lines="8-64 69" + ```python linenums="1" hl_lines="8-64 69" # import necessary libs import uvicorn, cv2 from vidgear.gears import ScreenGear diff --git a/docs/help/screengear_faqs.md b/docs/help/screengear_faqs.md index 7ccdb093e..c83dff16e 100644 --- a/docs/help/screengear_faqs.md +++ b/docs/help/screengear_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # ScreenGear FAQs -  +  ## What is ScreenGear API and what does it do? @@ -30,7 +30,7 @@ limitations under the License. ## I'm only familiar with OpenCV, how to get started with ScreenGear API? -**Answer:** First, see [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis), then go through [ScreenGear doc](../../gears/screengear/overview/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [ScreenGear documentation](../../gears/screengear/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   @@ -50,4 +50,29 @@ limitations under the License. 
**Answer:** With `mss` backend, see ScreenGear's [`monitor`](../../gears/screengear/params/#monitor) parameter that sets the index of the monitor to grab a frame from. If its value is `-1`, it will record from all monitors. _More information can be found [here ➶](https://python-mss.readthedocs.io/examples.html#a-screen-shot-to-grab-them-all)_ +  + +## I'm getting "AttributeError: 'DXCamera' object has no attribute 'is_capturing'" Error? + +**Answer:** This is a [well-known error](https://github.com/ra1nty/DXcam/issues/38) in backend `dxcam` library which occurs when you've multiple GPUs on your Windows machine. To workaround this, you need select Internal GPU in settings as follows: + +=== "On :fontawesome-brands-windows: Windows 11" + + In **Settings**, go to `System > Display > Graphics` and add your `Python.exe` as _"Desktop App"_, then select _"Power saving"_ as follows: +
+ AttributeError: 'DXCamera' +
+ + And finally press **Save** button. + +=== "On :fontawesome-brands-windows: Windows 10" + + In **Settings**, go to `Graphics Settings` and add your `Python.exe` as _"Desktop App"_, then select _"Power saving"_ as follows: + +
+ AttributeError: 'DXCamera' +
+ + And finally press **Save** button. +   \ No newline at end of file diff --git a/docs/help/stabilizer_ex.md b/docs/help/stabilizer_ex.md index 3e0e1aa72..3d0154214 100644 --- a/docs/help/stabilizer_ex.md +++ b/docs/help/stabilizer_ex.md @@ -20,7 +20,7 @@ limitations under the License. # Stabilizer Class Examples -  +  ## Saving Stabilizer Class output with Live Audio Input @@ -37,7 +37,7 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W ??? tip "Identifying and Specifying sound card on different OS platforms" - === "On Windows" + === "Windows :fontawesome-brands-windows:" Windows OS users can use the [dshow](https://trac.ffmpeg.org/wiki/DirectShow) (DirectShow) to list audio input device which is the preferred option for Windows users. You can refer following steps to identify and specify your sound card: @@ -79,10 +79,10 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W } ``` - !!! fail "If audio still doesn't work then [checkout this troubleshooting guide ➶](https://www.maketecheasier.com/fix-microphone-not-working-windows10/) or reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then [checkout this troubleshooting guide ➶](https://www.maketecheasier.com/fix-microphone-not-working-windows10/) or reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" - === "On Linux" + === "Linux :material-linux:" Linux OS users can use the [alsa](https://ffmpeg.org/ffmpeg-all.html#alsa) to list input device to capture live audio input such as from a webcam. You can refer following steps to identify and specify your sound card: @@ -125,10 +125,10 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W } ``` - !!! fail "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! 
failure "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" - === "On MacOS" + === "MacOS :material-apple:" MAC OS users can use the [avfoundation](https://ffmpeg.org/ffmpeg-devices.html#avfoundation) to list input devices for grabbing audio from integrated iSight cameras as well as cameras connected via USB or FireWire. You can refer following steps to identify and specify your sound card on MacOS/OSX machines: @@ -167,14 +167,14 @@ In this example code, we will merging the audio from a Audio Device _(for e.g. W } ``` - !!! fail "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" + !!! failure "If audio still doesn't work then reach us out on [Gitter ➶](https://gitter.im/vidgear/community) Community channel" !!! danger "Make sure this `-i` audio-source it compatible with provided video-source, otherwise you could encounter multiple errors or no output at all." !!! warning "You **MUST** use [`-input_framerate`](../../gears/writegear/compression/params/#supported-parameters) attribute to set exact value of input framerate when using external audio in Real-time Frames mode, otherwise audio delay will occur in output streams." -```python +```python linenums="1" hl_lines="14-19" # import required libraries from vidgear.gears import WriteGear from vidgear.gears.stabilizer import Stabilizer @@ -192,7 +192,7 @@ output_params = { "-thread_queue_size": "512", "-ac": "2", "-ar": "48000", - "-f": "alsa", # !!! warning: always keep this line above "-i" parameter !!! + "-f": "alsa", # (1) "-i": "hw:1", } @@ -232,6 +232,8 @@ stream.release() writer.close() ``` +1. :warning: Always keep this line above `-i` parameter! +   ## Saving Stabilizer Class output with File Audio Input @@ -247,7 +249,7 @@ In this example code, we will be directly merging the audio from a Video-File _( !!! 
alert "Use [`-disable_force_termination`](../../gears/writegear/compression/params/#supported-parameters) flag when video duration is too short(<60sec), otherwise WriteGear will not produce any valid output." -```python +```python linenums="1" hl_lines="17-22 49" # import required libraries from vidgear.gears import WriteGear from vidgear.gears.stabilizer import Stabilizer diff --git a/docs/help/stabilizer_faqs.md b/docs/help/stabilizer_faqs.md index 7406c3795..8a4b84193 100644 --- a/docs/help/stabilizer_faqs.md +++ b/docs/help/stabilizer_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # Stabilizer Class FAQs -  +  ## What is Stabilizer Class and what does it do? diff --git a/docs/help/streamgear_ex.md b/docs/help/streamgear_ex.md index 0f8de8506..253b044c7 100644 --- a/docs/help/streamgear_ex.md +++ b/docs/help/streamgear_ex.md @@ -26,136 +26,286 @@ limitations under the License. In this example, we will be Live-Streaming video-frames from Raspberry Pi _(with Camera Module connected)_ using PiGear API and StreamGear API's Real-time Frames Mode: -??? new "New in v0.2.2" - This example was added in `v0.2.2`. - !!! tip "Use `-window_size` & `-extra_window_size` FFmpeg parameters for controlling number of frames to be kept in Chunks. Less these value, less will be latency." !!! alert "After every few chunks _(equal to the sum of `-window_size` & `-extra_window_size` values)_, all chunks will be overwritten in Live-Streaming. Thereby, since newer chunks in manifest/playlist will contain NO information of any older ones, and therefore resultant DASH/HLS stream will play only the most recent frames." !!! note "In this mode, StreamGear **DOES NOT** automatically maps video-source audio to generated streams. You need to manually assign separate audio-source through [`-audio`](../../gears/streamgear/params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter." +!!! 
new "PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." + +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using the PiGear API, otherwise nothing will work." + === "DASH" - ```python - # import required libraries - from vidgear.gears import PiGear - from vidgear.gears import StreamGear - import cv2 + === "New Picamera2 backend" + + ```python linenums="1" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import StreamGear + from libcamera import Transform + import cv2 + + # formulate various Picamera2 API + # configurational parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # enable livestreaming and retrieve framerate from CamGear Stream and + # pass it as `-input_framerate` parameter for controlled framerate + stream_params = {"-input_framerate": stream.framerate, "-livestream": True} + + # describe a suitable manifest-file location/name + streamer = StreamGear(output="dash_out.mpd", **stream_params) + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # send frame to streamer + streamer.stream(frame) + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = 
cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + + # safely close streamer + streamer.close() + ``` + + === "Legacy Picamera backend" + + ??? info "Under the hood, PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." + + However, PiGear API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." - # add various Picamera tweak parameters to dictionary - options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, - } + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params) user-defined optional parameter boolean attribute." 
- # open pi video stream with defined parameters - stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + ```python linenums="1" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import StreamGear + import cv2 - # enable livestreaming and retrieve framerate from CamGear Stream and - # pass it as `-input_framerate` parameter for controlled framerate - stream_params = {"-input_framerate": stream.framerate, "-livestream": True} + # formulate various Picamera API + # configurational parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } - # describe a suitable manifest-file location/name - streamer = StreamGear(output="dash_out.mpd", **stream_params) + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - # loop over - while True: + # enable livestreaming and retrieve framerate from CamGear Stream and + # pass it as `-input_framerate` parameter for controlled framerate + stream_params = {"-input_framerate": stream.framerate, "-livestream": True} - # read frames from stream - frame = stream.read() + # describe a suitable manifest-file location/name + streamer = StreamGear(output="dash_out.mpd", **stream_params) - # check for frame if Nonetype - if frame is None: - break + # loop over + while True: - # {do something with the frame here} + # read frames from stream + frame = stream.read() - # send frame to streamer - streamer.stream(frame) + # check for frame if Nonetype + if frame is None: + break - # Show output window - cv2.imshow("Output Frame", frame) + # {do something with the frame here} - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break + # send frame to streamer + streamer.stream(frame) - # close output window - cv2.destroyAllWindows() + # Show output window + 
cv2.imshow("Output Frame", frame) - # safely close video stream - stream.stop() + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break - # safely close streamer - streamer.terminate() - ``` + # close output window + cv2.destroyAllWindows() + + # safely close video stream + stream.stop() + + # safely close streamer + streamer.close() + ``` === "HLS" - ```python - # import required libraries - from vidgear.gears import PiGear - from vidgear.gears import StreamGear - import cv2 + === "New Picamera2 backend" + + ```python linenums="1" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import StreamGear + from libcamera import Transform + import cv2 + + # formulate various Picamera2 API + # configurational parameters + options = { + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } + + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + + # enable livestreaming and retrieve framerate from CamGear Stream and + # pass it as `-input_framerate` parameter for controlled framerate + stream_params = {"-input_framerate": stream.framerate, "-livestream": True} + + # describe a suitable manifest-file location/name + streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) + + # loop over + while True: + + # read frames from stream + frame = stream.read() + + # check for frame if Nonetype + if frame is None: + break + + # {do something with the frame here} + + # send frame to streamer + streamer.stream(frame) + + # Show output window + cv2.imshow("Output Frame", frame) + + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + # close output window + cv2.destroyAllWindows() + + # safely close video stream + 
stream.stop() + + # safely close streamer + streamer.close() + ``` + + === "Legacy Picamera backend" + + ??? info "Under the hood, PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." + + However, PiGear API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../params) user-defined optional parameter boolean attribute." - # add various Picamera tweak parameters to dictionary - options = { - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, - } + ```python linenums="1" + # import required libraries + from vidgear.gears import PiGear + from vidgear.gears import StreamGear + import cv2 - # open pi video stream with defined parameters - stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() + # formulate various Picamera API + # configurational parameters + options = { + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } - # enable livestreaming and retrieve framerate from CamGear Stream and - # pass it as `-input_framerate` parameter for controlled framerate - stream_params = {"-input_framerate": stream.framerate, "-livestream": True} + # open pi video stream with defined parameters + stream = PiGear(resolution=(640, 480), framerate=60, logging=True, **options).start() - # describe a suitable manifest-file location/name - 
streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) + # enable livestreaming and retrieve framerate from CamGear Stream and + # pass it as `-input_framerate` parameter for controlled framerate + stream_params = {"-input_framerate": stream.framerate, "-livestream": True} - # loop over - while True: + # describe a suitable manifest-file location/name + streamer = StreamGear(output="hls_out.m3u8", format = "hls", **stream_params) - # read frames from stream - frame = stream.read() + # loop over + while True: - # check for frame if Nonetype - if frame is None: - break + # read frames from stream + frame = stream.read() - # {do something with the frame here} + # check for frame if Nonetype + if frame is None: + break - # send frame to streamer - streamer.stream(frame) + # {do something with the frame here} - # Show output window - cv2.imshow("Output Frame", frame) + # send frame to streamer + streamer.stream(frame) - # check for 'q' key if pressed - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break + # Show output window + cv2.imshow("Output Frame", frame) - # close output window - cv2.destroyAllWindows() + # check for 'q' key if pressed + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break - # safely close video stream - stream.stop() + # close output window + cv2.destroyAllWindows() - # safely close streamer - streamer.terminate() - ``` + # safely close video stream + stream.stop() + # safely close streamer + streamer.close() + ```   \ No newline at end of file diff --git a/docs/help/streamgear_faqs.md b/docs/help/streamgear_faqs.md index 4729aa38f..31cdf9c65 100644 --- a/docs/help/streamgear_faqs.md +++ b/docs/help/streamgear_faqs.md @@ -30,7 +30,7 @@ limitations under the License. ## How to get started with StreamGear API? -**Answer:** See [StreamGear doc ➶](../../gears/streamgear/introduction/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. 
+**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [StreamGear documentation](../../gears/streamgear/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   @@ -77,16 +77,7 @@ limitations under the License.   -## Is Real-time Frames Mode only used for Live-Streaming? - -**Answer:** Real-time Frame Modes and Live-Streaming are completely different terms and not directly related. - -- **Real-time Frame Mode** is one of [primary mode](../../gears/streamgear/introduction/#mode-of-operations) for directly transcoding real-time [`numpy.ndarray`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.html#numpy-ndarray) video-frames _(as opposed to a entire file)_ into a sequence of multiple smaller chunks/segments for streaming. - -- **Live-Streaming** is feature of StreamGear's primary modes that activates behaviour where chunks will contain information for few new frames only and forgets all previous ones for low latency streaming. It can be activated for any primary mode using exclusive [`-livestream`](../../gears/streamgear/params/#a-exclusive-parameters) attribute of `stream_params` dictionary parameter. - - -## How to use Hardware/GPU encoder for StreamGear trancoding? +## How to use Hardware/GPU encoder for transcoding in StreamGear API? **Answer:** [See this example ➶](../../gears/streamgear/rtfm/usage/#usage-with-hardware-video-encoder) diff --git a/docs/help/videogear_ex.md b/docs/help/videogear_ex.md index e544cd237..cc70ffc32 100644 --- a/docs/help/videogear_ex.md +++ b/docs/help/videogear_ex.md @@ -20,7 +20,7 @@ limitations under the License. # VideoGear Examples -  +  ## Using VideoGear with ROS(Robot Operating System) @@ -33,7 +33,7 @@ In this example, we'll create a node that convert OpenCV frames into ROS image m !!! 
note "This example is vidgear implementation of this [wiki example](http://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython)." -```python +```python linenums="1" # import roslib import roslib @@ -130,7 +130,7 @@ Here's a high-level wrapper code around VideoGear API to enable auto-reconnectio Finally, use [`backend`](../../gears/videogear/params/#backend) parameter value as `backend=cv2.CAP_FFMPEG` in VideoGear. -```python +```python linenums="1" from vidgear.gears import VideoGear import cv2 import datetime @@ -233,7 +233,7 @@ In this example code, we will be directly merging the audio from a Video-File _( !!! alert "Use `-disable_force_termination` flag when video duration is too short(<60sec), otherwise WriteGear will not produce any valid output." -```python +```python linenums="1" # import required libraries from vidgear.gears import WriteGear from vidgear.gears import VideoGear diff --git a/docs/help/videogear_faqs.md b/docs/help/videogear_faqs.md index 67cdb89cb..5fdd147d6 100644 --- a/docs/help/videogear_faqs.md +++ b/docs/help/videogear_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # VideoGear FAQs -  +  ## What is VideoGear API and what does it do? diff --git a/docs/help/webgear_ex.md b/docs/help/webgear_ex.md index 46865e1e7..c3dc6c447 100644 --- a/docs/help/webgear_ex.md +++ b/docs/help/webgear_ex.md @@ -20,46 +20,97 @@ limitations under the License. # WebGear Examples -  +  ## Using WebGear with RaspberryPi Camera Module Because of WebGear API's flexible internal wapper around VideoGear, it can easily access any parameter of CamGear and PiGear videocapture APIs. !!! info "Following usage examples are just an idea of what can be done with WebGear API, you can try various [VideoGear](../../gears/videogear/params/), [CamGear](../../gears/camgear/params/) and [PiGear](../../gears/pigear/params/) parameters directly in WebGear API in the similar manner." 
- -Here's a bare-minimum example of using WebGear API with the Raspberry Pi camera module while tweaking its various properties in just one-liner: -```python -# import libs -import uvicorn -from vidgear.gears.asyncio import WebGear +Here's a bare-minimum example of using WebGear API with the Raspberry Pi camera module while tweaking its various properties in few lines of python code: -# various webgear performance and Raspberry Pi camera tweaks -options = { - "frame_size_reduction": 40, - "jpeg_compression_quality": 80, - "jpeg_compression_fastdct": True, - "jpeg_compression_fastupsample": False, - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# initialize WebGear app -web = WebGear( - enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options -) +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." 
-# run this app on Uvicorn server at address http://localhost:8000/ -uvicorn.run(web(), host="localhost", port=8000) -# close app safely -web.shutdown() -``` +=== "New Picamera2 backend" + + ```python linenums="1" hl_lines="22" + # import libs + import uvicorn + from libcamera import Transform + from vidgear.gears.asyncio import WebGear + + # various WebGear_RTC performance + # and Picamera2 API tweaks + options = { + "frame_size_reduction": 40, + "jpeg_compression_quality": 80, + "jpeg_compression_fastdct": True, + "jpeg_compression_fastupsample": False, + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } + + # initialize WebGear app + web = WebGear( + enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options + ) + + # run this app on Uvicorn server at address http://localhost:8000/ + uvicorn.run(web(), host="localhost", port=8000) + + # close app safely + web.shutdown() + ``` + +=== "Legacy Picamera backend" + + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." + + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." 
+ + ```python linenums="1" hl_lines="21" + # import libs + import uvicorn + from vidgear.gears.asyncio import WebGear + + # various webgear performance and Picamera API tweaks + options = { + "frame_size_reduction": 40, + "jpeg_compression_quality": 80, + "jpeg_compression_fastdct": True, + "jpeg_compression_fastupsample": False, + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # initialize WebGear app + web = WebGear( + enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options + ) + + # run this app on Uvicorn server at address http://localhost:8000/ + uvicorn.run(web(), host="localhost", port=8000) + + # close app safely + web.shutdown() + ```   @@ -67,7 +118,7 @@ web.shutdown() Here's an example of using WebGear API with real-time Video Stabilization enabled: -```python +```python linenums="1" hl_lines="14" # import libs import uvicorn from vidgear.gears.asyncio import WebGear @@ -102,7 +153,7 @@ In this example, we'll be displaying two video feeds side-by-side simultaneously **Step-1 (Trigger Auto-Generation Process):** Firstly, run this bare-minimum code to trigger the [**Auto-generation**](../../gears/webgear/overview/#auto-generation-process) process, this will create `.vidgear` directory at current location _(directory where you'll run this code)_: -```python +```python linenums="1" hl_lines="6" # import required libraries import uvicorn from vidgear.gears.asyncio import WebGear @@ -119,7 +170,7 @@ web.shutdown() **Step-2 (Replace HTML file):** Now, go inside `.vidgear` :arrow_right: `webgear` :arrow_right: `templates` directory at current location of your machine, and there replace content of `index.html` file with following: -```html +```html hl_lines="5-6" {% extends "base.html" %} {% block content %}

WebGear Video Feed

@@ -132,7 +183,7 @@ web.shutdown() **Step-3 (Build your own Frame Producers):** Now, create a python script code with OpenCV source, as follows: -```python +```python linenums="1" hl_lines="9 15-38 42-65 68-77 81 84-86" # import necessary libs import uvicorn, asyncio, cv2 from vidgear.gears.asyncio import WebGear diff --git a/docs/help/webgear_faqs.md b/docs/help/webgear_faqs.md index e39194337..55fcd5930 100644 --- a/docs/help/webgear_faqs.md +++ b/docs/help/webgear_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # WebGear FAQs -  +  ## What is WebGear API and what does it do? @@ -30,7 +30,7 @@ limitations under the License. ## How to get started with WebGear API? -**Answer:** See [WebGear doc ➶](../../gears/webgear/overview/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [WebGear documentation](../../gears/webgear/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   diff --git a/docs/help/webgear_rtc_ex.md b/docs/help/webgear_rtc_ex.md index 8b41c398d..5caa4b83b 100644 --- a/docs/help/webgear_rtc_ex.md +++ b/docs/help/webgear_rtc_ex.md @@ -20,7 +20,7 @@ limitations under the License. # WebGear_RTC_RTC Examples -  +  ## Using WebGear_RTC with RaspberryPi Camera Module @@ -30,33 +30,81 @@ Because of WebGear_RTC API's flexible internal wapper around VideoGear, it can e Here's a bare-minimum example of using WebGear_RTC API with the Raspberry Pi camera module while tweaking its various properties in just one-liner: -```python -# import libs -import uvicorn -from vidgear.gears.asyncio import WebGear_RTC +!!! new "Backend PiGear API now fully supports the newer [`picamera2`](https://github.com/raspberrypi/picamera2) python library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. 
Follow this [guide ➶](../../installation/pip_install/#picamera2) for its installation." -# various webgear_rtc performance and Raspberry Pi camera tweaks -options = { - "frame_size_reduction": 25, - "hflip": True, - "exposure_mode": "auto", - "iso": 800, - "exposure_compensation": 15, - "awb_mode": "horizon", - "sensor_mode": 0, -} +!!! warning "Make sure to [complete Raspberry Pi Camera Hardware-specific settings](https://www.raspberrypi.com/documentation/accessories/camera.html#installing-a-raspberry-pi-camera) prior using this backend, otherwise nothing will work." -# initialize WebGear_RTC app -web = WebGear_RTC( - enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options -) -# run this app on Uvicorn server at address http://localhost:8000/ -uvicorn.run(web(), host="localhost", port=8000) +=== "New Picamera2 backend" -# close app safely -web.shutdown() -``` + ```python linenums="1" hl_lines="19" + # import libs + import uvicorn + from libcamera import Transform + from vidgear.gears.asyncio import WebGear_RTC + + # various WebGear_RTC performance + # and Picamera2 API tweaks + options = { + "frame_size_reduction": 25, + "queue": True, + "buffer_count": 4, + "controls": {"Brightness": 0.5, "ExposureValue": 2.0}, + "transform": Transform(hflip=1), + "auto_align_output_config": True, # auto-align camera configuration + } + + # initialize WebGear app + web = WebGear_RTC( + enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options + ) + + # run this app on Uvicorn server at address http://localhost:8000/ + uvicorn.run(web(), host="localhost", port=8000) + + # close app safely + web.shutdown() + ``` + +=== "Legacy Picamera backend" + + ??? info "Under the hood, Backend PiGear API _(version `0.3.3` onwards)_ prioritizes the new [`picamera2`](https://github.com/raspberrypi/picamera2) API backend." 
+ + However, the API seamlessly switches to the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/index.html) backend, if the `picamera2` library is unavailable or not installed. + + !!! tip "It is advised to enable logging(`logging=True`) to see which backend is being used." + + !!! failure "The `picamera` library is built on the legacy camera stack that is NOT _(and never has been)_ supported on 64-bit OS builds." + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params) user-defined optional parameter boolean attribute." + + ```python linenums="1" hl_lines="18" + # import libs + import uvicorn + from vidgear.gears.asyncio import WebGear_RTC + + # various WebGear_RTC performance and Picamera API tweaks + options = { + "frame_size_reduction": 25, + "hflip": True, + "exposure_mode": "auto", + "iso": 800, + "exposure_compensation": 15, + "awb_mode": "horizon", + "sensor_mode": 0, + } + + # initialize WebGear app + web = WebGear_RTC( + enablePiCamera=True, resolution=(640, 480), framerate=60, logging=True, **options + ) + + # run this app on Uvicorn server at address http://localhost:8000/ + uvicorn.run(web(), host="localhost", port=8000) + + # close app safely + web.shutdown() + ```   @@ -64,7 +112,7 @@ web.shutdown() Here's an example of using WebGear_RTC API with real-time Video Stabilization enabled: -```python +```python linenums="1" hl_lines="11" # import libs import uvicorn from vidgear.gears.asyncio import WebGear_RTC @@ -93,7 +141,7 @@ In this example, we'll be displaying two video feeds side-by-side simultaneously ??? new "New in v0.2.4" This example was added in `v0.2.4`. 
-```python hl_lines="10-22 26-92 97-101" +```python linenums="1" hl_lines="10-22 26-92 97-101" # import necessary libs import uvicorn, cv2 import numpy as np diff --git a/docs/help/webgear_rtc_faqs.md b/docs/help/webgear_rtc_faqs.md index 39330843c..a1fb7d4aa 100644 --- a/docs/help/webgear_rtc_faqs.md +++ b/docs/help/webgear_rtc_faqs.md @@ -20,7 +20,7 @@ limitations under the License. # WebGear_RTC FAQs -  +  ## What is WebGear_RTC API and what does it do? @@ -30,7 +30,7 @@ limitations under the License. ## How to get started with WebGear_RTC API? -**Answer:** See [WebGear_RTC doc ➶](../../gears/webgear_rtc/overview/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-videocapture-apis) guide, then go through [WebGear_RTC documentation](../../gears/webgear_rtc/overview/). If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   diff --git a/docs/help/writegear_ex.md b/docs/help/writegear_ex.md index 63b73abef..00df4cf3b 100644 --- a/docs/help/writegear_ex.md +++ b/docs/help/writegear_ex.md @@ -30,15 +30,15 @@ In Compression Mode, you can use WriteGear for livestreaming with traditional pr ??? new "New in v0.2.6" This example was added in `v0.2.6`. -!!! alert "This example assume you already have a RTSP Server running at specified RTSP address with format *`rtsp://[RTSP_ADDRESS]:[RTSP_PORT]/[RTSP_PATH]`* for publishing video frames." +???+ tip "Creating your own RTSP Server locally" + If you want to create your RTSP Server locally, then checkout [**MediaMTX (formerly rtsp-simple-server)**](https://github.com/bluenviron/mediamtx) - ready-to-use and zero-dependency real-time media server and media proxy that allows to publish, read, proxy, record and playback video and audio streams. -??? 
tip "Creating your own RTSP Server locally" - If you want to create your RTSP Server locally, then checkout [**rtsp-simple-server**](https://github.com/aler9/rtsp-simple-server) - a ready-to-use and zero-dependency server and proxy that allows users to publish, read and proxy live video and audio streams through various protocols such as RTSP, RTMP etc. +!!! warning "This example assumes you already have a RTSP Server running at specified RTSP address with format *`rtsp://[RTSP_ADDRESS]:[RTSP_PORT]/[RTSP_PATH]`* for publishing video frames." !!! danger "Make sure to change RTSP address `rtsp://localhost:8554/mystream` with yours in following code before running!" -```python hl_lines="10 15" +```python linenums="1" hl_lines="10 15" # import required libraries import cv2 from vidgear.gears import CamGear @@ -94,7 +94,7 @@ In Compression Mode, you can also use WriteGear for Youtube-Livestreaming. The e === "Without Audio" - ```python hl_lines="11-17 21 25" + ```python linenums="1" hl_lines="11-17 21 25" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -150,7 +150,7 @@ In Compression Mode, you can also use WriteGear for Youtube-Livestreaming. The e !!! warning "This code assume given input video source contains valid audio stream." - ```python hl_lines="7 15-24 28 32" + ```python linenums="1" hl_lines="7 15-24 28 32" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -247,7 +247,7 @@ With WriteGear's Compression Mode, you can directly feed video-frames to [`v4l2l Now you can use `/dev/video0` Virtual Camera device path in WriteGear API. -??? fail "v4l2: open /dev/videoX: Permission denied" +??? 
failure "v4l2: open /dev/videoX: Permission denied" If you got this error, then you must add your username to the `video` group by running following commands: ```sh @@ -265,7 +265,7 @@ With WriteGear's Compression Mode, you can directly feed video-frames to [`v4l2l -```python hl_lines="12-15 19" +```python linenums="1" hl_lines="12-15 19" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -323,7 +323,7 @@ In Compression Mode, you can also use WriteGear for creating MP4 segments from a ??? new "New in v0.2.1" This example was added in `v0.2.1`. -```python hl_lines="13-20 24" +```python linenums="1" hl_lines="13-20 24" # import required libraries from vidgear.gears import VideoGear from vidgear.gears import WriteGear @@ -394,7 +394,7 @@ You can also use WriteGear for merging external audio with live video-source: !!! failure "Make sure this `-i` audio-source it compatible with provided video-source, otherwise you could encounter multiple errors or no output at all." -```python hl_lines="11-12" +```python linenums="1" hl_lines="11-12" # import required libraries from vidgear.gears import CamGear from vidgear.gears import WriteGear @@ -458,9 +458,9 @@ If you need timely accurate video with exactly same speed as real-time input, th In this example we are capturing video from desktop screen in a Timely Accurate manner. 
-=== "Windows" +=== "Windows :fontawesome-brands-windows:" - ```python hl_lines="8-17" + ```python linenums="1" hl_lines="8-17" # import required libraries from vidgear.gears import WriteGear @@ -486,9 +486,9 @@ In this example we are capturing video from desktop screen in a Timely Accurate writer.close() ``` -=== "Linux" +=== "Linux :material-linux:" - ```python hl_lines="8-17" + ```python linenums="1" hl_lines="8-17" # import required libraries from vidgear.gears import WriteGear @@ -514,9 +514,9 @@ In this example we are capturing video from desktop screen in a Timely Accurate writer.close() ``` -=== "macOS" +=== "MacOS :material-apple:" - ```python hl_lines="8-17" + ```python linenums="1" hl_lines="8-17" # import required libraries from vidgear.gears import WriteGear @@ -557,7 +557,7 @@ In this example, we'll create a node that listens to a ROS image message topic, !!! note "This example is vidgear implementation of this [wiki example](http://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython)." -```python +```python linenums="1" # import roslib import roslib diff --git a/docs/help/writegear_faqs.md b/docs/help/writegear_faqs.md index 56161915a..b48560390 100644 --- a/docs/help/writegear_faqs.md +++ b/docs/help/writegear_faqs.md @@ -21,7 +21,7 @@ limitations under the License. # WriteGear FAQs -  +  ## What is WriteGear API and what does it do? @@ -31,7 +31,7 @@ limitations under the License. ## I'm only familiar with OpenCV, how to get started with WriteGear API? -**Answer:** First, see [Switching from OpenCV](../../switch_from_cv/#switching-the-videowriter-api), then go through [WriteGear doc](../../gears/writegear/introduction/). Still in doubt, then ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel. +**Answer:** First, refer to the [Switching from OpenCV](../../switch_from_cv/#switching-the-videowriter-api) guide, then go through [WriteGear documentation](../../gears/writegear/introduction/). 
If you still have doubts, ask us on [Gitter ➶](https://gitter.im/vidgear/community) Community channel.   diff --git a/docs/index.md b/docs/index.md index a52c5346c..871d87da4 100644 --- a/docs/index.md +++ b/docs/index.md @@ -31,7 +31,7 @@ limitations under the License. > VidGear is a cross-platform High-Performance **Video-Processing** Framework for building complex real-time media applications in python :fire: -VidGear provides an easy-to-use, highly extensible, **[Multi-Threaded](bonus/TQM/#threaded-queue-mode) + [Asyncio](https://docs.python.org/3/library/asyncio.html) API Framework** on top of many state-of-the-art specialized libraries like *[OpenCV][opencv], [FFmpeg][ffmpeg], [ZeroMQ][zmq], [picamera][picamera], [starlette][starlette], [yt_dlp][yt_dlp], [pyscreenshot][pyscreenshot], [dxcam][dxcam], [aiortc][aiortc] and [python-mss][mss]* at its backend, and enable us to flexibly exploit their internal parameters and methods, while silently delivering robust error-handling and real-time performance ⚡️. +VidGear provides an easy-to-use, highly extensible, **[Multi-Threaded](bonus/TQM/#threaded-queue-mode) + [Asyncio](https://docs.python.org/3/library/asyncio.html) API Framework** on top of many state-of-the-art specialized libraries like *[OpenCV][opencv], [FFmpeg][ffmpeg], [ZeroMQ][zmq], [picamera2][picamera2], [starlette][starlette], [yt_dlp][yt_dlp], [pyscreenshot][pyscreenshot], [dxcam][dxcam], [aiortc][aiortc] and [python-mss][mss]* at its backend, and enable us to flexibly exploit their internal parameters and methods, while silently delivering robust error-handling and real-time performance ⚡️. > _"Write Less and Accomplish More"_ — VidGear's Motto @@ -66,7 +66,7 @@ These Gears can be classified as follows: #### VideoCapture Gears * [CamGear](gears/camgear/overview/): Multi-Threaded API targeting various IP-USB-Cameras/Network-Streams/Streaming-Sites-URLs. 
-* [PiGear](gears/pigear/overview/): Multi-Threaded API targeting various Raspberry-Pi Camera Modules. +* [PiGear](gears/pigear/overview/): Multi-Threaded API targeting various Camera Modules and _(limited)_ USB cameras on Raspberry Pis :fontawesome-brands-raspberry-pi:. * [ScreenGear](gears/screengear/overview/): High-performance API targeting rapid Screencasting Capabilities. * [VideoGear](gears/videogear/overview/): Common Video-Capture API with internal [_Video Stabilizer_](gears/stabilizer/overview/) wrapper. @@ -119,7 +119,7 @@ You can be a [**Stargazer** :star2:{ .heart }][stargazer] by starring us on Gith It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference :slight_smile: - +Buy Me a Coffee at ko-fi.com   @@ -127,7 +127,7 @@ It is something I am doing with my own free time. But so much more needs to be d Here is a Bibtex entry you can use to cite this project in a publication: -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8174694.svg)](https://doi.org/10.5281/zenodo.8174694) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8332548.svg)](https://doi.org/10.5281/zenodo.8332548) ```BibTeX @software{vidgear, @@ -137,18 +137,19 @@ Here is a Bibtex entry you can use to cite this project in a publication: Christian Hollinger and Ian Max Andolina and Vincent Boivin and - enarche-ahn and + Kyle Ahn and freol35241 and Benjamin Lowe and Mickaël Schoentgen and - Renaud Bouckenooghe}, - title = {abhiTronix/vidgear: VidGear v0.3.1}, - month = jul, + Renaud Bouckenooghe and + Ibtsam Ahmad}, + title = {abhiTronix/vidgear: VidGear Stable v0.3.2}, + month = sep, year = 2023, publisher = {Zenodo}, - version = {vidgear-0.3.1}, - doi = {10.5281/zenodo.8174694}, - url = {https://doi.org/10.5281/zenodo.8174694} + version = {vidgear-0.3.2}, + doi = {10.5281/zenodo.8332548}, + url = {https://doi.org/10.5281/zenodo.8332548} } ``` @@ -171,4 
+172,5 @@ External URLs [streamlink]:https://streamlink.github.io/ [aiortc]:https://aiortc.readthedocs.io/en/latest/ [yt_dlp]:https://github.com/yt-dlp/yt-dlp -[dxcam]:https://github.com/ra1nty/DXcam \ No newline at end of file +[dxcam]:https://github.com/ra1nty/DXcam +[picamera2]:https://github.com/raspberrypi/picamera2 \ No newline at end of file diff --git a/docs/installation.md b/docs/installation.md index 16e72c879..9b34379fc 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -29,7 +29,7 @@ limitations under the License. ## Supported Systems -VidGear is well-tested and supported on the following systems(but not limited to), with [python 3.8+](https://www.python.org/downloads/) and [pip](https://pip.pypa.io/en/stable/installing/#do-i-need-to-install-pip) installed: +VidGear is well-tested and supported on the following systems(but not limited to), with [python 3.8+](https://www.python.org/downloads/) and [pip](https://pip.pypa.io/en/stable/getting-started/) installed: * Any :material-linux: Linux distro released in 2016 or later * :fontawesome-brands-windows: Windows 7 or later diff --git a/docs/installation/pip_install.md b/docs/installation/pip_install.md index ba51b9181..26612ba98 100644 --- a/docs/installation/pip_install.md +++ b/docs/installation/pip_install.md @@ -92,6 +92,8 @@ When installing VidGear with [pip](https://pip.pypa.io/en/stable/installing/), y ``` +  + ### Critical Prerequisites :warning: * #### OpenCV @@ -106,13 +108,14 @@ When installing VidGear with [pip](https://pip.pypa.io/en/stable/installing/), y ??? 
info "Other OpenCV binaries" - OpenCV mainainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). + OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules [`opencv-contrib-python`](https://pypi.org/project/opencv-contrib-python/), and for server (headless) environments like [`opencv-python-headless`](https://pypi.org/project/opencv-python-headless/) and [`opencv-contrib-python-headless`](https://pypi.org/project/opencv-contrib-python-headless/). You can also install ==any one of them== in similar manner. More information can be found [here](https://github.com/opencv/opencv-python#installation-and-usage). ```sh pip install opencv-python ``` +  ### API Specific Prerequisites @@ -125,33 +128,104 @@ When installing VidGear with [pip](https://pip.pypa.io/en/stable/installing/), y * **For WriteGear API's Compression Mode**: Follow this dedicated [**FFmpeg Installation doc**](../../gears/writegear/compression/advanced/ffmpeg_install/) for its installation. * **For StreamGear API**: Follow this dedicated [**FFmpeg Installation doc**](../../gears/streamgear/ffmpeg_install/) for its installation. +  -* #### Picamera +* #### Picamera2 - Required only if you're using Raspberry Pi :fontawesome-brands-raspberry-pi: Camera Modules with its [**PiGear**](../../gears/pigear/overview/) API. 
You can easily install it via pip: + Required only if you're using Raspberry Pi :fontawesome-brands-raspberry-pi: Camera Modules _(or USB webcams)_ with the [**PiGear**](../../gears/pigear/overview/) API. Here's how to install [Picamera2](https://github.com/raspberrypi/picamera2) python library: + ??? tip "Using Legacy `picamera` library with PiGear (`v0.3.3` and above)" - !!! warning "Make sure to [**enable Raspberry Pi hardware-specific settings**](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) prior to using this library, otherwise it won't work." + PiGear API _(version `0.3.3` onwards)_ prioritizes the newer Picamera2 library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. However, if your operating system doesn't support Picamera2, you can still use the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/) library. Here's how to easily install it using pip: - ```sh - pip install picamera - ``` + ```sh + pip install picamera + ``` + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params/#b-user-defined-parameters) user-defined optional parameter boolean attribute." + + + ??? warning "Picamera2 is only supported on Raspberry Pi OS Bullseye (or later) images, both 32 and 64-bit." + + Picamera2 is **NOT** supported on: + + - [ ] Images based on Buster or earlier releases. + - [ ] Raspberry Pi OS Legacy images. + - [ ] Bullseye (or later) images where the legacy camera stack has been re-enabled. + + === "Installation using `apt` (Recommended)" + + ??? success "As of September 2022, Picamera2 is pre-installed on images downloaded from Raspberry Pi. So you don't have to install it manually." + + - [x] On **Raspberry Pi OS images**, Picamera2 is now installed with all the GUI (Qt and OpenGL) dependencies. 
+ - [x] On **Raspberry Pi OS Lite**, it is installed without the GUI dependencies, although preview images can still be displayed using DRM/KMS. If these users wish to use the additional X-Windows GUI features, they will need to run: + + ```sh + sudo apt install -y python3-pyqt5 python3-opengl + ``` + + If Picamera2 is not already installed, then your image is presumably older and you should start with system upgrade: + ```sh + sudo apt update && upgrade + ``` + + !!! failure "If you have installed Picamera2 previously using pip, then you should also uninstall this (`pip3 uninstall picamera2`)." + + Thereafter, you can install Picamera2 with all the GUI (Qt and OpenGL) dependencies using: + + ```sh + sudo apt install -y python3-picamera2 + ``` + + Or, If you **DON'T** want the GUI dependencies, use: + + ```sh + sudo apt install -y python3-picamera2 --no-install-recommends + ``` + + === "Installation using `pip`" + + !!! danger "This is **NOT** the recommended way to install Picamera2." + + However, if you wish to install Picamera2 with all the GUI (Qt and OpenGL) dependencies with pip, use: + + ```sh + sudo apt install -y python3-libcamera python3-kms++ + sudo apt install -y python3-pyqt5 python3-prctl + sudo apt install -y libatlas-base-dev ffmpeg python3-pip + pip3 install numpy --upgrade + pip3 install picamera2[gui] + ``` + + Or, If you **DON'T** want the GUI dependencies, use: + + ```sh + sudo apt install -y python3-libcamera python3-kms++ + sudo apt install -y python3-prctl libatlas-base-dev + sudo apt install -y ffmpeg libopenjp2-7 python3-pip + pip3 install numpy --upgrade + pip3 install picamera2 + ``` + +  * #### Uvloop Required only if you're using the [**NetGear_Async**](../../gears/netgear_async/overview/) API on UNIX machines for maximum performance. You can easily install it via pip: - !!! fail "uvloop is **[NOT yet supported on Windows :fontawesome-brands-windows: Machines](https://github.com/MagicStack/uvloop/issues/14).**" + !!! 
failure "uvloop is **[NOT yet supported on Windows :fontawesome-brands-windows: Machines](https://github.com/MagicStack/uvloop/issues/14).**" ```sh pip install uvloop ``` +  + * #### DXcam Required only if you're using the [**ScreenGear**](../../gears/screengear/overview/) API on Windows machines for better FPS performance. You can easily install it via pip: - !!! fail "FYI, DXcam is **ONLY supported on Windows :fontawesome-brands-windows: Machines.**" + !!! failure "FYI, DXcam is **ONLY supported on Windows :fontawesome-brands-windows: Machines.**" ```sh pip install dxcam @@ -175,7 +249,7 @@ When installing VidGear with [pip](https://pip.pypa.io/en/stable/installing/), y === "Older" - !!! fail "`[core]` keyword isn't available in versions older than `v0.2.4`" + !!! failure "`[core]` keyword isn't available in versions older than `v0.2.4`" ```sh # Install older stable release with all Core dependencies @@ -190,7 +264,7 @@ When installing VidGear with [pip](https://pip.pypa.io/en/stable/installing/), y **Installation is as simple as:** -??? experiment "Installing vidgear with only selective dependencies" +??? example "Installing vidgear with only selective dependencies" Starting with version `v0.2.2`, you can now run any VidGear API by installing only just specific dependencies required by the API in use(except for some Core dependencies). 
@@ -244,7 +318,7 @@ When installing VidGear with [pip](https://pip.pypa.io/en/stable/installing/), y | APIs | Dependencies | |:---:|:---| | CamGear | `yt_dlp` | - | PiGear | `picamera` | + | PiGear | `picamera`, `picamera2` _(see [this](#picamera2) for its installation)_ | | VideoGear | *Based on CamGear or PiGear backend in use* | | ScreenGear | `dxcam`, `mss`, `pyscreenshot`, `Pillow` | | WriteGear | **FFmpeg:** See [this doc ➶](../../gears/writegear/compression/advanced/ffmpeg_install/#ffmpeg-installation-instructions) | @@ -329,10 +403,10 @@ pip install git+git://github.com/abhiTronix/vidgear@master#egg=vidgear[asyncio] ```sh # Install latest stable release with all Core dependencies -pip install vidgear-0.3.2-py3-none-any.whl[core] +pip install vidgear-0.3.3-py3-none-any.whl[core] # Or Install latest stable release with all Core & Asyncio dependencies -pip install vidgear-0.3.2-py3-none-any.whl[asyncio] +pip install vidgear-0.3.3-py3-none-any.whl[asyncio] ```   diff --git a/docs/installation/source_install.md b/docs/installation/source_install.md index 88d49ef82..464416280 100644 --- a/docs/installation/source_install.md +++ b/docs/installation/source_install.md @@ -26,7 +26,7 @@ limitations under the License. ## Prerequisites -When installing VidGear from source, FFmpeg is the only API specific prerequisites you need to install manually: +When installing VidGear from source, following are some API specific prerequisites you may need to install manually: !!! question "What about rest of the prerequisites?" @@ -110,6 +110,85 @@ When installing VidGear from source, FFmpeg is the only API specific prerequisit * **For StreamGear API**: Follow this dedicated [**FFmpeg Installation doc**](../../gears/streamgear/ffmpeg_install/) for its installation. +  + +* #### Picamera2 + + Required only if you're using Raspberry Pi :fontawesome-brands-raspberry-pi: Camera Modules _(or USB webcams)_ with the [**PiGear**](../../gears/pigear/overview/) API. 
Here's how to install [Picamera2](https://github.com/raspberrypi/picamera2) python library: + + ??? tip "Using Legacy `picamera` library with PiGear (`v0.3.3` and above)" + + PiGear API _(version `0.3.3` onwards)_ prioritizes the newer Picamera2 library under the hood for Raspberry Pi :fontawesome-brands-raspberry-pi: camera modules. However, if your operating system doesn't support Picamera2, you can still use the legacy [`picamera`](https://picamera.readthedocs.io/en/release-1.13/) library. Here's how to easily install it using pip: + + ```sh + pip install picamera + ``` + + !!! note "You could also enforce the legacy picamera API backend in PiGear by using the [`enforce_legacy_picamera`](../../gears/pigear/params/#b-user-defined-parameters) user-defined optional parameter boolean attribute." + + + ??? warning "Picamera2 is only supported on Raspberry Pi OS Bullseye (or later) images, both 32 and 64-bit." + + Picamera2 is **NOT** supported on: + + - [ ] Images based on Buster or earlier releases. + - [ ] Raspberry Pi OS Legacy images. + - [ ] Bullseye (or later) images where the legacy camera stack has been re-enabled. + + === "Installation using `apt` (Recommended)" + + ??? success "As of September 2022, Picamera2 is pre-installed on images downloaded from Raspberry Pi. So you don't have to install it manually." + + - [x] On **Raspberry Pi OS images**, Picamera2 is now installed with all the GUI (Qt and OpenGL) dependencies. + - [x] On **Raspberry Pi OS Lite**, it is installed without the GUI dependencies, although preview images can still be displayed using DRM/KMS. If these users wish to use the additional X-Windows GUI features, they will need to run: + + ```sh + sudo apt install -y python3-pyqt5 python3-opengl + ``` + + If Picamera2 is not already installed, then your image is presumably older and you should start with system upgrade: + ```sh + sudo apt update && upgrade + ``` + + !!! 
failure "If you have installed Picamera2 previously using pip, then you should also uninstall this (`pip3 uninstall picamera2`)." + + Thereafter, you can install Picamera2 with all the GUI (Qt and OpenGL) dependencies using: + + ```sh + sudo apt install -y python3-picamera2 + ``` + + Or, If you **DON'T** want the GUI dependencies, use: + + ```sh + sudo apt install -y python3-picamera2 --no-install-recommends + ``` + + === "Installation using `pip`" + + !!! danger "This is **NOT** the recommended way to install Picamera2." + + However, if you wish to install Picamera2 with all the GUI (Qt and OpenGL) dependencies with pip, use: + + ```sh + sudo apt install -y python3-libcamera python3-kms++ + sudo apt install -y python3-pyqt5 python3-prctl + sudo apt install -y libatlas-base-dev ffmpeg python3-pip + pip3 install numpy --upgrade + pip3 install picamera2[gui] + ``` + + Or, If you **DON'T** want the GUI dependencies, use: + + ```sh + sudo apt install -y python3-libcamera python3-kms++ + sudo apt install -y python3-prctl libatlas-base-dev + sudo apt install -y ffmpeg libopenjp2-7 python3-pip + pip3 install numpy --upgrade + pip3 install picamera2 + ``` +   @@ -123,7 +202,7 @@ When installing VidGear from source, FFmpeg is the only API specific prerequisit !!! danger "DO NOT clone or install any other branch other than `testing` unless advised, as it is not tested with CI environments and possibly very unstable or unusable." -??? experiment "Installing vidgear with only selective dependencies" +??? example "Installing vidgear with only selective dependencies" Starting with version `v0.2.2`, you can now run any VidGear API by installing only just specific dependencies required by the API in use(except for some Core dependencies). 
@@ -159,7 +238,7 @@ When installing VidGear from source, FFmpeg is the only API specific prerequisit | APIs | Dependencies | |:---:|:---| | CamGear | `yt_dlp` | - | PiGear | `picamera` | + | PiGear | `picamera`, `picamera2` _(see [this](#picamera2) for its installation)_ | | VideoGear | *Based on CamGear or PiGear backend in use* | | ScreenGear | `dxcam`, `mss`, `pyscreenshot`, `Pillow` | | WriteGear | **FFmpeg:** See [this doc ➶](https://abhitronix.github.io/vidgear/dev/gears/writegear/compression/advanced/ffmpeg_install/#ffmpeg-installation-instructions) | diff --git a/docs/overrides/assets/images/screengear_error11.png b/docs/overrides/assets/images/screengear_error11.png new file mode 100644 index 000000000..0728f9e40 Binary files /dev/null and b/docs/overrides/assets/images/screengear_error11.png differ diff --git a/docs/overrides/assets/stylesheets/custom.css b/docs/overrides/assets/stylesheets/custom.css index 18580d4c6..98a3be818 100755 --- a/docs/overrides/assets/stylesheets/custom.css +++ b/docs/overrides/assets/stylesheets/custom.css @@ -21,209 +21,58 @@ limitations under the License. 
:root { --md-admonition-icon--new: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M13 2V3H12V9H11V10H9V11H8V12H7V13H5V12H4V11H3V9H2V15H3V16H4V17H5V18H6V22H8V21H7V20H8V19H9V18H10V19H11V22H13V21H12V17H13V16H14V15H15V12H16V13H17V11H15V9H20V8H17V7H22V3H21V2M14 3H15V4H14Z' /%3E%3C/svg%3E"); --md-admonition-icon--alert: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'%3E%3Cpath d='M23 7v6h-2V7m0 8h2v2h-2M12 2a2 2 0 0 0-2 2 2 2 0 0 0 0 .29C7.12 5.14 5 7.82 5 11v6l-2 2v1h18v-1l-2-2v-6c0-3.18-2.12-5.86-5-6.71A2 2 0 0 0 14 4a2 2 0 0 0-2-2m-2 19a2 2 0 0 0 2 2 2 2 0 0 0 2-2Z'/%3E%3C/svg%3E"); - --md-admonition-icon--xquote: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M20 2H4C2.9 2 2 2.9 2 4V16C2 17.1 2.9 18 4 18H8V21C8 21.6 8.4 22 9 22H9.5C9.7 22 10 21.9 10.2 21.7L13.9 18H20C21.1 18 22 17.1 22 16V4C22 2.9 21.1 2 20 2M11 13H7V8.8L8.3 6H10.3L8.9 9H11V13M17 13H13V8.8L14.3 6H16.3L14.9 9H17V13Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xwarning: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath 
d='M23,12L20.56,9.22L20.9,5.54L17.29,4.72L15.4,1.54L12,3L8.6,1.54L6.71,4.72L3.1,5.53L3.44,9.21L1,12L3.44,14.78L3.1,18.47L6.71,19.29L8.6,22.47L12,21L15.4,22.46L17.29,19.28L20.9,18.46L20.56,14.78L23,12M13,17H11V15H13V17M13,13H11V7H13V13Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xdanger: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M12,2A9,9 0 0,0 3,11C3,14.03 4.53,16.82 7,18.47V22H9V19H11V22H13V19H15V22H17V18.46C19.47,16.81 21,14 21,11A9,9 0 0,0 12,2M8,11A2,2 0 0,1 10,13A2,2 0 0,1 8,15A2,2 0 0,1 6,13A2,2 0 0,1 8,11M16,11A2,2 0 0,1 18,13A2,2 0 0,1 16,15A2,2 0 0,1 14,13A2,2 0 0,1 16,11M12,14L13.5,17H10.5L12,14Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xtip: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M12,6A6,6 0 0,1 18,12C18,14.22 16.79,16.16 15,17.2V19A1,1 0 0,1 14,20H10A1,1 0 0,1 9,19V17.2C7.21,16.16 6,14.22 6,12A6,6 0 0,1 12,6M14,21V22A1,1 0 0,1 13,23H11A1,1 0 0,1 10,22V21H14M20,11H23V13H20V11M1,11H4V13H1V11M13,1V4H11V1H13M4.92,3.5L7.05,5.64L5.63,7.05L3.5,4.93L4.92,3.5M16.95,5.63L19.07,3.5L20.5,4.93L18.37,7.05L16.95,5.63Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xfail: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 
24'%3E%3Cpath fill='%23000000' d='M8.27,3L3,8.27V15.73L8.27,21H15.73L21,15.73V8.27L15.73,3M8.41,7L12,10.59L15.59,7L17,8.41L13.41,12L17,15.59L15.59,17L12,13.41L8.41,17L7,15.59L10.59,12L7,8.41' /%3E%3C/svg%3E"); - --md-admonition-icon--xsuccess: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M13.13 22.19L11.5 18.36C13.07 17.78 14.54 17 15.9 16.09L13.13 22.19M5.64 12.5L1.81 10.87L7.91 8.1C7 9.46 6.22 10.93 5.64 12.5M21.61 2.39C21.61 2.39 16.66 .269 11 5.93C8.81 8.12 7.5 10.53 6.65 12.64C6.37 13.39 6.56 14.21 7.11 14.77L9.24 16.89C9.79 17.45 10.61 17.63 11.36 17.35C13.5 16.53 15.88 15.19 18.07 13C23.73 7.34 21.61 2.39 21.61 2.39M14.54 9.46C13.76 8.68 13.76 7.41 14.54 6.63S16.59 5.85 17.37 6.63C18.14 7.41 18.15 8.68 17.37 9.46C16.59 10.24 15.32 10.24 14.54 9.46M8.88 16.53L7.47 15.12L8.88 16.53M6.24 22L9.88 18.36C9.54 18.27 9.21 18.12 8.91 17.91L4.83 22H6.24M2 22H3.41L8.18 17.24L6.76 15.83L2 20.59V22M2 19.17L6.09 15.09C5.88 14.79 5.73 14.47 5.64 14.12L2 17.76V19.17Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xexample: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M5,9.5L7.5,14H2.5L5,9.5M3,4H7V8H3V4M5,20A2,2 0 0,0 7,18A2,2 0 0,0 5,16A2,2 0 0,0 3,18A2,2 0 0,0 5,20M9,5V7H21V5H9M9,19H21V17H9V19M9,13H21V11H9V13Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xquestion: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M20 4H18V3H20.5C20.78 3 21 3.22 21 3.5V5.5C21 5.78 20.78 6 20.5 6H20V7H19V5H20V4M19 9H20V8H19V9M17 3H16V7H17V3M23 15V18C23 18.55 22.55 19 22 19H21V20C21 21.11 20.11 22 19 22H5C3.9 22 3 21.11 3 20V19H2C1.45 19 1 18.55 1 18V15C1 14.45 1.45 14 2 14H3C3 10.13 6.13 7 10 7H11V5.73C10.4 5.39 10 4.74 10 4C10 2.9 10.9 2 12 2S14 2.9 14 4C14 4.74 13.6 5.39 13 5.73V7H14C14.34 7 14.67 7.03 15 7.08V10H19.74C20.53 11.13 21 12.5 21 14H22C22.55 14 23 14.45 23 15M10 15.5C10 14.12 8.88 13 7.5 13S5 14.12 5 15.5 6.12 18 7.5 18 10 16.88 10 15.5M19 15.5C19 14.12 17.88 13 16.5 13S14 14.12 14 15.5 15.12 18 16.5 18 19 16.88 19 15.5M17 8H16V9H17V8Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xbug: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M13 2V7.08A5.47 5.47 0 0 0 12 7A5.47 5.47 0 0 0 11 7.08V2M16.9 15A5 5 0 0 1 16.73 15.55L20 17.42V22H18V18.58L15.74 17.29A4.94 4.94 0 0 1 8.26 17.29L6 18.58V22H4V17.42L7.27 15.55A5 5 0 0 1 7.1 15H5.3L2.55 16.83L1.45 15.17L4.7 13H7.1A5 5 0 0 1 7.37 12.12L5.81 11.12L2.24 12L1.76 10L6.19 8.92L8.5 10.45A5 5 0 0 1 15.5 10.45L17.77 8.92L22.24 10L21.76 12L18.19 11.11L16.63 12.11A5 5 0 0 1 16.9 13H19.3L22.55 15.16L21.45 16.82L18.7 15M11 14A1 1 0 1 0 10 15A1 1 0 0 0 11 14M15 14A1 1 0 1 0 14 15A1 1 0 0 0 15 14Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xabstract: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg 
xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M3,3H21V5H3V3M3,7H15V9H3V7M3,11H21V13H3V11M3,15H15V17H3V15M3,19H21V21H3V19Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xnote: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M20.71,7.04C20.37,7.38 20.04,7.71 20.03,8.04C20,8.36 20.34,8.69 20.66,9C21.14,9.5 21.61,9.95 21.59,10.44C21.57,10.93 21.06,11.44 20.55,11.94L16.42,16.08L15,14.66L19.25,10.42L18.29,9.46L16.87,10.87L13.12,7.12L16.96,3.29C17.35,2.9 18,2.9 18.37,3.29L20.71,5.63C21.1,6 21.1,6.65 20.71,7.04M3,17.25L12.56,7.68L16.31,11.43L6.75,21H3V17.25Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xinfo: url("data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'%3E%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' version='1.1' width='24' height='24' viewBox='0 0 24 24'%3E%3Cpath fill='%23000000' d='M18 2H12V9L9.5 7.5L7 9V2H6C4.9 2 4 2.9 4 4V20C4 21.1 4.9 22 6 22H18C19.1 22 20 21.1 20 20V4C20 2.89 19.1 2 18 2M17.68 18.41C17.57 18.5 16.47 19.25 16.05 19.5C15.63 19.79 14 20.72 14.26 18.92C14.89 15.28 16.11 13.12 14.65 14.06C14.27 14.29 14.05 14.43 13.91 14.5C13.78 14.61 13.79 14.6 13.68 14.41S13.53 14.23 13.67 14.13C13.67 14.13 15.9 12.34 16.72 12.28C17.5 12.21 17.31 13.17 17.24 13.61C16.78 15.46 15.94 18.15 16.07 18.54C16.18 18.93 17 18.31 17.44 18C17.44 18 17.5 17.93 17.61 18.05C17.72 18.22 17.83 18.3 17.68 18.41M16.97 11.06C16.4 11.06 15.94 10.6 15.94 10.03C15.94 9.46 16.4 9 16.97 9C17.54 9 18 9.46 18 10.03C18 10.6 17.54 
11.06 16.97 11.06Z' /%3E%3C/svg%3E"); - --md-admonition-icon--xadvance: url("data:image/svg+xml,%3C%3Fxml version='1.0' standalone='no'%3F%3E%3C!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 20010904//EN' 'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd'%3E%3Csvg version='1.0' xmlns='http://www.w3.org/2000/svg' width='512.000000pt' height='512.000000pt' viewBox='0 0 512.000000 512.000000' preserveAspectRatio='xMidYMid meet'%3E%3Cg transform='translate(0.000000,512.000000) scale(0.100000,-0.100000)'%0Afill='%23000000' stroke='none'%3E%3Cpath d='M2372 5103 c-39 -19 -70 -59 -78 -102 -12 -57 4 -98 63 -162 l57 -60%0A-280 -278 -279 -278 -774 -329 c-426 -181 -797 -342 -824 -359 -57 -34 -111%0A-105 -128 -168 -17 -62 -7 -173 20 -227 16 -32 215 -237 679 -702 590 -590%0A663 -660 717 -684 144 -66 328 -6 396 129 12 22 167 391 345 821 l324 781 274%0A274 275 275 60 -57 c41 -37 73 -58 96 -63 130 -24 225 99 166 214 -24 46 -934%0A954 -978 976 -41 20 -89 20 -131 -1z m421 -701 l157 -157 -263 -263 -262 -262%0A-488 2 -488 3 293 124 293 124 295 293 c162 162 297 294 300 294 3 0 76 -71%0A163 -158z m-545 -999 c-3 -10 -132 -325 -288 -701 l-284 -683 -628 628 c-345%0A345 -628 631 -628 634 0 3 69 35 153 72 l152 66 764 0 c722 1 763 0 759 -16z'/%3E%3Cpath d='M3742 4560 c-42 -26 -76 -99 -68 -144 13 -68 65 -121 126 -126 62 -5%0A82 1 123 39 41 39 42 41 42 106 0 65 -1 67 -42 106 -38 35 -47 39 -95 39 -36%0A0 -64 -7 -86 -20z'/%3E%3Cpath d='M3980 3922 c-104 -57 -108 -201 -8 -262 95 -58 222 17 222 130 0 110%0A-120 184 -214 132z'/%3E%3Cpath d='M3052 3270 c-99 -61 -95 -203 8 -257 34 -18 59 -23 117 -23 l73 0 0%0A-419 0 -419 -57 -27 c-275 -135 -509 -401 -617 -703 -87 -243 -91 -546 -10%0A-799 33 -105 104 -246 170 -339 61 -87 217 -244 266 -267 33 -16 94 -17 753%0A-17 658 0 720 1 753 17 51 24 206 182 269 273 151 221 223 454 223 719 0 261%0A-68 485 -213 701 -102 152 -281 313 -448 403 l-79 42 0 418 0 417 62 0 c81 0%0A137 27 166 79 40 70 20 154 -47 198 l-34 23 -661 0 c-642 0 -662 -1 -694 -20z%0Am908 -763 c0 -600 
-16 -546 182 -636 201 -91 332 -212 443 -407 25 -43 45 -82%0A45 -86 0 -5 -394 -8 -875 -8 -481 0 -875 2 -875 5 0 3 12 29 26 58 97 191 249%0A338 446 432 53 25 100 45 105 45 20 0 75 70 84 106 4 22 9 249 9 507 l0 467%0A205 0 205 0 0 -483z m737 -1554 c-10 -227 -95 -428 -251 -591 l-59 -62 -632 0%0A-632 0 -60 63 c-156 162 -240 359 -250 585 l-6 122 948 0 948 0 -6 -117z'/%3E%3C/g%3E%3C/svg%3E%0A"); -} - - -/* "Advance" admonition*/ -.md-typeset .admonition.advance, -.md-typeset details.advance { - border-color: rgb(27, 77, 62); -} - -.md-typeset .advance > .admonition-title, -.md-typeset .advance > summary, -.md-typeset .experiment > .admonition-title, -.md-typeset .experiment > summary { - background-color: rgba(0, 57, 166, 0.1); - border-color: rgb(0, 57, 166); -} - -.md-typeset .advance > .admonition-title::before, -.md-typeset .advance > summary::before, -.md-typeset .experiment > .admonition-title::before, -.md-typeset .experiment > summary::before { - background-color: rgb(0, 57, 166); - -webkit-mask-image: var(--md-admonition-icon--xadvance); - mask-image: var(--md-admonition-icon--xadvance); } /* "New" admonition*/ .md-typeset .admonition.new, .md-typeset details.new { - border-color: rgb(57,255,20); + border-color: rgb(57, 255, 20); } -.md-typeset .new > .admonition-title, -.md-typeset .new > summary { - background-color: rgb(57,255,20,0.1); - border-color: rgb(57,255,20); +.md-typeset .new>.admonition-title, +.md-typeset .new>summary { + background-color: rgb(57, 255, 20, 0.1); } -.md-typeset .new > .admonition-title::before, -.md-typeset .new > summary::before { - background-color: rgb(57,255,20); +.md-typeset .new>.admonition-title::before, +.md-typeset .new>summary::before { + background-color: rgb(57, 255, 20); -webkit-mask-image: var(--md-admonition-icon--new); mask-image: var(--md-admonition-icon--new); } - /* "Alert" admonition*/ .md-typeset .admonition.alert, .md-typeset details.alert { border-color: rgb(255, 0, 255); } -.md-typeset .alert > 
.admonition-title, -.md-typeset .alert > summary { +.md-typeset .alert>.admonition-title, +.md-typeset .alert>summary { background-color: rgba(255, 0, 255, 0.1); - border-color: rgb(255, 0, 255); } -.md-typeset .alert > .admonition-title::before, -.md-typeset .alert > summary::before { +.md-typeset .alert>.admonition-title::before, +.md-typeset .alert>summary::before { background-color: rgb(255, 0, 255); -webkit-mask-image: var(--md-admonition-icon--alert); mask-image: var(--md-admonition-icon--alert); } -/* Custom "Warning" admonition*/ -.md-typeset .attention > .admonition-title::before, -.md-typeset .attention > summary::before, -.md-typeset .caution > .admonition-title::before, -.md-typeset .caution > summary::before, -.md-typeset .warning > .admonition-title::before, -.md-typeset .warning > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xwarning); - mask-image: var(--md-admonition-icon--xwarning); -} - -/* Custom "Tip" admonition*/ -.md-typeset .hint > .admonition-title::before, -.md-typeset .hint > summary::before, -.md-typeset .important > .admonition-title::before, -.md-typeset .important > summary::before, -.md-typeset .tip > .admonition-title::before, -.md-typeset .tip > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xtip) !important; - mask-image: var(--md-admonition-icon--xtip) !important; -} - -/* Custom "Info" admonition*/ -.md-typeset .info > .admonition-title::before, -.md-typeset .info > summary::before, -.md-typeset .todo > .admonition-title::before, -.md-typeset .todo > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xinfo); - mask-image: var(--md-admonition-icon--xinfo); -} - -/* Custom "Danger" admonition*/ -.md-typeset .danger > .admonition-title::before, -.md-typeset .danger > summary::before, -.md-typeset .error > .admonition-title::before, -.md-typeset .error > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xdanger); - mask-image: var(--md-admonition-icon--xdanger); 
-} - -/* Custom "Note" admonition*/ -.md-typeset .note > .admonition-title::before, -.md-typeset .note > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xnote); - mask-image: var(--md-admonition-icon--xnote); -} - -/* Custom "Abstract" admonition*/ -.md-typeset .abstract > .admonition-title::before, -.md-typeset .abstract > summary::before, -.md-typeset .summary > .admonition-title::before, -.md-typeset .summary > summary::before, -.md-typeset .tldr > .admonition-title::before, -.md-typeset .tldr > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xabstract); - mask-image: var(--md-admonition-icon--xabstract); -} - -/* Custom "Question" admonition*/ -.md-typeset .faq > .admonition-title::before, -.md-typeset .faq > summary::before, -.md-typeset .help > .admonition-title::before, -.md-typeset .help > summary::before, -.md-typeset .question > .admonition-title::before, -.md-typeset .question > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xquestion); - mask-image: var(--md-admonition-icon--xquestion); -} - -/* Custom "Success" admonition*/ -.md-typeset .check > .admonition-title::before, -.md-typeset .check > summary::before, -.md-typeset .done > .admonition-title::before, -.md-typeset .done > summary::before, -.md-typeset .success > .admonition-title::before, -.md-typeset .success > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xsuccess); - mask-image: var(--md-admonition-icon--xsuccess); -} - -/* Custom "Fail" admonition*/ -.md-typeset .fail > .admonition-title::before, -.md-typeset .fail > summary::before, -.md-typeset .failure > .admonition-title::before, -.md-typeset .failure > summary::before, -.md-typeset .missing > .admonition-title::before, -.md-typeset .missing > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xfail); - mask-image: var(--md-admonition-icon--xfail); -} - -/* Custom "bug" admonition*/ -.md-typeset .bug > .admonition-title::before, -.md-typeset .bug > 
summary::before { - -webkit-mask-image: var(--md-admonition-icon--xbug); - mask-image: var(--md-admonition-icon--xbug); -} - -/* Custom "Example" admonition*/ -.md-typeset .example > .admonition-title::before, -.md-typeset .example > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xexample); - mask-image: var(--md-admonition-icon--xexample); -} - -/* Custom "Summary" admonition*/ -.md-typeset .cite > .admonition-title::before, -.md-typeset .cite > summary::before, -.md-typeset .quote > .admonition-title::before, -.md-typeset .quote > summary::before { - -webkit-mask-image: var(--md-admonition-icon--xquote); - mask-image: var(--md-admonition-icon--xquote); -} - - -/* Handles DeFFcode UI */ -.md-nav__item--active > .md-nav__link { + + +/* Handles UI */ +.md-nav__item--active>.md-nav__link { font-weight: bold; } + .center { display: block; margin-left: auto; margin-right: auto; width: 80%; } + .doc-heading { padding-top: 50px; } @@ -261,18 +110,18 @@ limitations under the License. 
/* Handles Gitter Sidecard UI */ .gitter-open-chat-button { - background-color: var(--md-primary-fg-color) !important; - font-family: inherit !important; - font-size: 12px; - -webkit-filter: none !important; - filter: none !important; + background-color: var(--md-primary-fg-color) !important; + font-family: inherit !important; + font-size: 12px; + -webkit-filter: none !important; + filter: none !important; } /* Custom Blockquotes */ blockquote { padding: 0.5em 10px; - quotes: "\201C""\201D""\2018""\2019"; + quotes: "\201C" "\201D" "\2018" "\2019"; } blockquote:before { @@ -414,62 +263,73 @@ footer.sponsorship:not(:hover) .twemoji.heart-throb-hover svg { /* Heart Animation */ @keyframes heart { - 0%, 40%, 80%, 100% { - transform: scale(1); - } - 20%, 60% { - transform: scale(1.15); - } + + 0%, + 40%, + 80%, + 100% { + transform: scale(1); + } + + 20%, + 60% { + transform: scale(1.15); + } } + .heart { - animation: heart 1000ms infinite; + animation: heart 1000ms infinite; } /* Footer Sponsorship Block */ footer.sponsorship { - text-align: center; + text-align: center; } - footer.sponsorship hr { - display: inline-block; - width: px2rem(32px); - margin: 0 px2rem(14px); - vertical-align: middle; - border-bottom: 2px solid var(--md-default-fg-color--lighter); + +footer.sponsorship hr { + display: inline-block; + width: px2rem(32px); + margin: 0 px2rem(14px); + vertical-align: middle; + border-bottom: 2px solid var(--md-default-fg-color--lighter); } - footer.sponsorship:hover hr { - border-color: var(--md-accent-fg-color); + +footer.sponsorship:hover hr { + border-color: var(--md-accent-fg-color); } - footer.sponsorship:not(:hover) .twemoji.heart-throb-hover svg { - color: var(--md-default-fg-color--lighter) !important; + +footer.sponsorship:not(:hover) .twemoji.heart-throb-hover svg { + color: var(--md-default-fg-color--lighter) !important; } /* Dark Theme Changes */ [data-md-color-scheme="slate"] { - --md-hue: 260; + --md-hue: 260; } 
-body[data-md-color-scheme="slate"] img[class="shadow"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + +body[data-md-color-scheme="slate"] img[class="shadow"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] div[class="btn-container"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); +body[data-md-color-scheme="slate"] div[class="btn-container"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] div[class="highlight"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); +body[data-md-color-scheme="slate"] div[class="highlight"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] div[class^="admonition"]{ - -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); - filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); +body[data-md-color-scheme="slate"] div[class^="admonition"] { + -webkit-filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); + filter: drop-shadow(2px 2px 1px rgba(0, 0, 0, 0.5)); } -body[data-md-color-scheme="slate"] img[class="shadow2"]{ - -webkit-filter: drop-shadow(1px 1px 0 black); - filter: drop-shadow(1px 1px 0 black); +body[data-md-color-scheme="slate"] img[class="shadow2"] { + -webkit-filter: drop-shadow(1px 1px 0 black); + filter: drop-shadow(1px 1px 0 black); } \ No newline at end of file diff --git a/docs/overrides/hooks/js_hook.py b/docs/overrides/hooks/js_hook.py new file mode 100644 index 000000000..8f0175307 --- /dev/null +++ b/docs/overrides/hooks/js_hook.py @@ -0,0 +1,45 @@ +""" 
+=============================================== +vidgear library source-code is deployed under the Apache 2.0 License: + +Copyright (c) 2019 Abhishek Thakur(@abhiTronix) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=============================================== +""" + +from mkdocs.config.defaults import MkDocsConfig +from mkdocs.structure.pages import Page + +js_scripts = """ + + + + +""" + + +# Add per-file custom javascripts. +def on_page_markdown(markdown: str, *, page: Page, config: MkDocsConfig, files): + if not ( + page.file.src_uri + in ["gears/stabilizer/overview.md", "gears/streamgear/introduction.md"] + ): + return + + # Replace markdown + js scripts + comment, content = markdown.split("-->") + modified_markdown = comment + "-->\n" + js_scripts + content + + # Return modified + return modified_markdown diff --git a/docs/overrides/main.html b/docs/overrides/main.html index 5a5377738..70bfe924d 100644 --- a/docs/overrides/main.html +++ b/docs/overrides/main.html @@ -1,60 +1,49 @@ {% extends "base.html" %} + {% block extrahead %} - {% set title = config.site_name %} - {% if page and page.meta and page.meta.title %} - {% set title = title ~ " - " ~ page.meta.title %} - {% elif page and page.title and not page.is_homepage %} - {% set title = title ~ " - " ~ page.title | striptags %} - {% endif %} - {% set image = config.site_url ~ '/assets/images/banner_link.png' %} - - - - - - - - - - - - - - +{% set title = config.site_name %} +{% if page and page.meta and page.meta.title %} 
+{% set title = title ~ " - " ~ page.meta.title %} +{% elif page and page.title and not page.is_homepage %} +{% set title = title ~ " - " ~ page.title %} +{% endif %} +{% set image = config.site_url ~ '/assets/images/banner_link.png' %} + + + + + + + + {% endblock %} + {% block announce %} - - {% include ".icons/material/message-alert.svg" %} Hey, checkout our new Deffcode library which will added as vidgear backend soon. We’d love to hear your feedback! + + {% include ".icons/fontawesome/solid/bullhorn.svg" %} We're excited to announce our + new Deffcode + library, + which will be integrated with VidGear soon. We value your feedback and would love to hear your thoughts! {% endblock %} + {% block outdated %} - You're not viewing the latest version. - - Click here to go to latest. - +You're not viewing the latest version. + + Click here to go to latest. + {% endblock %} + {% block content %} - {{ super() }} - -{% endblock %} -{% block libs %} - - - - - - +{{ super() }} + {% endblock %}