From 23af1209cc702bbd65faf115a90a828fe44f0e27 Mon Sep 17 00:00:00 2001 From: abhiTronix Date: Sun, 23 Jun 2024 16:45:55 +0000 Subject: [PATCH] Deployed 2096923 to v0.2.6-dev with MkDocs 1.6.0 and mike 2.1.1 --- dev/reference/ffhelper/index.html | 29 +- dev/reference/sourcer/index.html | 1759 +++++++++++----------- dev/search/search_index.json | 2 +- v0.2.6-dev/reference/ffhelper/index.html | 29 +- v0.2.6-dev/reference/sourcer/index.html | 1759 +++++++++++----------- v0.2.6-dev/search/search_index.json | 2 +- 6 files changed, 1816 insertions(+), 1764 deletions(-) diff --git a/dev/reference/ffhelper/index.html b/dev/reference/ffhelper/index.html index efb61bc..dd26d31 100644 --- a/dev/reference/ffhelper/index.html +++ b/dev/reference/ffhelper/index.html @@ -461,21 +461,20 @@ supported_protocols = splitted[splitted.index("Output:") + 1 : len(splitted) - 1] # RTSP is a demuxer somehow # support both RTSP and RTSPS(over SSL) - logger.critical(get_supported_demuxers(path)) - supported_protocols += ( - ["rtsp", "rtsps"] if "rtsp" in get_supported_demuxers(path) else [] - ) - # Test and return result whether scheme is supported - if extracted_scheme_url and extracted_scheme_url in supported_protocols: - verbose and logger.debug( - "URL scheme `{}` is supported by FFmpeg.".format(extracted_scheme_url) - ) - return True - else: - verbose and logger.warning( - "URL scheme `{}` isn't supported by FFmpeg!".format(extracted_scheme_url) - ) - return False + supported_protocols += ( + ["rtsp", "rtsps"] if "rtsp" in get_supported_demuxers(path) else [] + ) + # Test and return result whether scheme is supported + if extracted_scheme_url and extracted_scheme_url in supported_protocols: + verbose and logger.debug( + "URL scheme `{}` is supported by FFmpeg.".format(extracted_scheme_url) + ) + return True + else: + verbose and logger.warning( + "URL scheme `{}` isn't supported by FFmpeg!".format(extracted_scheme_url) + ) + return False

 

check_sp_output

Returns FFmpeg stdout output from subprocess module.

Parameters:

Name Type Description Default
args based on input

Non Keyword Arguments

()
kwargs based on input

Keyword Arguments

{}

Returns: A string value.

Source code in deffcode/ffhelper.py
def check_sp_output(*args, **kwargs):
     """
     ## check_sp_output
diff --git a/dev/reference/sourcer/index.html b/dev/reference/sourcer/index.html
index 1f292f6..98fc885 100644
--- a/dev/reference/sourcer/index.html
+++ b/dev/reference/sourcer/index.html
@@ -53,756 +53,781 @@
 
         # sanitize sourcer_params
         self.__sourcer_params = {
-            str(k).strip(): str(v).strip()
-            if not isinstance(v, (dict, list, int, float, tuple))
-            else v
-            for k, v in sourcer_params.items()
-        }
-
-        # handle whether to force validate source
-        self.__forcevalidatesource = self.__sourcer_params.pop(
-            "-force_validate_source", False
-        )
-        if not isinstance(self.__forcevalidatesource, bool):
-            # reset improper values
-            self.__forcevalidatesource = False
-
-        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
-        self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
-        if not isinstance(self.__ffmpeg_prefixes, list):
-            # log it
-            logger.warning(
-                "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
-                    type(self.__ffmpeg_prefixes).__name__
-                )
-            )
-            # reset improper values
-            self.__ffmpeg_prefixes = []
-
-        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
-        __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
-        if not isinstance(__ffmpeg_download_path, str):
-            # reset improper values
-            __ffmpeg_download_path = ""
-
-        # validate the FFmpeg assets and return location (also downloads static assets on windows)
-        self.__ffmpeg = get_valid_ffmpeg_path(
-            str(custom_ffmpeg),
-            True if self.__machine_OS == "Windows" else False,
-            ffmpeg_download_path=__ffmpeg_download_path,
-            verbose=self.__verbose_logs,
-        )
-
-        # check if valid FFmpeg path returned
-        if self.__ffmpeg:
-            self.__verbose_logs and logger.debug(
-                "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
-            )
-        else:
-            # else raise error
-            raise RuntimeError(
-                "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
-            )
-
-        # sanitize externally accessible parameters and assign them
-        # handles source demuxer
-        if source is None:
-            # first check if source value is empty
-            # raise error if true
-            raise ValueError("Input `source` parameter is empty!")
-        elif isinstance(source_demuxer, str):
-            # assign if valid demuxer value
-            self.__source_demuxer = source_demuxer.strip().lower()
+            str(k).strip(): (
+                str(v).strip()
+                if not isinstance(v, (dict, list, int, float, tuple))
+                else v
+            )
+            for k, v in sourcer_params.items()
+        }
+
+        # handle whether to force validate source
+        self.__forcevalidatesource = self.__sourcer_params.pop(
+            "-force_validate_source", False
+        )
+        if not isinstance(self.__forcevalidatesource, bool):
+            # reset improper values
+            self.__forcevalidatesource = False
+
+        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
+        self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
+        if not isinstance(self.__ffmpeg_prefixes, list):
+            # log it
+            logger.warning(
+                "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
+                    type(self.__ffmpeg_prefixes).__name__
+                )
+            )
+            # reset improper values
+            self.__ffmpeg_prefixes = []
+
+        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
+        __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
+        if not isinstance(__ffmpeg_download_path, str):
+            # reset improper values
+            __ffmpeg_download_path = ""
+
+        # validate the FFmpeg assets and return location (also downloads static assets on windows)
+        self.__ffmpeg = get_valid_ffmpeg_path(
+            str(custom_ffmpeg),
+            True if self.__machine_OS == "Windows" else False,
+            ffmpeg_download_path=__ffmpeg_download_path,
+            verbose=self.__verbose_logs,
+        )
+
+        # check if valid FFmpeg path returned
+        if self.__ffmpeg:
+            self.__verbose_logs and logger.debug(
+                "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
+            )
+        else:
+            # else raise error
+            raise RuntimeError(
+                "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
+            )
+
+        # sanitize externally accessible parameters and assign them
+        # handles source demuxer
+        if source is None:
+            # first check if source value is empty
+            # raise error if true
+            raise ValueError("Input `source` parameter is empty!")
+        elif isinstance(source_demuxer, str):
             # assign if valid demuxer value
-            assert self.__source_demuxer != "auto" or validate_device_index(
-                source
-            ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+            self.__source_demuxer = source_demuxer.strip().lower()
+            # assign if valid demuxer value
+            assert self.__source_demuxer != "auto" or validate_device_index(
                 source
-            )
-        else:
-            # otherwise find valid default source demuxer value
-            # enforce "auto" if valid index device
-            self.__source_demuxer = "auto" if validate_device_index(source) else None
-            # log if not valid index device and invalid type
-            self.__verbose_logs and not self.__source_demuxer in [
-                "auto",
-                None,
-            ] and logger.warning(
-                "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
-                    type(source_demuxer).__name__
-                )
-            )
-            # log if not valid index device and invalid type
-            self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
-                "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
-                    source
-                )
-            )
-
-        # handles source stream
-        self.__source = source
-
-        # creates shallow copy for further usage #TODO
-        self.__source_org = copy.copy(self.__source)
-        self.__source_demuxer_org = copy.copy(self.__source_demuxer)
-
-        # handles all extracted devices names/paths list
-        # when source_demuxer = "auto"
-        self.__extracted_devices_list = []
-
-        # various source stream params
-        self.__default_video_resolution = ""  # handles stream resolution
-        self.__default_video_orientation = ""  # handles stream's video orientation
-        self.__default_video_framerate = ""  # handles stream framerate
-        self.__default_video_bitrate = ""  # handles stream's video bitrate
-        self.__default_video_pixfmt = ""  # handles stream's video pixfmt
-        self.__default_video_decoder = ""  # handles stream's video decoder
-        self.__default_source_duration = ""  # handles stream's video duration
-        self.__approx_video_nframes = ""  # handles approx stream frame number
-        self.__default_audio_bitrate = ""  # handles stream's audio bitrate
-        self.__default_audio_samplerate = ""  # handles stream's audio samplerate
-
-        # handle various stream flags
-        self.__contains_video = False  # contains video
-        self.__contains_audio = False  # contains audio
-        self.__contains_images = False  # contains image-sequence
-
-        # handles output parameters through filters
-        self.__metadata_output = None  # handles output stream metadata
-        self.__output_frames_resolution = ""  # handles output stream resolution
-        self.__output_framerate = ""  # handles output stream framerate
-        self.__output_frames_pixfmt = ""  # handles output frame pixel format
-        self.__output_orientation = ""  # handles output frame orientation
-
-        # check whether metadata probed or not?
-        self.__metadata_probed = False
-
-    def probe_stream(self, default_stream_indexes=(0, 0)):
-        """
-        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.
-
-        Parameters:
-            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is ("0th video stream", "1st audio stream").
-
-        **Returns:** Reference to the instance object.
-        """
-        assert (
-            isinstance(default_stream_indexes, (list, tuple))
-            and len(default_stream_indexes) == 2
-            and all(isinstance(x, int) for x in default_stream_indexes)
-        ), "Invalid default_stream_indexes value!"
-        # validate source and extract metadata
-        self.__ffsp_output = self.__validate_source(
-            self.__source,
-            source_demuxer=self.__source_demuxer,
-            forced_validate=(
-                self.__forcevalidatesource if self.__source_demuxer is None else True
-            ),
-        )
-        # parse resolution and framerate
-        video_rfparams = self.__extract_resolution_framerate(
-            default_stream=default_stream_indexes[0]
-        )
-        if video_rfparams:
-            self.__default_video_resolution = video_rfparams["resolution"]
-            self.__default_video_framerate = video_rfparams["framerate"]
-            self.__default_video_orientation = video_rfparams["orientation"]
-
-        # parse output parameters through filters (if available)
-        if not (self.__metadata_output is None):
-            # parse output resolution and framerate
-            out_video_rfparams = self.__extract_resolution_framerate(
-                default_stream=default_stream_indexes[0], extract_output=True
-            )
-            if out_video_rfparams:
-                self.__output_frames_resolution = out_video_rfparams["resolution"]
-                self.__output_framerate = out_video_rfparams["framerate"]
-                self.__output_orientation = out_video_rfparams["orientation"]
-            # parse output pixel-format
-            self.__output_frames_pixfmt = self.__extract_video_pixfmt(
-                default_stream=default_stream_indexes[0], extract_output=True
-            )
-
-        # parse pixel-format
-        self.__default_video_pixfmt = self.__extract_video_pixfmt(
-            default_stream=default_stream_indexes[0]
-        )
-
-        # parse video decoder
-        self.__default_video_decoder = self.__extract_video_decoder(
-            default_stream=default_stream_indexes[0]
-        )
-        # parse rest of metadata
-        if not self.__contains_images:
-            # parse video bitrate
-            self.__default_video_bitrate = self.__extract_video_bitrate(
-                default_stream=default_stream_indexes[0]
-            )
-            # parse audio bitrate and samplerate
-            audio_params = self.__extract_audio_bitrate_nd_samplerate(
-                default_stream=default_stream_indexes[1]
-            )
-            if audio_params:
-                self.__default_audio_bitrate = audio_params["bitrate"]
-                self.__default_audio_samplerate = audio_params["samplerate"]
-            # parse video duration
-            self.__default_source_duration = self.__extract_duration()
-            # calculate all flags
-            if (
-                self.__default_video_bitrate
-                or (self.__default_video_framerate and self.__default_video_resolution)
-            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):
-                self.__contains_video = True
-                self.__contains_audio = True
-            elif self.__default_video_bitrate or (
-                self.__default_video_framerate and self.__default_video_resolution
-            ):
-                self.__contains_video = True
-            elif self.__default_audio_bitrate or self.__default_audio_samplerate:
-                self.__contains_audio = True
-            else:
-                raise ValueError(
-                    "Invalid source with no decodable audio or video stream provided. Aborting!"
-                )
-        # calculate approximate number of video frame
-        if self.__default_video_framerate and self.__default_source_duration:
-            self.__approx_video_nframes = np.rint(
-                self.__default_video_framerate * self.__default_source_duration
-            ).astype(int, casting="unsafe")
-
-        # signal metadata has been probed
-        self.__metadata_probed = True
-
-        # return reference to the instance object.
-        return self
-
-    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):
-        """
-        This method returns Parsed/Probed Metadata of the given source.
-
-        Parameters:
-            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?
-            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.
-
-        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.
-        """
-        # check if metadata has been probed or not
-        assert (
-            self.__metadata_probed
-        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
-        # log it
-        self.__verbose_logs and logger.debug("Extracting Metadata...")
-        # create metadata dictionary from information populated in private class variables
-        metadata = {
-            "ffmpeg_binary_path": self.__ffmpeg,
-            "source": self.__source,
-        }
-        metadata_missing = {}
-        # Only either `source_demuxer` or `source_extension` attribute can be
-        # present in metadata.
-        if self.__source_demuxer is None:
-            metadata.update({"source_extension": os.path.splitext(self.__source)[-1]})
-            # update missing
-            force_retrieve_missing and metadata_missing.update({"source_demuxer": ""})
-        else:
-            metadata.update({"source_demuxer": self.__source_demuxer})
-            # update missing
-            force_retrieve_missing and metadata_missing.update({"source_extension": ""})
-        # add source video metadata properties
-        metadata.update(
-            {
-                "source_video_resolution": self.__default_video_resolution,
-                "source_video_pixfmt": self.__default_video_pixfmt,
-                "source_video_framerate": self.__default_video_framerate,
-                "source_video_orientation": self.__default_video_orientation,
-                "source_video_decoder": self.__default_video_decoder,
-                "source_duration_sec": self.__default_source_duration,
-                "approx_video_nframes": (
-                    int(self.__approx_video_nframes)
-                    if self.__approx_video_nframes
-                    and not any(
-                        "loop" in x for x in self.__ffmpeg_prefixes
-                    )  # check if any loops in prefix
-                    and not any(
-                        "loop" in x for x in dict2Args(self.__sourcer_params)
-                    )  # check if any loops in filters
-                    else None
-                ),
-                "source_video_bitrate": self.__default_video_bitrate,
-                "source_audio_bitrate": self.__default_audio_bitrate,
-                "source_audio_samplerate": self.__default_audio_samplerate,
-                "source_has_video": self.__contains_video,
-                "source_has_audio": self.__contains_audio,
-                "source_has_image_sequence": self.__contains_images,
-            }
-        )
-        # add output metadata properties (if available)
-        if not (self.__metadata_output is None):
-            metadata.update(
-                {
-                    "output_frames_resolution": self.__output_frames_resolution,
-                    "output_frames_pixfmt": self.__output_frames_pixfmt,
-                    "output_framerate": self.__output_framerate,
-                    "output_orientation": self.__output_orientation,
-                }
-            )
-        else:
-            # since output stream metadata properties are only available when additional
-            # FFmpeg parameters(such as filters) are defined manually, thereby missing
-            # output stream properties are handled by assigning them counterpart source
-            # stream metadata property values
-            force_retrieve_missing and metadata_missing.update(
-                {
-                    "output_frames_resolution": self.__default_video_resolution,
-                    "output_frames_pixfmt": self.__default_video_pixfmt,
-                    "output_framerate": self.__default_video_framerate,
-                    "output_orientation": self.__default_video_orientation,
-                }
-            )
-        # log it
-        self.__verbose_logs and logger.debug(
-            "Metadata Extraction completed successfully!"
-        )
-        # parse as JSON string(`json.dumps`), if defined
-        metadata = json.dumps(metadata, indent=2) if pretty_json else metadata
-        metadata_missing = (
-            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing
-        )
-        # return `metadata` or `(metadata, metadata_missing)`
-        return metadata if not force_retrieve_missing else (metadata, metadata_missing)
-
-    @property
-    def enumerate_devices(self):
-        """
-        A property object that enumerate all probed Camera Devices connected to your system names
-        along with their respective "device indexes" or "camera indexes" as python dictionary.
-
-        **Returns:** Probed Camera Devices as python dictionary.
-        """
-        # check if metadata has been probed or not
-        assert (
-            self.__metadata_probed
-        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
-
-        # log if specified
-        self.__verbose_logs and logger.debug("Enumerating all probed Camera Devices.")
-
-        # return probed Camera Devices as python dictionary.
-        return {
-            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)
-        }
-
-    def __validate_source(self, source, source_demuxer=None, forced_validate=False):
-        """
-        This Internal method validates source and extracts its metadata.
-
-        Parameters:
-            source_demuxer(str): specifies the demuxer(`-f`) for the input source.
-            forced_validate (bool): whether to skip validation tests or not?
-
-        **Returns:** `True` if passed tests else `False`.
-        """
-        # validate source demuxer(if defined)
-        if not (source_demuxer is None):
-            # check if "auto" demuxer is specified
-            if source_demuxer == "auto":
-                # integerise source to get index
-                index = int(source)
-                # extract devices list and actual demuxer value
-                (
-                    self.__extracted_devices_list,
-                    source_demuxer,
-                ) = extract_device_n_demuxer(
-                    self.__ffmpeg,
-                    machine_OS=self.__machine_OS,
-                    verbose=self.__verbose_logs,
-                )
-                # valid indexes range
-                valid_indexes = [
-                    x
-                    for x in range(
-                        -len(self.__extracted_devices_list),
-                        len(self.__extracted_devices_list),
-                    )
-                ]
-                # check index is within valid range
-                if self.__extracted_devices_list and index in valid_indexes:
-                    # overwrite actual source device name/path/index
-                    if self.__machine_OS == "Windows":
-                        # Windows OS requires "video=" suffix
-                        self.__source = source = "video={}".format(
-                            self.__extracted_devices_list[index]
-                        )
-                    elif self.__machine_OS == "Darwin":
-                        # Darwin OS requires only device indexes
-                        self.__source = source = (
-                            str(index)
-                            if index >= 0
-                            else str(len(self.__extracted_devices_list) + index)
-                        )
-                    else:
-                        # Linux OS require /dev/video format
-                        self.__source = source = next(
-                            iter(self.__extracted_devices_list[index].keys())
-                        )
-                    # overwrite source_demuxer global variable
-                    self.__source_demuxer = source_demuxer
-                    self.__verbose_logs and logger.debug(
-                        "Successfully configured device `{}` at index `{}` with demuxer `{}`.".format(
-                            self.__extracted_devices_list[index]
-                            if self.__machine_OS != "Linux"
-                            else next(
-                                iter(self.__extracted_devices_list[index].values())
-                            )[0],
-                            index
-                            if index >= 0
-                            else len(self.__extracted_devices_list) + index,
-                            self.__source_demuxer,
-                        )
-                    )
-                else:
-                    # raise error otherwise
-                    raise ValueError(
-                        "Given source `{}` is not a valid device index. Possible values index values can be: {}".format(
-                            source,
-                            ",".join(f"{x}" for x in valid_indexes),
-                        )
-                    )
-            # otherwise validate against supported demuxers
-            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):
-                # raise if fails
-                raise ValueError(
-                    "Installed FFmpeg failed to recognize `{}` demuxer. Check `source_demuxer` parameter value again!".format(
-                        source_demuxer
-                    )
-                )
-            else:
-                pass
-
-        # assert if valid source
-        assert source and isinstance(
-            source, str
-        ), "Input `source` parameter is of invalid type!"
-
-        # Differentiate input
-        if forced_validate:
-            source_demuxer is None and logger.critical(
-                "Forcefully passing validation test for given source!"
-            )
-            self.__source = source
-        elif os.path.isfile(source):
-            self.__source = os.path.abspath(source)
-        elif is_valid_image_seq(
-            self.__ffmpeg, source=source, verbose=self.__verbose_logs
-        ):
-            self.__source = source
-            self.__contains_images = True
-        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):
-            self.__source = source
-        else:
-            logger.error("`source` value is unusable or unsupported!")
-            # discard the value otherwise
-            raise ValueError("Input source is invalid. Aborting!")
-        # format command
-        if self.__sourcer_params:
-            # handle additional params separately
-            meta_cmd = (
-                [self.__ffmpeg]
-                + (["-hide_banner"] if not self.__verbose_logs else [])
-                + ["-t", "0.0001"]
-                + self.__ffmpeg_prefixes
-                + (["-f", source_demuxer] if source_demuxer else [])
-                + ["-i", source]
-                + dict2Args(self.__sourcer_params)
-                + ["-f", "null", "-"]
-            )
-        else:
-            meta_cmd = (
-                [self.__ffmpeg]
-                + (["-hide_banner"] if not self.__verbose_logs else [])
-                + self.__ffmpeg_prefixes
-                + (["-f", source_demuxer] if source_demuxer else [])
-                + ["-i", source]
-            )
-        # extract metadata, decode, and filter
-        metadata = (
-            check_sp_output(
-                meta_cmd,
-                force_retrieve_stderr=True,
-            )
-            .decode("utf-8")
-            .strip()
-        )
-        # separate input and output metadata (if available)
-        if "Output #" in metadata:
-            (metadata, self.__metadata_output) = metadata.split("Output #")
-        # return metadata based on params
-        return metadata
-
-    def __extract_video_bitrate(self, default_stream=0):
-        """
-        This Internal method parses default video-stream bitrate from metadata.
-
-        Parameters:
-            default_stream (int): selects specific video-stream in case of multiple ones.
-
-        **Returns:** Default Video bitrate as string value.
-        """
-        identifiers = ["Video:", "Stream #"]
-        video_bitrate_text = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        if video_bitrate_text:
-            selected_stream = video_bitrate_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(video_bitrate_text)
-                else 0
-            ]
-            filtered_bitrate = re.findall(
-                r",\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
-            )
-            if len(filtered_bitrate):
-                default_video_bitrate = filtered_bitrate[0].split(" ")[1:3]
-                final_bitrate = "{}{}".format(
-                    int(default_video_bitrate[0].strip()),
-                    "k" if (default_video_bitrate[1].strip().startswith("k")) else "M",
-                )
-                return final_bitrate
-        return ""
-
-    def __extract_video_decoder(self, default_stream=0):
-        """
-        This Internal method parses default video-stream decoder from metadata.
-
-        Parameters:
-            default_stream (int): selects specific video-stream in case of multiple ones.
-
-        **Returns:** Default Video decoder as string value.
-        """
-        assert isinstance(default_stream, int), "Invalid input!"
-        identifiers = ["Video:", "Stream #"]
-        meta_text = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-            filtered_pixfmt = re.findall(
-                r"Video:\s[a-z0-9_-]*", selected_stream.strip()
-            )
-            if filtered_pixfmt:
-                return filtered_pixfmt[0].split(" ")[-1]
-        return ""
-
-    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):
-        """
-        This Internal method parses default video-stream pixel-format from metadata.
-
-        Parameters:
-            default_stream (int): selects specific video-stream in case of multiple ones.
-
-        **Returns:** Default Video pixel-format as string value.
-        """
-        identifiers = ["Video:", "Stream #"]
-        meta_text = (
-            [
-                line.strip()
-                for line in self.__ffsp_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-            if not extract_output
-            else [
-                line.strip()
-                for line in self.__metadata_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-        )
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-            filtered_pixfmt = re.findall(
-                r",\s[a-z][a-z0-9_-]*", selected_stream.strip()
-            )
-            if filtered_pixfmt:
-                return filtered_pixfmt[0].split(" ")[-1]
-        return ""
-
-    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):
-        """
-        This Internal method parses default audio-stream bitrate and sample-rate from metadata.
-
-        Parameters:
-            default_stream (int): selects specific audio-stream in case of multiple ones.
-
-        **Returns:** Default Audio-stream bitrate and sample-rate as string value.
-        """
-        identifiers = ["Audio:", "Stream #"]
-        meta_text = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        result = {}
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-            # filter data
-            filtered_audio_bitrate = re.findall(
-                r"fltp,\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
-            )
-            filtered_audio_samplerate = re.findall(
-                r",\s[0-9]+\sHz", selected_stream.strip()
-            )
-            # get audio bitrate metadata
-            if filtered_audio_bitrate:
-                filtered = filtered_audio_bitrate[0].split(" ")[1:3]
-                result["bitrate"] = "{}{}".format(
-                    int(filtered[0].strip()),
-                    "k" if (filtered[1].strip().startswith("k")) else "M",
-                )
-            else:
-                result["bitrate"] = ""
-            # get audio samplerate metadata
-            result["samplerate"] = (
-                filtered_audio_samplerate[0].split(", ")[1]
-                if filtered_audio_samplerate
-                else ""
-            )
-        return result if result and (len(result) == 2) else {}
-
-    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):
-        """
-        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.
-
-        Parameters:
-            default_stream (int): selects specific audio-stream in case of multiple ones.
-            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?
-
-        **Returns:** Default Video resolution and framerate as dictionary value.
-        """
-        identifiers = ["Video:", "Stream #"]
-        # use output metadata if available
-        meta_text = (
-            [
-                line.strip()
-                for line in self.__ffsp_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-            if not extract_output
-            else [
-                line.strip()
-                for line in self.__metadata_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-        )
-        # extract video orientation metadata if available
-        identifiers_orientation = ["displaymatrix:", "rotation"]
-        meta_text_orientation = (
-            [
-                line.strip()
-                for line in self.__ffsp_output.split("\n")
-                if all(x in line for x in identifiers_orientation)
-            ]
-            if not extract_output
-            else [
+            ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+                source
+            )
+        else:
+            # otherwise find valid default source demuxer value
+            # enforce "auto" if valid index device
+            self.__source_demuxer = "auto" if validate_device_index(source) else None
+            # log if not valid index device and invalid type
+            self.__verbose_logs and not self.__source_demuxer in [
+                "auto",
+                None,
+            ] and logger.warning(
+                "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
+                    type(source_demuxer).__name__
+                )
+            )
+            # log if not valid index device and invalid type
+            self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
+                "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
+                    source
+                )
+            )
+
+        # handles source stream
+        self.__source = source
+
+        # creates shallow copy for further usage #TODO
+        self.__source_org = copy.copy(self.__source)
+        self.__source_demuxer_org = copy.copy(self.__source_demuxer)
+
+        # handles all extracted devices names/paths list
+        # when source_demuxer = "auto"
+        self.__extracted_devices_list = []
+
+        # various source stream params
+        self.__default_video_resolution = ""  # handles stream resolution
+        self.__default_video_orientation = ""  # handles stream's video orientation
+        self.__default_video_framerate = ""  # handles stream framerate
+        self.__default_video_bitrate = ""  # handles stream's video bitrate
+        self.__default_video_pixfmt = ""  # handles stream's video pixfmt
+        self.__default_video_decoder = ""  # handles stream's video decoder
+        self.__default_source_duration = ""  # handles stream's video duration
+        self.__approx_video_nframes = ""  # handles approx stream frame number
+        self.__default_audio_bitrate = ""  # handles stream's audio bitrate
+        self.__default_audio_samplerate = ""  # handles stream's audio samplerate
+
+        # handle various stream flags
+        self.__contains_video = False  # contains video
+        self.__contains_audio = False  # contains audio
+        self.__contains_images = False  # contains image-sequence
+
+        # handles output parameters through filters
+        self.__metadata_output = None  # handles output stream metadata
+        self.__output_frames_resolution = ""  # handles output stream resolution
+        self.__output_framerate = ""  # handles output stream framerate
+        self.__output_frames_pixfmt = ""  # handles output frame pixel format
+        self.__output_orientation = ""  # handles output frame orientation
+
+        # check whether metadata probed or not?
+        self.__metadata_probed = False
+
+    def probe_stream(self, default_stream_indexes=(0, 0)):
+        """
+        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.
+
+        Parameters:
+            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is ("0th video stream", "1st audio stream").
+
+        **Returns:** Reference to the instance object.
+        """
+        assert (
+            isinstance(default_stream_indexes, (list, tuple))
+            and len(default_stream_indexes) == 2
+            and all(isinstance(x, int) for x in default_stream_indexes)
+        ), "Invalid default_stream_indexes value!"
+        # validate source and extract metadata
+        self.__ffsp_output = self.__validate_source(
+            self.__source,
+            source_demuxer=self.__source_demuxer,
+            forced_validate=(
+                self.__forcevalidatesource if self.__source_demuxer is None else True
+            ),
+        )
+        # parse resolution and framerate
+        video_rfparams = self.__extract_resolution_framerate(
+            default_stream=default_stream_indexes[0]
+        )
+        if video_rfparams:
+            self.__default_video_resolution = video_rfparams["resolution"]
+            self.__default_video_framerate = video_rfparams["framerate"]
+            self.__default_video_orientation = video_rfparams["orientation"]
+
+        # parse output parameters through filters (if available)
+        if not (self.__metadata_output is None):
+            # parse output resolution and framerate
+            out_video_rfparams = self.__extract_resolution_framerate(
+                default_stream=default_stream_indexes[0], extract_output=True
+            )
+            if out_video_rfparams:
+                self.__output_frames_resolution = out_video_rfparams["resolution"]
+                self.__output_framerate = out_video_rfparams["framerate"]
+                self.__output_orientation = out_video_rfparams["orientation"]
+            # parse output pixel-format
+            self.__output_frames_pixfmt = self.__extract_video_pixfmt(
+                default_stream=default_stream_indexes[0], extract_output=True
+            )
+
+        # parse pixel-format
+        self.__default_video_pixfmt = self.__extract_video_pixfmt(
+            default_stream=default_stream_indexes[0]
+        )
+
+        # parse video decoder
+        self.__default_video_decoder = self.__extract_video_decoder(
+            default_stream=default_stream_indexes[0]
+        )
+        # parse rest of metadata
+        if not self.__contains_images:
+            # parse video bitrate
+            self.__default_video_bitrate = self.__extract_video_bitrate(
+                default_stream=default_stream_indexes[0]
+            )
+            # parse audio bitrate and samplerate
+            audio_params = self.__extract_audio_bitrate_nd_samplerate(
+                default_stream=default_stream_indexes[1]
+            )
+            if audio_params:
+                self.__default_audio_bitrate = audio_params["bitrate"]
+                self.__default_audio_samplerate = audio_params["samplerate"]
+            # parse video duration
+            self.__default_source_duration = self.__extract_duration()
+            # calculate all flags
+            if (
+                self.__default_video_bitrate
+                or (self.__default_video_framerate and self.__default_video_resolution)
+            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):
+                self.__contains_video = True
+                self.__contains_audio = True
+            elif self.__default_video_bitrate or (
+                self.__default_video_framerate and self.__default_video_resolution
+            ):
+                self.__contains_video = True
+            elif self.__default_audio_bitrate or self.__default_audio_samplerate:
+                self.__contains_audio = True
+            else:
+                raise ValueError(
+                    "Invalid source with no decodable audio or video stream provided. Aborting!"
+                )
+        # calculate approximate number of video frame
+        if self.__default_video_framerate and self.__default_source_duration:
+            self.__approx_video_nframes = np.rint(
+                self.__default_video_framerate * self.__default_source_duration
+            ).astype(int, casting="unsafe")
+
+        # signal metadata has been probed
+        self.__metadata_probed = True
+
+        # return reference to the instance object.
+        return self
+
+    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):
+        """
+        This method returns Parsed/Probed Metadata of the given source.
+
+        Parameters:
+            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?
+            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.
+
+        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.
+        """
+        # check if metadata has been probed or not
+        assert (
+            self.__metadata_probed
+        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
+        # log it
+        self.__verbose_logs and logger.debug("Extracting Metadata...")
+        # create metadata dictionary from information populated in private class variables
+        metadata = {
+            "ffmpeg_binary_path": self.__ffmpeg,
+            "source": self.__source,
+        }
+        metadata_missing = {}
+        # Only either `source_demuxer` or `source_extension` attribute can be
+        # present in metadata.
+        if self.__source_demuxer is None:
+            metadata.update({"source_extension": os.path.splitext(self.__source)[-1]})
+            # update missing
+            force_retrieve_missing and metadata_missing.update({"source_demuxer": ""})
+        else:
+            metadata.update({"source_demuxer": self.__source_demuxer})
+            # update missing
+            force_retrieve_missing and metadata_missing.update({"source_extension": ""})
+        # add source video metadata properties
+        metadata.update(
+            {
+                "source_video_resolution": self.__default_video_resolution,
+                "source_video_pixfmt": self.__default_video_pixfmt,
+                "source_video_framerate": self.__default_video_framerate,
+                "source_video_orientation": self.__default_video_orientation,
+                "source_video_decoder": self.__default_video_decoder,
+                "source_duration_sec": self.__default_source_duration,
+                "approx_video_nframes": (
+                    int(self.__approx_video_nframes)
+                    if self.__approx_video_nframes
+                    and not any(
+                        "loop" in x for x in self.__ffmpeg_prefixes
+                    )  # check if any loops in prefix
+                    and not any(
+                        "loop" in x for x in dict2Args(self.__sourcer_params)
+                    )  # check if any loops in filters
+                    else None
+                ),
+                "source_video_bitrate": self.__default_video_bitrate,
+                "source_audio_bitrate": self.__default_audio_bitrate,
+                "source_audio_samplerate": self.__default_audio_samplerate,
+                "source_has_video": self.__contains_video,
+                "source_has_audio": self.__contains_audio,
+                "source_has_image_sequence": self.__contains_images,
+            }
+        )
+        # add output metadata properties (if available)
+        if not (self.__metadata_output is None):
+            metadata.update(
+                {
+                    "output_frames_resolution": self.__output_frames_resolution,
+                    "output_frames_pixfmt": self.__output_frames_pixfmt,
+                    "output_framerate": self.__output_framerate,
+                    "output_orientation": self.__output_orientation,
+                }
+            )
+        else:
+            # since output stream metadata properties are only available when additional
+            # FFmpeg parameters(such as filters) are defined manually, thereby missing
+            # output stream properties are handled by assigning them counterpart source
+            # stream metadata property values
+            force_retrieve_missing and metadata_missing.update(
+                {
+                    "output_frames_resolution": self.__default_video_resolution,
+                    "output_frames_pixfmt": self.__default_video_pixfmt,
+                    "output_framerate": self.__default_video_framerate,
+                    "output_orientation": self.__default_video_orientation,
+                }
+            )
+        # log it
+        self.__verbose_logs and logger.debug(
+            "Metadata Extraction completed successfully!"
+        )
+        # parse as JSON string(`json.dumps`), if defined
+        metadata = json.dumps(metadata, indent=2) if pretty_json else metadata
+        metadata_missing = (
+            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing
+        )
+        # return `metadata` or `(metadata, metadata_missing)`
+        return metadata if not force_retrieve_missing else (metadata, metadata_missing)
+
+    @property
+    def enumerate_devices(self):
+        """
+        A property object that enumerate all probed Camera Devices connected to your system names
+        along with their respective "device indexes" or "camera indexes" as python dictionary.
+
+        **Returns:** Probed Camera Devices as python dictionary.
+        """
+        # check if metadata has been probed or not
+        assert (
+            self.__metadata_probed
+        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
+
+        # log if specified
+        self.__verbose_logs and logger.debug("Enumerating all probed Camera Devices.")
+
+        # return probed Camera Devices as python dictionary.
+        return {
+            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)
+        }
+
+    def __validate_source(self, source, source_demuxer=None, forced_validate=False):
+        """
+        This Internal method validates source and extracts its metadata.
+
+        Parameters:
+            source_demuxer(str): specifies the demuxer(`-f`) for the input source.
+            forced_validate (bool): whether to skip validation tests or not?
+
+        **Returns:** `True` if passed tests else `False`.
+        """
+        logger.critical(
+            "{} :: {} :: {}".format(
+                source_demuxer,
+                source_demuxer in get_supported_demuxers(self.__ffmpeg),
+                get_supported_demuxers(self.__ffmpeg),
+            )
+        )
+        # validate source demuxer(if defined)
+        if not (source_demuxer is None):
+            # check if "auto" demuxer is specified
+            if source_demuxer == "auto":
+                # integerise source to get index
+                index = int(source)
+                # extract devices list and actual demuxer value
+                (
+                    self.__extracted_devices_list,
+                    source_demuxer,
+                ) = extract_device_n_demuxer(
+                    self.__ffmpeg,
+                    machine_OS=self.__machine_OS,
+                    verbose=self.__verbose_logs,
+                )
+                # valid indexes range
+                valid_indexes = [
+                    x
+                    for x in range(
+                        -len(self.__extracted_devices_list),
+                        len(self.__extracted_devices_list),
+                    )
+                ]
+                # check index is within valid range
+                if self.__extracted_devices_list and index in valid_indexes:
+                    # overwrite actual source device name/path/index
+                    if self.__machine_OS == "Windows":
+                        # Windows OS requires "video=" suffix
+                        self.__source = source = "video={}".format(
+                            self.__extracted_devices_list[index]
+                        )
+                    elif self.__machine_OS == "Darwin":
+                        # Darwin OS requires only device indexes
+                        self.__source = source = (
+                            str(index)
+                            if index >= 0
+                            else str(len(self.__extracted_devices_list) + index)
+                        )
+                    else:
+                        # Linux OS require /dev/video format
+                        self.__source = source = next(
+                            iter(self.__extracted_devices_list[index].keys())
+                        )
+                    # overwrite source_demuxer global variable
+                    self.__source_demuxer = source_demuxer
+                    self.__verbose_logs and logger.debug(
+                        "Successfully configured device `{}` at index `{}` with demuxer `{}`.".format(
+                            (
+                                self.__extracted_devices_list[index]
+                                if self.__machine_OS != "Linux"
+                                else next(
+                                    iter(self.__extracted_devices_list[index].values())
+                                )[0]
+                            ),
+                            (
+                                index
+                                if index >= 0
+                                else len(self.__extracted_devices_list) + index
+                            ),
+                            self.__source_demuxer,
+                        )
+                    )
+                else:
+                    # raise error otherwise
+                    raise ValueError(
+                        "Given source `{}` is not a valid device index. Possible values index values can be: {}".format(
+                            source,
+                            ",".join(f"{x}" for x in valid_indexes),
+                        )
+                    )
+            # otherwise validate against supported demuxers
+            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):
+                # raise if fails
+                raise ValueError(
+                    "Installed FFmpeg failed to recognize `{}` demuxer. Check `source_demuxer` parameter value again!".format(
+                        source_demuxer
+                    )
+                )
+            else:
+                pass
+
+        # assert if valid source
+        assert source and isinstance(
+            source, str
+        ), "Input `source` parameter is of invalid type!"
+
+        # Differentiate input
+        if forced_validate:
+            source_demuxer is None and logger.critical(
+                "Forcefully passing validation test for given source!"
+            )
+            self.__source = source
+        elif os.path.isfile(source):
+            self.__source = os.path.abspath(source)
+        elif is_valid_image_seq(
+            self.__ffmpeg, source=source, verbose=self.__verbose_logs
+        ):
+            self.__source = source
+            self.__contains_images = True
+        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):
+            self.__source = source
+        else:
+            logger.error("`source` value is unusable or unsupported!")
+            # discard the value otherwise
+            raise ValueError("Input source is invalid. Aborting!")
+        # format command
+        if self.__sourcer_params:
+            # handle additional params separately
+            meta_cmd = (
+                [self.__ffmpeg]
+                + (["-hide_banner"] if not self.__verbose_logs else [])
+                + ["-t", "0.0001"]
+                + self.__ffmpeg_prefixes
+                + (["-f", source_demuxer] if source_demuxer else [])
+                + ["-i", source]
+                + dict2Args(self.__sourcer_params)
+                + ["-f", "null", "-"]
+            )
+        else:
+            meta_cmd = (
+                [self.__ffmpeg]
+                + (["-hide_banner"] if not self.__verbose_logs else [])
+                + self.__ffmpeg_prefixes
+                + (["-f", source_demuxer] if source_demuxer else [])
+                + ["-i", source]
+            )
+        # extract metadata, decode, and filter
+        metadata = (
+            check_sp_output(
+                meta_cmd,
+                force_retrieve_stderr=True,
+            )
+            .decode("utf-8")
+            .strip()
+        )
+        # separate input and output metadata (if available)
+        if "Output #" in metadata:
+            (metadata, self.__metadata_output) = metadata.split("Output #")
+        # return metadata based on params
+        return metadata
+
+    def __extract_video_bitrate(self, default_stream=0):
+        """
+        This Internal method parses default video-stream bitrate from metadata.
+
+        Parameters:
+            default_stream (int): selects specific video-stream in case of multiple ones.
+
+        **Returns:** Default Video bitrate as string value.
+        """
+        identifiers = ["Video:", "Stream #"]
+        video_bitrate_text = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        if video_bitrate_text:
+            selected_stream = video_bitrate_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(video_bitrate_text)
+                    else 0
+                )
+            ]
+            filtered_bitrate = re.findall(
+                r",\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
+            )
+            if len(filtered_bitrate):
+                default_video_bitrate = filtered_bitrate[0].split(" ")[1:3]
+                final_bitrate = "{}{}".format(
+                    int(default_video_bitrate[0].strip()),
+                    "k" if (default_video_bitrate[1].strip().startswith("k")) else "M",
+                )
+                return final_bitrate
+        return ""
+
+    def __extract_video_decoder(self, default_stream=0):
+        """
+        This Internal method parses default video-stream decoder from metadata.
+
+        Parameters:
+            default_stream (int): selects specific video-stream in case of multiple ones.
+
+        **Returns:** Default Video decoder as string value.
+        """
+        assert isinstance(default_stream, int), "Invalid input!"
+        identifiers = ["Video:", "Stream #"]
+        meta_text = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+            filtered_pixfmt = re.findall(
+                r"Video:\s[a-z0-9_-]*", selected_stream.strip()
+            )
+            if filtered_pixfmt:
+                return filtered_pixfmt[0].split(" ")[-1]
+        return ""
+
+    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):
+        """
+        This Internal method parses default video-stream pixel-format from metadata.
+
+        Parameters:
+            default_stream (int): selects specific video-stream in case of multiple ones.
+
+        **Returns:** Default Video pixel-format as string value.
+        """
+        identifiers = ["Video:", "Stream #"]
+        meta_text = (
+            [
+                line.strip()
+                for line in self.__ffsp_output.split("\n")
+                if all(x in line for x in identifiers)
+            ]
+            if not extract_output
+            else [
+                line.strip()
+                for line in self.__metadata_output.split("\n")
+                if all(x in line for x in identifiers)
+            ]
+        )
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+            filtered_pixfmt = re.findall(
+                r",\s[a-z][a-z0-9_-]*", selected_stream.strip()
+            )
+            if filtered_pixfmt:
+                return filtered_pixfmt[0].split(" ")[-1]
+        return ""
+
+    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):
+        """
+        This Internal method parses default audio-stream bitrate and sample-rate from metadata.
+
+        Parameters:
+            default_stream (int): selects specific audio-stream in case of multiple ones.
+
+        **Returns:** Default Audio-stream bitrate and sample-rate as string value.
+        """
+        identifiers = ["Audio:", "Stream #"]
+        meta_text = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        result = {}
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+            # filter data
+            filtered_audio_bitrate = re.findall(
+                r"fltp,\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
+            )
+            filtered_audio_samplerate = re.findall(
+                r",\s[0-9]+\sHz", selected_stream.strip()
+            )
+            # get audio bitrate metadata
+            if filtered_audio_bitrate:
+                filtered = filtered_audio_bitrate[0].split(" ")[1:3]
+                result["bitrate"] = "{}{}".format(
+                    int(filtered[0].strip()),
+                    "k" if (filtered[1].strip().startswith("k")) else "M",
+                )
+            else:
+                result["bitrate"] = ""
+            # get audio samplerate metadata
+            result["samplerate"] = (
+                filtered_audio_samplerate[0].split(", ")[1]
+                if filtered_audio_samplerate
+                else ""
+            )
+        return result if result and (len(result) == 2) else {}
+
+    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):
+        """
+        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.
+
+        Parameters:
+            default_stream (int): selects specific audio-stream in case of multiple ones.
+            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?
+
+        **Returns:** Default Video resolution and framerate as dictionary value.
+        """
+        identifiers = ["Video:", "Stream #"]
+        # use output metadata if available
+        meta_text = (
+            [
                 line.strip()
-                for line in self.__metadata_output.split("\n")
-                if all(x in line for x in identifiers_orientation)
+                for line in self.__ffsp_output.split("\n")
+                if all(x in line for x in identifiers)
             ]
-        )
-        # use metadata if available
-        result = {}
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-
-            # filter data
-            filtered_resolution = re.findall(
-                r"([1-9]\d+)x([1-9]\d+)", selected_stream.strip()
-            )
-            filtered_framerate = re.findall(
-                r"\d+(?:\.\d+)?\sfps", selected_stream.strip()
-            )
-            filtered_tbr = re.findall(r"\d+(?:\.\d+)?\stbr", selected_stream.strip())
-
-            # extract framerate metadata
-            if filtered_framerate:
-                # calculate actual framerate
-                result["framerate"] = float(
-                    re.findall(r"[\d\.\d]+", filtered_framerate[0])[0]
-                )
-            elif filtered_tbr:
-                # guess from TBR(if fps unavailable)
-                result["framerate"] = float(
-                    re.findall(r"[\d\.\d]+", filtered_tbr[0])[0]
-                )
-
-            # extract resolution metadata
-            if filtered_resolution:
-                result["resolution"] = [int(x) for x in filtered_resolution[0]]
-
-            # extract video orientation metadata
-            if meta_text_orientation:
-                selected_stream = meta_text_orientation[
-                    default_stream
-                    if default_stream > 0 and default_stream < len(meta_text)
-                    else 0
-                ]
-                filtered_orientation = re.findall(
-                    r"[-]?\d+\.\d+", selected_stream.strip()
-                )
-                result["orientation"] = float(filtered_orientation[0])
-            else:
-                result["orientation"] = 0.0
-
-        return result if result and (len(result) == 3) else {}
-
-    def __extract_duration(self, inseconds=True):
-        """
-        This Internal method parses stream duration from metadata.
-
-        Parameters:
-            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?
+            if not extract_output
+            else [
+                line.strip()
+                for line in self.__metadata_output.split("\n")
+                if all(x in line for x in identifiers)
+            ]
+        )
+        # extract video orientation metadata if available
+        identifiers_orientation = ["displaymatrix:", "rotation"]
+        meta_text_orientation = (
+            [
+                line.strip()
+                for line in self.__ffsp_output.split("\n")
+                if all(x in line for x in identifiers_orientation)
+            ]
+            if not extract_output
+            else [
+                line.strip()
+                for line in self.__metadata_output.split("\n")
+                if all(x in line for x in identifiers_orientation)
+            ]
+        )
+        # use metadata if available
+        result = {}
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+
+            # filter data
+            filtered_resolution = re.findall(
+                r"([1-9]\d+)x([1-9]\d+)", selected_stream.strip()
+            )
+            filtered_framerate = re.findall(
+                r"\d+(?:\.\d+)?\sfps", selected_stream.strip()
+            )
+            filtered_tbr = re.findall(r"\d+(?:\.\d+)?\stbr", selected_stream.strip())
+
+            # extract framerate metadata
+            if filtered_framerate:
+                # calculate actual framerate
+                result["framerate"] = float(
+                    re.findall(r"[\d\.\d]+", filtered_framerate[0])[0]
+                )
+            elif filtered_tbr:
+                # guess from TBR(if fps unavailable)
+                result["framerate"] = float(
+                    re.findall(r"[\d\.\d]+", filtered_tbr[0])[0]
+                )
+
+            # extract resolution metadata
+            if filtered_resolution:
+                result["resolution"] = [int(x) for x in filtered_resolution[0]]
 
-        **Returns:** Default Stream duration as string value.
-        """
-        identifiers = ["Duration:"]
-        stripped_data = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        if stripped_data:
-            t_duration = re.findall(
-                r"(?:[01]\d|2[0123]):(?:[012345]\d):(?:[012345]\d+(?:\.\d+)?)",
-                stripped_data[0],
-            )
-            if t_duration:
-                return (
-                    sum(
-                        float(x) * 60**i
-                        for i, x in enumerate(reversed(t_duration[0].split(":")))
-                    )
-                    if inseconds
-                    else t_duration
-                )
-        return 0
+            # extract video orientation metadata
+            if meta_text_orientation:
+                selected_stream = meta_text_orientation[
+                    (
+                        default_stream
+                        if default_stream > 0 and default_stream < len(meta_text)
+                        else 0
+                    )
+                ]
+                filtered_orientation = re.findall(
+                    r"[-]?\d+\.\d+", selected_stream.strip()
+                )
+                result["orientation"] = float(filtered_orientation[0])
+            else:
+                result["orientation"] = 0.0
+
+        return result if result and (len(result) == 3) else {}
+
+    def __extract_duration(self, inseconds=True):
+        """
+        This Internal method parses stream duration from metadata.
+
+        Parameters:
+            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?
+
+        **Returns:** Default Stream duration as string value.
+        """
+        identifiers = ["Duration:"]
+        stripped_data = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        if stripped_data:
+            t_duration = re.findall(
+                r"(?:[01]\d|2[0123]):(?:[012345]\d):(?:[012345]\d+(?:\.\d+)?)",
+                stripped_data[0],
+            )
+            if t_duration:
+                return (
+                    sum(
+                        float(x) * 60**i
+                        for i, x in enumerate(reversed(t_duration[0].split(":")))
+                    )
+                    if inseconds
+                    else t_duration
+                )
+        return 0
 

enumerate_devices property readonly

A property object that enumerate all probed Camera Devices connected to your system names along with their respective "device indexes" or "camera indexes" as python dictionary.

Returns: Probed Camera Devices as python dictionary.

__init__(self, source, source_demuxer=None, custom_ffmpeg='', verbose=False, **sourcer_params) special

This constructor method initializes the object state and attributes of the Sourcer Class.

Parameters:

Name Type Description Default
source str

defines the input(-i) source filename/URL/device-name/device-path.

required
source_demuxer str

specifies the demuxer(-f) for the input source.

None
custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

''
verbose bool

enables/disables verbose.

False
sourcer_params dict

provides the flexibility to control supported internal and FFmpeg parameters.

{}
Source code in deffcode/sourcer.py
def __init__(
     self,
     source,
@@ -834,129 +859,131 @@
 
     # sanitize sourcer_params
     self.__sourcer_params = {
-        str(k).strip(): str(v).strip()
-        if not isinstance(v, (dict, list, int, float, tuple))
-        else v
-        for k, v in sourcer_params.items()
-    }
-
-    # handle whether to force validate source
-    self.__forcevalidatesource = self.__sourcer_params.pop(
-        "-force_validate_source", False
-    )
-    if not isinstance(self.__forcevalidatesource, bool):
-        # reset improper values
-        self.__forcevalidatesource = False
-
-    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
-    self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
-    if not isinstance(self.__ffmpeg_prefixes, list):
-        # log it
-        logger.warning(
-            "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
-                type(self.__ffmpeg_prefixes).__name__
-            )
-        )
-        # reset improper values
-        self.__ffmpeg_prefixes = []
-
-    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
-    __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
-    if not isinstance(__ffmpeg_download_path, str):
-        # reset improper values
-        __ffmpeg_download_path = ""
-
-    # validate the FFmpeg assets and return location (also downloads static assets on windows)
-    self.__ffmpeg = get_valid_ffmpeg_path(
-        str(custom_ffmpeg),
-        True if self.__machine_OS == "Windows" else False,
-        ffmpeg_download_path=__ffmpeg_download_path,
-        verbose=self.__verbose_logs,
-    )
-
-    # check if valid FFmpeg path returned
-    if self.__ffmpeg:
-        self.__verbose_logs and logger.debug(
-            "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
-        )
-    else:
-        # else raise error
-        raise RuntimeError(
-            "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
-        )
-
-    # sanitize externally accessible parameters and assign them
-    # handles source demuxer
-    if source is None:
-        # first check if source value is empty
-        # raise error if true
-        raise ValueError("Input `source` parameter is empty!")
-    elif isinstance(source_demuxer, str):
-        # assign if valid demuxer value
-        self.__source_demuxer = source_demuxer.strip().lower()
+        str(k).strip(): (
+            str(v).strip()
+            if not isinstance(v, (dict, list, int, float, tuple))
+            else v
+        )
+        for k, v in sourcer_params.items()
+    }
+
+    # handle whether to force validate source
+    self.__forcevalidatesource = self.__sourcer_params.pop(
+        "-force_validate_source", False
+    )
+    if not isinstance(self.__forcevalidatesource, bool):
+        # reset improper values
+        self.__forcevalidatesource = False
+
+    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
+    self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
+    if not isinstance(self.__ffmpeg_prefixes, list):
+        # log it
+        logger.warning(
+            "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
+                type(self.__ffmpeg_prefixes).__name__
+            )
+        )
+        # reset improper values
+        self.__ffmpeg_prefixes = []
+
+    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
+    __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
+    if not isinstance(__ffmpeg_download_path, str):
+        # reset improper values
+        __ffmpeg_download_path = ""
+
+    # validate the FFmpeg assets and return location (also downloads static assets on windows)
+    self.__ffmpeg = get_valid_ffmpeg_path(
+        str(custom_ffmpeg),
+        True if self.__machine_OS == "Windows" else False,
+        ffmpeg_download_path=__ffmpeg_download_path,
+        verbose=self.__verbose_logs,
+    )
+
+    # check if valid FFmpeg path returned
+    if self.__ffmpeg:
+        self.__verbose_logs and logger.debug(
+            "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
+        )
+    else:
+        # else raise error
+        raise RuntimeError(
+            "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
+        )
+
+    # sanitize externally accessible parameters and assign them
+    # handles source demuxer
+    if source is None:
+        # first check if source value is empty
+        # raise error if true
+        raise ValueError("Input `source` parameter is empty!")
+    elif isinstance(source_demuxer, str):
         # assign if valid demuxer value
-        assert self.__source_demuxer != "auto" or validate_device_index(
-            source
-        ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+        self.__source_demuxer = source_demuxer.strip().lower()
+        # assign if valid demuxer value
+        assert self.__source_demuxer != "auto" or validate_device_index(
             source
-        )
-    else:
-        # otherwise find valid default source demuxer value
-        # enforce "auto" if valid index device
-        self.__source_demuxer = "auto" if validate_device_index(source) else None
-        # log if not valid index device and invalid type
-        self.__verbose_logs and not self.__source_demuxer in [
-            "auto",
-            None,
-        ] and logger.warning(
-            "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
-                type(source_demuxer).__name__
-            )
-        )
-        # log if not valid index device and invalid type
-        self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
-            "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
-                source
-            )
-        )
-
-    # handles source stream
-    self.__source = source
-
-    # creates shallow copy for further usage #TODO
-    self.__source_org = copy.copy(self.__source)
-    self.__source_demuxer_org = copy.copy(self.__source_demuxer)
-
-    # handles all extracted devices names/paths list
-    # when source_demuxer = "auto"
-    self.__extracted_devices_list = []
-
-    # various source stream params
-    self.__default_video_resolution = ""  # handles stream resolution
-    self.__default_video_orientation = ""  # handles stream's video orientation
-    self.__default_video_framerate = ""  # handles stream framerate
-    self.__default_video_bitrate = ""  # handles stream's video bitrate
-    self.__default_video_pixfmt = ""  # handles stream's video pixfmt
-    self.__default_video_decoder = ""  # handles stream's video decoder
-    self.__default_source_duration = ""  # handles stream's video duration
-    self.__approx_video_nframes = ""  # handles approx stream frame number
-    self.__default_audio_bitrate = ""  # handles stream's audio bitrate
-    self.__default_audio_samplerate = ""  # handles stream's audio samplerate
-
-    # handle various stream flags
-    self.__contains_video = False  # contains video
-    self.__contains_audio = False  # contains audio
-    self.__contains_images = False  # contains image-sequence
-
-    # handles output parameters through filters
-    self.__metadata_output = None  # handles output stream metadata
-    self.__output_frames_resolution = ""  # handles output stream resolution
-    self.__output_framerate = ""  # handles output stream framerate
-    self.__output_frames_pixfmt = ""  # handles output frame pixel format
-    self.__output_orientation = ""  # handles output frame orientation
-
-    # check whether metadata probed or not?
-    self.__metadata_probed = False
+        ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+            source
+        )
+    else:
+        # otherwise find valid default source demuxer value
+        # enforce "auto" if valid index device
+        self.__source_demuxer = "auto" if validate_device_index(source) else None
+        # log if not valid index device and invalid type
+        self.__verbose_logs and not self.__source_demuxer in [
+            "auto",
+            None,
+        ] and logger.warning(
+            "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
+                type(source_demuxer).__name__
+            )
+        )
+        # log if not valid index device and invalid type
+        self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
+            "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
+                source
+            )
+        )
+
+    # handles source stream
+    self.__source = source
+
+    # creates shallow copy for further usage #TODO
+    self.__source_org = copy.copy(self.__source)
+    self.__source_demuxer_org = copy.copy(self.__source_demuxer)
+
+    # handles all extracted devices names/paths list
+    # when source_demuxer = "auto"
+    self.__extracted_devices_list = []
+
+    # various source stream params
+    self.__default_video_resolution = ""  # handles stream resolution
+    self.__default_video_orientation = ""  # handles stream's video orientation
+    self.__default_video_framerate = ""  # handles stream framerate
+    self.__default_video_bitrate = ""  # handles stream's video bitrate
+    self.__default_video_pixfmt = ""  # handles stream's video pixfmt
+    self.__default_video_decoder = ""  # handles stream's video decoder
+    self.__default_source_duration = ""  # handles stream's video duration
+    self.__approx_video_nframes = ""  # handles approx stream frame number
+    self.__default_audio_bitrate = ""  # handles stream's audio bitrate
+    self.__default_audio_samplerate = ""  # handles stream's audio samplerate
+
+    # handle various stream flags
+    self.__contains_video = False  # contains video
+    self.__contains_audio = False  # contains audio
+    self.__contains_images = False  # contains image-sequence
+
+    # handles output parameters through filters
+    self.__metadata_output = None  # handles output stream metadata
+    self.__output_frames_resolution = ""  # handles output stream resolution
+    self.__output_framerate = ""  # handles output stream framerate
+    self.__output_frames_pixfmt = ""  # handles output frame pixel format
+    self.__output_orientation = ""  # handles output frame orientation
+
+    # check whether metadata probed or not?
+    self.__metadata_probed = False
 

probe_stream(self, default_stream_indexes=(0, 0))

This method Parses/Probes FFmpeg subprocess pipe's Standard Output for given input source and Populates the information in private class variables.

Parameters:

Name Type Description Default
default_stream_indexes list, tuple

selects specific video and audio stream index in case of multiple ones. Value can be of format: (int,int). For example (0,1) is ("0th video stream", "1st audio stream").

(0, 0)

Returns: Reference to the instance object.

Source code in deffcode/sourcer.py
def probe_stream(self, default_stream_indexes=(0, 0)):
     """
     This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.
diff --git a/dev/search/search_index.json b/dev/search/search_index.json
index e45fff8..b5498d7 100644
--- a/dev/search/search_index.json
+++ b/dev/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

A cross-platform High-performance Video Frames Decoder that flexibly executes FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in just a few lines of python code

Highly Adaptive - DeFFcode APIs implements a standalone highly-extensible wrapper around FFmpeg multimedia framework. These APIs supports a wide-ranging media streams as input source such as live USB/Virtual/IP camera feeds, regular multimedia files, screen recordings, image sequences, network protocols (such as HTTP(s), RTP/RSTP, etc.), so on and so forth.

Highly Flexible - DeFFcode APIs gains an edge over other Wrappers by providing complete control over the underline pipeline including access to almost any FFmpeg specification thinkable such as specifying framerate, resolution, hardware decoder(s), filtergraph(s), and pixel-format(s) that are readily supported by all well known Computer Vision libraries.

Highly Convenient - FFmpeg has a steep learning curve especially for users unfamiliar with a command line interface. DeFFcode helps users by providing similar to OpenCV, Index based Camera Device Capturing and the same OpenCV-Python (Python API for OpenCV) coding syntax for its APIs, thereby making it even easier to learn, create, and develop FFmpeg based apps in Python.

"},{"location":"#key-features-of-deffcode","title":"Key features of DeFFcode","text":"

Here are some key features that stand out:

  • High-performance, low-overhead video frames decoding with robust error-handling.
  • Flexible API with access to almost any FFmpeg specification thinkable.
  • Supports a wide-range of media streams/devices/protocols as input source.
  • Curated list of well-documented recipes ranging from Basic to Advanced skill levels.
  • Hands down the easiest Index based Camera Device Capturing, similar to OpenCV.
  • Memory efficient Live Simple & Complex Filtergraphs. (Yes, You read it correctly \"Live\"!)
  • Lightning fast dedicated GPU-Accelerated Video Decoding & Transcoding.
  • Enables precise FFmpeg Frame Seeking with pinpoint accuracy.
  • Effortless Metadata Extraction from all streams available in the source.
  • Maintains the standard easy to learn OpenCV-Python coding syntax.
  • Out-of-the-box support for all prominent Computer Vision libraries.
  • Cross-platform, runs on Python 3.7+, and easy to install.
Still missing a key feature in DeFFcode?

Please review DeFFcode's Roadmap. If you still can't find the desired feature there, then you can request one simply by Commenting or Upvoting an existing comment on that issue.

"},{"location":"#getting-started","title":"Getting Started","text":"

In case you're run into any problems, consult our Help section.

"},{"location":"#installation-notes","title":"Installation Notes","text":"

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode on your machine.

"},{"location":"#recipes-aka-examples","title":"Recipes a.k.a Examples","text":"

Once you have DeFFcode installed, checkout our Well-Documented Recipes for usage examples:

How to Begin?

If you\u2019re just starting, check out the Beginner Basic Recipes and as your confidence grows, move up to Advanced Recipes .

  • Basic Recipes : Recipes for beginners of any skill level to get started.
  • Advanced Recipes : Recipes to take your skills to the next level.
"},{"location":"#api-in-a-nutshell","title":"API in a nutshell","text":"

As a user, you just have to remember only two DeFFcode APIs, namely:

See API Reference for more in-depth information.

"},{"location":"#a-ffdecoder-api","title":"A. FFdecoder API","text":"

The primary function of FFdecoder API is to decode 24-bit RGB video frames from the given source:

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# formulate the decoder with suitable source\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").formulate()\n\n# grab RGB24(default) 3D frames from decoder\nfor frame in decoder.generateFrame():\n\n    # lets print its shape\n    print(frame.shape) # (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n
"},{"location":"#b-sourcer-api","title":"B. Sourcer API","text":"

The primary function of Sourcer API is to gather information from all multimedia streams available in the given source:

# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
The resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 60.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 10.0,\n  \"approx_video_nframes\": 600,\n  \"source_video_bitrate\": \"832k\",\n  \"source_audio_bitrate\": \"\",\n  \"source_audio_samplerate\": \"\",\n  \"source_has_video\": true,\n  \"source_has_audio\": false,\n  \"source_has_image_sequence\": false\n}\n

"},{"location":"#contribution-guidelines","title":"Contribution Guidelines","text":"

Contributions are welcome, and greatly appreciated!

Please read our Contribution Guidelines for more details.

"},{"location":"#community-channel","title":"Community Channel","text":"

If you've come up with some new idea, or looking for the fastest way troubleshoot your problems. Please checkout our Gitter community channel \u27b6

"},{"location":"#become-a-stargazer","title":"Become a Stargazer","text":"

You can be a Stargazer by starring us on Github, it helps us a lot and you're making it easier for others to find & trust this library. Thanks!

"},{"location":"#donations","title":"Donations","text":"

DeFFcode is free and open source and will always remain so.

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

"},{"location":"#citation","title":"Citation","text":"

Here is a Bibtex entry you can use to cite this project in a publication:

@software{deffcode,\n  author       = {Abhishek Singh Thakur},\n  title        = {abhiTronix/deffcode: v0.2.4},\n  month        = oct,\n  year         = 2022,\n  publisher    = {Zenodo},\n  version      = {v0.2.4},\n  doi          = {10.5281/zenodo.7155399},\n  url          = {https://doi.org/10.5281/zenodo.7155399}\n}\n

"},{"location":"changelog/","title":"Release Notes","text":""},{"location":"changelog/#v025-2023-01-11","title":"v0.2.5 (2023-01-11)","text":"New Features
  • FFdecoder:
    • Added OpenCV compatibility patch for YUV pixel-formats.
      • Implemented new patch for handling YUV pixel-formats(such as YUV420p, yuv444p, NV12, NV21 etc.) for exclusive compatibility with OpenCV APIs.
        • Note: Only YUV pixel-formats starting with YUV and NV are currently supported.
      • Added new -enforce_cv_patch boolean attribute for enabling OpenCV compatibility patch.
  • Sourcer:
    • Added Looping Video support.
      • Now raw-frame numbers revert to null(None) whenever any looping is defined through filter(such as -filter_complex \"loop=loop=3:size=75:start=25\") or prefix(\"-ffprefixes\":[\"-stream_loop\", \"3\"]).
  • Docs:
    • Added YUV frames example code for Capturing and Previewing BGR frames from a video file recipe.
    • Added YUV frames example code for `Transcoding video using OpenCV VideoWriter API recipe.
    • Added YUV frames example code for `Transcoding lossless video using WriteGear API recipe.
    • Added new CUVID-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Video Transcoding with OpenCV`s VideoWriter API recipe.
    • Added new CUDA-NVENC-accelerated Video Transcoding with WriteGear API recipe both for consuming BGR and NV12 frames.
    • Added new CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API recipe which is still WIP(\ud83d\udcacconfirmed with a GIF from tenor).
    • Added new Capturing and Previewing frames from a Looping Video recipe using -stream_loop option and loop filter.
    • Added docs for -enforce_cv_patch boolean attribute in ffparam dictionary parameter.
    • Added new python dependency block for recipes.
    • Reflected new OpenCV compatibility patch for YUV pixel-formats in code.
    • Added new content.code.copy and content.code.link features.
Updates/Improvements
  • FFhelper:
    • Replaced depreciating Retry API from requests.packages with requests.adapters.
  • Maintenance:
    • Replaced raw.github.com links with GitLab and GH links.
    • Removed unused code.
    • Updated log message.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
    • Updated test_frame_format test to include -enforce_cv_patch boolean attribute.
    • Updated test_source to test looping video support.
  • Setup:
    • Removed unused imports and patches.
    • Bumped version to 0.2.5.
  • Docs:
    • Updated Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing passage.
    • Updated and corrected docs hyperlinks in index.md and ReadMe.md
    • Update Zenodo Badge and BibTex entry.
    • Updated Readme.md banner image URLs.
    • Updated md-typeset text font size to .75rem.
    • Updated text and admonitions.
    • Updated recipe assumptions.
    • Updated Readme.md GIF URLs.
    • Updated abstract text in recipes.
    • Updated changelog.md.
    • Updated recipe code.
    • Removed old recipes.
Bug-fixes
  • FFdecoder API:
    • Fixed Zero division bug while calculating raw_bit_per_component.
  • FFhelper:
    • Fixed response.headers returning content-length as Nonetype since it may not necessarily have the Content-Length header set.
      • Reason: The response from gitlab.com contains a Transfer-Encoding field as 'Transfer-Encoding': 'chunked', which means data is sent in a series of chunks, so the Content-Length header is emitted. More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding#Directives
  • Docs:
    • Fixed https://github.com/badges/shields/issues/8671 badge issue in README.md
    • Removed depreciated text.
    • Fixed several typos in docs.
  • CI:
    • Added fix for codecov upload bug (https://github.com/codecov/codecov-action/issues/598).
      • Updated codecov-action workflow to `v3.
      • Added new CODECOV_TOKEN GitHub secret.
Pull Requests
  • PR #37
"},{"location":"changelog/#v024-2022-10-07","title":"v0.2.4 (2022-10-07)","text":"New Features
  • FFdecoder API:
    • Implemented new comprehensive support for both discarding key default FFmpeg parameters from Decoding pipeline simply by assigning them null string values, and concurrently using values extracted from Output Stream metadata properties (available only when FFmpeg filters are defined) for formulating pipelines.
      • Added null string value support to -framerate and -custom_resolution attributes, as well as frame_format parameter for easily discarding them.
      • Re-Implemented calculation of rawframe pixel-format.
        • Reconfigured default rawframe pixel-format, Now rawframe pixel-format will always default to source_video_pixfmt with frame_format=\"null\".
        • Now with frame_format parameter value either \"null\" or invalid or undefined, rawframe pixel-format value is taken from output_frames_pixfmt metadata property extracted from Output Stream (available only when filters are defined). If valid output_video_resolution metadata property is found then it defaults to default pixel-format(calculated variably).
        • With frame_format=\"null\", -pix_fmt FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of rawframe resolution value.
        • Now with -custom_resolution dictionary attribute value either \"null\" or invalid or undefined, rawframe resolution value is first taken from output_video_resolution metadata property extracted from Output Stream (available only when filters are defined), next from source_video_resolution metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_resolution valid metadata properties are found then RuntimeError is raised.
        • With -custom_resolution dictionary attribute value \"null\", -s/-size FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of output framerate value.
        • Now with -framerate dictionary attribute either null or invalid or undefined, output framerate value is first taken from output_video_framerate metadata property extracted from Output Stream (available only when filters are defined), next from source_video_framerate metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_framerate valid metadata properties are found then RuntimeError is raised.
        • With -framerate dictionary attribute value \"null\", -r/-framerate FFmpeg parameter will not be added to Decoding pipeline.
    • Implemented passing of simple -vf filters, complex -filter_complex filters, and pre-headers(via -ffprefixes) directly to Sourcer API's sourcer_params parameter for probing Output Stream metadata and filter values.
  • Sourcer API:
    • Implemented new comprehensive approach to handle source_demuxer parameter w.r.t different source parameter values.
      • The source_demuxer parameter now accepts \"auto\" as its value for enabling Index based Camera Device Capture feature in Sourcer API.
      • Sourcer API auto-enforces source_demuxer=\"auto\" by default, whenever a valid device index (uses validate_device_index method for validation) is provided as its source parameter value.
        • \u26a0\ufe0f Sourcer API will throw Assertion error if source_demuxer=\"auto\" is provided explicitly without a valid device index at its source parameter.
      • Sourcer API now accepts all +ve and -ve device indexes (e.g. -1,0,1,2 etc.) to its source parameter, both as integer and string of integer types as source in Index based Camera Device Capture feature.
        • Sourcer API imports and utilizes extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system.
          • \u26a0\ufe0f Sourcer API will throw RuntimeError on failure to identify any device.
        • Sourcer API auto verifies that the specified source device index is in range of the devices discovered.
          • \u26a0\ufe0f Sourcer API will raise ValueError if value goes out of valid range.
        • Sourcer API also automatically handles -ve indexes if specified within the valid range.
        • Implemented patch to auto-add video= suffix to selected device name before using it as video source on Windows OSes.
        • Added patch for handling dictionary of devices paths(with devices names as values) and log messages on Linux OSes.
        • Added copy import for shallow copying various class parameters.
      • Implemented new Support for additional FFmpeg parameters and Output metadata.
        • Added three new metadata properties: output_video_resolution, output_video_framerate, output_frames_pixfmt for handling extracted Output Stream values, whenever additional FFmpeg parameters(such as FFmpeg filters) are defined.
        • Added support for auto-handling additional FFmpeg parameters defined by sourcer_params dictionary parameters.
        • Implement new separate pipeline for parsing Output Stream metadata by decoding video source using null muxer for few microseconds whenever additional FFmpeg parameters(such as -vf filters) are defined by the user.
        • Included new metadata_output internal parameter for holding Output Stream metadata splitted from original Sourcer Metadata extracted from new pipeline.
        • Included new output_video_resolution, output_video_framerate, output_frames_pixfmt internal parameters for metadata properties, whenever Output Stream Metadata available.
        • Added new extract_output boolean parameter to extract_video_pixfmt and extract_resolution_framerate internal methods for extracting output pixel-format, framerate and resolution using Output Stream metadata instead of Sourcer Metadata, whenever available.
      • Added tuple datatype to sourcer_params exception.
      • Added dict2Args import.
    • Added enumerate_devices property object to enumerate all probed Camera Devices connected to a system names along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.
    • Added new force_retrieve_missing parameter to retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata, when force_retrieve_missing=True.
    • Added various output stream metadata properties that are only available when additional FFmpeg parameters(such as filters) are defined manually, by assigning them counterpart source stream metadata property values
  • FFhelper:
    • Implemented new extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system and supported by valid OS specific FFmpeg demuxer.
      • Added support for three OS specific FFmpeg demuxers: namely dshow for Windows, v4l2 for Linux, and avfoundation for Darwin/Mac OSes.
      • Implemented separate code for parsing outputs of python subprocess module outputs provided with different commands for discovering all Video-Capture devices present on system.
        • Processed dshow (on Windows) and avfoundation (on Darwin) demuxers in FFmpeg commands with -list_devices true parameters using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names/indexes.
        • Used v4l2-ctl submodule command on Linux machines for listing all Video-Capture devices using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names and true system /dev/video paths.
          • Added patch for a single device with multiple /dev/video paths (each for metadata, video, controls), where it iterates on each path to find the exact path that contains valid video stream.
          • Added elaborated checks for catching all possible system errors that can occur while running v4l2-ctl submodule command.
          • The method will return discovered devices as list of dictionaries with device paths(/dev/video) as keys and respective device name as the values, instead of default list of device names.
          • Added patch for handling Linux specific log messages.
      • Added various logging messages to notify users about all discovered device names/paths w.r.t indexes.
      • \u26a0\ufe0f The extract_device_n_demuxer method will raise RuntimeError if it fails to identify any device.
      • Added various checks to assert invalid input parameters and unsupported OSes.
      • Added machine_OS parameter to specify OS running on the system, must be value of platform.system() module. If invalid the method will raise ValueError.
  • Utilities:
    • Added new validate_device_index() method to verify whether a given device index is valid or not.
      • Only Integers or String of integers are valid indexes.
      • Returns a boolean value, confirming whether valid(If true), or not(If False).
    • Added checks to support all +ve and -ve integers, both as integer and string types.
  • Docs:
    • Added new validate_device_index() method and its parameters description.
    • Added new extract_device_n_demuxer() method and its parameters description.
    • Added Decoding Camera Devices using Indexes support docs.
      • Added decode-camera-devices.md doc for Decoding Camera Devices using Indexes.
        • Added Enumerating all Camera Devices with Indexes example doc with code.
        • Added Capturing and Previewing frames from a Camera using Indexes example doc with code.
      • Added Camera Device Index support docs to FFdecoder and Sourcer API params.
  • CI:
    • Added check exception for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Added new test_discard_n_filter_params unittest for testing recently added support for both discarded parameters and filter values.
Updates/Improvements
  • FFdecoder API:
    • Extended range of supported output frame pixel-formats.
      • Added new pixel-formats to supported group by extending raw bits-per-component range.
    • Simplified raw frame dtype calculation based on selected pixel-format.
      • output_frames_pixfmt metadata property(if available) will be overridden to rgb24.
    • Replaced continue with break in generateFrame() method.
    • Improved handling of frame_format parameter.
  • Sourcer API:
    • Simplified JSON formatting and returning values logic.
    • Updated logging messages text and position.
    • Removed redundant variable definitions.
    • Changed related internal variable names w.r.t metadata property names.
    • Replaced os_windows internal parameter with machine_OS, and changed its input from os.name to more flexible platform.system().
    • Removed source_extension internal parameter and assigned values directly.
  • FFhelper:
    • Implemented more robust pattern matching for Linux machines.
    • Updated logs in check_sp_output() method for improving error output message.
    • Implemented \"Cannot open device\" v4l2-ctl command Error logs.
  • Maintenance:
    • Bumped version to 0.2.4.
    • Updated code comments.
  • CI:
    • Updated FFdecoder API's test_camera_capture unittest to test new Index based Camera Device Capturing on different platforms.
      • Added various parametrize source and source_demuxer parameter data to attain maximum coverage.
      • Added result field to fail and xfail unittest according to parametrize data provided on different platforms.
      • Removed pytest.mark.skipif to support all platforms.
    • Added and updated various parametrize test data to attain maximum coverage.
    • Limited range of extracted frames, for finishing tests faster.
    • Updated unittests to reflect recent name changes.
    • Disabled capturing of stdout/stderr with -s flag in pytest.
  • Setup:
    • Updated description metadata.
  • Bash Script:
    • Created undeleteable undelete.txt file for testing on Linux envs.
    • Updated undelete.txt file path.
    • Made FFmpeg output less verbose.
  • Docs:
    • Updated FFdecoder API params docs w.r.t recent changes and support for both discarded parameters and filter values.
      • Added new admonitions to explain handling of \"null\" and (special-case), undefined, or invalid type values in various parameters/attributes.
      • Added new footer reference explaining the handling of Default pixel-format for frame_format parameter.
      • Added missing docs for -default_stream_indexes ffparams attribute.
    • Added docs for recently added additional FFmpeg parameter in Sourcer API's sourcer_params parameter.
      • Removed unsupported -custom_resolution sourcer_params attributes from sourcer_params parameter docs.
      • Removed redundant -vcodec and -framerate attributes from sourcer_params parameter docs.
    • Updated both basic and advanced project Index hyperlinks.
    • Moved decoding-live-feed-devices.md doc from basic to advanced directory.
    • Updated page navigation in mkdocs.yml.
    • Update announcement bar to feature Index based Camera Device Capture support.
    • Updated Project description and Key features of DeFFcode.
    • Updated README.md with latest information.
    • Updated source and source_demuxer param doc.
    • Updated Hardware-Acceleration docs.
      • Updated Hardware-Accelerated Video Decoding and Transcoding docs to inform users about DeFFcode generated YUV frames not yet supported by OpenCV and its APIs.
    • Updated recipes docs to reflect recent changes in APIs.
    • Updated parameter docs to reflect recent name changes.
    • Updated parameters/attributes introductory descriptions.
    • Updated various parametrize data to attain maximum coverage.
    • Updated Zenodo badge and the BibTeX entry.
    • Updated method description texts and logging messages.
    • Update title headings, icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Breaking Updates/Changes
  • API:
    • Implemented new Index based Camera Device Capture feature (Similar to OpenCV), where the user just have to assign device index as integer (-n to n-1) in source parameter of DeFFcode APIs to directly access the given input device in few seconds.
  • FFdecoder API
    • Unsupported dtype pixel-format always defaults to rgb24.
  • Sourcer API:
    • Renamed output_video_resolution metadata property to output_frames_resolution.
    • Renamed output_video_framerate metadata property to output_framerate.
Bug-fixes
  • FFdecoder API:
    • Removed redundant dummy value for output_frames_pixfmt metadata property.
    • Fixed critical KeyError bug arises due to missing output metadata properties.
      • Enforced force_retrieve_missing parameter in Sourcer API's retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata.
      • Added new missing_prop internal class variable for handling metadata properties missing, received from Sourcer API.
      • Moved ffdecoder_operational_mode to missing metadata properties that cannot be updated but are read only.
      • Added missing metadata properties to metadata class property object for easy printing along with other metadata information.
      • Implemented missing metadata properties updation via. overridden metadata class property object.
        • Added counterpart_prop dict to handle all counterpart source properties for each missing output properties.
        • Implemented missing output properties auto-updation w.r.t counterpart source property.
        • Added separate case for handling only missing metadata properties and notifying user about counterpart source properties.
    • Fixed source metadata properties update bug causing non-existential missing metadata properties to be added to source metadata properties dictionary along with source metadata property.
      • Replaced update() calling on value dict directly with explicitly assigning values to source metadata properties dictionary.
      • Simplified missing_prop validation.
      • Removed unwanted continue in middle of loop.
    • Remove unusable exclusive yuv frames patch.
    • Fixed KeyError bug arises due to wrong variable placement.
    • Fixed approx_video_nframes metadata property check.
    • Fixed av_interleaved_write_frame(): broken pipe warning bug by switching process.terminate() with process.kill().
    • Fixed AttributeError bug caused due to typo in logger.
  • FFhelper:
    • Fixed check_sp_output() method returning Standard Error (stderr) even when Nonetype.
    • Fixed logger requiring utf-8 decoding.
    • Fixed missing force_retrieve_stderr argument to check_sp_output in extract_device_n_demuxer method on Linux platforms.
    • Fixed logger message bug.
  • Utils:
    • Fixed logger name typo.
  • Maintenance:
    • Fixed hyperlinks to new GitHub's form schemas.
    • Fixed typos in logs messages.
    • Removed redundant code.
    • Updated code comments.
  • Setup:
    • Rearranged long_description patches to address unused patch bug.
  • Bash Script:
    • Fixed chattr: No such file or directory bug.
  • CI:
    • Fixed missing lavfi demuxer for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Fixed missing ffparams parameter bug in test_discard_n_filter_params() unittest.
    • Fixed test_camera_capture test.
    • Removed redundant similar ValueError checks.
    • Fixed typo in pytest arguments.
    • Fixed missing arguments.
  • Docs:
    • Fixed invalid hyperlinks in ReadMe.md
    • Fixed bad formatting and context.
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #29
  • PR #32
"},{"location":"changelog/#v023-2022-08-11","title":"v0.2.3 (2022-08-11)","text":"New Features
  • Docs:
    • Added Zenodo Bibtex entry and badge in docs for easy citation.
    • Added new <div> tag bounding-box style to the Static FFmpeg binary download links in FFmpeg Installation Doc for better accessibility.
  • Maintenance:
    • Switched to new Issue GitHub's form schema using YAML:
      • Added new bug_report.yaml Issue GitHub's form schema for Bug Reports.
      • Added new idea.yaml Issue GitHub's form schema for new Ideas.
      • Added new question.yaml Issue GitHub's form schema for Questions.
      • Deleted old depreciated markdown(.md) files.
      • Polished forms.
Updates/Improvements
  • Maintenance:
    • Added new patterns to .gitignore to ignore vim files.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
  • Setup:
    • Added new patches for using README.md text as long_description metadata.
      • Implemented new patch to remove GitHub README UI specific text.
    • Simplified multiple str.replace to chained str.replace of better readability.
    • Bumped version to 0.2.3.
  • Docs:
    • Updated recipes to include with statement access method.
      • Updated existing recipes to include with statement access method in FFdecoder APIs.
      • Included new example code of accessing RGB frames using with statement access method.
      • Updated Recipe title to \"Accessing RGB frames from a video file\" across docs.
    • Included warning admonition for advising users to always use trim with reverse filter.
    • Updated docs text font to Libre Franklin.
    • Updated method description texts and logging messages.
    • Update icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Bug-fixes
  • FFdecoder API:
    • Fixed Context Manager methods.
      • Fixed __enter__ method returning class instance instead of formulating pipeline.
      • Fixed __exit__ method calling wrong non-existent method.
  • Setup:
    • Fixed missing comma(,) in keywords metadata.
    • Fixed bug in patch string.
  • Docs:
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #26
"},{"location":"changelog/#v022-2022-08-09","title":"v0.2.2 (2022-08-09)","text":"New Features
  • Sourcer API:
    • Added support for -ffprefixes attribute through Sourcer API's sourcer_param dictionary parameter (similar to FFdecoder API).
  • FFdecoder API:
    • Added new output_frames_pixfmt metadata property to preview and handle output frames pixel-format.
  • Docs:
    • Added separate \"Basic\" and \"Advanced\" Recipes markdowns files with self-explanatory text, related usage code, asset (such as images, diagrams, GIFs, etc.), and UI upgrades for bringing standard quality to visual design.
    • Added separate index.md for Basic and Advanced Recipes with introductory text and curated hyperlinks for quick references to various recipes (separated with sub-categories \"Decoding\", \"Transcoding\", and \"Extracting Video Metadata\").
    • Added related admonitions to specify python dependencies as well as other requirements and relevant information required for each of these recipes.
    • Added new Basic Decoding Recipes:
      • Added Decoding Video files with various pixel formats recipes.
      • Added Decoding Live Feed Devices recipes with source_demuxer FFdecoder API parameter.
      • Added Decoding Image sequences recipes supporting Sequential, Glob pattern , Single (looping) image.
      • Added Decoding Network Streams recipes.
    • Added new Basic Transcoding Recipes:
      • Added Transcoding Live frames recipes with OpenCV and WriteGear.
      • Added Transcoding Live Simple Filtergraphs recipes with OpenCV.
      • Added Saving Key-frames as Image recipes with different image processing libraries.
    • Added new Basic Extracting Video Metadata Recipes:
      • Added Extracting Video Metadata recipes with FFdecoder and Sourcer APIs.
    • Added new Advanced Decoding Recipes:
      • Added Hardware-Accelerated Video Decoding recipe using NVIDIA's H.264 CUVID Video-decoder(h264_cuvid).
      • Added Decoding Live Virtual Sources recipes with many test patterns using lavfi input virtual device.
    • Added new Advanced Decoding Recipes:
      • Added lossless Hardware-Accelerated Video Transcoding recipe with WriteGear API.
      • Added Transcoding Live Complex Filtergraphs recipes with WriteGear API.
      • Added Transcoding Video Art with Filtergraphs recipes with WriteGear API for creating real-time artistic generative video art using simple and complex filtergraphs.
    • Added new Advanced Updating Video Metadata Recipes:
      • Added Updating Video Metadata recipes with user-defined as well as source metadata in FFdecoder API.
    • Added new dark and light theme logo support.
    • Added new recipes GIF assets to gifs folder.
    • Added new dark logo deffcode-dark.png asset to images folder.
    • Added new ffdecoder.png and sourcer.png Image assets to images folder.
    • Added new navigation.tabs feature.
    • Added Material Announcement-Bar notifying recent changes.
Updates/Improvements
  • Sourcer API:
    • Implemented new validation checks to ensure given source has usable video stream available by checking availability of either video bitrate or both frame-size and framerate properties in the source metadata.
    • Improved extract_resolution_framerate method for making framerate extraction more robust by falling back to extracting TBR value when no framerate value available in the source metadata.
  • FFdecoder API:
    • Updated metadata property object to validate and override source metadata properties directly by overloading same property object before formulating Frames Decoder Pipeline:
      • Implemented validation checks to verify each validate manually assigned source metadata property against specific datatype before overriding.
      • Updated logging to notify invalid datatype values when assigned through metadata property object.
      • Added support for overriding source_video_resolution source metadata property to control frame-size directly through metadata.
      • Added support for overriding output_frames_pixfmt metadata attribute to be used as default pixel-format, when frame_format parameter value is None-type.
      • Improved handling of source metadata keys in metadata property object.
    • Updated metadata property object to handle and assign User-defined metadata directly by overloading the same property object:
      • Added new internal user_metadata class variable to handle all User-defined metadata information separately.
      • FFdecoder API's metadata property object now returns User-defined metadata information merged with Source Video metadata.
      • Added tuple value warning log to notify users json module converts Python tuples to JSON lists.
    • Improved logic to test validity of -custom_resolution attribute value through ffparams dictionary parameter.
    • Improved handling of FFmpeg pipeline framerate with both user-defined and metadata defined values.
    • Added tuple to exception in datatype check for ffparams dictionary parameter.
    • Added datatype validation check for frame_format parameter.
    • Improved handling of -framerate parameter.
  • Maintenance:
    • Reformatted all Core class and methods text descriptions:
      • Rewritten introductory each API class description.
      • Moved reference block from index.md to class description.
      • Fixed missing class and methods parameter description.
      • Fixed typos and context in texts.
      • Reformatted code comments.
    • Simplified for loop with if condition checking in metadata property object.
    • Updated logging comments.
  • Setup:
    • Updated project description in metadata.
    • Bumped version to 0.2.2.
  • Docs:
    • Updated Introduction doc:
      • Added new text sections such as \"Getting Started\", \"Installation Notes\", \"Recipes a.k.a Examples\" and \"API in a nutshell\".
      • Rewritten Introduction(index.md) with recent Information, redefined context, UI changes, updated recipe codes, curated hyperlinks to various recipes(separated with categories), and relatable GIFs.
      • Updated spacing in index.md using spacer class within <div> tag and &nbsp;.
      • Reformatted and centered DeFFcode Introductory description.
      • Reformatted FFmpeg Installation doc and Issue & PR guidelines.
      • Updated static FFmpeg binaries download URLs in FFmpeg Installation doc.
      • Refashioned text contexts, icons, and recipes codes.
      • Updated Key Features section with reflecting new features.
    • Updated README.md:
      • Updated README.md w.r.t recent changes in Introduction(index.md) doc.
      • Simplified and Reformatted text sections similar to Introduction doc.
      • Imported new \"Contributions\" and \"Donations\" sections from VidGear docs.
      • Added collapsible text and output section using <summary> and <detail> tags.
      • Added experimental note GitHub blockquote to simulate admonition in README.md.
      • Removed tag-line from README.md and related image asset.
      • Simplified and Grouped README URL hyperlinks.
      • Removed Roadmap section.
    • Updated Recipes docs:
      • Revamped DeFFcode Introduction index.md with new Information, Context and UI changes, Updated example codes and hyperlinks.
      • Updated Announcement Bar to fix announcement_link variable and text.
      • Updated footer note to notify users regarding tuple value warning in FFdecoder API.
      • Rewritten recipes w.r.t breaking changes in APIs.
    • Updated Reference docs:
      • Completely revamped API's parameter reference docs.
      • Added new Functional Block Diagrams to FFdecoder and Sourcer API References.
      • Rewritten and Reformatted FFdecoder and Sourcer API's parameter reference docs with new information w.r.t recent changes.
      • Implemented new admonitions explaining new changes, related warnings/errors, usage examples etc.
      • Removed redundant advanced.md and basic.md docs.
      • Added new abstracts to FFhelper and Utils docs.
    • Updated docs site navigation and titles:
      • Reformatted index.md and installation/index.md.
      • Renamed help/index.md to help/help.md.
      • Moved basic and advanced recipes from example to recipes folder.
      • Imported \"Donations\" sections from VidGear docs to help.md.
      • Added updated page-title and navigation hyperlinks in mkdocs.yml to new markdown files incorporated recently.
      • Updated internal navigation hyperlinks in docs and removed old redundant file links.
    • Updated docs UI:
      • Added custom spacer class in CSS for custom vertical spacing.
      • Imported new \"New\", \"Advance\", \"Alert\", \"Danger\" and \"Bug\" admonitions custom CSS UI patches from vidgear.
      • Updated all admonitions icons with new custom icon SVG+XML URLs.
      • Reformatted custom.css and added missing comments.
      • Updated docs fonts:
        • Updated text font to Heebo.
        • Updated code font to JetBrains Mono.
      • Updated primary and accent colors:
        • Updated primary light color to light green.
        • Updated primary dark color to amber.
        • Updated accent light color to green.
        • Updated accent dark color to lime.
      • Replaced admonitions with appropriate ones.
      • Changed Color palette toggle icons.
      • Updated icons in title headings.
    • Updated admonitions messages.
    • Updated changelog.md.
  • CI:
    • Pinned jinja2 version to <3.1.0, since jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799).
    • Updated unittests w.r.t recent changes in APIs:
      • Updated test_frame_format unittest to include manually assign output pixel-format via metadata property object.
      • Updated test_metadata unittest to include new checks parameter to decide whether to perform Assertion test on assigned metadata properties in FFdecoder API.
      • Added new parametrize attributes in test_metadata and test_seek_n_save unittests to cover every use-cases.
      • Replaced IOError with ValueError in Sourcer API unittests.
    • Updated test_metadata unittest to verify tuple value warning.
    • Updated unittests to increase code coverage significantly.
Breaking Updates/Changes
  • Sourcer API:
    • Sourcer API's retrieve_metadata() method now returns parsed metadata either as JSON string or dictionary type.
      • Added new pretty_json boolean parameter to retrieve_metadata(), that is when True, returns metadata formatted as JSON string instead of default python dictionary.
    • Changed IOError to ValueError in Sourcer API, raised when source with no decodable audio or video stream is provided.
  • FFdecoder API:
    • Rename extraparams dictionary parameter to ffparams in FFdecoder API.
    • The source metadata value cannot be altered through metadata property object in FFdecoder API.
    • Removed -ffpostfixes attribute support from ffparams dictionary parameter in FFdecoder API, since totally redundant in favor of similar -ffprefixes and -clones attributes.
Bug-fixes
  • FFdecoder API:
    • Fixed metadata property object unable to process user-defined keys when any source metadata keys are defined.
    • Fixed TypeError bug with string type -framerate parameter values.
  • Sourcer API:
    • Fixed Sourcer API throws IOError for videos containing streams without both source bitrate and framerate defined (such as from lavfi input virtual device).
    • Fixed AttributeError bug due to typo in variable name.
  • CI:
    • Fixed support for newer mkdocstring version in DeFFcode Docs Deployer workflow.
      • Added new mkdocstrings-python-legacy dependency.
      • Replaced rendering variable with options.
      • Removed pinned mkdocstrings==0.17.0 version.
      • Removed redundant variables.
    • Updated test_metadata unittest to fix AssertionError Bug.
  • Docs:
    • Fixed some admonitions icons not showing bug using !important rule in CSS.
    • Fixed 404.html static page not showing up.
    • Fixed invalid internal navigation hyperlinks and asset paths.
    • Removed quote/cite/summary admonition custom UI patches.
    • Removed redundant information texts.
    • Fixed typos in code comments.
    • Fixed typos in example code.
Pull Requests
  • PR #23
"},{"location":"changelog/#v021-2022-07-14","title":"v0.2.1 (2022-07-14)","text":"New Features
  • Sourcer API:
    • Implemented support for extracting metadata from live input devices/sources.
    • Added new source_demuxer and forced_validate parameters to validate_source internal method.
    • Implemented logic to validate source_demuxer value against FFmpeg supported demuxers.
    • Rearranged metadata dict.
    • Updated Code comments.
  • FFdecoder API:
    • Implemented functionality to supported live devices by allowing device path and respective demuxer into pipeline.
    • Included -f FFmpeg parameter into pipeline to specify source device demuxer.
    • Added special case for discarding -framerate value with Nonetype.
  • CI:
    • Added new unittest test_camera_capture() to test support for live Virtual Camera devices.
    • Added new v4l2loopback-dkms, v4l2loopback-utils and kernel related APT dependencies.
  • Bash Script:
    • Added new FFmpeg command to extract image datasets from given video on Linux envs.
    • Created live Virtual Camera devices through v4l2loopback library on Github Actions Linux envs.
      • Added v4l2loopback modprobe command to setup Virtual Camera named VCamera dynamically at /dev/video2.
      • Added v4l2-ctl --list-devices command for debugging.
      • Implemented FFmpeg command through nohup(no hangup) to feed video loop input to Virtual Camera in the background.
Updates/Improvements
  • Sourcer API:
    • Only either source_demuxer or source_extension attribute can be present in metadata.
    • Enforced forced_validate for live input devices/sources in validate_source internal method.
  • FFdecoder API:
    • Rearranged FFmpeg parameters in pipeline.
    • Removed redundant code.
    • Updated Code comments.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
  • CI:
    • Restricted test_camera_capture() unittest to Linux envs only.
    • Removed return_generated_frames_path() method support for Linux envs.
    • Pinned jinja2 3.1.0 or above breaking mkdocs.
      • jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799), therefore pinned jinja2 version to <3.1.0.
  • Bash Script:
    • Updated to latest FFmpeg Static Binaries links.
      • Updated download links to abhiTronix/ffmpeg-static-builds hosting latest available versions.
      • Updated date/version tag to 12-07-2022.
      • Removed depreciated binaries download links and code.
  • Setup:
    • Bumped version to 0.2.1.
  • Docs:
    • Updated changelog.md.
Breaking Updates/Changes
  • Implement support for live input devices/sources.
    • source parameter now accepts device name or path.
    • Added source_demuxer parameter to specify demuxer for live input devices/sources.
    • Implemented Automated inserting of -f FFmpeg parameter whenever source_demuxer is specified by the user.
Bug-fixes
  • Sourcer API:
    • Fixed Nonetype value bug in source_demuxer assertion logic.
    • Fixed typos in parameter names.
    • Added missing import.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
    • Fixed bug with get_supported_demuxers not detecting name patterns with commas.
    • Removed redundant logging.
  • CI:
    • Fixed critical permission bug causing v4l2loopback to fail on Github Actions Linux envs.
      • Elevated privileges to root by adding sudo to all commands(including bash scripts and python commands).
      • Updated vidgear dependency to pip install from its git testing branch with recent bug fixes.
      • Replaced relative paths with absolute paths in unit tests.
    • Fixed WriteGear API unable to write frames due to permission errors.
    • Fixed test_source_playback() test failing on Darwin envs with OLD FFmpeg binaries.
      • Removed custom_ffmpeg value for Darwin envs.
    • Fixed various naming typos.
    • Fixed missing APT dependencies.
Pull Requests
  • PR #17
"},{"location":"changelog/#v020-2022-03-21","title":"v0.2.0 (2022-03-21)","text":"New Features
  • Sourcer API:
    • Added a new source_audio_samplerate metadata parameter:
      • Re-implemented __extract_audio_bitrate internal function from scratch as __extract_audio_bitrate_nd_samplerate.
        • Implemented new algorithm to extract both audio bitrate and samplerate from given source.
        • Updated regex patterns according to changes.
      • Updated __contains_video and __contains_audio logic to support new changes.
    • Added metadata extraction support:
      • Added retrieve_metadata class method to Sourcer API for extracting source metadata as python dictionary.
        • Populated private source member values in dictionary with distinct keys.
    • Added new -force_validate_source attribute to Sourcer API's sourcer_params dict parameter for special cases.
    • Implemented check whether probe_stream() called or not in Sourcer API.
  • FFdecoder API:
    • Added metadata extraction and updation support:
      • Added metadata property object function to FFdecoder API for retrieving source metadata form Sourcer API as dict and return it as JSON dump for pretty printing.
        • Added Operational Mode as read-only property in metadata.
      • Added metadata property object with setter() method for updating source metadata with user-defined dictionary.
        • Implemented way to manually alter metadata keys and values for custom results.
  • Docs:
    • Added new comprehensive documentation with Mkdocs:
      • Added new image assets:
        • Added new Deffcode banner image, logo and tagline
        • Added new icon ICO file with each layer of the favicon holds a different size of the image.
        • Added new png images for best compatibility with different web browsers.
      • Added new docs files:
        • Added new index.md with introduction to project.
        • Added new changelog.md.
        • Added license.md
        • Added new index.md with instructions for contributing in DeFFcode.
          • Added issue.md with Issue Contribution Guidelines.
          • Added PR.md with PR Contribution Guidelines.
        • Added new custom.js to add gitter sidecard support.
        • Added new custom.css that brings standard and quality visual design experience to DeFFcode docs.
          • Added new admonitions new and alert.
        • Added separate LICENSE(under CC creative commons) and README.md for assets.
        • Added new main.html extending base.html for defining custom site metadata.
        • Added deFFcode banner image to metadata.
        • Added twitter card and metadata.
        • Added version warning for displaying a warning when the user visits any other version.
        • Added footer sponsorship block.
        • Added gitter card official JS script dist.
        • Added new custom 404.html to handle HTTP status code 404 Not Found.
          • Implemented custom theming with new CSS style.
          • Added custom 404 image asset.
        • Added new index.md with DeFFcode Installation notes.
          • Added info about Supported Systems, Supported Python legacies, Prerequisites, Installation instructions.
          • Added Pip and Source Installation instructions.
        • Added new ffmpeg_install.md with machine-specific instructions for FFmpeg installation.
        • Added new index.md with different ways to help DeFFcode, other users, and the author.
          • Added info about Starring and Watching DeFFcode on GitHub, Helping with open issues etc.
          • Added Twitter intent used for tweeting #deffcode hashtags easily.
          • Added Kofi Donation link button.
          • Added author contact links and left align avatar image.
        • Added new get_help.md to get help with DeFFcode.
          • Added DeFFcode gitter community link.
          • Added other helpful links.
      • Added new assets folders.
      • Added Basic Recipes with basic.md
      • Added Advanced Recipes with advanced.md
      • Added all API References.
        • Added mkdocstrings automatic documentation from sources.
        • Added new index.md for FFdecoder API with its description and explaining its API.
        • Added new index.md for Sourcer API with its description and explaining its API.
        • Added ffhelper methods API references.
        • Added utils methods API references.
      • Added all API Parameters.
        • Added new params.md for FFdecoder API explaining all its parameters.
        • Added new params.md for Sourcer API explaining all its parameters.
        • Added Mkdocs support with mkdocs.yml
      • Implemented new mkdocs.yml with relevant parameters.
        • Added extended material theme with overridden parts.
        • Added site metadata with site_name, site_url, site_author, site_description, repo_name, repo_url, edit_uri, copyright etc.
        • Added navigation under sections for easily accessing each document.
        • Implemented Page tree for DeFFcode docs.
        • Added features like navigation.tracking, navigation.indexes, navigation.top, search.suggest, search.highlight, search.share, content.code.annotate.
        • Added separate palette [default]light(with primary:green accent: dark green) and [slate]dark(with primary:teal accent: light green) mode.
        • Added Color palette toggle switch with icon material/home-lightning-bolt.
        • Added support for all pymarkdown-extensions.
        • Added google fonts for text: Quicksand and code: Fira Code.
        • Added custom logo and icon for DeFFcode.
        • Added support for plugins like search, git-revision-date-localized, minify.
        • Added support for mkdocstrings plugin for auto-built API references.
          • Added python handler for parsing python source-code to mkdocstrings.
          • Improved source-code docs for compatibility with mkdocstrings.
        • Added support for extensions like admonition, attr_list, codehilite, def_list, footnotes, meta, and toc.
        • Added social icons and links.
        • Added custom extra_css and extra_javascript.
        • Added support for en (English) language.
      • Added new badges to README.md for displaying current status of CI jobs and coverage.
      • Added Roadmap to README.md
  • CI:
    • Automated CI support for different environments:
      • Implemented auto-handling of dependencies installation, unit testing, and coverage report uploading.
      • Added GitHub Action workflow for Linux envs:
        • Added and configured CIlinux.yml to enable GitHub Action workflow for Linux-based Testing Envs.
        • Added 3.7+ python-versions to build matrix.
        • Added code coverage through codecov/codecov-action@v2 workflow for measuring unit-tests effectiveness.
          • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
      • Added Appveyor workflow for Windows envs:
        • Add and configured appveyor.yml to enable Appveyor workflow for Windows-based Testing Envs.
        • Added 3.7+ 64-bit python-versions to build matrix.
        • Enabled fast_finish to exit immediately on error.
      • Added Azure-Pipelines workflow for MacOS envs:
        • Add and configured azure-pipelines.yml to enable Azure-Pipelines workflow for MacOS-based Testing Envs.
        • Added code coverage through codecov workflow for measuring unit-tests effectiveness.
          • Added online auto validation of codecov bash script using SH256SUM and sig files as recommended.
        • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
        • Added 3.7+ python-versions to build matrix.
      • Added automated flake8 testing to discover any anomalies in code.
      • Added master branches for triggering CI.
    • Implement new automated Docs Building and Deployment on gh-pages through GitHub Actions workflow:
      • Added new workflow yaml docs_deployer.yml for automated docs deployment.
      • Added different jobs with ubuntu-latest environment to build matrix.
      • Added actions/checkout@v2 for repo checkout and actions/setup-python@v2 for python environment.
      • Pinned python version to 3.8 for python environment in docs building.
      • Added GIT_TOKEN, GIT_NAME, GIT_EMAIL environment variables through secrets.
      • Added Mkdocs Material theme related python dependencies and environments.
      • Added push on master and dev branch release with published as triggers.
      • Pinned mkdocstrings==0.17.0.
    • Added new Automated Docs Versioning:
      • Implemented Docs versioning through mike.
      • Separate new workflow steps to handle different versions.
      • Added step to auto-create RELEASE_NAME environment variable from DeFFcode version file.
      • Update docs deploy workflow to support latest, release and dev builds.
      • Added automatic release version extraction from GitHub events.
    • Added Skip Duplicate Actions Workflow to DeFFcode Docs Deployer:
      • Added Skip Duplicate Actions(fkirc/skip-duplicate-actions@master) Workflow to DeFFcode Docs Deployer to prevent redundant duplicate workflow-runs.
  • Maintenance:
    • New DeFFcode project issue and PR templates:
      • Added PR template:
        • Added a pull request template(PULL_REQUEST_TEMPLATE.md) for project contributors to automatically see the template's contents in the pull request body.
        • Added Brief Description, Requirements / Checklist, Related Issue, Context, Types of changes blocks.
      • Added Proposal, Bug-Report and Question templates:
        • Created an ISSUE_TEMPLATE subdirectory to contain multiple issue templates.
        • Add manually-created Proposal(proposal.md) and Question(question.md) issue template for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Any Other Information like blocks.
        • Add an manually-created Bug Report(bug_report.md) issue template to ISSUE_TEMPLATE subdirectory for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Expected Behavior, Actual Behavior, Possible Fix, Steps to reproduce, Miscellaneous like blocks.
        • Added YAML frontmatter to each issue template to pre-fill the issue title, automatically add labels and assignees, and give the template a name and description.
        • Added a config.yml file to the .github/ISSUE_TEMPLATE folder to customize the issue template chooser that people see when creating a new issue.
        • Set blank_issues_enabled parameter to false to encourage contributors to use issue templates.
        • Added contact_links parameter with gitter community link to receive regular issues outside of GitHub.
      • Added new FUNDING.yml with ko-fi donation link.
      • Added .gitattributes for DeFFcode, that set the default behavior, in case people don't have core.autocrlf set.
      • Imported Codecov config(codecov.yml) from vidgear to modify coverage parameters.
  • Tests:
    • Added DeFFcode unit tests with pytest:
      • Added essential.py for defining all essential functions necessary for DeFFcode unit tests.
      • Added return_static_ffmpeg, remove_file_safe, return_testvideo_path, return_generated_frames_path, actual_frame_count_n_frame_size essential functions.
      • Added is_windows global variable.
      • Added related imports and logging.
      • Added __init__.py.
      • Moved all files to test folder.
      • Added DeFFcode's utils unit tests with pytest.
        • Added new test_loggerhandler and test_dict2Args tests.
      • Added DeFFcode's ffhelper unit tests with pytest.
        • Added new test_ffmpeg_binaries_download, test_validate_ffmpeg, test_get_valid_ffmpeg_path, test_check_sp_output, test_is_valid_url, test_is_valid_image_seq, and test_validate_imgseqdir parametrize tests.
      • Added DeFFcode's Sourcer API unit tests with pytest.
        • Added new test_source and test_probe_stream_n_retrieve_metadata parametrize tests.
      • Added DeFFcode's FFdecoder API unit tests with pytest.
        • Added new test_source_playback, test_frame_format, test_metadata, test_seek_n_save, and test_FFdecoder_params parametrize unit tests.
      • Added related imports and logging.
      • Added unit test for delete_file_safe utils function.
  • Bash:
    • \ud83d\udd27 Imported prepare_dataset.sh from vidgear for downloading pytest datasets to temp dir.
Updates/Improvements
  • FFdecoder API:
    • Removed redundant forcing -r FFmpeg parameter for image sequences as source.
    • Removed redundant checks on -vf FFmpeg parameter.
    • FFmpeg parameter -s will be discarded in favor of -custom_resolution attribute.
    • Replaced -constant_framerate with FFmpeg -framerate attribute.
    • Replaced -custom_source_params with correct -custom_sourcer_params attribute.
    • Renamed operational_mode metadata parameter to ffdecoder_operational_mode.
  • Sourcer API:
    • Converted all Sourcer APIs public available variables into private ones for stability.
    • All Sourcer's publicly accessed variable metadata values in FFdecoder, therefore replaced with dictionary counterparts.
    • Moved FFmpeg path validation and handling to Sourcer from FFdecoder API.
    • Moved -ffmpeg_download_path dictionary attribute to Sourcer API's sourcer_params parameter.
    • Moved dependencies and related functions.
  • CI:
    • Excluded dev branch from triggering workflow on any environment.
      • Updated yaml files to exclude beta dev branch from triggering workflow on any environment.
      • Restricted codecov to use only master branch.
    • Re-implemented fkirc/skip-duplicate-actions@master to Skip individual deploy steps instead of Skip entire jobs
  • Docs:
    • Updated PR.md
      • Added instructions to download prepare_dataset.sh using curl.
      • Updated dependencies for pytest.
    • Updated advanced.md
      • Updated generating Video from Image sequence to save video using OpenCV writer instead of WriteGear API.
      • Added frame_format=\"bgr24\" and additional instructions regarding OpenCV writer.
      • Updated example codes with new changes.
      • Rearranged examples placement.
    • Updates to custom.css
      • Added donation sponsor link in page footer with heart animation.
      • Added bouncing heart animation through pure CSS.
      • Added Bold property to currently highlighted link in Navigation Bar.
      • Updated Navigation Bar title font size.
      • Updated version list text to uppercase and bold.
      • Updated icon for task list unchecked.
      • Added more top-padding to docs heading.
      • Updated Block quote symbol and theming.
      • Updated Custom Button theming to match docs.
      • Added new custom classes to create shadow effect in dark mode for better visibility.
      • Updated dark mode theme \"slate\" hue to 285.
    • Updated admonitions colors.
    • Updated gitter sidecard UI colors and properties.
    • Reflected recent changes in Sourcer and FFdecoder API's metadata.
    • Updated sample code formatting from sh to json.
    • Added missing docs for delete_file_safe utils function.
    • Updated Download Test Datasets instructions.
    • Updated contribution guidelines and installation docs with related changes.
    • Updated License Notice.
    • Updated code comments.
    • Updated logging messages.
    • Updated Deffcode Logo and Tagline to be dark-mode friendly.
    • Adjusted asset alignment.
    • Updated example code.
    • Updated Installation instructions, Requirements and Roadmap.
    • Corrected links to documents.
    • Updated project description.
    • Updated LICENSE.
    • Updated indentation and code comments
    • Re-aligned text and images in README.md
    • Adjusted image classes and width.
  • Maintenance:
    • Updated LICENSE notice to add vidgear notice.
    • Bumped version to 0.2.0
    • Added useful comments for convenience.
Breaking Updates/Changes
  • Sourcer API will now raise an Assertion error if probe_stream() is not called before calling retrieve_metadata().
  • Only -framerate values greater than 0.0 are now valid.
  • Renamed decode_stream to probe_stream in Sourcer API.
  • Any of video bitrate or video framerate are sufficient to validate if source contains valid video stream(s).
  • Any of audio bitrate or audio samplerate are sufficient to validate if source contains valid audio stream(s).
Bug-fixes
  • APIs:
    • Added missing delete_file_safe function in utils.
      • Imported delete_file_safe from vidgear to safely delete files at given path.
    • Fixed forward slash bugs in regex patterns.
    • Fixed IndexError when no bitrate was discovered in given source.
    • Fixed FFmpeg subprocess pipeline not terminating gracefully in FFdecoder API.
    • Fixed __version__ not defined in DeFFcode's __init__.py that throws AttributeError: module 'deffcode' has no attribute '__version__' on query.
      • Added necessary import in __init__.py.
  • Docs:
    • Fixed missing \"-vcodec\": \"h264_cuvid\" value in example code.
    • Fixed typos in filenames in utils.py
    • Fixed internal missing or invalid hyperlinks.
    • Fixed improper docs context and typos.
    • Fixed \"year\" in license notice.
    • Fixed content spacing.
    • Fixed Gitter Community Link in Mkdocs.
    • Fixed typos in README.md.
    • Fixed typos in license notices.
    • Fixed typos in code comments.
    • Fixed typos in example code.
  • CI:
    • Fixed missing FFmpeg dependency bug in GitHub Actions.
    • Fixed typo in Docs Deployer yaml.
    • Fixed if condition skipping when need is skipping
  • Maintenance:
    • Added missing imports.
    • Fixed redundant conditional logics.
    • Removed or Replaced redundant conditions and definitions.
    • Fixed minor typos in templates.
Pull Requests
  • PR #5
  • PR #6
  • PR #8
  • PR #9
  • PR #11
  • PR #12
  • PR #13
  • PR #14
"},{"location":"changelog/#v010-2022-03-07","title":"v0.1.0 (2022-03-07)","text":"New Features
  • Open-Sourced DeFFcode under the Apache 2.0 License.
  • Added new Classes(APIs):
    • FFdecoder: Performant Real-time Video frames Generator for generating blazingly fast video frames(RGB ndarray by default).
    • Sourcer: Extracts source video metadata (bitrate, resolution, framerate, nframes etc.) using its subprocess FFmpeg output.
  • Added new Helper functions:
    • ffhelper: Backend FFmpeg Wrapper that handles all subprocess transactions and gather data.
    • utils: Handles all additional Utilizes required for functioning of DeFFcode.
  • First PyPi Release:
    • Released DeFFcode to Python Package Index (PyPI)
    • Added setup.py and related metadata.
    • Added version.py
  • Docs:
    • Added abstract and related information in README.md
    • Added installation instructions.
    • Added preliminary usage examples.
  • Maintenance:
    • Added LICENSE.
    • Added .gitignore
Updates/Improvements
  • Maintenance:
    • Bumped version to 0.1.0
    • Updated LICENSE notice to add vidgear code usage notice.
Breaking Updates/Changes
  • Fixed support for Python-3.7 and above legacies only.
Bug-fixes
  • Docs:
    • Fixed hyperlinks in README.
    • Fixed indentation and spacing.
    • Fixed typos and updated context.
    • Removed dead code.
"},{"location":"help/","title":"Helping Us","text":"

Liked DeFFcode? Would you like to help DeFFcode, other users, and the author?

There are many simple ways to help us:

"},{"location":"help/#star-deffcode-on-github","title":"Star DeFFcode on GitHub","text":"

You can star DeFFcode on GitHub:

It helps us a lot by making it easier for others to find & trust this library. Thanks!

"},{"location":"help/#help-others-with-issues-on-github","title":"Help others with issues on GitHub","text":"

You can see through any opened or pinned existing issues on our GitHub repository, and try helping others, wherever possible:

"},{"location":"help/#watch-the-github-repository","title":"Watch the GitHub repository","text":"

You can watch \ud83d\udc40 DeFFcode Activities on GitHub:

When you watch a repository, you will be notified of all conversations for that repository, including when someone creates a new issue, or pushes a new pull request.

You can try helping solving those issues, or give valuable feedback/review on new Pull Requests.

"},{"location":"help/#tweet-about-deffcode","title":"Tweet about DeFFcode","text":"

Tweet about DeFFcode and Spread the word \ud83d\udde3:

Tweet #deffcode

Let others know how you are using DeFFcode and why you like it!

"},{"location":"help/#helping-author","title":"Helping Author","text":"

Donations help keep DeFFcode's development alive and motivate me (as author).

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

Thanks a million!

"},{"location":"help/#connect-with-author","title":"Connect with Author","text":"

You can connect with me, the author \ud83d\udc4b:

  • Follow author on GitHub:
  • Follow author on Twitter: Follow @abhi_una12
  • Get in touch with author on Linkedin:

"},{"location":"license/","title":"License","text":"

This library is released under the Apache 2.0 License.

"},{"location":"license/#copyright-notice","title":"Copyright Notice","text":"
Copyright (c) 2021 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n
"},{"location":"contribution/","title":"Overview","text":""},{"location":"contribution/#contribution-overview","title":"Contribution Overview","text":"

Contributions are always welcomed

We'd love your contribution to DeFFcode in order to fix bugs or to implement new features!

"},{"location":"contribution/#submission-guidelines","title":"Submission Guidelines","text":"
  • Submitting an Issue Guidelines \u27b6
  • Submitting Pull Request(PR) Guidelines \u27b6
"},{"location":"contribution/#submission-contexts","title":"Submission Contexts","text":""},{"location":"contribution/#got-a-question-or-problem","title":"Got a question or problem?","text":"

For quick questions, please refrain from opening an issue, instead you can reach us on Gitter community channel.

"},{"location":"contribution/#found-a-typo","title":"Found a typo?","text":"

There's no need to contribute for some typos. Just reach us on Gitter \u27b6 community channel, We will correct them in (less than) no time.

"},{"location":"contribution/#found-a-bug","title":"Found a bug?","text":"

If you encountered a bug, you can help us by submitting an issue in our GitHub repository. Even better, you can submit a Pull Request(PR) with a fix, but make sure to read the guidelines \u27b6.

"},{"location":"contribution/#request-for-a-featureimprovement","title":"Request for a feature/improvement?","text":"Subscribe to Github Repository

You can subscribe our GitHub Repository to receive notifications through email for new pull requests, commits and issues that are created in DeFFcode. Learn more about it here \u27b6

You can request our GitHub Repository for a new feature/improvement based on the type of request:

Please submit an issue with a proposal template for your request to explain how it benefits everyone in the community.

  • Major Feature Requests: If you require a major feature for DeFFcode, then first open an issue and outline your proposal so that it can be discussed. This will also allow us to better coordinate our efforts, prevent duplication of work, and help you to craft the change so that it is successfully accepted into the project. The purposed feature, if accepted, may take time based on its complexity and availability/time-schedule of our maintainers, but once it's completed, you will be notified right away. Please be patient!

  • Minor Feature Requests: Small features and bugs resolved on priority. You just have to submit an issue to our GitHub Repository.

"},{"location":"contribution/PR/","title":"Submitting Pull Request(PR) Guidelines:","text":"

The following guidelines tell you how to submit a valid PR for DeFFcode:

Working on your first Pull Request for DeFFcode?

  • You can learn about \"How to contribute to an Open Source Project on GitHub\" from this doc \u27b6
  • If you're stuck at something, please join our Gitter community channel. We will help you get started!

"},{"location":"contribution/PR/#clone-branch-for-pr","title":"Clone branch for PR","text":"

You can clone your Forked remote git to local and create your PR working branch as a sub-branch of latest master branch as follows:

Make sure the master branch of your Forked repository is up-to-date with DeFFcode, before starting working on a Pull Request.

# clone your forked repository(change with your username) and get inside\ngit clone https://github.com/{YOUR USERNAME}/DeFFcode.git && cd DeFFcode\n\n# pull any recent updates\ngit pull\n\n# Now create your new branch with suitable name(such as \"subbranch_of_master\")\ngit checkout -b subbranch_of_master\n

Now after working with this newly created branch for your Pull Request, you can commit and push or merge it locally or remotely as usual.

"},{"location":"contribution/PR/#pr-submission-checklist","title":"PR Submission Checklist","text":"

There are some important checks you need to perform while submitting your Pull Request(s) for DeFFcode library:

  • Submit a Related Issue:

  • The first thing you do is submit an issue with a proposal template for your work first and then work on your Pull Request.

  • Submit a Draft Pull Request:

  • Submit the draft pull request from the first day of your development.

  • Add a brief but descriptive title for your PR.
  • Explain what the PR adds, fixes, or improves.
  • In case of bug fixes, add a new unit test case that would fail against your bug fix.
  • Provide output or screenshots, if you can.
  • Make sure your pull request passed all the CI checks (triggers automatically on pushing commits against master branch). If it's somehow failing, then ask the maintainer for a review.
  • Click \"ready for review\" when finished.

  • Test, Format & lint code locally:

  • Make sure to test, format, and lint the modified code locally before every commit. The details are discussed below \u27b6

  • Make sensible commit messages:

  • If your pull request fixes a separate issue number, remember to include \"resolves #issue_number\" in the commit message. Learn more about it here \u27b6.

  • Keep the commit message concisely as much as possible at every submit. You can make a supplement to the previous commit with git commit --amend command.

  • Perform Integrity Checks:

    Any duplicate pull request will be Rejected!

  • Search GitHub if there's a similar open or closed PR that relates to your submission.

  • Check if your purpose code matches the overall direction of the DeFFcode APIs and improves it.
  • Retain copyright for your contributions, but also agree to license them for usage by the project and author(s) under the Apache 2.0 license \u27b6.

  • Link your Issues:

    For more information on Linking a pull request to an issue, See this doc\u27b6

  • Finally, when you're confident enough, make your pull request public.

  • You can link an issue to a pull request manually or using a supported keyword in the pull request description. It helps collaborators see that someone is working on the issue. For more information, see this doc\u27b6

"},{"location":"contribution/PR/#testing-formatting-linting","title":"Testing, Formatting & Linting","text":"

All Pull Request(s) must be tested, formatted & linted against our library standards as discussed below:

"},{"location":"contribution/PR/#requirements","title":"Requirements","text":"

Testing DeFFcode requires additional test dependencies and dataset, which can be handled manually as follows:

  • Install additional python libraries:

    You can easily install these dependencies via pip:

    # Install opencv(only if not installed previously)\n$ pip install opencv-python\n\n# install rest of dependencies\n$ pip install --upgrade flake8 black pytest vidgear[core]\n
  • Download Tests Dataset:

    To perform tests, you also need to download additional dataset (to your temp dir) by running prepare_dataset.sh bash script as follows:

    On Linux/MacOSOn Windows
    $ chmod +x scripts/bash/prepare_dataset.sh\n$ ./scripts/bash/prepare_dataset.sh\n
    $ sh scripts/bash/prepare_dataset.sh\n
"},{"location":"contribution/PR/#running-tests","title":"Running Tests","text":"

All tests can be run with pytest(in DeFFcode's root folder) as follows:

$ pytest -sv  #-sv for verbose output.\n
"},{"location":"contribution/PR/#formatting-linting","title":"Formatting & Linting","text":"

For formatting and linting, following libraries are used:

  • Flake8: You must run flake8 linting for checking the code base against the coding style (PEP8), programming errors and other cyclomatic complexity:

    $ flake8 {source_file_or_directory} --count --select=E9,F63,F7,F82 --show-source --statistics\n
  • Black: DeFFcode follows black formatting to make code review faster by producing the smallest diffs possible. You must run it with sensible defaults as follows:

    $ black {source_file_or_directory}\n

"},{"location":"contribution/PR/#frequently-asked-questions","title":"Frequently Asked Questions","text":"

Q1. Why do my changes taking so long to be Reviewed and/or Merged?

Submission Aftermaths

  • After your PR is merged, you can safely delete your branch and pull the changes from the main (upstream) repository.
  • The changes will remain in dev branch until next DeFFcode version is released, then it will be merged into master branch.
  • After a successful Merge, your newer contributions will be given priority over others.

Pull requests will be reviewed by the maintainers and the rationale behind the maintainer\u2019s decision to accept or deny the changes will be posted in the pull request. Please wait for our code review and approval, possibly enhancing your change on request.

Q2. Would you accept a huge Pull Request with Lots of Changes?

First, make sure that the changes are somewhat related. Otherwise, please create separate pull requests. Anyway, before submitting a huge change, it's probably a good idea to open an issue in the DeFFcode Github repository to ask the maintainers if they agree with your proposed changes. Otherwise, they could refuse your proposal after you put all that hard work into making the changes. We definitely don't want you to waste your time!

"},{"location":"contribution/issue/","title":"Submitting an Issue Guidelines","text":"

If you've found a new bug or you've come up with some new feature which can improve the quality of the DeFFcode, then related issues are welcomed! But, Before you do, please read the following guidelines:

First Issue on GitHub?

You can easily learn about it from creating an issue wiki.

Info

Please note that your issue will be fixed much faster if you spend about half an hour preparing it, including the exact reproduction steps and a demo. If you're in a hurry or don't feel confident, it's fine to report issues with less details, but this makes it less likely they'll get fixed soon.

"},{"location":"contribution/issue/#search-the-docs-and-previous-issues","title":"Search the Docs and Previous Issues","text":"
  • Remember to first search GitHub for an open or closed issue that relates to your submission or has already been reported. You may find related information and the discussion might inform you of workarounds that may help to resolve the issue.
  • For quick questions, please refrain from opening an issue, as you can reach us on Gitter community channel.
  • Also, go comprehensively through our dedicated FAQ & Troubleshooting section.
"},{"location":"contribution/issue/#gather-required-information","title":"Gather Required Information","text":"
  • All DeFFcode APIs provide a verbose boolean flag in parameters, to log debugged output to terminal. Kindly turn this parameter True in the respective API for getting debug output, and paste it with your Issue.
  • In order to reproduce bugs we will systematically ask you to provide a minimal reproduction code for your report.
  • Check and paste, exact DeFFcode version by running command python -c \"import deffcode; print(deffcode.__version__)\".
"},{"location":"contribution/issue/#follow-the-issue-template","title":"Follow the Issue Template","text":"
  • Please format your issue by choosing the appropriate template.
  • Any improper/insufficient reports will be marked Invalid \u26d4, and if we don't hear back from you we may close the issue.
"},{"location":"contribution/issue/#raise-the-issue","title":"Raise the Issue","text":"
  • Add a brief but descriptive title for your issue.
  • Keep the issue phrasing in context of the problem.
  • Attach source-code/screenshots if you have one.
  • Finally, raise it by choosing the appropriate Issue Template: Bug report \ud83d\udc1e, Idea \ud83d\udca1, Question \u2754.
"},{"location":"help/get_help/","title":"Getting Help","text":"Courtesy - tenor

Would you like to get help with DeFFcode?

There are several ways to get help with DeFFcode:

"},{"location":"help/get_help/#join-our-gitter-community-channel","title":"Join our Gitter Community channel","text":"

Have you come up with some new idea \ud83d\udca1 or looking for the fastest way to troubleshoot your problems

Join and chat on our Gitter Community channel:

There you can ask quick questions, swiftly troubleshoot your problems, help others, share ideas & information, etc.

"},{"location":"help/get_help/#this-is-what-you-do-when","title":"This is what you do when...","text":"
  • Got a question or problem?
  • Found a typo?
  • Found a bug?
  • Missing a feature/improvement?
"},{"location":"help/get_help/#reporting-an-issues","title":"Reporting an issues","text":"

Want to report a bug? Suggest a new feature?

Before you do, please read our guidelines \u27b6

"},{"location":"help/get_help/#preparing-a-pull-request","title":"Preparing a Pull Request","text":"

Interested in contributing to DeFFcode?

Before you do, please read our guidelines \u27b6

"},{"location":"installation/","title":"Overview","text":""},{"location":"installation/#installation-notes","title":"Installation Notes","text":""},{"location":"installation/#supported-systems","title":"Supported Systems","text":"

DeFFcode is well-tested and supported on the following systems(but not limited to), with python 3.7+ and pip installed:

Upgrade your pip

It is strongly advised to upgrade to the latest pip before installing deffcode to avoid any undesired installation error(s).

There are two mechanisms to upgrade pip:

pipensurepip

You can use existing pip to upgrade itself:

Install pip if not present
  • Download the script, from https://bootstrap.pypa.io/get-pip.py.
  • Open a terminal/command prompt, cd to the folder containing the get-pip.py file and run:
Linux/MacOSWindows
python get-pip.py\n
py get-pip.py\n

More details about this script can be found in pypa/get-pip\u2019s README.

Linux/MacOSWindows
python -m pip install pip --upgrade\n
py -m pip install pip --upgrade\n

Python also comes with an ensurepip module1, which can easily upgrade/install pip in any Python environment.

Linux/MacOSWindows
python -m ensurepip --upgrade\n
py -m ensurepip --upgrade\n
  • Any Linux distro released in 2016 or later
  • Windows 7 or later
  • MacOS 10.12.6 (Sierra) or later

"},{"location":"installation/#supported-python-legacies","title":"Supported Python legacies","text":"

Python 3.7+ are the only supported legacies for installing DeFFcode v0.1.0 and above.

"},{"location":"installation/#prerequisites","title":"Prerequisites","text":"

DeFFcode APIs require FFmpeg binaries to be installed for all of their core functionality.

"},{"location":"installation/#ffmpeg","title":"FFmpeg","text":"

When installing DeFFcode, FFmpeg is the only prerequisite you need to configure/install manually. You could easily do it by referring to the FFmpeg Installation doc.

"},{"location":"installation/#installation","title":"Installation","text":""},{"location":"installation/#a-installation-using-pip-recommended","title":"A. Installation using pip (Recommended)","text":"

Best option for easily getting stable DeFFcode installed.

Installation is as simple as:

Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest stable release\npython -m pip install -U deffcode\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest stable release\npython -m pip install --upgrade --user deffcode\n

Or, If you're using py as alias for installed python, then:

# Install latest stable release\npy -m pip install --upgrade --user deffcode\n
# Install latest stable release\npip install -U deffcode\n

And you can also download its wheel (.whl) package from our repository's releases section, thereby can be installed as follows:

# Install latest release\npip install deffcode-0.2.0-py3-none-any.whl\n

"},{"location":"installation/#b-installation-from-source","title":"B. Installation from Source","text":"

Best option for trying latest patches(maybe experimental), forking for Pull Requests, or automatically installing all prerequisites(with a few exceptions).

Installation using dev branch

If you're looking for latest work-in-progress enhancements or bug-fixes, then you want to checkout our beta dev branch with the following commands:

The beta dev branch at times can be very unstable or even unusable, User discretion is advised!

# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# checkout the dev beta branch\ngit checkout dev\n\n# Install it\npip install -U .\n
Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest beta branch\npython -m pip install -U .\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest beta branch\npython -m pip install --upgrade --user .\n

Or, If you're using py as alias for installed python, then:

# Install latest beta branch\npy -m pip install --upgrade --user .\n
# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# Install it\npip install -U .\n

  1. The ensurepip module is missing/disabled on Ubuntu. Use pip method only.\u00a0\u21a9

"},{"location":"installation/ffmpeg_install/","title":"FFmpeg Installation Doc","text":"

DeFFcode APIs require FFmpeg binaries to be installed for all of their core functionality.

You can follow the machine-specific instructions for its configuration/installation:

DeFFcode APIs will throw RuntimeError, if they fail to detect valid FFmpeg executables on your system.

Enable verbose (verbose=True) for debugging FFmpeg validation process.

"},{"location":"installation/ffmpeg_install/#linux-ffmpeg-installation","title":"Linux FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on a Linux OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection","title":"A. Auto-Detection","text":"

This is a recommended approach on Linux Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the Linux OS systems.

You can easily install official FFmpeg according to your Linux Distro by following this post \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Linux Static Binaries (based on your machine architecture) from the link below:

    Linux Static Binaries: http://johnvansickle.com/ffmpeg/

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#windows-ffmpeg-installation","title":"Windows FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Installation and Manual Configuration methods on Windows OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-installation","title":"A. Auto-Installation","text":"

This is a recommended approach on Windows Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-generate the required FFmpeg Static Binaries from our dedicated Github Server into the temporary directory(e.g. C:\\Temp) of your machine on the Windows OS systems.

Active Internet connection is required while downloading required FFmpeg Static Binaries from our dedicated Github Server onto your Windows machine.

Important Information regarding Auto-Installation
  • The files downloaded to a temporary directory (e.g. C:\\TEMP), may get erased if your machine shutdowns/restarts in some cases.

  • You can also provide a custom save path for auto-downloading FFmpeg Static Binaries through exclusive -ffmpeg_download_path attribute in Sourcer API.

    How to use -ffmpeg_download_path attribute in FFdecoder API?

    -ffmpeg_download_path is also available in FFdecoder API through the -custom_sourcer_params attribute of its ffparams dictionary parameter.

  • If binaries were found at the specified path, DeFFcode APIs automatically skips the Auto-Installation step.

  • If the required FFmpeg static binary fails to download, extract, or validate during Auto-Installation, then DeFFcode APIs will exit with RuntimeError!

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_1","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Windows Static Binaries (based on your machine arch(x86/x64)) from the link below:

    Windows Static Binaries: https://ffmpeg.org/download.html#build-windows

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'C:/foo/Downloads/ffmpeg/bin') or path of ffmpeg.exe executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#macos-ffmpeg-installation","title":"MacOS FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on MacOS OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection_1","title":"A. Auto-Detection","text":"

This is a recommended approach on MacOS Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the MacOS systems.

You can easily install FFmpeg on your MacOS machine by following this tutorial \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_2","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest MacOS Static Binaries (only x64 Binaries) from the link below:

    MacOS Static Binaries: https://ffmpeg.org/download.html#build-mac

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"recipes/advanced/","title":"Advanced Recipes","text":"

The following challenging recipes will take your skills to the next level and will give access to new DeFFcode techniques, tricky examples, and advanced FFmpeg parameters:

Courtesy - tenor

Refer Basic Recipes first!

If you're just getting started, check out the Beginner's Basic Recipes first before trying these advanced recipes.

Any proficiency with OpenCV-Python will be Helpful

Any proficiency with OpenCV-Python (Python API for OpenCV) surely help you with these recipes.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

"},{"location":"recipes/advanced/#advanced-decoding-recipes","title":"Advanced Decoding Recipes","text":"
  • Decoding Live Virtual Sources
    • Generate and Decode frames from Sierpinski pattern
    • Generate and Decode frames from Test Source pattern
    • Generate and Decode frames from Gradients with custom Text effect
    • Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms
    • Generate and Decode frames from Game of Life Visualization
  • Decoding Live Feed Devices
    • Capturing and Previewing frames from a Webcam using Custom Demuxer
    • Capturing and Previewing frames from your Desktop (Screen Recording)
  • Hardware-Accelerated Video Decoding
    • CUVID-accelerated Hardware-based Video Decoding and Previewing
    • CUDA-accelerated Hardware-based Video Decoding and Previewing
"},{"location":"recipes/advanced/#advanced-transcoding-recipes","title":"Advanced Transcoding Recipes","text":"
  • Transcoding Live Complex Filtergraphs
    • Transcoding video with Live Custom watermark image overlay
    • Transcoding video from sequence of Images with additional filtering
  • Transcoding Video Art with Filtergraphs
    • Transcoding video art with YUV Bitplane Visualization
    • Transcoding video art with Jetcolor effect
    • Transcoding video art with Ghosting effect
    • Transcoding video art with Pixelation effect
  • Hardware-Accelerated Video Transcoding
    • CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API
    • CUDA-NVENC-accelerated Video Transcoding with WriteGear API
    • CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API
"},{"location":"recipes/advanced/#advanced-metadata-recipes","title":"Advanced Metadata Recipes","text":"
  • Updating Video Metadata
    • Added new attributes to metadata in FFdecoder API
    • Overriding source video metadata in FFdecoder API
"},{"location":"recipes/advanced/decode-hw-acceleration/","title":"Hardware-Accelerated Video Decoding","text":"

FFmpeg offer access to dedicated GPU hardware with varying support on different platforms for performing a range of video-related tasks to be completed faster or using less of other resources (particularly CPU).

By default, DeFFcode's FFdecoder API uses the Input Source's video-decoder (extracted using Sourcer API) itself for decoding its input. However, you could easily change the video-decoder to your desired specific supported Video-Decoder using FFmpeg options by way of its ffparams dictionary parameter. This feature provides easy access to GPU Accelerated Hardware Decoder in FFdecoder API that will generate faster video frames while using little to no CPU power, as opposed to CPU intensive Software Decoders.

We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuvid-accelerated-hardware-based-video-decoding-and-previewing","title":"CUVID-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

  • Using h264_cuvid decoder: Remember to check if your FFmpeg compiled with H.264 CUVID decoder support by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    Verifying H.264 CUVID decoder support in FFmpeg
    $ ffmpeg  -hide_banner -decoders | grep cuvid\n\nV..... av1_cuvid            Nvidia CUVID AV1 decoder (codec av1)\nV..... h264_cuvid           Nvidia CUVID H264 decoder (codec h264)\nV..... hevc_cuvid           Nvidia CUVID HEVC decoder (codec hevc)\nV..... mjpeg_cuvid          Nvidia CUVID MJPEG decoder (codec mjpeg)\nV..... mpeg1_cuvid          Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)\nV..... mpeg2_cuvid          Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)\nV..... mpeg4_cuvid          Nvidia CUVID MPEG4 decoder (codec mpeg4)\nV..... vc1_cuvid            Nvidia CUVID VC1 decoder (codec vc1)\nV..... vp8_cuvid            Nvidia CUVID VP8 decoder (codec vp8)\nV..... vp9_cuvid            Nvidia CUVID VP9 decoder (codec vp9)\n

    You can also use any of above decoder in the similar way, if supported.

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's H.264 CUVID Video decoder in FFdecoder API to achieve GPU-accelerated hardware video decoding of YUV420p frames from a given Video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's CUVID can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": \"h264_cuvid\",  # use H.264 CUVID Video-decoder\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(YUV420p) frames\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"yuv420p\",  # use YUV420p frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the YUV420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuda-accelerated-hardware-based-video-decoding-and-previewing","title":"CUDA-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters), thereby achieving GPU-accelerated decoding of NV12 pixel-format frames from a given video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

NV12(for 4:2:0 input) and NV21(for 4:4:4 input) are the only supported pixel format. You cannot change pixel format to any other since NV-accelerated video codec supports only them.

NV12 is a biplanar format with a full sized Y plane followed by a single chroma plane with weaved U and V values. NV21 is the same but with weaved V and U values. The 12 in NV12 refers to 12 bits per pixel. NV12 has a half width and half height chroma channel, and therefore is a 420 subsampling. NV16 is 16 bits per pixel, with half width and full height. aka 422. NV24 is 24 bits per pixel with full sized chroma channel. aka 444. Most NV12 functions allow the destination Y pointer to be NULL.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's GPU Accelerated Decoding can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"fps=60.0,\"  # framerate 60.0fps in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the NV12 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/","title":"Decoding Live Feed Devices","text":"

DeFFcode's FFdecoder API provide effortless support for any Live Feed Devices using two parameters: source parameter which accepts device name or its path, and source_demuxer parameter to specify demuxer for the given input device.

We'll discuss the Live Feed Devices support using both these parameters briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer","title":"Capturing and Previewing frames from a Webcam using Custom Demuxer","text":"Example Assumptions

FFmpeg provide set of specific Demuxers on different platforms to read the multimedia streams from a particular type of Video Capture source/device. Please note that following recipe explicitly assumes:

  • You're running Linux Machine with USB webcam connected to it at node/path /dev/video0.
  • You already have appropriate Linux video drivers and related softwares installed on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libv4l2 flag to support video4linux2, v4l2 demuxer. BTW, you can list all supported demuxers using the ffmpeg --list-demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode BGR24 video frames from a USB webcam device connected at path /dev/video0 on a Linux Machine with video4linux2 (or simply v4l2) demuxer, and preview them using OpenCV Library's cv2.imshow() method.

Identifying and Specifying Video Capture Device Name/Path/Index and suitable Demuxer on different OS platforms Windows Linux MacOS

Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

  • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

    c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
  • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

    # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

  • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

    You can use sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

    $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
  • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

    # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

    You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

    # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: You can locate your Video device's name and index using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

    Using device's indexUsing device's name
    # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

    # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

    # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop","title":"Capturing and Previewing frames from your Desktop","text":"Example Assumptions

Similar to Webcam capturing, FFmpeg provide set of specific Demuxers on different platforms for capturing your desktop (Screen recording). Please note that following recipe explicitly assumes:

  • You're running Linux Machine with libxcb module installed properly on your machine.
  • Your machine uses FFmpeg binaries built with the --enable-libxcb flag to support x11grab demuxer. BTW, you can list all supported demuxers using the ffmpeg --list-demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode live BGR video frames from your complete screen as well as a region in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OS platforms Windows Linux MacOS

Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

    # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

Capturing entire desktopCapturing a region

For capturing all your displays as one big contiguous display in FFdecoder API:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

To limit capturing to a region, and show the area being grabbed:

x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/","title":"Decoding Live Virtual Sources","text":"

Instead of using prerecorded video files as streams, DeFFcode's FFdecoder API with the help of powerful lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph, is also capable of creating virtual video frames out of thin air in real-time, which you might want to use as input for testing, compositing, and merging with other streams to obtain desired output on-the-fly.

We'll discuss the recipes for generating Live Fake Sources briefly below:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py, otherwise it will result in a ModuleNotFoundError.

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-sierpinski-pattern","title":"Generate and Decode frames from Sierpinski pattern","text":"

The sierpinski graph generates a Sierpinski carpet/triangle fractal, and randomly pans around by a single pixel each frame.

Sierpinski carpet fractal

In this example we will generate and decode 8 seconds of a Sierpinski carpet fractal pattern of 1280x720 frame size and 30 framerate using sierpinski graph source with lavfi input virtual device in FFdecoder API, and preview decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# playback time of 8 seconds\nffparams = {\"-ffprefixes\": [\"-t\", \"8\"]}\n\n# initialize and formulate the decoder with \"sierpinski\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"sierpinski=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        cv2.imwrite('foo_image.gif', frame)\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-test-source-pattern","title":"Generate and Decode frames from Test Source pattern","text":"

The testsrc graph generates a test video pattern showing a color pattern, a scrolling gradient, and a timestamp. This is useful for testing purposes.

Test Source pattern

In this example we will generate and decode 10 seconds of a Test Source pattern (1280x720 frame size & 30 framerate) using testsrc graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n}\n\n# initialize and formulate the decoder with \"testsrc\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"testsrc=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-gradients-with-custom-text-effect","title":"Generate and Decode frames from Gradients with custom Text effect","text":"

The gradients graph (as its name suggests) generates several random gradients.

Gradients pattern with real-time text output

In this example we will generate and decode 15 seconds of Gradients using gradients graph source with lavfi input virtual device and also draw real-time text output (format HH::MM::SS) scrolling upward direction on it using drawtext filter in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

This example assumes you're running Windows machine. If not, then change fontfile parameter path in drawtext video filtergraph definition accordingly.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"15\"],  # playback time of 15 seconds\n    \"-vf\": \"drawtext=\"  # draw text\n    + \"text='%{localtime\\:%X}':\"  # real time text (HH::MM::SS)\n    + \"fontfile='c\\:\\/windows\\/fonts\\/arial.ttf':\"  # fontfile path (Only Windows)\n    + \"x=(w-text_w)/2:y=h-40*t:\"  # scroll upward effect\n    + \"fontsize=50:\"  # font size 50\n    + \"fontcolor=white\",  # font color white\n}\n\n\n# initialize and formulate the decoder with \n# \"gradients\" source for BGR24 output\ndecoder = FFdecoder(\n    \"gradients=n=3\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms","title":"Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms","text":"

The mandelbrot graph generates a Mandelbrot set fractal that progressively zooms towards a specific point.

Mandelbrot pattern with a Vectorscope & two Waveforms

In this example we will generate and decode 20 seconds of a Mandelbrot test pattern (1280x720 frame size & 30 framerate) using mandelbrot graph source with lavfi input virtual device with a vectorscope (plots 2 color component values) & two waveforms (plots YUV color component intensity) stacked to it in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"20\"],  # playback time of 20 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"split=4[a][b][c][d],\" # split input into 4 identical outputs.\n    + \"[a]waveform[aa],\"  # apply waveform on first output\n    + \"[b][aa]vstack[V],\"  # vertical stack 2nd output with waveform [V]\n    + \"[c]waveform=m=0[cc],\"  # apply waveform on 3rd output\n    + \"[d]vectorscope=color4[dd],\"  # apply vectorscope on 4th output\n    + \"[cc][dd]vstack[V2],\"  # vertical stack waveform and vectorscope [V2]\n    + \"[V][V2]hstack\",  # horizontal stack [V] and [V2] vertical stacks\n}\n\n# initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-game-of-life-visualization","title":"Generate and Decode frames from Game of Life Visualization","text":"

The life graph generates a life pattern based on a generalization of John Conway\u2019s life game. The sourced input represents a life grid, each pixel represents a cell which can be in one of two possible states, alive or dead. Every cell interacts with its eight neighbours, which are the cells that are horizontally, vertically, or diagonally adjacent. At each interaction the grid evolves according to the adopted rule, which specifies the number of neighbor alive cells which will make a cell stay alive or born.

Game of Life Visualization

In this example we will generate and decode 25 seconds of Game of Life Visualization using life graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"25\"],  # playback time of 25 seconds\n}\n\n# initialize and formulate the decoder with \"life\" source for BGR24 output\ndecoder = FFdecoder(\n    \"life=\"  # life graph\n    + \"s=640x480:\"  # grid size (in pixels)\n    + \"mold=10:\"  # cell mold speed\n    + \"r=36:\"  # framerate\n    + \"ratio=0.5:\"  # random fill ratio for the initial random grid\n    + \"death_color=#39FF14:\"  # color of dead cells\n    + \"life_color=#1d1160\" # color of living (or new born) cells\n    + \",scale=640:480:\" # frame size\n    + \"flags=16\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/","title":"Transcoding Video Art with Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API unlocks the power of ffmpeg backend for creating real-time artistic generative video art using simple and complex filtergraphs, and decoding them into live video frames.

We'll discuss the Transcoding Video Art with Filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-yuv-bitplane-visualization","title":"Transcoding video art with YUV Bitplane Visualization","text":"

Based on the QCTools bitplane visualization, this video art has numerical values ranging between -1(no change) and 10(noisiest) for the Y (luminance), U and V (chroma or color difference) planes, yielding cool and different results for different values.

YUV Bitplane Visualization

This Video Art idea credits goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Bitplane Visualization by binding the bit position of the Y, U, and V planes of a video file (say foo.mp4) by using FFmpeg's lutyuv filter and assigning them random values (between -1(no change) and 10(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"lutyuv=\"  # use  lutyuv filter for binding bit position of the Y, U, and V planes\n    + \"y=if(eq({y}\\,-1)\\,512\\,if(eq({y}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{y}))*pow(2\\,{y}))):\".format(\n        y=3 # define `Y` (luminance) plane value (b/w -1 and 10)\n    )\n    + \"u=if(eq({u}\\,-1)\\,512\\,if(eq({u}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{u}))*pow(2\\,{u}))):\".format(\n        u=1 # define `U` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"v=if(eq({v}\\,-1)\\,512\\,if(eq({v}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{v}))*pow(2\\,{v}))),\".format(\n        v=3 # define `V` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"format=yuv422p10le\", # change output format to yuv422p10le\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-jetcolor-effect","title":"Transcoding video art with Jetcolor effect","text":"

This video art uses FFmpeg's pseudocolor filter to create a Jetcolor effect which is high contrast, high brightness, and high saturation colormap that ranges from blue to red, and passes through the colors cyan, yellow, and orange. The jet colormap is associated with an astrophysical fluid jet simulation from the National Center for Supercomputer Applications.

Jetcolor effect

Credit for this Video Art idea goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Jetcolor effect by changing frame colors of a video file (say foo.mp4) using FFmpeg's pseudocolor filter in different modes (values between 0 (cleaner) [default] and 2(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to `yuv444p`\n    + \"eq=brightness=0.40:saturation=8,\"  # default `brightness = 0.40` and `saturation=8`\n    + \"pseudocolor='\"  # dynamically controlled colors through `pseudocolor` filter\n    + \"if(between(val,0,85),lerp(45,159,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(159,177,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(177,70,(val-170)/(255-170))))):\"  # mode 0 (cleaner) [default]\n    + \"if(between(val,0,85),lerp(205,132,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(132,59,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(59,100,(val-170)/(255-170))))):\"  # mode 1\n    + \"if(between(val,0,85),lerp(110,59,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(59,127,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(127,202,(val-170)/(255-170))))):\"  # mode 2 (noisiest)\n    + \"i={mode}',\".format(\n        mode=0  # define mode value (b/w `0` and `2`) to control colors\n    )\n    + \"format=yuv422p10le\",  # change output format to `yuv422p10le`\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect","title":"Transcoding video art with Ghosting effect","text":"

This video art uses FFmpeg\u2019s lagfun filter to create a video echo/ghost/trailing effect.

Ghosting effect

Credit for this Video Art idea goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Ghosting effect using FFmpeg's lagfun filter on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-filter_complex\": \"format=yuv444p[formatted];\"  # change video input format to yuv444p\n    + \"[formatted]split[a][b];\"  # split input into 2 identical outputs\n    + \"[a]lagfun=decay=.99:planes=1[a];\"  # apply lagfun filter on first output\n    + \"[b]lagfun=decay=.98:planes=2[b];\"  # apply lagfun filter on 2nd output\n    + \"[a][b]blend=all_mode=screen:c0_opacity=.5:c1_opacity=.5,\"  # apply screen blend mode both outputs\n    + \"format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect","title":"Transcoding video art with Pixelation effect","text":"

This video art uses FFmpeg\u2019s overlay, smartblur and stacks of dilation filters to intentionally Pixelate your video in artistically cool looking ways such that each pixel becomes visible to the naked eye.

Pixelation effect

Credit for this Video Art idea goes to oioiiooixiii blogspot.

In this example we will generate 8 seconds of Pixelation effect using FFmpeg\u2019s smartblur and stacks of dilation filters overlayed on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to yuv444p\n    + \"split [out1][out2];\"  # split input into 2 identical outputs\n    + \"[out1][out2] overlay,smartblur,\"  # apply overlay,smartblur filter on both outputs\n    + \"dilation,dilation,dilation,dilation,dilation,\"  # apply stacks of dilation filters on both outputs\n    + \"eq=contrast=1.4:brightness=-0.09 [pixels];\"  # change brightness and contrast\n    + \"[pixels]format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/","title":"Hardware-Accelerated Video Transcoding","text":"What exactly is Transcoding?

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to process real-time video frames with immense flexibility. Both these APIs are capable of utilizing the potential of GPU backed fully-accelerated Hardware based video Decoding(FFdecoder API with hardware decoder) and Encoding (WriteGear API with hardware encoder), thus dramatically improving the transcoding performance. At same time, FFdecoder API Hardware-decoded frames are fully compatible with OpenCV's VideoWriter API for producing high-quality output video in real-time.

Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing

As we know, using the \u2013hwaccel cuda -hwaccel_output_format cuda flags in FFmpeg pipeline will keep video frames in GPU memory, and this ensures that the memory transfers (system memory to video memory and vice versa) are eliminated, and that transcoding is performed with the highest possible performance on the available GPU hardware.

General Memory Flow with Hardware Acceleration

But unfortunately, for processing real-time frames in our python script with FFdecoder and WriteGear APIs, we're bound to sacrifice this performance gain by explicitly copying raw decoded frames between System and GPU memory (via the PCIe bus), thereby creating self-made latency in transfer time and increasing PCIe bandwidth occupancy due to overheads in communication over the bus. Moreover, given PCIe bandwidth limits, copying uncompressed image data would quickly saturate the PCIe bus.

Memory Flow with Hardware Acceleration and Real-time Processing

On the bright side, however, GPU enabled Hardware based encoding/decoding is inherently faster and more efficient (do not use much CPU resources when frames in GPU) thus freeing up the CPU for other tasks, as compared to Software based encoding/decoding that is known to be completely CPU intensive. Plus scaling, de-interlacing, filtering, etc. tasks will be way faster and efficient than usual using these Hardware based decoders/encoders as oppose to Software ones.

As you can see the pros definitely outweigh the cons and you're getting to process video frames in the real-time with immense speed and flexibility, which is impossible to do otherwise.

We'll discuss its Hardware-Accelerated Video Transcoding capabilities using these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-accelerated-video-transcoding-with-opencvs-videowriter-api","title":"CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with OpenCV's VideoWriter API.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's NVENC Encoder can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg choose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\" # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. 
`output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since write() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely release writer\nwriter.release()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated Video Transcoding with WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to manipulate the real-time video frames with immense flexibility.

Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

Consuming BGR framesConsuming NV12 frames

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting patched NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg choose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as NV12 frames.
  4. Encoding NV12 frames directly with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\"  # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n    \"-input_pixfmt\": \"nv12\", # input frames pixel format as `NV12`\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the NV12 frame here}\n\n    # writing NV12 frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API","text":"

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API creates a High-performance Lossless FFmpeg Transcoding Pipeline

Courtesy - tenor"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/","title":"Transcoding Live Complex Filtergraphs","text":"What are Complex filtergraphs?

Before heading straight into recipes we will talk about Complex filtergraphs:

Complex filtergraphs are those which cannot be described as simply a linear processing chain applied to one stream.

Complex filtergraphs are configured with the -filter_complex global option.

The -lavfi option is equivalent to -filter_complex.

A trivial example of a complex filtergraph is the overlay filter, which has two video inputs and one video output, containing one video overlaid on top of the other.

DeFFcode's FFdecoder API seamlessly supports processing multiple input streams including real-time frames through multiple filter chains combined into a filtergraph (via. -filter_complex FFmpeg parameter), and use their outputs as inputs for other filter chains.

We'll discuss the transcoding of live complex filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-with-live-custom-watermark-image-overlay","title":"Transcoding video with Live Custom watermark image overlay","text":"Big Buck Bunny with custom watermark

In this example we will apply a watermark image (say watermark.png with transparent background) overlay to the 10 seconds of video file (say foo.mp4) using FFmpeg's overlay filter with some additional filtering, and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

You can use FFdecoder's metadata property object that dumps Source Metadata as JSON to retrieve source framerate and frame-size.

To learn about exclusive -ffprefixes & -clones parameter. See Exclusive Parameters \u27b6

Remember to replace watermark.png watermark image file-path with yours before using this recipe.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json, cv2\n\n# define the Complex Video Filter with additional `watermark.png` image input\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n    \"-clones\": [\n        \"-i\",\n        \"watermark.png\",  # !!! [WARNING] define your `watermark.png` here.\n    ],\n    \"-filter_complex\": \"[1]format=rgba,\"  # change 2nd(image) input format to yuv444p\n    + \"colorchannelmixer=aa=0.7[logo];\"  # apply colorchannelmixer to image for controlling alpha [logo]\n    + \"[0][logo]overlay=W-w-{pixel}:H-h-{pixel}:format=auto,\".format(  # apply overlay to 1st(video) with [logo]\n        pixel=5  # at 5 pixels from the bottom right corner of the input video\n    )\n    + \"format=bgr24\",  # change output format to `yuv422p10le`\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-from-sequence-of-images-with-additional-filtering","title":"Transcoding video from sequence of Images with additional filtering","text":"Mandelbrot pattern blend with Fish school video Available blend mode options

Other blend mode options for blend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

In this example we will blend 10 seconds of Mandelbrot test pattern (generated using lavfi input virtual device) that serves as the \"top\" layer with 10 seconds of Image Sequence that serves as the \"bottom\" layer, using blend filter (with heat blend mode), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4 (restricted to 12 seconds):

$ ffmpeg -t 12 -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('/path/to/img%03d.png', verbose=True, **ffparams).formulate()\n

FFdecoder API also accepts Glob pattern(*.png) as well as Single looping image as input to its source parameter. See this Basic Recipe \u27b6 for more information.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define mandelbrot pattern generator\n# and the Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\n        \"-t\", \"10\", # playback time of 10 seconds for mandelbrot pattern\n        \"-f\", \"lavfi\", # use input virtual device\n        \"-i\", \"mandelbrot=rate=25\", # create mandelbrot pattern at 25 fps\n        \"-t\", \"10\", # playback time of 10 seconds for video\n    ],  \n    \"-custom_resolution\": (1280, 720), # resize to 1280x720\n    \"-filter_complex\":\"[1:v]format=yuv444p[v1];\" # change 2nd(video) input format to yuv444p\n        + \"[0:v]format=gbrp10le[v0];\" # change 1st(mandelbrot pattern) input format to gbrp10le\n        + \"[v1][v0]scale2ref[v1][v0];\" # resize the 1st(mandelbrot pattern), based on a 2nd(video).\n        + \"[v0][v1]blend=all_mode='heat',\" # apply heat blend mode to output\n        + \"format=yuv422p10le[v]\", # change output format to `yuv422p10le`\n    \"-map\": \"[v]\", # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"/path/to/image-%03d.png\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# define your parameters\n# [WARNING] framerate must match original source framerate !!!\noutput_params = {\n    \"-input_framerate\": 25,  # Default\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/update-metadata/","title":"Updating Video Metadata","text":"

In addition to using metadata property object in FFdecoder API for probing metadata information (only as JSON string) for each multimedia stream available in the given video source, you can also easily update the video metadata on-the-fly by assigning desired data as python dictionary to the same overloaded metadata property object. This feature can be used either for adding new custom properties to metadata, or to override source metadata properties used by FFdecoder API to formulate its default Decoder Pipeline for real-time video-frames generation.

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

This feature is not yet fully explored, but in the near future you'll be able to use it to dynamically override any Video frames Decoder Pipeline property (such as frame-size, pixel-format, etc.) in real-time like a pro. Stay tuned for more updates

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/update-metadata/#added-new-properties-to-metadata-in-ffdecoder-api","title":"Added new properties to metadata in FFdecoder API","text":"

In FFdecoder API, you can easily define any number of new properties for its metadata (formatted as python dictionary) with desired data of any datatype(s)1 , without affecting its default Video frames Decoder pipeline.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, thereby add new properties (formatted as python dictionary) with desired data of different datatype(s) through overloaded metadata property object, and then finally print it as JSON string using the same metadata property object in FFdecoder API.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\n\n# initialize the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# format your data as dictionary (with data of any [printable] datatype)\ndata = dict(\n    mystring=\"abcd\",  # string data\n    myint=1234,  # integers data\n    mylist=[1, \"Rohan\", [\"inner_list\"]],  # list data\n    mytuple=(1, \"John\", (\"inner_tuple\")),  # tuple data\n    mydict={\"anotherstring\": \"hello\"},  # dictionary data\n    myjson=json.loads('{\"name\": \"John\", \"age\": 30, \"city\": \"New York\"}'),  # json data\n)\n\n# assign your dictionary data\ndecoder.metadata = data\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"D:\\\\foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 29.97,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 21.03,\n  \"approx_video_nframes\": 630,\n  \"source_video_bitrate\": \"4937k\",\n  \"source_audio_bitrate\": \"256k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\",\n  \"mystring\": \"abcd\",\n  \"myint\": 1234,\n  \"mylist\": [\n    1,\n    \"Rohan\",\n    [\n      \"inner_list\"\n    ]\n  ],\n  \"mytuple\": [\n    1,\n    \"John\",\n    \"inner_tuple\"\n  ],\n  \"mydict\": {\n    \"anotherstring\": \"hello\"\n  },\n  \"myjson\": {\n    \"name\": \"John\",\n    \"age\": 30,\n    \"city\": \"New York\"\n  }\n}\n

"},{"location":"recipes/advanced/update-metadata/#overriding-source-video-metadata-in-ffdecoder-api","title":"Overriding source video metadata in FFdecoder API","text":"

In FFdecoder API, you can also use its metadata to manually override the source properties (such as frame-size, frame pixel-format, video-framerate, video-decoder etc.) that directly affect its default Video frames Decoder pipeline that decodes real-time video-frames.

The \"source\" property in metadata cannot be altered in any manner.

Source Video metadata values must be handled carefully

Source Video metadata information is used by FFdecoder API to formulate its default Video frames Decoder pipeline, and any improper or invalid inputted source property could crash the pipeline with RuntimeError.

Therefore to safeguard against it, FFdecoder API discards any Source Video metadata dictionary keys, if its value's datatype fails to match the exact valid datatype defined in following table:

Only either source_demuxer or source_extension property can be present in source metadata.

Not all Source Video metadata properties directly affects the pipeline (as mentioned in the table). But this might change in future versions.

Source Video Metadata Keys Valid Value Datatype Effect on Pipeline \"source_extension\" string None \"source_demuxer\" string Direct \"source_video_resolution\" list of integers e.g. [1280,720] Direct \"source_video_framerate\" float Direct \"source_video_pixfmt\" string Direct \"source_video_decoder\" string Direct \"source_duration_sec\" float None \"approx_video_nframes\" integer Direct \"source_video_bitrate\" string None \"source_audio_bitrate\" string None \"source_audio_samplerate\" string None \"source_has_video\" bool Direct \"source_has_audio\" bool None \"source_has_image_sequence\" bool Direct \"ffdecoder_operational_mode\" str None \"output_frames_pixfmt\" str Direct

Hence for instance, if \"source_video_resolution\" is assigned \"1280x720\" (i.e. string datatype value instead of list), then it will be discarded.

In this example we will probe all metadata information available within foo.mp4 video file, and override frame size (originally 1920x1080) and pixel-format (originally rgb24) to our desired values through overloaded metadata property object in FFdecoder API, and thereby preview them using OpenCV Library's cv2.imshow() method.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

Once the formulate() method is called, the metadata information present in FFdecoder API is finalized and thereby used to formulate its default pipeline for decoding real-time video-frames. Therefore make all changes to video properties beforehand.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# override source metadata values\n# !!! [WARNING] Make sure each value datatype matches the table !!!\ndecoder.metadata = {\n    \"output_frames_pixfmt\": \"gray\",  # gray frame-pixfmt\n    \"source_video_resolution\": [1280, 720],  # 1280x720 frame-size\n}\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# [NOTE] uncomment following line to debug values\n# print(decoder.metadata)\n\n# let's grab the 1280x720 sized gray frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with gray frame here}\n\n    # Show gray frames in output window\n    cv2.imshow(\"Output gray\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

  1. There is no concept of tuple datatype in the JSON format. Thereby, Python's json module auto-converts all tuple python values into JSON list because that's the closest thing in JSON format to a tuple.\u00a0\u21a9

"},{"location":"recipes/basic/","title":"Basic Recipes","text":"

The following recipes should be reasonably accessible to beginners of any skill level to get started with DeFFcode APIs:

Courtesy - tenor

Refer Installation doc first!

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode with required prerequisites on your machine.

Any proficiency with OpenCV-Python will be Helpful

If you've any proficiency with OpenCV-Python (Python API for OpenCV), you will find these recipes really easy.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

Frames are actually 3D Numpy arrays

In python, \"Frames\" are actually three-dimensional NumPy ndarray composed of 3 nested levels of arrays, one for each dimension.

"},{"location":"recipes/basic/#basic-decoding-recipes","title":"Basic Decoding Recipes","text":"
  • Decoding Video files
    • Accessing RGB frames from a video file
    • Capturing and Previewing BGR frames from a video file (OpenCV Support)
    • Playing with any other FFmpeg pixel formats
    • Capturing and Previewing frames from a Looping Video
  • Decoding Camera Devices using Indexes
    • Enumerating all Camera Devices with Indexes
    • Capturing and Previewing frames from a Camera using Indexes
  • Decoding Network Streams
    • Capturing and Previewing frames from a HTTPs Stream
    • Capturing and Previewing frames from a RTSP/RTP Stream
  • Decoding Image sequences
    • Capturing and Previewing frames from Sequence of images
    • Capturing and Previewing frames from Single looping image
"},{"location":"recipes/basic/#basic-transcoding-recipes","title":"Basic Transcoding Recipes","text":"
  • Transcoding Live frames
    • Transcoding video using OpenCV VideoWriter API
    • Transcoding lossless video using WriteGear API
  • Transcoding Live Simple Filtergraphs
    • Transcoding Trimmed and Reversed video
    • Transcoding Cropped video
    • Transcoding Rotated video (with rotate filter)
    • Transcoding Rotated video (with transpose filter)
    • Transcoding Horizontally flipped and Scaled video
  • Saving Key-frames as Image (Image processing)
    • Extracting Key-frames as PNG image
    • Generating Thumbnail with a Fancy filter
"},{"location":"recipes/basic/#basic-metadata-recipes","title":"Basic Metadata Recipes","text":"
  • Extracting Video Metadata
    • Extracting video metadata using Sourcer API
    • Extracting video metadata using FFdecoder API
"},{"location":"recipes/basic/#whats-next","title":"What's next?","text":"

Done already! Let's checkout Advanced Recipes to level up your skills!

"},{"location":"recipes/basic/decode-camera-devices/","title":"Decoding Camera Devices using Indexes","text":"

With DeFFcode APIs, we are able to probe and enumerate all Camera Device names along with their respective "device indexes" or "camera indexes" no matter how many cameras are connected to your system. This makes Camera Devices decoding as simple as OpenCV, where one can effortlessly access a specific Camera Device just by specifying its matching index. These indexes are much easier to read, memorize, and type, and one doesn't have to remember long Device names or worry about their Demuxer.

We'll discuss the Decoding Camera Devices using Indexes briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes","title":"Enumerating all Camera Devices with Indexes","text":"

In Sourcer API, you can easily use its enumerate_devices property object to enumerate all probed Camera Devices (connected to your system) as dictionary object with device indexes as keys and device names as their respective values.

Requirement for Enumerating all Camera Devices in Sourcer API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will enumerate all probed Camera Devices connected on a Windows machine using enumerate_devices property object in Sourcer API, both as dictionary object and JSON string.

# import the necessary packages\nfrom deffcode import Sourcer\nimport json\n\n# initialize and formulate the decoder\nsourcer = Sourcer(\"0\").probe_stream()\n\n# enumerate probed devices as Dictionary object(`dict`)\nprint(sourcer.enumerate_devices)\n\n# enumerate probed devices as JSON string(`json.dump`)\nprint(json.dumps(sourcer.enumerate_devices,indent=2))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine: As Dictionary objectAs JSON string
{0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
{\n  \"0\": \"Integrated Camera\",\n  \"1\": \"USB2.0 Camera\",\n  \"2\": \"DroidCam Source\"\n}\n

"},{"location":"recipes/basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes","title":"Capturing and Previewing frames from a Camera using Indexes","text":"

After knowing the index of Camera Device with Sourcer API, One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value either as integer or string of integer type to its source parameter.

Requirement for Index based Camera Device Capturing in FFdecoder API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will decode BGR24 video frames from Integrated Camera at index 0 on a Windows Machine, and preview them using OpenCV Library's cv2.imshow() method.

Important Facts related to Camera Device Indexing
  • Camera Device indexes are 0-indexed. So the first device is at 0, the second is at 1, and so on. So if there are n devices, the last device is at n-1.
  • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
  • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
    • For example, If there are three devices:
      {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
    • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

      Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

Out of Index Camera Device index values will raise ValueError in FFdecoder API

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/","title":"Decoding Image sequences","text":"

DeFFcode's FFdecoder API supports a wide range of media streams as input to its source parameter, which also includes Image Sequences such as Sequential(img%03d.png) and Glob pattern(*.png) as well as Single looping image.

We'll discuss both briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-sequence-of-images","title":"Capturing and Previewing frames from Sequence of images","text":"

In this example we will capture video frames from a given Image Sequence using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

OpenCV expects BGR format frames in its cv2.imshow() method.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4:

$ ffmpeg -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

SequentialGlob pattern How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img%03d.png\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

The glob pattern is not available on Windows FFmpeg builds.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-pattern_type glob` for accepting glob pattern\nffparams = {"-ffprefixes":["-pattern_type", "glob"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder("/path/to/pngs/img*.png", frame_format="bgr24", verbose=True, **ffparams).formulate()\n\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow("Output", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord("q"):\n        break\n\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-single-looping-image","title":"Capturing and Previewing frames from Single looping image","text":"

In this example we will capture video frames from a Single Looping image using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-loop 1` for infinite looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"img.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/","title":"Decoding Network Streams","text":"

Similar to decoding Video files, DeFFcode's FFdecoder API directly supports Network Streams with specific protocols (such as RTSP/RTP, HTTP(s), MPEG-TS, etc.) as input to its source parameter.

We'll discuss Network Streams support briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise a RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional Python dependencies, which can be easily installed as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-https-stream","title":"Capturing and Previewing frames from a HTTPs Stream","text":"

In this example we will decode live BGR24 video frames from a HTTPs protocol Stream in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-rtsprtp-stream","title":"Capturing and Previewing frames from a RTSP/RTP Stream","text":"

In this example we will decode live BGR24 video frames from RTSP/RTP protocol Streams in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

This example assumes you already have an RTSP Server running at the specified RTSP address with syntax rtsp://[RTSP_ADDRESS]:[RTSP_PORT]/[RTSP_PATH] and video data already being published to it.

For creating your own RTSP Server locally and publishing video data to it, you can refer to this WriteGear API's bonus example \u27b6

Make sure to replace the RTSP address rtsp://localhost:8554/mystream with yours in the following code before running

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\"-rtsp_transport\": \"tcp\"}\n\n# initialize and formulate the decoder with RTSP protocol source for BGR24 output\n# [WARNING] Change your RSTP address `rtsp://localhost:8554/mystream` with yours!\ndecoder = FFdecoder(\"rtsp://localhost:8554/mystream\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/","title":"Decoding Video files","text":"

DeFFcode's FFdecoder API readily supports multimedia Video files path as input to its source parameter. And with its frame_format parameter, you can easily decode video frames in any pixel format(s) that are readily supported by all well known Computer Vision libraries (such as OpenCV).

We'll discuss its video files support and pixel format capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise a RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional Python dependencies, which can be easily installed as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-video-files/#accessing-rgb-frames-from-a-video-file","title":"Accessing RGB frames from a video file","text":"

The default function of FFdecoder API is to decode 24-bit RGB video frames from the given source.

FFdecoder API's generateFrame() function can be used in multiple ways to access RGB frames from a given source, such as as a Generator (Recommended Approach), calling with Statement, and as an Iterator.

In this example we will decode the default RGB24 video frames from a given Video file (say foo.mp4) using above mentioned accessing methods:

As a Generator (Recommended)Calling with StatementAs a Iterator

This is a recommended approach for faster and error-proof access of decoded frames. We'll use it throughout the recipes.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# grab RGB24(default) frame from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so you don't need to explicitly call them. See PEP343 -- The 'with' statement for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder\nwith FFdecoder(\"foo.mp4\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # lets print its shape\n        print(frame.shape)  # for e.g. (1080, 1920, 3)\n

This Iterator Approach bears a close resemblance to OpenCV-Python (Python API for OpenCV) coding syntax, thereby easier to learn and remember.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# loop over frames\nwhile True:\n\n    # grab RGB24(default) frames from decoder\n    frame = next(decoder.generateFrame(), None)\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file","title":"Capturing and Previewing BGR frames from a video file","text":"

In this example we will decode OpenCV supported live BGR24 video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

By default, OpenCV expects BGR format frames in its cv2.imshow() method. Here we will do so using two accessing methods.

As a Generator (Recommended)Calling with Statement
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so you don't need to explicitly call them. See PEP343 -- The 'with' statement for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\nwith FFdecoder(\"foo.mp4\", frame_format=\"bgr24\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # Show output window\n        cv2.imshow(\"Output\", frame)\n\n        # check for 'q' key if pressed\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord(\"q\"):\n            break\n\n# close output window\ncv2.destroyAllWindows()\n

"},{"location":"recipes/basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats","title":"Playing with any other FFmpeg pixel formats","text":"

Similar to BGR, you can input any pixel format (supported by installed FFmpeg) by way of frame_format parameter of FFdecoder API for the desired video frame format.

In this example we will decode live Grayscale and YUV video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Decode GrayscaleDecode YUV frames
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"input_foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# grab the GRAYSCALE frames from the decoder\nfor gray in decoder.generateFrame():\n\n    # check if frame is None\n    if gray is None:\n        break\n\n    # {do something with the gray frame here}\n\n    # Show output window\n    cv2.imshow(\"Gray Output\", gray)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try decoding YUV420p pixel-format frames in following python code:

You can also use other YUV pixel formats such as yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) etc. instead for a higher dynamic range, in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# grab the YUV420p frames from the decoder\nfor yuv in decoder.generateFrame():\n\n    # check if frame is None\n    if yuv is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the bgr frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", bgr)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-frames-from-a-looping-video","title":"Capturing and Previewing frames from a Looping Video","text":"

In this example we will decode live BGR24 video frames from looping video using different means in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Using -stream_loop optionUsing loop filter

The recommended way to loop a video is to use the -stream_loop option via the -ffprefixes list attribute of the ffparam dictionary parameter in FFdecoder API. Possible values are integer values: >0 number of loops, 0 means no loop, -1 means infinite loop.

Using -stream_loop 3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-stream_loop 3` for looping 4 times\nffparams = {\"-ffprefixes\":[\"-stream_loop\", \"3\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Another way to loop a video is to use the loop complex filter via the -filter_complex FFmpeg flag as an attribute of the ffparam dictionary parameter in FFdecoder API.

This filter places all frames into memory(RAM), so applying trim filter first is strongly recommended. Otherwise you might probably run Out of Memory.

Using loop filter for looping video

The filter accepts the following options:

  • loop: Sets the number of loops for integer values >0. Setting this value to -1 will result in infinite loops. Default is 0(no loops).
  • size: Sets maximal size in number of frames. Default is 0.
  • start: Sets first frame of loop. Default is 0.

Using loop=3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define loop 4 times, each loop is 15 frames, each loop skips the first 25 frames\nffparams = {\n    \"-filter_complex\": \"loop=loop=3:size=15:start=25\" # Or use: `loop=3:15:25`\n}  \n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/extract-video-metadata/","title":"Extracting Video Metadata","text":"

DeFFcode's Sourcer API acts as a Source Probing Utility for easily probing metadata information for each multimedia stream available in the given video source, returning it in either Human-readable (as JSON string) or Machine-readable (as Dictionary object) type with its retrieve_metadata() class method. Apart from this, you can also use the metadata property object in FFdecoder API to extract this metadata information (only as JSON string).

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST requires valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-sourcer-api","title":"Extracting video metadata using Sourcer API","text":"

This is the recommended way for extracting video metadata.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it in both Human-readable (as JSON string) and Machine-readable (as Dictionary object) types using retrieve_metadata() class method in Sourcer API:

The Sourcer API's retrieve_metadata() class method provides pretty_json boolean parameter to return metadata as JSON string (if True) and as Dictionary (if False).

As JSON stringAs Dictionary object
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false\n}\n
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `dict`\nprint(sourcer.retrieve_metadata())\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{'ffmpeg_binary_path': 'C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe', 'source': 'foo.mp4', 'source_extension': '.mp4', 'source_video_resolution': [1280, 720], 'source_video_framerate': 25.0, 'source_video_pixfmt': 'yuv420p', 'source_video_decoder': 'h264', 'source_duration_sec': 5.31, 'approx_video_nframes': 133, 'source_video_bitrate': '1205k', 'source_audio_bitrate': '384k', 'source_audio_samplerate': '48000 Hz', 'source_has_video': True, 'source_has_audio': True, 'source_has_image_sequence': False}\n

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-ffdecoder-api","title":"Extracting video metadata using FFdecoder API","text":"

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it as JSON string using metadata property object in FFdecoder API.

You can also update video's metadata by using the same overloaded metadata property object in FFdecoder API. More information can be found in this Advanced Recipe \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\"\n}\n

"},{"location":"recipes/basic/save-keyframe-image/","title":"Saving Key-frames as Image","text":"

DeFFcode's FFdecoder API provide effortless and precise Frame Seeking with -ss FFmpeg parameter that enable us to save any frame from a specific part of our input source.

We'll discuss it briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise a RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional Python dependencies, which can be easily installed as below:

  • OpenCV: OpenCV is required for saving video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • Pillow: Pillow is a Imaging Library required for saving frame as Image. You can easily install it directly via pip:

    pip install Pillow     \n
  • Matplotlib: Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations, also required for saving frame as Image. You can easily install it directly via pip:

    pip install matplotlib   \n
  • Imageio: Imageio is a Library for reading and writing a wide range of image, video, scientific, and volumetric data formats, also required for saving frame as Image. You can easily install it directly via pip:

    pip install imageio      \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/save-keyframe-image/#extracting-key-frames-as-png-image","title":"Extracting Key-frames as PNG image","text":"

In this example we will seek to 00:00:01.45(or 1045msec) in time and decode one single frame in FFdecoder API, and thereby saving it as PNG image using few prominent Image processing python libraries by providing valid filename (e.g. foo_image.png).

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter:

  • Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS) format, such as in 01:23:45.678.
  • Fractional: such as in 02:30.5. This is interpreted as 2 minutes and 30 and a half seconds, which would be the same as using 150.5 in seconds.
Using PillowUsing OpenCVUsing MatplotlibUsing Imageio

In Pillow, the fromarray() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as PNG\n    im.save(\"foo_image.png\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In OpenCV, the imwrite() function can export BGR frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder for BGR24 outputwith suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    cv2.imwrite('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Matplotlib, the imsave() function can save an RGB frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport matplotlib.pyplot as plt\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    plt.imsave('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Imageio, the imwrite() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport imageio\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our output\n    imageio.imwrite('foo_image.jpeg', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/save-keyframe-image/#generating-thumbnail-with-a-fancy-filter","title":"Generating Thumbnail with a Fancy filter","text":"fancy_thumbnail.jpg (Courtesy - BigBuckBunny)

In this example we first apply FFmpeg\u2019s tblend filter with an hardmix blend mode (cool stuff) and then seek to 00:00:25.917(or 25.917sec) in time to retrieve our single frame thumbnail, and thereby save it as JPEG image with valid filename (e.g. fancy_thumbnail.jpg) using Pillow library.

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter: - [x] Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS), such as in 01:23:45.678 - [x] Fractional: such as in 02:30.05, this is interpreted as 2 minutes, 30 seconds, and a half a second, which would be the same as using 150.5 in seconds.

Available blend mode options

Other blend mode options for tblend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to\nffparams = {\n    \"-vf\": \"tblend=all_mode='hardmix'\",  # trim and reverse\n    \"-ss\": \"00:00:25.917\",  # seek to 00:00:25.917(or 25s 917msec)\n    \"-frames:v\": 1,  # get one single frame\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"BigBuckBunny.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as JPEG\n    im.save(\"fancy_thumbnail.jpg\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/","title":"Transcoding Live Simple Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API handles a single chain of filtergraphs (through -vf FFmpeg parameter) applied to the real-time frames quite effortlessly.

We'll discuss the transcoding of live simple filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional python dependencies, which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

OpenCV's VideoWriter() class lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-trimmed-and-reversed-video","title":"Transcoding Trimmed and Reversed video","text":"Big Buck Bunny Reversed

In this example we will take the first 5 seconds of a video clip (using trim filter) and reverse it (by applying reverse filter), and encode them using OpenCV Library's VideoWriter() method in real-time.

The reverse filter requires memory to buffer the entire clip, so applying trim filter first is strongly recommended. Otherwise you might probably run Out of Memory.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

By default, OpenCV expects BGR format frames in its write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# trim 5 sec from end and reverse\nffparams = {\n    \"-vf\": \"trim=end=5,reverse\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-cropped-video","title":"Transcoding Cropped video","text":"Big Buck Bunny Cropped

In this example we will crop real-time video frames by an area with size \u2154 of the input video (say foo.mp4) by applying crop filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using crop filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# cropped the central input area with size 2/3 of the input video\nffparams = {\n    \"-vf\": \"crop=2/3*in_w:2/3*in_h\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-rotate-filter","title":"Transcoding Rotated video (with rotate filter)","text":"

FFmpeg features Rotate Filter that is used to rotate videos by an arbitrary angle (expressed in radians).

Big Buck Bunny Rotated (with rotate filter)

In this example we will rotate real-time video frames at an arbitrary angle by applying rotate filter in FFdecoder API and also using green color to fill the output area not covered by the rotated image, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 0.35 rad and fill green\nffparams = {\n    \"-vf\": \"rotate=angle=-20*PI/180:fillcolor=green\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-transpose-filter","title":"Transcoding Rotated video (with transpose filter)","text":"

FFmpeg also features Transpose Filter that is used to rotate videos by 90 degrees clockwise and counter-clockwise direction as well as flip them vertically and horizontally.

Big Buck Bunny Rotated (with transpose filter)

In this example we will rotate real-time video frames by 90 degrees counterclockwise and preserve portrait geometry by applying transpose filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 90 degrees counter-clockwise and preserve portrait layout\nffparams = {\n    \"-vf\": \"transpose=dir=2:passthrough=portrait\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-horizontally-flipped-and-scaled-video","title":"Transcoding Horizontally flipped and Scaled video","text":"Big Buck Bunny Horizontally flipped and Scaled

In this example we will horizontally flip and scale real-time video frames to half its original size by applying hflip and scale filter one-by-one in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using scale filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# horizontally flip and scale to half its original size\nffparams = {\n    \"-vf\": \"hflip,scale=w=iw/2:h=ih/2\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/","title":"Transcoding Live frames","text":"What exactly is Transcoding?

Before heading directly into recipes we have to talk about Transcoding:

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

While decoding media into video frames is purely managed by DeFFcode's FFdecoder API, you can easily encode those video frames back into multimedia files using any well-known video processing library such as OpenCV and VidGear.

We'll discuss transcoding using both these libraries briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional python dependencies, which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-video-using-opencv-videowriter-api","title":"Transcoding video using OpenCV VideoWriter API","text":"

OpenCV's VideoWriter() class can be used directly with DeFFcode's FFdecoder API to encode video frames into a multimedia video file, but it lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

BGR framesRGB framesGRAYSCALE framesYUV frames

By default, OpenCV expects BGR format frames in its cv2.write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n     # let's also show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

Since OpenCV expects BGR format frames in its cv2.write() method, therefore we need to convert RGB frames into BGR before encoding as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for RGB24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the RGB24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # converting RGB24 to BGR24 frame\n    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n    # writing BGR24 frame to writer\n    writer.write(frame_bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

OpenCV also directly consumes GRAYSCALE frames in its cv2.write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try encoding YUV420p pixel-format frames with OpenCV's write() method in following python code:

You can also use other YUV pixel-formats such as yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) etc. instead for higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the yuv420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-lossless-video-using-writegear-api","title":"Transcoding lossless video using WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allow us to manipulate the real-time video frames with immense flexibility.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them into lossless video file with controlled framerate using WriteGear API in real-time.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

BGR framesRGB framesGRAYSCALE framesYUV frames

WriteGear API by default expects BGR format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for BGR24 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In WriteGear API, you can use rgb_mode parameter in write() class method to write RGB format frames instead of default BGR as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing RGB24 frame to writer\n    writer.write(frame, rgb_mode=True)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes GRAYSCALE format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` parameter\n# for controlled output framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_gray.mp4`\nwriter = WriteGear(output_filename=\"output_foo_gray.mp4\", **output_params)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes YUV (or basically any other supported pixel format) frames in its write() class method with its -input_pixfmt attribute in compression mode. For its non-compression mode, see above example.

You can also use yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) instead for higher dynamic ranges.

In WriteGear API, the support for -input_pixfmt attribute in output_params dictionary parameter was added in v0.3.0.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for YUV420 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"yuv420p\").formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as \n# `-input_framerate` parameter for controlled framerate\n# and add input pixfmt as yuv420p also\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-input_pixfmt\": \"yuv420p\"\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_yuv.mp4`\nwriter = WriteGear(output_filename=\"output_foo_yuv.mp4\", logging=True, **output_params)\n\n# grab the YUV420 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing YUV420 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"reference/ffhelper/","title":"deffcode.ffhelper","text":"

Following methods are exclusively design to handle FFmpeg related tasks. These tasks includes validation of installed FFmpeg binaries, downloading of FFmpeg binaries(on Windows), and parsing of FFmpeg metadata into useful information using various pattern matching methods.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.download_ffmpeg_binaries--download_ffmpeg_binaries","title":"download_ffmpeg_binaries","text":"

Generates FFmpeg Static Binaries for windows(if not available)

Parameters:

Name Type Description Default path string

path for downloading custom FFmpeg executables

required os_windows boolean

is running on Windows OS?

False os_bit string

32-bit or 64-bit OS?

''

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def download_ffmpeg_binaries(path, os_windows=False, os_bit=\"\"):\n    \"\"\"\n    ## download_ffmpeg_binaries\n\n    Generates FFmpeg Static Binaries for windows(if not available)\n\n    Parameters:\n        path (string): path for downloading custom FFmpeg executables\n        os_windows (boolean): is running on Windows OS?\n        os_bit (string): 32-bit or 64-bit OS?\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if os_windows and os_bit:\n        # initialize with available FFmpeg Static Binaries GitHub Server\n        file_url = \"https://github.com/abhiTronix/FFmpeg-Builds/releases/latest/download/ffmpeg-static-{}-gpl.zip\".format(\n            os_bit\n        )\n\n        file_name = os.path.join(\n            os.path.abspath(path), \"ffmpeg-static-{}-gpl.zip\".format(os_bit)\n        )\n        file_path = os.path.join(\n            os.path.abspath(path),\n            \"ffmpeg-static-{}-gpl/bin/ffmpeg.exe\".format(os_bit),\n        )\n        base_path, _ = os.path.split(file_name)  # extract file base path\n        # check if file already exists\n        if os.path.isfile(file_path):\n            final_path += file_path  # skip download if does\n        else:\n            # import libs\n            import zipfile\n\n            # check if given path has write access\n            assert os.access(path, os.W_OK), (\n                \"[Helper:ERROR] :: Permission Denied, Cannot write binaries to directory = \"\n                + path\n            )\n            # remove leftovers if exists\n            os.path.isfile(file_name) and delete_file_safe(file_name)\n            # download and write file to the given path\n            with open(file_name, \"wb\") as f:\n                logger.debug(\n                    \"No Custom FFmpeg path provided. Auto-Installing FFmpeg static binaries from GitHub Mirror now. 
Please wait...\"\n                )\n                # create session\n                with requests.Session() as http:\n                    # setup retry strategy\n                    retries = Retry(\n                        total=3,\n                        backoff_factor=1,\n                        status_forcelist=[429, 500, 502, 503, 504],\n                    )\n                    # Mount it for https usage\n                    adapter = TimeoutHTTPAdapter(timeout=2.0, max_retries=retries)\n                    http.mount(\"https://\", adapter)\n                    response = http.get(file_url, stream=True)\n                    response.raise_for_status()\n                    total_length = (\n                        response.headers.get(\"content-length\")\n                        if \"content-length\" in response.headers\n                        else len(response.content)\n                    )\n                    assert not (\n                        total_length is None\n                    ), \"[Helper:ERROR] :: Failed to retrieve files, check your Internet connectivity!\"\n                    bar = tqdm(total=int(total_length), unit=\"B\", unit_scale=True)\n                    for data in response.iter_content(chunk_size=4096):\n                        f.write(data)\n                        len(data) > 0 and bar.update(len(data))\n                    bar.close()\n            logger.debug(\"Extracting executables.\")\n            with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n                zip_fname, _ = os.path.split(zip_ref.infolist()[0].filename)\n                zip_ref.extractall(base_path)\n            # perform cleaning\n            delete_file_safe(file_name)\n            logger.debug(\"FFmpeg binaries for Windows configured successfully!\")\n            final_path += file_path\n    # return final path\n    return final_path\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_ffmpeg--validate_ffmpeg","title":"validate_ffmpeg","text":"

Validate FFmpeg Binaries. Returns True if validity test passes successfully.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_ffmpeg(path, verbose=False):\n    \"\"\"\n    ## validate_ffmpeg\n\n    Validate FFmpeg Binaries. Returns `True` if validity test passes successfully.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    try:\n        # get the FFmpeg version\n        version = check_sp_output([path, \"-version\"])\n        firstline = version.split(b\"\\n\")[0]\n        version = firstline.split(b\" \")[2].strip()\n        if verbose:  # log if test are passed\n            logger.debug(\"FFmpeg validity Test Passed!\")\n            logger.debug(\n                \"Found valid FFmpeg Version: `{}` installed on this system\".format(\n                    version\n                )\n            )\n    except Exception as e:\n        # log if test are failed\n        if verbose:\n            logger.exception(str(e))\n            logger.warning(\"FFmpeg validity Test Failed!\")\n        return False\n    return True\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_pixfmts--get_supported_pixfmts","title":"get_supported_pixfmts","text":"

Find and returns all FFmpeg's supported pixel formats.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).

Source code in deffcode/ffhelper.py
def get_supported_pixfmts(path):\n    \"\"\"\n    ## get_supported_pixfmts\n\n    Find and returns all FFmpeg's supported pixel formats.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).\n    \"\"\"\n    pxfmts = check_sp_output([path, \"-hide_banner\", \"-pix_fmts\"])\n    splitted = pxfmts.split(b\"\\n\")\n    srtindex = [i for i, s in enumerate(splitted) if b\"-----\" in s]\n    # extract video encoders\n    supported_pxfmts = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[srtindex[0] + 1 :]\n        if x.decode(\"utf-8\").strip()\n    ]\n    # compile regex\n    finder = re.compile(r\"([A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*)(\\s+[0-4])(\\s+[0-9]+)\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_pxfmts))\n    # return output findings\n    return [\n        ([s for s in o[0].split(\" \")][-1], o[1].strip(), o[2].strip())\n        for o in outputs\n        if len(o) == 3\n    ]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_vdecoders--get_supported_vdecoders","title":"get_supported_vdecoders","text":"

Find and returns all FFmpeg's supported video decoders.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported decoders.

Source code in deffcode/ffhelper.py
def get_supported_vdecoders(path):\n    \"\"\"\n    ## get_supported_vdecoders\n\n    Find and returns all FFmpeg's supported video decoders.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported decoders.\n    \"\"\"\n    decoders = check_sp_output([path, \"-hide_banner\", \"-decoders\"])\n    splitted = decoders.split(b\"\\n\")\n    # extract video encoders\n    supported_vdecoders = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[2 : len(splitted) - 1]\n        if x.decode(\"utf-8\").strip().startswith(\"V\")\n    ]\n    # compile regex\n    finder = re.compile(r\"[A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_vdecoders))\n    # return output findings\n    return [[s for s in o.split(\" \")][-1] for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_demuxers--get_supported_demuxers","title":"get_supported_demuxers","text":"

Find and returns all FFmpeg's supported demuxers.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported demuxers.

Source code in deffcode/ffhelper.py
def get_supported_demuxers(path):\n    \"\"\"\n    ## get_supported_demuxers\n\n    Find and returns all FFmpeg's supported demuxers.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported demuxers.\n    \"\"\"\n    demuxers = check_sp_output([path, \"-hide_banner\", \"-demuxers\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in demuxers.split(b\"\\n\")]\n    split_index = [idx for idx, s in enumerate(splitted) if \"--\" in s][0]\n    supported_demuxers = splitted[split_index + 1 : len(splitted) - 1]\n    # compile regex\n    finder = re.compile(r\"\\s\\s[a-z0-9_,-]+\\s+\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_demuxers))\n    # return output findings\n    return [o.strip() if not (\",\" in o) else o.split(\",\")[-1].strip() for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_imgseqdir--validate_imgseqdir","title":"validate_imgseqdir","text":"

Validates Image Sequence by counting number of Image files.

Parameters:

Name Type Description Default source string

video source to be validated

required extension string

extension of image sequence.

'jpg'

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_imgseqdir(source, extension=\"jpg\", verbose=False):\n    \"\"\"\n    ## validate_imgseqdir\n\n    Validates Image Sequence by counting number of Image files.\n\n    Parameters:\n        source (string): video source to be validated\n        extension (string): extension of image sequence.\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    # check if path exists\n    dirpath = Path(source).parent\n    try:\n        if not (dirpath.exists() and dirpath.is_dir()):\n            verbose and logger.warning(\n                \"Specified path `{}` doesn't exists or valid.\".format(dirpath)\n            )\n            return False\n        else:\n            return (\n                True if len(list(dirpath.glob(\"*.{}\".format(extension)))) > 2 else False\n            )\n    except:\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_image_seq--is_valid_image_seq","title":"is_valid_image_seq","text":"

Checks Image sequence validity by testing its extension against FFmpeg's supported pipe formats and number of Image files.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required source string

video source to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_image_seq(path, source=None, verbose=False):\n    \"\"\"\n    ## is_valid_image_seq\n\n    Checks Image sequence validity by testing its extension against\n    FFmpeg's supported pipe formats and number of Image files.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        source (string): video source to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if source is None or not (source):\n        logger.error(\"Source is empty!\")\n        return False\n    # extract all FFmpeg supported protocols\n    formats = check_sp_output([path, \"-hide_banner\", \"-formats\"])\n    extract_formats = re.findall(r\"\\w+_pipe\", formats.decode(\"utf-8\").strip())\n    supported_image_formats = [\n        x.split(\"_\")[0] for x in extract_formats if x.endswith(\"_pipe\")\n    ]\n    filename, extension = os.path.splitext(source)\n    # Test and return result whether scheme is supported\n    if extension and source.endswith(tuple(supported_image_formats)):\n        if validate_imgseqdir(source, extension=extension[1:], verbose=verbose):\n            verbose and logger.debug(\n                \"A valid Image Sequence source of format `{}` found.\".format(extension)\n            )\n            return True\n        else:\n            ValueError(\n                \"Given Image Sequence source of format `{}` contains insignificant(invalid) sample size, Check the `source` parameter value again!\".format(\n                    source.split(\".\")[1]\n                )\n            )\n    else:\n        verbose and logger.warning(\"Source isn't a valid Image Sequence\")\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_url--is_valid_url","title":"is_valid_url","text":"

Checks URL validity by testing its scheme against FFmpeg's supported protocols.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required url string

URL to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_url(path, url=None, verbose=False):\n    \"\"\"\n    ## is_valid_url\n\n    Checks URL validity by testing its scheme against\n    FFmpeg's supported protocols.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        url (string): URL to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if url is None or not (url):\n        logger.warning(\"URL is empty!\")\n        return False\n    # extract URL scheme\n    extracted_scheme_url = url.split(\"://\", 1)[0]\n    # extract all FFmpeg supported protocols\n    protocols = check_sp_output([path, \"-hide_banner\", \"-protocols\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in protocols.split(b\"\\n\")]\n    supported_protocols = splitted[splitted.index(\"Output:\") + 1 : len(splitted) - 1]\n    # RTSP is a demuxer somehow\n    # support both RTSP and RTSPS(over SSL)\n    logger.critical(get_supported_demuxers(path))\n    supported_protocols += (\n        [\"rtsp\", \"rtsps\"] if \"rtsp\" in get_supported_demuxers(path) else []\n    )\n    # Test and return result whether scheme is supported\n    if extracted_scheme_url and extracted_scheme_url in supported_protocols:\n        verbose and logger.debug(\n            \"URL scheme `{}` is supported by FFmpeg.\".format(extracted_scheme_url)\n        )\n        return True\n    else:\n        verbose and logger.warning(\n            \"URL scheme `{}` isn't supported by FFmpeg!\".format(extracted_scheme_url)\n        )\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.check_sp_output--check_sp_output","title":"check_sp_output","text":"

Returns FFmpeg stdout output from subprocess module.

Parameters:

Name Type Description Default args based on input

Non Keyword Arguments

() kwargs based on input

Keyword Arguments

{}

Returns: A string value.

Source code in deffcode/ffhelper.py
def check_sp_output(*args, **kwargs):\n    \"\"\"\n    ## check_sp_output\n\n    Returns FFmpeg `stdout` output from subprocess module.\n\n    Parameters:\n        args (based on input): Non Keyword Arguments\n        kwargs (based on input): Keyword Arguments\n\n    **Returns:** A string value.\n    \"\"\"\n    # workaround for python bug: https://bugs.python.org/issue37380\n    if platform.system() == \"Windows\":\n        # see comment https://bugs.python.org/msg370334\n        sp._cleanup = lambda: None\n    # handle additional params\n    retrieve_stderr = kwargs.pop(\"force_retrieve_stderr\", False)\n    # execute command in subprocess\n    process = sp.Popen(\n        stdout=sp.PIPE,\n        stderr=sp.DEVNULL if not (retrieve_stderr) else sp.PIPE,\n        *args,\n        **kwargs,\n    )\n    # communicate and poll process\n    output, stderr = process.communicate()\n    retcode = process.poll()\n    # handle return code\n    if retcode and not (retrieve_stderr):\n        logger.error(\"[Pipline-Error] :: {}\".format(output.decode(\"utf-8\")))\n        cmd = kwargs.get(\"args\")\n        if cmd is None:\n            cmd = args[0]\n        error = sp.CalledProcessError(retcode, cmd)\n        error.output = output\n        raise error\n    # raise error if no output\n    bool(output) or bool(stderr) or logger.error(\n        \"[Pipline-Error] :: Pipline failed to exact any data from command: {}!\".format(\n            args[0] if args else []\n        )\n    )\n    # return output otherwise\n    return stderr if retrieve_stderr and stderr else output\n
"},{"location":"reference/utils/","title":"deffcode.utils","text":"

Following are the helper methods required by the DeFFcode APIs.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/utils/#deffcode.utils.logger_handler--logger_handler","title":"logger_handler","text":"

Returns the logger handler

Returns: A logger handler

Source code in deffcode/utils.py
def logger_handler():\n    \"\"\"\n    ## logger_handler\n\n    Returns the logger handler\n\n    **Returns:** A logger handler\n    \"\"\"\n    # logging formatter\n    formatter = ColoredFormatter(\n        \"{green}{asctime}{reset} :: {bold_purple}{name:^13}{reset} :: {log_color}{levelname:^8}{reset} :: {bold_white}{message}\",\n        datefmt=\"%H:%M:%S\",\n        reset=True,\n        log_colors={\n            \"INFO\": \"bold_cyan\",\n            \"DEBUG\": \"bold_yellow\",\n            \"WARNING\": \"bold_red,fg_thin_yellow\",\n            \"ERROR\": \"bold_red\",\n            \"CRITICAL\": \"bold_red,bg_white\",\n        },\n        style=\"{\",\n    )\n    # check if FFdecoder_LOGFILE defined\n    file_mode = os.environ.get(\"DEFFCODE_LOGFILE\", False)\n    # define handler\n    handler = logging.StreamHandler()\n    if file_mode and isinstance(file_mode, str):\n        file_path = os.path.abspath(file_mode)\n        if (os.name == \"nt\" or os.access in os.supports_effective_ids) and os.access(\n            os.path.dirname(file_path), os.W_OK\n        ):\n            file_path = (\n                os.path.join(file_path, \"deffcode.log\")\n                if os.path.isdir(file_path)\n                else file_path\n            )\n            handler = logging.FileHandler(file_path, mode=\"a\")\n            formatter = logging.Formatter(\n                \"{asctime} :: {name} :: {levelname} :: {message}\",\n                datefmt=\"%H:%M:%S\",\n                style=\"{\",\n            )\n\n    handler.setFormatter(formatter)\n    return handler\n
"},{"location":"reference/utils/#deffcode.utils.dict2Args--dict2args","title":"dict2Args","text":"

Converts dictionary attributes to list(args)

Parameters:

Name Type Description Default param_dict dict

Parameters dictionary

required

Returns: Arguments list

Source code in deffcode/utils.py
def dict2Args(param_dict):\n    \"\"\"\n    ## dict2Args\n\n    Converts dictionary attributes to list(args)\n\n    Parameters:\n        param_dict (dict): Parameters dictionary\n\n    **Returns:** Arguments list\n    \"\"\"\n    args = []\n    for key in param_dict.keys():\n        if key in [\"-clones\"] or key.startswith(\"-core\"):\n            if isinstance(param_dict[key], list):\n                args.extend(param_dict[key])\n            else:\n                logger.warning(\n                    \"{} with invalid datatype:`{}`, Skipped!\".format(\n                        \"Core parameter\" if key.startswith(\"-core\") else \"Clone\",\n                        param_dict[key],\n                    )\n                )\n        else:\n            args.append(key)\n            args.append(str(param_dict[key]))\n    return args\n
"},{"location":"reference/utils/#deffcode.utils.delete_file_safe--delete_ext_safe","title":"delete_ext_safe","text":"

Safely deletes files at given path.

Parameters:

Name Type Description Default file_path string

path to the file

required Source code in deffcode/utils.py
def delete_file_safe(file_path):\n    \"\"\"\n    ## delete_ext_safe\n\n    Safely deletes files at given path.\n\n    Parameters:\n        file_path (string): path to the file\n    \"\"\"\n    try:\n        dfile = Path(file_path)\n        if sys.version_info >= (3, 8, 0):\n            dfile.unlink(missing_ok=True)\n        else:\n            dfile.exists() and dfile.unlink()\n    except Exception as e:\n        logger.exception(str(e))\n
"},{"location":"reference/ffdecoder/","title":"FFdecoder API","text":"

FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1

FFdecoder API implements a standalone highly-extensible wrapper around FFmpeg multimedia framework that provides complete control over the underline pipeline including access to almost any FFmpeg specification thinkable such as framerate, resolution, hardware decoder(s), complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.

FFdecoder API compiles its FFmpeg pipeline by processing input Video Source metadata and User-defined options, and runs it inside a subprocess pipe concurrently with the main thread, while extracting output dataframes(1D arrays) into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into 24-bit RGB (default) ndarray 3D frames that are readily available through its generateFrame() method.

FFdecoder API employs Sourcer API at its backend for gathering, processing, and validating metadata of all multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. This metadata information is also available as a JSON string with its metadata property object and can be updated as desired.

FFdecoder API supports a wide-ranging media stream as input source such as USB/Virtual/IP Camera Feed, Multimedia video file, Screen Capture, Image Sequence, Network protocols (such as HTTP(s), RTP/RSTP, etc.), so on and so forth.

Furthermore, FFdecoder API maintains the standard OpenCV-Python (Python API for OpenCV) coding syntax, thereby making it even easier to integrate this API in any Computer Vision application.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

FFdecoder API parameters are explained here \u27b6

Source code in deffcode/ffdecoder.py
class FFdecoder:\n    \"\"\"\n    > FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames\n    with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1\n\n    FFdecoder API implements a **standalone highly-extensible wrapper around [FFmpeg](https://ffmpeg.org/)** multimedia framework that provides complete\n    control over the underline pipeline including **access to almost any FFmpeg specification thinkable** such as framerate, resolution, hardware decoder(s),\n    complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.\n\n    FFdecoder API **compiles its FFmpeg pipeline** by processing input Video Source metadata and User-defined options, and **runs it inside a\n    [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe** concurrently with the main thread, while extracting output dataframes(1D arrays)\n    into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into ==[24-bit RGB](https://en.wikipedia.org/wiki/List_of_monochrome_and_RGB_color_formats#24-bit_RGB) _(default)_\n    [`ndarray`](https://numpy.org/doc/stable/reference/arrays.ndarray.html#the-n-dimensional-array-ndarray) 3D frames== that are readily available\n    through its [`generateFrame()`](#deffcode.ffdecoder.FFdecoder.generateFrame) method.\n\n    FFdecoder API **employs [Sourcer API](../../reference/sourcer) at its backend** for gathering, processing, and validating metadata of all\n    multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. 
This metadata information is also\n    available as a JSON string with its [`metadata`](#deffcode.ffdecoder.FFdecoder.metadata) property object and can be updated as desired.\n\n    FFdecoder API **supports a wide-ranging media stream** as input source such as USB/Virtual/IP Camera Feed, Multimedia video file,\n    Screen Capture, Image Sequence, Network protocols _(such as HTTP(s), RTP/RSTP, etc.)_, so on and so forth.\n\n    Furthermore, FFdecoder API maintains the **standard [OpenCV-Python](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html) _(Python API for OpenCV)_ coding syntax**, thereby making it even easier to\n    integrate this API in any Computer Vision application.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! info \"FFdecoder API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        frame_format=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **ffparams\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n\n        # enable verbose if specified\n        self.__verbose_logs = (\n            verbose if (verbose and isinstance(verbose, bool)) else False\n    
    )\n\n        # define whether initializing\n        self.__initializing = True\n\n        # define frame pixel-format for decoded frames\n        self.__frame_format = (\n            frame_format.lower().strip() if isinstance(frame_format, str) else None\n        )\n\n        # handles user-defined parameters\n        self.__extra_params = {}\n\n        # handle process to be frames written\n        self.__process = None\n\n        # handle exclusive metadata\n        self.__ff_pixfmt_metadata = None  # metadata\n        self.__raw_frame_num = None  # raw-frame number\n        self.__raw_frame_pixfmt = None  # raw-frame pixformat\n        self.__raw_frame_dtype = None  # raw-frame dtype\n        self.__raw_frame_depth = None  # raw-frame depth\n        self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n        # define supported mode of operation\n        self.__supported_opmodes = {\n            \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n            \"vo\": \"Video-Only\",\n            \"imgseq\": \"Image-Sequence\",\n            # \"ao\":\"Audio-Only\", # reserved for future\n        }\n        # operation mode variable\n        self.__opmode = None\n\n        # handle termination\n        self.__terminate_stream = False\n\n        # cleans and reformat user-defined parameters\n        self.__extra_params = {\n            str(k).strip(): str(v).strip()\n            if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n            else v\n            for k, v in ffparams.items()\n        }\n\n        # handle custom Sourcer API params\n        sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n        # reset improper values\n        sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n        # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        self.__ffmpeg_prefixes = 
self.__extra_params.pop(\"-ffprefixes\", [])\n        # check if not valid type\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n        else:\n            # also pass valid ffmpeg pre-headers to Sourcer API\n            sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n        # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n        # assets on Windows(if specified)\n        sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n            \"-ffmpeg_download_path\", \"\"\n        )\n\n        # handle video and audio stream indexes in case of multiple ones.\n        default_stream_indexes = self.__extra_params.pop(\n            \"-default_stream_indexes\", (0, 0)\n        )\n        # reset improper values\n        default_stream_indexes = (\n            (0, 0)\n            if not isinstance(default_stream_indexes, (list, tuple))\n            else default_stream_indexes\n        )\n\n        # pass FFmpeg filter to Sourcer API params for processing\n        if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n            key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n            sourcer_params[key] = self.__extra_params[key]\n\n        # define dict to store user-defined parameters\n        self.__user_metadata = {}\n        # extract and assign source metadata as dict\n        (self.__sourcer_metadata, self.__missing_prop) = (\n            Sourcer(\n                source=source,\n                source_demuxer=source_demuxer,\n                verbose=verbose,\n                custom_ffmpeg=custom_ffmpeg if 
isinstance(custom_ffmpeg, str) else \"\",\n                **sourcer_params\n            )\n            .probe_stream(default_stream_indexes=default_stream_indexes)\n            .retrieve_metadata(force_retrieve_missing=True)\n        )\n\n        # handle valid FFmpeg assets location\n        self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n        # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n        # patch for compatibility with OpenCV APIs.\n        self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n        if not (isinstance(self.__cv_patch, bool)):\n            self.__cv_patch = False\n            self.__verbose_logs and logger.critical(\n                \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n            )\n\n        # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n        self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n        if not (isinstance(self.__passthrough_mode, bool)):\n            self.__passthrough_mode = False\n\n        # handle mode of operation\n        if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n            # image-sequence mode\n            self.__opmode = \"imgseq\"\n        elif (\n            self.__sourcer_metadata[\n                \"source_has_video\"\n            ]  # audio is only for pass-through, not really for audio decoding yet.\n            and self.__sourcer_metadata[\"source_has_audio\"]\n            and self.__passthrough_mode  # [TODO]\n        ):\n            self.__opmode = \"av\"\n        # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n        #    self.__opmode = \"ao\"\n        elif self.__sourcer_metadata[\"source_has_video\"]:\n            # video-only mode\n            self.__opmode = \"vo\"\n        else:\n            # raise if unknown mode\n            raise ValueError(\n                \"Unable to find any 
usable video stream in the given source!\"\n            )\n        # store as metadata\n        self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n            self.__opmode\n        ]\n\n        # handle user-defined output framerate\n        __framerate = self.__extra_params.pop(\"-framerate\", None)\n        if (\n            isinstance(__framerate, str)\n            and __framerate\n            == \"null\"  # special mode to discard `-framerate/-r` parameter\n        ):\n            self.__inputframerate = __framerate\n        elif isinstance(__framerate, (float, int)):\n            self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n        else:\n            # warn if wrong type\n            not (__framerate is None) and logger.warning(\n                \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                    type(__framerate).__name__\n                )\n            )\n            # reset to default\n            self.__inputframerate = 0.0\n\n        # handle user defined decoded frame resolution\n        self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n        if (\n            isinstance(self.__custom_resolution, str)\n            and self.__custom_resolution\n            == \"null\"  # special mode to discard `-size/-s` parameter\n        ) or (\n            isinstance(self.__custom_resolution, (list, tuple))\n            and len(self.__custom_resolution)\n            == 2  # valid resolution(must be a tuple or list)\n        ):\n            # log it\n            self.__verbose_logs and not isinstance(\n                self.__custom_resolution, str\n            ) and logger.debug(\n                \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n            )\n        else:\n            # log it\n            not (self.__custom_resolution is None) and logger.warning(\n                \"Discarding invalid 
`-custom_resolution` value: `{}`!\".format(\n                    self.__custom_resolution\n                )\n            )\n            # reset improper values\n            self.__custom_resolution = None\n\n    def formulate(self):\n\n        \"\"\"\n        This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n        **Returns:** A reference to the FFdecoder class object.\n        \"\"\"\n        # assign values to class variables on first run\n        if self.__initializing:\n            # prepare parameter dict\n            input_params = OrderedDict()\n            output_params = OrderedDict()\n\n            # dynamically pre-assign a default video-decoder (if not assigned by user).\n            supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n            default_vdecodec = (\n                self.__sourcer_metadata[\"source_video_decoder\"]\n                if self.__sourcer_metadata[\"source_video_decoder\"]\n                in supported_vdecodecs\n                else \"unknown\"\n            )\n            if \"-c:v\" in self.__extra_params:\n                self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-c:v\", default_vdecodec\n                )\n            # handle image sequence separately\n            if self.__opmode == \"imgseq\":\n                # -vcodec is discarded by default\n                # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n                self.__extra_params.pop(\"-vcodec\", None)\n            elif (\n                \"-vcodec\" in self.__extra_params\n                and self.__extra_params[\"-vcodec\"] is None\n            ):\n                # special case when -vcodec is not needed intentionally\n                self.__extra_params.pop(\"-vcodec\", None)\n            else:\n                # assign video decoder selected here.\n                if not \"-vcodec\" in 
self.__extra_params:\n                    input_params[\"-vcodec\"] = default_vdecodec\n                else:\n                    input_params[\"-vcodec\"] = self.__extra_params.pop(\n                        \"-vcodec\", default_vdecodec\n                    )\n                if (\n                    default_vdecodec != \"unknown\"\n                    and not input_params[\"-vcodec\"] in supported_vdecodecs\n                ):\n                    # reset to default if not supported\n                    logger.warning(\n                        \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                            input_params[\"-vcodec\"], default_vdecodec\n                        )\n                    )\n                    input_params[\"-vcodec\"] = default_vdecodec\n                # raise error if not valid decoder found\n                if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                    raise RuntimeError(\n                        \"Provided FFmpeg does not support any known usable video-decoders.\"\n                        \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                    )\n\n            # handle user-defined number of frames.\n            if \"-vframes\" in self.__extra_params:\n                self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                    \"-vframes\", None\n                )\n            if \"-frames:v\" in self.__extra_params:\n                value = self.__extra_params.pop(\"-frames:v\", None)\n                if not (value is None) and value > 0:\n                    output_params[\"-frames:v\"] = value\n\n            # dynamically calculate default raw-frames pixel format(if not assigned by user).\n            # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n            if \"-pix_fmt\" in self.__extra_params:\n                
logger.warning(\n                    \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n                )\n                self.__extra_params.pop(\"-pix_fmt\", None)\n            # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n            self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n            supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n            # calculate default pixel-format\n            # Check special case  - `frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n            self.__frame_format == \"null\" and logger.critical(\n                \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n            )\n            # choose between rgb24(if available) or source pixel-format\n            # otherwise, only source pixel-format for special case\n            default_pixfmt = (\n                \"rgb24\"\n                if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n                else self.__sourcer_metadata[\"source_video_pixfmt\"]\n            )\n            # assign output raw-frames pixel format\n            rawframe_pixfmt = None\n            if (\n                not (self.__frame_format is None)\n                and self.__frame_format in supported_pixfmts\n            ):\n                # check if valid and supported `frame_format` parameter assigned\n                rawframe_pixfmt = self.__frame_format.strip()\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                        rawframe_pixfmt\n                    )\n                )\n            elif (\n                \"output_frames_pixfmt\"\n                in self.__sourcer_metadata  # means `format` filter is defined\n                and self.__sourcer_metadata[\"output_frames_pixfmt\"] 
in supported_pixfmts\n            ):\n                # assign if valid and supported\n                rawframe_pixfmt = self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ].strip()\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n                )\n            else:\n                # reset to default if not supported\n                rawframe_pixfmt = default_pixfmt\n                # log it accordingly\n                if self.__frame_format is None:\n                    logger.info(\n                        \"Using default `{}` pixel-format for this pipeline.\".format(\n                            default_pixfmt\n                        )\n                    )\n                else:\n                    logger.warning(\n                        \"{} Switching to default `{}` pixel-format!\".format(\n                            \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                                self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                                if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                                else self.__frame_format\n                            )\n                            if self.__frame_format != \"null\"\n                            else \"No usable pixel-format defined.\",\n                            default_pixfmt,\n                        )\n                    )\n\n            # dynamically calculate raw-frame datatype based on pixel-format selected\n            (self.__raw_frame_depth, rawframesbpp) = [\n                (int(x[1]), int(x[2]))\n                for x in self.__ff_pixfmt_metadata\n                if x[0] == rawframe_pixfmt\n            ][0]\n            raw_bit_per_component = (\n                rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n            )\n     
       if 4 <= raw_bit_per_component <= 8:\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n            elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n                (\"le\", \"be\")\n            ):\n                if rawframe_pixfmt.endswith(\"le\"):\n                    self.__raw_frame_dtype = np.dtype(\"<u2\")\n                else:\n                    self.__raw_frame_dtype = np.dtype(\">u2\")\n            else:\n                # reset to both pixel-format and datatype to default if not supported\n                not (self.__frame_format is None) and logger.warning(\n                    \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. Switching to default `rgb24` pixel-format!\".format(\n                        rawframe_pixfmt\n                    )\n                )\n                rawframe_pixfmt = \"rgb24\"\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n\n            # Check if not special case\n            if self.__frame_format != \"null\":\n                # assign to FFmpeg pipeline otherwise\n                output_params[\"-pix_fmt\"] = rawframe_pixfmt\n            # assign to global parameter further usage\n            self.__raw_frame_pixfmt = rawframe_pixfmt\n            # also override as metadata(if available)\n            if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n                self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ] = self.__raw_frame_pixfmt\n\n            # handle raw-frame resolution\n            # notify FFmpeg `-s` parameter cannot be assigned directly\n            if \"-s\" in self.__extra_params:\n                logger.warning(\n                    \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n                )\n                self.__extra_params.pop(\"-s\", None)\n            # assign output rawframe resolution\n            if not (self.__custom_resolution is None) and not isinstance(\n                self.__custom_resolution, str\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                self.__raw_frame_resolution = self.__custom_resolution\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                        self.__raw_frame_resolution\n                    )\n                )\n            elif (\n                \"output_frames_resolution\"\n                in self.__sourcer_metadata  # means `scale` filter is defined\n                and self.__sourcer_metadata[\"output_frames_resolution\"]\n                and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on output.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"output_frames_resolution\"\n                ]\n            elif (\n                self.__sourcer_metadata[\"source_video_resolution\"]\n                and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on source.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"source_video_resolution\"\n                ]\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n            # special mode to discard `-size/-s` FFmpeg parameter completely\n            if isinstance(self.__custom_resolution, str):\n                logger.critical(\n                    \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # add to pipeline\n                dimensions = \"{}x{}\".format(\n                    self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n                )\n                output_params[\"-s\"] = str(dimensions)\n            # log if filters or default source is used\n            self.__verbose_logs and (\n                self.__custom_resolution is None\n                or isinstance(self.__custom_resolution, str)\n            ) and logger.info(\n                \"{} for this pipeline for defining output resolution.\".format(\n                    \"FFmpeg filter values will be used\"\n                    if \"output_frames_resolution\" in self.__sourcer_metadata\n                    else \"Default source resolution will be used\"\n                )\n            )\n\n            # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n            if (\n                not isinstance(self.__inputframerate, str)\n                and self.__inputframerate > 0.0\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                output_params[\"-framerate\"] = str(self.__inputframerate)\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                        str(self.__inputframerate)\n                    )\n                )\n            elif (\n                
\"output_framerate\"\n                in self.__sourcer_metadata  # means `fps` filter is defined\n                and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n            ):\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on output\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"output_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n                )\n            elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on source\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"source_video_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"Default source framerate will be used for this pipeline for defining output framerate.\"\n                )\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n\n            # add rest to output parameters\n            output_params.update(self.__extra_params)\n\n            # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n            # TODO Added support for `-re -stream_loop` and `-loop`\n            if \"-frames:v\" in input_params:\n                self.__raw_frame_num = input_params[\"-frames:v\"]\n            elif (\n                not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n                and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n            ):\n                self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n            else:\n                self.__raw_frame_num = None\n                # log that number of frames are unknown\n                self.__verbose_logs and logger.info(\n                    \"Number of frames in given source are unknown. 
Live/Network/Looping stream detected!\"\n                )\n\n            # log Mode of Operation\n            self.__verbose_logs and logger.critical(\n                \"Activating {} Mode of Operation.\".format(\n                    self.__supported_opmodes[self.__opmode]\n                )\n            )\n\n            # compose the Pipeline using formulated FFmpeg parameters\n            self.__launch_FFdecoderline(input_params, output_params)\n\n            # inform the initialization is completed\n            self.__initializing = False\n        else:\n            # warn if pipeline is recreated\n            logger.error(\"This pipeline is already created and running!\")\n        return self\n\n    def __fetchNextfromPipeline(self):\n        \"\"\"\n        This Internal method to fetch next dataframes(1D arrays) from `subprocess` pipe's standard output(`stdout`) into a Numpy buffer.\n        \"\"\"\n        assert not (\n            self.__process is None\n        ), \"Pipeline is not running! 
You must call `formulate()` method first.\"\n\n        # formulated raw frame size and apply YUV pixel formats patch(if applicable)\n        raw_frame_size = (\n            (self.__raw_frame_resolution[0] * (self.__raw_frame_resolution[1] * 3 // 2))\n            if self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch\n            else (\n                self.__raw_frame_depth\n                * self.__raw_frame_resolution[0]\n                * self.__raw_frame_resolution[1]\n            )\n        )\n        # next dataframe as numpy ndarray\n        nparray = None\n        try:\n            # read bytes frames from buffer\n            nparray = np.frombuffer(\n                self.__process.stdout.read(\n                    raw_frame_size * self.__raw_frame_dtype.itemsize\n                ),\n                dtype=self.__raw_frame_dtype,\n            )\n        except Exception as e:\n            raise RuntimeError(\"Frame buffering failed with error: {}\".format(str(e)))\n        return (\n            nparray\n            if not (nparray is None) and len(nparray) == raw_frame_size\n            else None\n        )\n\n    def __fetchNextFrame(self):\n        \"\"\"\n        This Internal method grabs and decodes next 3D `ndarray` video-frame from the buffer.\n        \"\"\"\n        # Read next and reconstruct as numpy array\n        frame = self.__fetchNextfromPipeline()\n        # check if empty\n        if frame is None:\n            return frame\n        elif self.__raw_frame_pixfmt.startswith(\"gray\"):\n            # reconstruct exclusive `gray` frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )[:, :, 0]\n        elif self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch:\n            # reconstruct exclusive YUV formats 
frames for OpenCV APIs\n            frame = frame.reshape(\n                self.__raw_frame_resolution[1] * 3 // 2,\n                self.__raw_frame_resolution[0],\n            )\n        else:\n            # reconstruct default frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )\n        # return frame\n        return frame\n\n    def generateFrame(self):\n        \"\"\"\n        This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n        _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n        \"\"\"\n        if self.__raw_frame_num is None or not self.__raw_frame_num:\n            while not self.__terminate_stream:  # infinite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n        else:\n            for _ in range(self.__raw_frame_num):  # finite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n\n    def __enter__(self):\n        \"\"\"\n        Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n        **Returns:** Output of `formulate()` method.\n        \"\"\"\n        return self.formulate()\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"\n        Handles exit with the `with` statement. 
See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n        \"\"\"\n        self.terminate()\n\n    @property\n    def metadata(self):\n        \"\"\"\n        A property object that dumps metadata information as JSON string.\n\n        **Returns:** Metadata as JSON string.\n        \"\"\"\n        # import dependency\n        import json\n\n        # return complete metadata information as JSON string\n        return json.dumps(\n            {\n                **self.__sourcer_metadata,  # source video\n                **self.__missing_prop,  # missing properties\n                **self.__user_metadata,  # user-defined\n            },\n            indent=2,\n        )\n\n    @metadata.setter\n    def metadata(self, value):\n        \"\"\"\n        A property object that updates metadata information with user-defined dictionary.\n\n        Parameters:\n            value (dict): User-defined dictionary.\n        \"\"\"\n        # check if value dict type\n        if value and isinstance(value, dict):\n            # log it\n            self.__verbose_logs and logger.info(\"Updating Metadata...\")\n            # extract any source and output internal metadata keys\n            default_keys = set(value).intersection(\n                {**self.__sourcer_metadata, **self.__missing_prop}\n            )\n            # counterpart source properties for each output properties\n            counterpart_prop = {\n                \"output_frames_resolution\": \"source_video_resolution\",\n                \"output_frames_pixfmt\": \"source_video_pixfmt\",\n                \"output_framerate\": \"source_video_framerate\",\n            }\n            # iterate over source metadata keys and sanitize it\n            for key in default_keys or []:\n                if key == \"source\":\n                    # metadata properties that cannot be altered\n                    logger.warning(\n                        \"`{}` metadata property value cannot be altered. 
Discarding!\".format(\n                            key\n                        )\n                    )\n                elif key in self.__missing_prop:\n                    # missing metadata properties are unavailable and read-only\n                    # notify user about alternative counterpart property (if available)\n                    logger.warning(\n                        \"`{}` metadata property is read-only\".format(key)\n                        + (\n                            \". Try updating `{}` property instead!\".format(\n                                counterpart_prop[key]\n                            )\n                            if key in counterpart_prop.keys()\n                            else \" and cannot be updated!\"\n                        )\n                    )\n                elif isinstance(value[key], type(self.__sourcer_metadata[key])):\n                    # check if correct datatype as original\n                    self.__verbose_logs and logger.info(\n                        \"Updating `{}`{} metadata property to `{}`.\".format(\n                            key,\n                            \" and its counterpart\"\n                            if key in counterpart_prop.values()\n                            else \"\",\n                            value[key],\n                        )\n                    )\n                    # update source metadata if valid\n                    self.__sourcer_metadata[key] = value[key]\n                    # also update missing counterpart property (if available)\n                    counter_key = next(\n                        (k for k, v in counterpart_prop.items() if v == key), \"\"\n                    )\n                    if counter_key:\n                        self.__missing_prop[counter_key] = value[key]\n                else:\n                    # otherwise discard and log it\n                    logger.warning(\n                        \"Manually assigned `{}` metadata 
property value is of invalid type. Discarding!\"\n                    ).format(key)\n                # delete invalid key\n                del value[key]\n            # There is no concept of a tuple in the JSON format.\n            # Python's `json` module converts Python tuples to JSON lists\n            # because that's the closest thing in JSON to a tuple.\n            any(isinstance(value[x], tuple) for x in value) and logger.warning(\n                \"All TUPLE metadata properties will be converted to LIST datatype. Read docs for more details.\"\n            )\n            # update user-defined metadata\n            self.__user_metadata.update(value)\n        else:\n            # otherwise raise error\n            raise ValueError(\"Invalid datatype metadata assigned. Aborting!\")\n\n    def __launch_FFdecoderline(self, input_params, output_params):\n\n        \"\"\"\n        This Internal method executes FFmpeg pipeline arguments inside a `subprocess` pipe in a new process.\n\n        Parameters:\n            input_params (dict): Input FFmpeg parameters\n            output_params (dict): Output FFmpeg parameters\n        \"\"\"\n        # convert input parameters to list\n        input_parameters = dict2Args(input_params)\n\n        # convert output parameters to list\n        output_parameters = dict2Args(output_params)\n\n        # format command\n        cmd = (\n            [self.__ffmpeg]\n            + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n            + self.__ffmpeg_prefixes\n            + input_parameters\n            + (\n                [\"-f\", self.__sourcer_metadata[\"source_demuxer\"]]\n                if (\"source_demuxer\" in self.__sourcer_metadata.keys())\n                else []\n            )\n            + [\"-i\", self.__sourcer_metadata[\"source\"]]\n            + output_parameters\n            + [\"-f\", \"rawvideo\", \"-\"]\n        )\n        # compose the FFmpeg process\n        if self.__verbose_logs:\n         
   logger.debug(\"Executing FFmpeg command: `{}`\".format(\" \".join(cmd)))\n            # In debugging mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=None\n            )\n        else:\n            # In silent mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=sp.DEVNULL\n            )\n\n    def terminate(self):\n        \"\"\"\n        Safely terminates all processes.\n        \"\"\"\n\n        # signal we are closing\n        self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n        self.__terminate_stream = True\n        # check if no process was initiated at first place\n        if self.__process is None or not (self.__process.poll() is None):\n            logger.info(\"Pipeline already terminated.\")\n            return\n        # Attempt to close pipeline.\n        # close `stdin` output\n        self.__process.stdin and self.__process.stdin.close()\n        # close `stdout` output\n        self.__process.stdout and self.__process.stdout.close()\n        # terminate/kill process if still processing\n        if self.__process.poll() is None:\n            # demuxers prefer kill\n            self.__process.kill()\n        # wait if not exiting\n        self.__process.wait()\n        self.__process = None\n        logger.info(\"Pipeline terminated successfully.\")\n

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata","title":"metadata property writable","text":"

A property object that dumps metadata information as JSON string.

Returns: Metadata as JSON string.

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__enter__","title":"__enter__(self) special","text":"

Handles entry with the with statement. See PEP343 -- The 'with' statement'.

Returns: Output of formulate() method.

Source code in deffcode/ffdecoder.py
def __enter__(self):\n    \"\"\"\n    Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n    **Returns:** Output of `formulate()` method.\n    \"\"\"\n    return self.formulate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__exit__","title":"__exit__(self, exc_type, exc_val, exc_tb) special","text":"

Handles exit with the with statement. See PEP343 -- The 'with' statement'.

Source code in deffcode/ffdecoder.py
def __exit__(self, exc_type, exc_val, exc_tb):\n    \"\"\"\n    Handles exit with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n    \"\"\"\n    self.terminate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__init__","title":"__init__(self, source, source_demuxer=None, frame_format=None, custom_ffmpeg='', verbose=False, **ffparams) special","text":"

This constructor method initializes the object state and attributes of the FFdecoder Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None frame_format str

sets pixel format(-pix_fmt) of the decoded frames.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False ffparams dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/ffdecoder.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    frame_format=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **ffparams\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n\n    # enable verbose if specified\n    self.__verbose_logs = (\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # define whether initializing\n    self.__initializing = True\n\n    # define frame pixel-format for decoded frames\n    self.__frame_format = (\n        frame_format.lower().strip() if isinstance(frame_format, str) else None\n    )\n\n    # handles user-defined parameters\n    self.__extra_params = {}\n\n    # handle process to be frames written\n    self.__process = None\n\n    # handle exclusive metadata\n    self.__ff_pixfmt_metadata = None  # metadata\n    self.__raw_frame_num = None  # raw-frame number\n    self.__raw_frame_pixfmt = None  # raw-frame pixformat\n    self.__raw_frame_dtype = None  # raw-frame dtype\n    self.__raw_frame_depth = None  # raw-frame depth\n    self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n    # define supported mode of operation\n    self.__supported_opmodes = {\n        \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n        \"vo\": \"Video-Only\",\n        \"imgseq\": \"Image-Sequence\",\n        # 
\"ao\":\"Audio-Only\", # reserved for future\n    }\n    # operation mode variable\n    self.__opmode = None\n\n    # handle termination\n    self.__terminate_stream = False\n\n    # cleans and reformat user-defined parameters\n    self.__extra_params = {\n        str(k).strip(): str(v).strip()\n        if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n        else v\n        for k, v in ffparams.items()\n    }\n\n    # handle custom Sourcer API params\n    sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n    # reset improper values\n    sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n    # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__extra_params.pop(\"-ffprefixes\", [])\n    # check if not valid type\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n    else:\n        # also pass valid ffmpeg pre-headers to Sourcer API\n        sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n    # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n    # assets on Windows(if specified)\n    sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n        \"-ffmpeg_download_path\", \"\"\n    )\n\n    # handle video and audio stream indexes in case of multiple ones.\n    default_stream_indexes = self.__extra_params.pop(\n        \"-default_stream_indexes\", (0, 0)\n    )\n    # reset improper values\n    default_stream_indexes = (\n        (0, 0)\n        if not isinstance(default_stream_indexes, (list, tuple))\n        else default_stream_indexes\n    )\n\n    # pass 
FFmpeg filter to Sourcer API params for processing\n    if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n        key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n        sourcer_params[key] = self.__extra_params[key]\n\n    # define dict to store user-defined parameters\n    self.__user_metadata = {}\n    # extract and assign source metadata as dict\n    (self.__sourcer_metadata, self.__missing_prop) = (\n        Sourcer(\n            source=source,\n            source_demuxer=source_demuxer,\n            verbose=verbose,\n            custom_ffmpeg=custom_ffmpeg if isinstance(custom_ffmpeg, str) else \"\",\n            **sourcer_params\n        )\n        .probe_stream(default_stream_indexes=default_stream_indexes)\n        .retrieve_metadata(force_retrieve_missing=True)\n    )\n\n    # handle valid FFmpeg assets location\n    self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n    # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n    # patch for compatibility with OpenCV APIs.\n    self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n    if not (isinstance(self.__cv_patch, bool)):\n        self.__cv_patch = False\n        self.__verbose_logs and logger.critical(\n            \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n        )\n\n    # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n    self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n    if not (isinstance(self.__passthrough_mode, bool)):\n        self.__passthrough_mode = False\n\n    # handle mode of operation\n    if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n        # image-sequence mode\n        self.__opmode = \"imgseq\"\n    elif (\n        self.__sourcer_metadata[\n            \"source_has_video\"\n        ]  # audio is only for pass-through, not really for audio decoding yet.\n        and 
self.__sourcer_metadata[\"source_has_audio\"]\n        and self.__passthrough_mode  # [TODO]\n    ):\n        self.__opmode = \"av\"\n    # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n    #    self.__opmode = \"ao\"\n    elif self.__sourcer_metadata[\"source_has_video\"]:\n        # video-only mode\n        self.__opmode = \"vo\"\n    else:\n        # raise if unknown mode\n        raise ValueError(\n            \"Unable to find any usable video stream in the given source!\"\n        )\n    # store as metadata\n    self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n        self.__opmode\n    ]\n\n    # handle user-defined output framerate\n    __framerate = self.__extra_params.pop(\"-framerate\", None)\n    if (\n        isinstance(__framerate, str)\n        and __framerate\n        == \"null\"  # special mode to discard `-framerate/-r` parameter\n    ):\n        self.__inputframerate = __framerate\n    elif isinstance(__framerate, (float, int)):\n        self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n    else:\n        # warn if wrong type\n        not (__framerate is None) and logger.warning(\n            \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                type(__framerate).__name__\n            )\n        )\n        # reset to default\n        self.__inputframerate = 0.0\n\n    # handle user defined decoded frame resolution\n    self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n    if (\n        isinstance(self.__custom_resolution, str)\n        and self.__custom_resolution\n        == \"null\"  # special mode to discard `-size/-s` parameter\n    ) or (\n        isinstance(self.__custom_resolution, (list, tuple))\n        and len(self.__custom_resolution)\n        == 2  # valid resolution(must be a tuple or list)\n    ):\n        # log it\n        self.__verbose_logs and not isinstance(\n            
self.__custom_resolution, str\n        ) and logger.debug(\n            \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n        )\n    else:\n        # log it\n        not (self.__custom_resolution is None) and logger.warning(\n            \"Discarding invalid `-custom_resolution` value: `{}`!\".format(\n                self.__custom_resolution\n            )\n        )\n        # reset improper values\n        self.__custom_resolution = None\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.formulate","title":"formulate(self)","text":"

This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg subprocess pipe.

Returns: A reference to the FFdecoder class object.

Source code in deffcode/ffdecoder.py
def formulate(self):\n\n    \"\"\"\n    This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n    **Returns:** A reference to the FFdecoder class object.\n    \"\"\"\n    # assign values to class variables on first run\n    if self.__initializing:\n        # prepare parameter dict\n        input_params = OrderedDict()\n        output_params = OrderedDict()\n\n        # dynamically pre-assign a default video-decoder (if not assigned by user).\n        supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n        default_vdecodec = (\n            self.__sourcer_metadata[\"source_video_decoder\"]\n            if self.__sourcer_metadata[\"source_video_decoder\"]\n            in supported_vdecodecs\n            else \"unknown\"\n        )\n        if \"-c:v\" in self.__extra_params:\n            self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                \"-c:v\", default_vdecodec\n            )\n        # handle image sequence separately\n        if self.__opmode == \"imgseq\":\n            # -vcodec is discarded by default\n            # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n            self.__extra_params.pop(\"-vcodec\", None)\n        elif (\n            \"-vcodec\" in self.__extra_params\n            and self.__extra_params[\"-vcodec\"] is None\n        ):\n            # special case when -vcodec is not needed intentionally\n            self.__extra_params.pop(\"-vcodec\", None)\n        else:\n            # assign video decoder selected here.\n            if not \"-vcodec\" in self.__extra_params:\n                input_params[\"-vcodec\"] = default_vdecodec\n            else:\n                input_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-vcodec\", default_vdecodec\n                )\n            if (\n                default_vdecodec != \"unknown\"\n                and not input_params[\"-vcodec\"] in 
supported_vdecodecs\n            ):\n                # reset to default if not supported\n                logger.warning(\n                    \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                        input_params[\"-vcodec\"], default_vdecodec\n                    )\n                )\n                input_params[\"-vcodec\"] = default_vdecodec\n            # raise error if not valid decoder found\n            if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                raise RuntimeError(\n                    \"Provided FFmpeg does not support any known usable video-decoders.\"\n                    \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                )\n\n        # handle user-defined number of frames.\n        if \"-vframes\" in self.__extra_params:\n            self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                \"-vframes\", None\n            )\n        if \"-frames:v\" in self.__extra_params:\n            value = self.__extra_params.pop(\"-frames:v\", None)\n            if not (value is None) and value > 0:\n                output_params[\"-frames:v\"] = value\n\n        # dynamically calculate default raw-frames pixel format(if not assigned by user).\n        # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n        if \"-pix_fmt\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n            )\n            self.__extra_params.pop(\"-pix_fmt\", None)\n        # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n        self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n        supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n        # calculate default pixel-format\n        # Check special case  - 
`frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n        self.__frame_format == \"null\" and logger.critical(\n            \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n        )\n        # choose between rgb24(if available) or source pixel-format\n        # otherwise, only source pixel-format for special case\n        default_pixfmt = (\n            \"rgb24\"\n            if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n            else self.__sourcer_metadata[\"source_video_pixfmt\"]\n        )\n        # assign output raw-frames pixel format\n        rawframe_pixfmt = None\n        if (\n            not (self.__frame_format is None)\n            and self.__frame_format in supported_pixfmts\n        ):\n            # check if valid and supported `frame_format` parameter assigned\n            rawframe_pixfmt = self.__frame_format.strip()\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                    rawframe_pixfmt\n                )\n            )\n        elif (\n            \"output_frames_pixfmt\"\n            in self.__sourcer_metadata  # means `format` filter is defined\n            and self.__sourcer_metadata[\"output_frames_pixfmt\"] in supported_pixfmts\n        ):\n            # assign if valid and supported\n            rawframe_pixfmt = self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ].strip()\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n            )\n        else:\n            # reset to default if not supported\n            rawframe_pixfmt = default_pixfmt\n            # log it accordingly\n            if self.__frame_format is None:\n                logger.info(\n                    \"Using default `{}` pixel-format for this 
pipeline.\".format(\n                        default_pixfmt\n                    )\n                )\n            else:\n                logger.warning(\n                    \"{} Switching to default `{}` pixel-format!\".format(\n                        \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                            self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                            if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                            else self.__frame_format\n                        )\n                        if self.__frame_format != \"null\"\n                        else \"No usable pixel-format defined.\",\n                        default_pixfmt,\n                    )\n                )\n\n        # dynamically calculate raw-frame datatype based on pixel-format selected\n        (self.__raw_frame_depth, rawframesbpp) = [\n            (int(x[1]), int(x[2]))\n            for x in self.__ff_pixfmt_metadata\n            if x[0] == rawframe_pixfmt\n        ][0]\n        raw_bit_per_component = (\n            rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n        )\n        if 4 <= raw_bit_per_component <= 8:\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n        elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n            (\"le\", \"be\")\n        ):\n            if rawframe_pixfmt.endswith(\"le\"):\n                self.__raw_frame_dtype = np.dtype(\"<u2\")\n            else:\n                self.__raw_frame_dtype = np.dtype(\">u2\")\n        else:\n            # reset to both pixel-format and datatype to default if not supported\n            not (self.__frame_format is None) and logger.warning(\n                \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. 
Switching to default `rgb24` pixel-format!\".format(\n                    rawframe_pixfmt\n                )\n            )\n            rawframe_pixfmt = \"rgb24\"\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n\n        # Check if not special case\n        if self.__frame_format != \"null\":\n            # assign to FFmpeg pipeline otherwise\n            output_params[\"-pix_fmt\"] = rawframe_pixfmt\n        # assign to global parameter further usage\n        self.__raw_frame_pixfmt = rawframe_pixfmt\n        # also override as metadata(if available)\n        if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n            self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ] = self.__raw_frame_pixfmt\n\n        # handle raw-frame resolution\n        # notify FFmpeg `-s` parameter cannot be assigned directly\n        if \"-s\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n            )\n            self.__extra_params.pop(\"-s\", None)\n        # assign output rawframe resolution\n        if not (self.__custom_resolution is None) and not isinstance(\n            self.__custom_resolution, str\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            self.__raw_frame_resolution = self.__custom_resolution\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                    self.__raw_frame_resolution\n                )\n            )\n        elif (\n            \"output_frames_resolution\"\n            in self.__sourcer_metadata  # means `scale` filter is defined\n            and self.__sourcer_metadata[\"output_frames_resolution\"]\n            and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on output.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"output_frames_resolution\"\n            ]\n        elif (\n            self.__sourcer_metadata[\"source_video_resolution\"]\n            and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on source.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"source_video_resolution\"\n            ]\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n        # special mode to discard `-size/-s` FFmpeg parameter completely\n        if isinstance(self.__custom_resolution, str):\n            logger.critical(\n                \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n            )\n        else:\n            # add to pipeline\n            dimensions = \"{}x{}\".format(\n                self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n            )\n            output_params[\"-s\"] = str(dimensions)\n        # log if filters or default source is used\n        self.__verbose_logs and (\n            self.__custom_resolution is None\n            or isinstance(self.__custom_resolution, str)\n        ) and logger.info(\n            \"{} for this pipeline for defining output resolution.\".format(\n                \"FFmpeg filter values will be used\"\n                if \"output_frames_resolution\" in self.__sourcer_metadata\n                else \"Default source resolution will be used\"\n            )\n        )\n\n        # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n        if (\n            not isinstance(self.__inputframerate, str)\n            and self.__inputframerate > 0.0\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            output_params[\"-framerate\"] = str(self.__inputframerate)\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                    str(self.__inputframerate)\n                )\n            )\n        elif (\n            \"output_framerate\"\n            in self.__sourcer_metadata  # means `fps` filter is defined\n            and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n        
):\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on output\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"output_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n            )\n        elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on source\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"source_video_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"Default source framerate will be used for this pipeline for defining output framerate.\"\n            )\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n\n        # add rest to output parameters\n        output_params.update(self.__extra_params)\n\n        # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n        # TODO Added support for `-re -stream_loop` and `-loop`\n        if \"-frames:v\" in input_params:\n            self.__raw_frame_num = input_params[\"-frames:v\"]\n        elif (\n            not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n            and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n        ):\n            self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n        else:\n            self.__raw_frame_num = None\n            # log that number of frames are unknown\n            self.__verbose_logs and logger.info(\n                \"Number of frames in given source are unknown. Live/Network/Looping stream detected!\"\n            )\n\n        # log Mode of Operation\n        self.__verbose_logs and logger.critical(\n            \"Activating {} Mode of Operation.\".format(\n                self.__supported_opmodes[self.__opmode]\n            )\n        )\n\n        # compose the Pipeline using formulated FFmpeg parameters\n        self.__launch_FFdecoderline(input_params, output_params)\n\n        # inform the initialization is completed\n        self.__initializing = False\n    else:\n        # warn if pipeline is recreated\n        logger.error(\"This pipeline is already created and running!\")\n    return self\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.generateFrame","title":"generateFrame(self)","text":"

This method returns a Generator function (also an Iterator using next()) of video frames, grabbed continuously from the buffer.

Source code in deffcode/ffdecoder.py
def generateFrame(self):\n    \"\"\"\n    This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n    _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n    \"\"\"\n    if self.__raw_frame_num is None or not self.__raw_frame_num:\n        while not self.__terminate_stream:  # infinite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n    else:\n        for _ in range(self.__raw_frame_num):  # finite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.terminate","title":"terminate(self)","text":"

Safely terminates all processes.

Source code in deffcode/ffdecoder.py
def terminate(self):\n    \"\"\"\n    Safely terminates all processes.\n    \"\"\"\n\n    # signal we are closing\n    self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n    self.__terminate_stream = True\n    # check if no process was initiated at first place\n    if self.__process is None or not (self.__process.poll() is None):\n        logger.info(\"Pipeline already terminated.\")\n        return\n    # Attempt to close pipeline.\n    # close `stdin` output\n    self.__process.stdin and self.__process.stdin.close()\n    # close `stdout` output\n    self.__process.stdout and self.__process.stdout.close()\n    # terminate/kill process if still processing\n    if self.__process.poll() is None:\n        # demuxers prefer kill\n        self.__process.kill()\n    # wait if not exiting\n    self.__process.wait()\n    self.__process = None\n    logger.info(\"Pipeline terminated successfully.\")\n
"},{"location":"reference/ffdecoder/params/","title":"FFdecoder API Parameters","text":""},{"location":"reference/ffdecoder/params/#source","title":"source","text":"

This parameter defines the input source (-i) for decoding real-time frames.

FFdecoder API will throw Assertion if source provided is invalid or missing.

FFdecoder API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder('/home/foo.mp4').formulate()\n

    Related usage recipes can be found here \u27b6

  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
    # initialize and formulate the decoder\ndecoder = FFdecoder('img%03d.png').formulate()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img*.png', verbose=True, **sourcer_params).formulate()\n
    # define `-loop 1` for looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img.jpg', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Network Address: Valid (http(s), rtp, rtsp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nffparams = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value (use Sourcer API's enumerate_devices to list them) either as integer or string of integer type to its source parameter. For example, for capturing \"0\" index device on Windows, we can do as follows in FFdecoder API:

    Requirement for Index based Camera Device Capturing in FFdecoder API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".
    Important Facts related to Camera Device Indexing
    • Camera Device indexes are 0-indexed. So the first device is at 0, second is at 1, and so on. So if there are n devices, the last device is at n-1.
    • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
    • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
      • For example, If there are three devices:
        {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
      • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

        Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

    Out of Index Camera Device index values will raise ValueError in FFdecoder API

    # initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipes can be found here \u27b6

  • Video Capture Device Name/Path: Valid video capture device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for capturing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in FFdecoder API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

      # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated with the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

      You can use sudo apt install v4l-utils APT command to install v4l2-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

      # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

      You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

      # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

      # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipe can be found here \u27b6

  • Screen Capturing/Recording: Valid screen capture device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. You can also specify additional specifications (such as limiting capture area to a region, setting capturing coordinates, whether to capture mouse pointer and clicks etc.). For example, for capturing \"0:\" indexed device with avfoundation source demuxer on MacOS along with mouse pointer and clicks, we can do as follows in FFdecoder API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

    For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

    For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for stream capturing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for stream capturing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

      # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

      # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Related usage recipe can be found here \u27b6

  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and decoding Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in FFdecoder API:

    # initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).formulate()\n

    Related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph, and more.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use ffmpeg -demuxers terminal command to lists all FFmpeg supported demuxers.

Specifying source_demuxer for Index based Camera Device Capturing in FFdecoder API

For enabling Index based Camera Device Capturing in FFdecoder API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\").formulate()\n
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", source_demuxer=\"auto\", frame_format=\"bgr24\").formulate()\n

Related usage recipes can be found here \u27b6

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize and formulate the decoder with `dshow` demuxer\ndecoder = FFdecoder(\"foo.mp4\", source_demuxer=\"dshow\").formulate()\n

"},{"location":"reference/ffdecoder/params/#frame_format","title":"frame_format","text":"

This parameter select the pixel format for output video frames (such as gray for grayscale output).

Any invalid or unsupported value to frame_format parameter will be discarded!

Any improper frame_format parameter value (i.e. either null(special-case), undefined, or invalid type) , then -pix_fmt FFmpeg parameter value in Decoding pipeline uses output_frames_pixfmt metadata property extracted from Output Stream. Thereby, in case if no valid output_frames_pixfmt metadata property is found, then API finally defaults to Default pixel-format1 (calculated variably).

Use frame_format=\"null\" to manually discard -pix_fmt FFmpeg parameter entirely from Decoding pipeline.

This feature allows users to manually skip -pix_fmt FFmpeg parameter in Decoding pipeline, essentially for using only format ffmpeg filter values instead, or even better let FFmpeg itself choose the best available output frame pixel-format for the given source.

Data-Type: String

Default Value: Its default value is Default pixel-format1 (calculated variably).

Usage:

# initialize and formulate the decoder for grayscale frames\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\").formulate()\n

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Various Pixel formats related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assigns the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then FFdecoder API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path (via. -custom_sourcer_params) exclusive parameter in FFdecoder API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in FFdecoder API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n\n# initialize and formulate the decoder\nFFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nFFdecoder(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").formulate()\n

"},{"location":"reference/ffdecoder/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize and formulate decoder with verbose logs\nFFdecoder(\"foo.mp4\", verbose=True).formulate()\n

"},{"location":"reference/ffdecoder/params/#ffparams","title":"ffparams","text":"

This dictionary parameter accepts all supported parameters formatted as its attributes:

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/ffdecoder/params/#supported-parameters","title":"Supported Parameters","text":""},{"location":"reference/ffdecoder/params/#a-ffmpeg-parameters","title":"A. FFmpeg Parameters","text":"

Almost any FFmpeg parameter (supported by installed FFmpeg) can be passed as dictionary attributes in ffparams parameter.

Let's assume we want to seek to 00:00:01.45 (or 1450msec) in time and decode one single frame from given source (say foo.mp4) in FFdecoder API, then we can assign required FFmpeg parameters as dictionary attributes as follows:

Kindly read FFmpeg Docs carefully before passing any additional values to ffparams parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate decoder with suitable source and FFmpeg params\ndecoder = FFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

"},{"location":"reference/ffdecoder/params/#b-exclusive-parameters","title":"B. Exclusive Parameters","text":"

In addition to FFmpeg parameters, FFdecoder API also supports few Exclusive Parameters to allow users to flexibly change its internal pipeline, properties, and handle some special FFmpeg parameters (such as repeated map) that cannot be assigned via. python dictionary.

These parameters are discussed below:

  • -vcodec (str) : This attribute works similar to -vcodec FFmpeg parameter for specifying supported decoders that are compiled with FFmpeg in use. If not specified, its value is derived from source video metadata. Its usage is as follows:

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

    Use {\"-vcodec\":None} in ffparams to discard -vcodec FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -vcodec FFmpeg parameter in Decoding pipeline, for letting FFmpeg itself choose the best available video decoder for the given source.

    # define suitable parameter\nffparams = {\"-vcodec\": \"h264\"} # set decoder to `h264`\n

  • -framerate (float/int) : This attribute works similar to -framerate FFmpeg parameter for generating video-frames at specified framerate. If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -framerate attribute will be discarded!

    The output_frames_framerate metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -framerate parameter value (i.e. either null(special-case), undefined, or invalid type) , then -framerate/-r FFmpeg parameter value in Decoding pipeline uses output_frames_framerate metadata property extracted from Output Stream. Thereby, in case if no valid output_frames_framerate metadata property is found, then API finally defaults to source_video_framerate metadata property extracted from Input Source Stream.

    In case neither output_frames_framerate nor source_video_framerate valid metadata properties are found, then RuntimeError is raised.

    Use {\"-framerate\":\"null\"} in ffparams to discard -framerate/-r FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -framerate/-r FFmpeg parameter in Decoding pipeline, essentially for using only fps filter values, or even better, let FFmpeg itself choose the best available output framerate for the given source.

    # define suitable parameter\nffparams = {\"-framerate\": 60.0} # set input video source framerate to 60fps\n

  • -custom_resolution (tuple/list) : This attribute sets the custom resolution/size of the output frames. Its value can either be a tuple ((width,height)) or a list ([width, height]). If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -custom_resolution attribute will be discarded!

    The output_frames_resolution metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -custom_resolution parameter value (i.e. either null(special-case), undefined, or invalid type) , then -s/-size FFmpeg parameter value in Decoding pipeline uses output_frames_resolution metadata property extracted from Output Stream. Thereby, in case if no valid output_frames_resolution metadata property is found, then API finally defaults to source_video_resolution metadata property extracted from Input Source Stream.

    In case neither output_frames_resolution nor source_video_resolution valid metadata properties are found, then RuntimeError is raised.

    Use {\"-custom_resolution\":\"null\"} in ffparams to discard -size/-s FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -size/-s FFmpeg parameter in Decoding pipeline, essentially for using only scale filter values, or even better, let FFmpeg itself choose the best available output frames resolution for the given source.

    # define suitable parameter\nffparams = {\"-output_dimensions\": (1280,720)} # to produce a 1280x720 resolution/scale output video\n

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before the input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Difference from -clones parameter

    The -clones and -ffprefixes parameters, even though they fundamentally work the same, are meant to serve at different positions in the FFmpeg command. Normally, the FFdecoder API pipeline looks something like the following with these parameters in place:

    ffmpeg {{-ffprefixes FFmpeg params}} -vcodec h264 -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 {{-clones FFmpeg params}} -f rawvideo -\n

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -clones (list): This attribute sets the special FFmpeg parameters that are repeated more than once or occur in a specific order (that cannot be altered) in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-clones\": ['-map', '0:v:0', '-map', '1:a?']} \n\n# NOTE: Will be format as `ffmpeg -vcodec -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 -map 0:v:0 -map 1:a -f rawvideo -`\n

  • -custom_sourcer_params (dict) : This attribute assigns all Exclusive Parameter meant for Sourcer API's sourcer_params dictionary parameter directly through FFdecoder API. Its usage is as follows:

    # define suitable parameter meant for `sourcer_params`\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n

  • -default_stream_indexes (list/tuple) : This attribute assigns its value directly to the default_stream_indexes parameter in Sourcer API's probe_stream() method for selecting a specific video and audio stream index in case of multiple ones. Its value can be of format: (int,int) or [int,int] as follows:

    # define suitable parameter meant for `probe_stream()` method\nffparams = {\"-default_stream_indexes\": (0,1)} # (\"0th video stream\", \"1st audio stream\")\n

  • -enforce_cv_patch (bool) : This attribute can be enabled(True) for patching YUV pixel-format (such as YUV420p, yuv444p, NV12, NV21 etc.) frames for seamless compatibility with OpenCV APIs such as imshow(), write() etc. It can be used as follows:

    As of now, YUV pixel-formats starting with YUV and NV are only supported.

    # define suitable parameter\nffparams = {\"-enforce_cv_patch\": True} # enables OpenCV patch for YUV frames\n

    YUV pixel-formats usage recipe can be found here \u27b6

  • -passthrough_audio (bool/list) : (Yet to be supported)

  1. Default pixel-format is calculated variably in FFdecoder API:

    • If frame_format != \"null\":
      • If frame_format parameter is valid and supported: Default pixel-format is frame_format parameter value.
      • If frame_format parameter is NOT valid or supported:
        • If output_frame_pixfmt metadata is available: Default pixel-format is output_frame_pixfmt metadata value.
        • If output_frame_pixfmt metadata is NOT available: Default pixel-format is rgb24 if supported otherwise source_video_pixfmt metadata value.
    • If frame_format == \"null\": Default pixel-format is source_video_pixfmt metadata value

    \u21a9\u21a9

"},{"location":"reference/sourcer/","title":"Sourcer API","text":"

Sourcer API acts as Source Probing Utility that, unlike other FFmpeg Wrappers which mostly use the ffprobe module, attempts to open the given Input Source directly with FFmpeg inside a subprocess pipe, and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each media stream contained in it.

Sourcer API primarily acts as a backend for FFdecoder API for gathering, processing, and validating all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in formulating its default FFmpeg pipeline parameters for real-time video-frames generation.

Sourcer API is designed as a standalone Metadata Extraction API for easily parsing information from multimedia streams available in the given Input Source and returns it in either Human-readable (JSON string) or Machine-readable (Dictionary object) type with its retrieve_metadata() method.

All metadata attributes available with Sourcer API(On Windows) are discussed here \u27b6.

Furthermore, Sourcer's sourcer_params dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

Sourcer API parameters are explained here \u27b6

Source code in deffcode/sourcer.py
class Sourcer:\n    \"\"\"\n    > Sourcer API acts as **Source Probing Utility** that unlike other FFmpeg Wrappers which mostly uses [`ffprobe`](https://ffmpeg.org/ffprobe.html) module,\n    attempts to open the given Input Source directly with [**FFmpeg**](https://ffmpeg.org/) inside a [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe,\n    and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each\n    media stream contained in it.\n\n    Sourcer API primarily acts as a **backend for [FFdecoder API](../../reference/ffdecoder)** for gathering, processing, and validating\n    all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in\n    formulating its default FFmpeg pipeline parameters for real-time video-frames generation.\n\n    Sourcer API is design as a standalone **Metadata Extraction API** for easily parsing information from multimedia streams available in the\n    given Input Source and returns it in either Human-readable _(JSON string)_ or Machine-readable _(Dictionary object)_ type with its\n    [`retrieve_metadata()`](#deffcode.sourcer.Sourcer.retrieve_metadata) method.\n\n    !!! info \"All metadata attributes available with Sourcer API(On :fontawesome-brands-windows: Windows) are discussed [here \u27b6](../../recipes/basic/#display-source-video-metadata).\"\n\n    Furthermore, Sourcer's [`sourcer_params`](params/#sourcer_params) dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! 
info \"Sourcer API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **sourcer_params,\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the Sourcer Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n        # checks if machine in-use is running windows os or not\n        self.__machine_OS = platform.system()\n\n        # define internal parameters\n        self.__verbose_logs = (  # enable verbose if specified\n            verbose if (verbose and isinstance(verbose, bool)) else False\n        )\n\n        # handle metadata received\n        self.__ffsp_output = None\n\n        # sanitize sourcer_params\n        self.__sourcer_params = {\n            str(k).strip(): str(v).strip()\n            if not isinstance(v, (dict, list, int, float, tuple))\n            else v\n            for k, v in sourcer_params.items()\n        }\n\n        # handle whether to force validate source\n        self.__forcevalidatesource = self.__sourcer_params.pop(\n            \"-force_validate_source\", False\n        )\n        if not isinstance(self.__forcevalidatesource, bool):\n            # reset improper values\n            self.__forcevalidatesource = False\n\n        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        self.__ffmpeg_prefixes = 
self.__sourcer_params.pop(\"-ffprefixes\", [])\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n\n        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n        __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n        if not isinstance(__ffmpeg_download_path, str):\n            # reset improper values\n            __ffmpeg_download_path = \"\"\n\n        # validate the FFmpeg assets and return location (also downloads static assets on windows)\n        self.__ffmpeg = get_valid_ffmpeg_path(\n            str(custom_ffmpeg),\n            True if self.__machine_OS == \"Windows\" else False,\n            ffmpeg_download_path=__ffmpeg_download_path,\n            verbose=self.__verbose_logs,\n        )\n\n        # check if valid FFmpeg path returned\n        if self.__ffmpeg:\n            self.__verbose_logs and logger.debug(\n                \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n            )\n        else:\n            # else raise error\n            raise RuntimeError(\n                \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. 
Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n            )\n\n        # sanitize externally accessible parameters and assign them\n        # handles source demuxer\n        if source is None:\n            # first check if source value is empty\n            # raise error if true\n            raise ValueError(\"Input `source` parameter is empty!\")\n        elif isinstance(source_demuxer, str):\n            # assign if valid demuxer value\n            self.__source_demuxer = source_demuxer.strip().lower()\n            # assign if valid demuxer value\n            assert self.__source_demuxer != \"auto\" or validate_device_index(\n                source\n            ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!\".format(\n                source\n            )\n        else:\n            # otherwise find valid default source demuxer value\n            # enforce \"auto\" if valid index device\n            self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n            # log if not valid index device and invalid type\n            self.__verbose_logs and not self.__source_demuxer in [\n                \"auto\",\n                None,\n            ] and logger.warning(\n                \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                    type(source_demuxer).__name__\n                )\n            )\n            # log if not valid index device and invalid type\n            self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n                \"Given source `{}` is a valid device index. 
Enforcing 'auto' demuxer.\".format(\n                    source\n                )\n            )\n\n        # handles source stream\n        self.__source = source\n\n        # creates shallow copy for further usage #TODO\n        self.__source_org = copy.copy(self.__source)\n        self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n        # handles all extracted devices names/paths list\n        # when source_demuxer = \"auto\"\n        self.__extracted_devices_list = []\n\n        # various source stream params\n        self.__default_video_resolution = \"\"  # handles stream resolution\n        self.__default_video_orientation = \"\"  # handles stream's video orientation\n        self.__default_video_framerate = \"\"  # handles stream framerate\n        self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n        self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n        self.__default_video_decoder = \"\"  # handles stream's video decoder\n        self.__default_source_duration = \"\"  # handles stream's video duration\n        self.__approx_video_nframes = \"\"  # handles approx stream frame number\n        self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n        self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n        # handle various stream flags\n        self.__contains_video = False  # contains video\n        self.__contains_audio = False  # contains audio\n        self.__contains_images = False  # contains image-sequence\n\n        # handles output parameters through filters\n        self.__metadata_output = None  # handles output stream metadata\n        self.__output_frames_resolution = \"\"  # handles output stream resolution\n        self.__output_framerate = \"\"  # handles output stream framerate\n        self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n        self.__output_orientation = \"\"  # handles output frame 
orientation\n\n        # check whether metadata probed or not?\n        self.__metadata_probed = False\n\n    def probe_stream(self, default_stream_indexes=(0, 0)):\n        \"\"\"\n        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n        Parameters:\n            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n        **Returns:** Reference to the instance object.\n        \"\"\"\n        assert (\n            isinstance(default_stream_indexes, (list, tuple))\n            and len(default_stream_indexes) == 2\n            and all(isinstance(x, int) for x in default_stream_indexes)\n        ), \"Invalid default_stream_indexes value!\"\n        # validate source and extract metadata\n        self.__ffsp_output = self.__validate_source(\n            self.__source,\n            source_demuxer=self.__source_demuxer,\n            forced_validate=(\n                self.__forcevalidatesource if self.__source_demuxer is None else True\n            ),\n        )\n        # parse resolution and framerate\n        video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0]\n        )\n        if video_rfparams:\n            self.__default_video_resolution = video_rfparams[\"resolution\"]\n            self.__default_video_framerate = video_rfparams[\"framerate\"]\n            self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n        # parse output parameters through filters (if available)\n        if not (self.__metadata_output is None):\n            # parse output resolution and framerate\n            out_video_rfparams = self.__extract_resolution_framerate(\n                default_stream=default_stream_indexes[0], 
extract_output=True\n            )\n            if out_video_rfparams:\n                self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n                self.__output_framerate = out_video_rfparams[\"framerate\"]\n                self.__output_orientation = out_video_rfparams[\"orientation\"]\n            # parse output pixel-format\n            self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n                default_stream=default_stream_indexes[0], extract_output=True\n            )\n\n        # parse pixel-format\n        self.__default_video_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0]\n        )\n\n        # parse video decoder\n        self.__default_video_decoder = self.__extract_video_decoder(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse rest of metadata\n        if not self.__contains_images:\n            # parse video bitrate\n            self.__default_video_bitrate = self.__extract_video_bitrate(\n                default_stream=default_stream_indexes[0]\n            )\n            # parse audio bitrate and samplerate\n            audio_params = self.__extract_audio_bitrate_nd_samplerate(\n                default_stream=default_stream_indexes[1]\n            )\n            if audio_params:\n                self.__default_audio_bitrate = audio_params[\"bitrate\"]\n                self.__default_audio_samplerate = audio_params[\"samplerate\"]\n            # parse video duration\n            self.__default_source_duration = self.__extract_duration()\n            # calculate all flags\n            if (\n                self.__default_video_bitrate\n                or (self.__default_video_framerate and self.__default_video_resolution)\n            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n                self.__contains_video = True\n                self.__contains_audio = True\n            elif 
self.__default_video_bitrate or (\n                self.__default_video_framerate and self.__default_video_resolution\n            ):\n                self.__contains_video = True\n            elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n                self.__contains_audio = True\n            else:\n                raise ValueError(\n                    \"Invalid source with no decodable audio or video stream provided. Aborting!\"\n                )\n        # calculate approximate number of video frame\n        if self.__default_video_framerate and self.__default_source_duration:\n            self.__approx_video_nframes = np.rint(\n                self.__default_video_framerate * self.__default_source_duration\n            ).astype(int, casting=\"unsafe\")\n\n        # signal metadata has been probed\n        self.__metadata_probed = True\n\n        # return reference to the instance object.\n        return self\n\n    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n        \"\"\"\n        This method returns Parsed/Probed Metadata of the given source.\n\n        Parameters:\n            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! 
Check if you called `probe_stream()` method.\"\n        # log it\n        self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n        # create metadata dictionary from information populated in private class variables\n        metadata = {\n            \"ffmpeg_binary_path\": self.__ffmpeg,\n            \"source\": self.__source,\n        }\n        metadata_missing = {}\n        # Only either `source_demuxer` or `source_extension` attribute can be\n        # present in metadata.\n        if self.__source_demuxer is None:\n            metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n        else:\n            metadata.update({\"source_demuxer\": self.__source_demuxer})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n        # add source video metadata properties\n        metadata.update(\n            {\n                \"source_video_resolution\": self.__default_video_resolution,\n                \"source_video_pixfmt\": self.__default_video_pixfmt,\n                \"source_video_framerate\": self.__default_video_framerate,\n                \"source_video_orientation\": self.__default_video_orientation,\n                \"source_video_decoder\": self.__default_video_decoder,\n                \"source_duration_sec\": self.__default_source_duration,\n                \"approx_video_nframes\": (\n                    int(self.__approx_video_nframes)\n                    if self.__approx_video_nframes\n                    and not any(\n                        \"loop\" in x for x in self.__ffmpeg_prefixes\n                    )  # check if any loops in prefix\n                    and not any(\n                        \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                    )  # check if any loops in filters\n       
             else None\n                ),\n                \"source_video_bitrate\": self.__default_video_bitrate,\n                \"source_audio_bitrate\": self.__default_audio_bitrate,\n                \"source_audio_samplerate\": self.__default_audio_samplerate,\n                \"source_has_video\": self.__contains_video,\n                \"source_has_audio\": self.__contains_audio,\n                \"source_has_image_sequence\": self.__contains_images,\n            }\n        )\n        # add output metadata properties (if available)\n        if not (self.__metadata_output is None):\n            metadata.update(\n                {\n                    \"output_frames_resolution\": self.__output_frames_resolution,\n                    \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                    \"output_framerate\": self.__output_framerate,\n                    \"output_orientation\": self.__output_orientation,\n                }\n            )\n        else:\n            # since output stream metadata properties are only available when additional\n            # FFmpeg parameters(such as filters) are defined manually, thereby missing\n            # output stream properties are handled by assigning them counterpart source\n            # stream metadata property values\n            force_retrieve_missing and metadata_missing.update(\n                {\n                    \"output_frames_resolution\": self.__default_video_resolution,\n                    \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                    \"output_framerate\": self.__default_video_framerate,\n                    \"output_orientation\": self.__default_video_orientation,\n                }\n            )\n        # log it\n        self.__verbose_logs and logger.debug(\n            \"Metadata Extraction completed successfully!\"\n        )\n        # parse as JSON string(`json.dumps`), if defined\n        metadata = json.dumps(metadata, indent=2) if 
pretty_json else metadata\n        metadata_missing = (\n            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n        )\n        # return `metadata` or `(metadata, metadata_missing)`\n        return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n\n    @property\n    def enumerate_devices(self):\n        \"\"\"\n        A property object that enumerate all probed Camera Devices connected to your system names\n        along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.\n\n        **Returns:** Probed Camera Devices as python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n\n        # log if specified\n        self.__verbose_logs and logger.debug(\"Enumerating all probed Camera Devices.\")\n\n        # return probed Camera Devices as python dictionary.\n        return {\n            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)\n        }\n\n    def __validate_source(self, source, source_demuxer=None, forced_validate=False):\n        \"\"\"\n        This Internal method validates source and extracts its metadata.\n\n        Parameters:\n            source_demuxer(str): specifies the demuxer(`-f`) for the input source.\n            forced_validate (bool): whether to skip validation tests or not?\n\n        **Returns:** `True` if passed tests else `False`.\n        \"\"\"\n        # validate source demuxer(if defined)\n        if not (source_demuxer is None):\n            # check if \"auto\" demuxer is specified\n            if source_demuxer == \"auto\":\n                # integerise source to get index\n                index = int(source)\n                # extract devices list and actual demuxer value\n                (\n                    
self.__extracted_devices_list,\n                    source_demuxer,\n                ) = extract_device_n_demuxer(\n                    self.__ffmpeg,\n                    machine_OS=self.__machine_OS,\n                    verbose=self.__verbose_logs,\n                )\n                # valid indexes range\n                valid_indexes = [\n                    x\n                    for x in range(\n                        -len(self.__extracted_devices_list),\n                        len(self.__extracted_devices_list),\n                    )\n                ]\n                # check index is within valid range\n                if self.__extracted_devices_list and index in valid_indexes:\n                    # overwrite actual source device name/path/index\n                    if self.__machine_OS == \"Windows\":\n                        # Windows OS requires \"video=\" suffix\n                        self.__source = source = \"video={}\".format(\n                            self.__extracted_devices_list[index]\n                        )\n                    elif self.__machine_OS == \"Darwin\":\n                        # Darwin OS requires only device indexes\n                        self.__source = source = (\n                            str(index)\n                            if index >= 0\n                            else str(len(self.__extracted_devices_list) + index)\n                        )\n                    else:\n                        # Linux OS require /dev/video format\n                        self.__source = source = next(\n                            iter(self.__extracted_devices_list[index].keys())\n                        )\n                    # overwrite source_demuxer global variable\n                    self.__source_demuxer = source_demuxer\n                    self.__verbose_logs and logger.debug(\n                        \"Successfully configured device `{}` at index `{}` with demuxer `{}`.\".format(\n                            
self.__extracted_devices_list[index]\n                            if self.__machine_OS != \"Linux\"\n                            else next(\n                                iter(self.__extracted_devices_list[index].values())\n                            )[0],\n                            index\n                            if index >= 0\n                            else len(self.__extracted_devices_list) + index,\n                            self.__source_demuxer,\n                        )\n                    )\n                else:\n                    # raise error otherwise\n                    raise ValueError(\n                        \"Given source `{}` is not a valid device index. Possible values index values can be: {}\".format(\n                            source,\n                            \",\".join(f\"{x}\" for x in valid_indexes),\n                        )\n                    )\n            # otherwise validate against supported demuxers\n            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):\n                # raise if fails\n                raise ValueError(\n                    \"Installed FFmpeg failed to recognize `{}` demuxer. 
Check `source_demuxer` parameter value again!\".format(\n                        source_demuxer\n                    )\n                )\n            else:\n                pass\n\n        # assert if valid source\n        assert source and isinstance(\n            source, str\n        ), \"Input `source` parameter is of invalid type!\"\n\n        # Differentiate input\n        if forced_validate:\n            source_demuxer is None and logger.critical(\n                \"Forcefully passing validation test for given source!\"\n            )\n            self.__source = source\n        elif os.path.isfile(source):\n            self.__source = os.path.abspath(source)\n        elif is_valid_image_seq(\n            self.__ffmpeg, source=source, verbose=self.__verbose_logs\n        ):\n            self.__source = source\n            self.__contains_images = True\n        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):\n            self.__source = source\n        else:\n            logger.error(\"`source` value is unusable or unsupported!\")\n            # discard the value otherwise\n            raise ValueError(\"Input source is invalid. 
Aborting!\")\n        # format command\n        if self.__sourcer_params:\n            # handle additional params separately\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + [\"-t\", \"0.0001\"]\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n                + dict2Args(self.__sourcer_params)\n                + [\"-f\", \"null\", \"-\"]\n            )\n        else:\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n            )\n        # extract metadata, decode, and filter\n        metadata = (\n            check_sp_output(\n                meta_cmd,\n                force_retrieve_stderr=True,\n            )\n            .decode(\"utf-8\")\n            .strip()\n        )\n        # separate input and output metadata (if available)\n        if \"Output #\" in metadata:\n            (metadata, self.__metadata_output) = metadata.split(\"Output #\")\n        # return metadata based on params\n        return metadata\n\n    def __extract_video_bitrate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream bitrate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video bitrate as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        video_bitrate_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if video_bitrate_text:\n            
selected_stream = video_bitrate_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(video_bitrate_text)\n                else 0\n            ]\n            filtered_bitrate = re.findall(\n                r\",\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            if len(filtered_bitrate):\n                default_video_bitrate = filtered_bitrate[0].split(\" \")[1:3]\n                final_bitrate = \"{}{}\".format(\n                    int(default_video_bitrate[0].strip()),\n                    \"k\" if (default_video_bitrate[1].strip().startswith(\"k\")) else \"M\",\n                )\n                return final_bitrate\n        return \"\"\n\n    def __extract_video_decoder(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream decoder from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video decoder as string value.\n        \"\"\"\n        assert isinstance(default_stream, int), \"Invalid input!\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            ]\n            filtered_pixfmt = re.findall(\n                r\"Video:\\s[a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):\n        \"\"\"\n        This Internal method parses default video-stream pixel-format from 
metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video pixel-format as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            ]\n            filtered_pixfmt = re.findall(\n                r\",\\s[a-z][a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default audio-stream bitrate and sample-rate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n\n        **Returns:** Default Audio-stream bitrate and sample-rate as string value.\n        \"\"\"\n        identifiers = [\"Audio:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            
]\n            # filter data\n            filtered_audio_bitrate = re.findall(\n                r\"fltp,\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            filtered_audio_samplerate = re.findall(\n                r\",\\s[0-9]+\\sHz\", selected_stream.strip()\n            )\n            # get audio bitrate metadata\n            if filtered_audio_bitrate:\n                filtered = filtered_audio_bitrate[0].split(\" \")[1:3]\n                result[\"bitrate\"] = \"{}{}\".format(\n                    int(filtered[0].strip()),\n                    \"k\" if (filtered[1].strip().startswith(\"k\")) else \"M\",\n                )\n            else:\n                result[\"bitrate\"] = \"\"\n            # get audio samplerate metadata\n            result[\"samplerate\"] = (\n                filtered_audio_samplerate[0].split(\", \")[1]\n                if filtered_audio_samplerate\n                else \"\"\n            )\n        return result if result and (len(result) == 2) else {}\n\n    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):\n        \"\"\"\n        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?\n\n        **Returns:** Default Video resolution and framerate as dictionary value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        # use output metadata if available\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in 
self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        # extract video orientation metadata if available\n        identifiers_orientation = [\"displaymatrix:\", \"rotation\"]\n        meta_text_orientation = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n        )\n        # use metadata if available\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            ]\n\n            # filter data\n            filtered_resolution = re.findall(\n                r\"([1-9]\\d+)x([1-9]\\d+)\", selected_stream.strip()\n            )\n            filtered_framerate = re.findall(\n                r\"\\d+(?:\\.\\d+)?\\sfps\", selected_stream.strip()\n            )\n            filtered_tbr = re.findall(r\"\\d+(?:\\.\\d+)?\\stbr\", selected_stream.strip())\n\n            # extract framerate metadata\n            if filtered_framerate:\n                # calculate actual framerate\n                result[\"framerate\"] = float(\n                    re.findall(r\"[\\d\\.\\d]+\", filtered_framerate[0])[0]\n                )\n            elif filtered_tbr:\n                # guess from TBR(if fps unavailable)\n                result[\"framerate\"] = float(\n                    re.findall(r\"[\\d\\.\\d]+\", filtered_tbr[0])[0]\n                )\n\n            # extract resolution metadata\n            if filtered_resolution:\n                result[\"resolution\"] = 
[int(x) for x in filtered_resolution[0]]\n\n            # extract video orientation metadata\n            if meta_text_orientation:\n                selected_stream = meta_text_orientation[\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                ]\n                filtered_orientation = re.findall(\n                    r\"[-]?\\d+\\.\\d+\", selected_stream.strip()\n                )\n                result[\"orientation\"] = float(filtered_orientation[0])\n            else:\n                result[\"orientation\"] = 0.0\n\n        return result if result and (len(result) == 3) else {}\n\n    def __extract_duration(self, inseconds=True):\n        \"\"\"\n        This Internal method parses stream duration from metadata.\n\n        Parameters:\n            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?\n\n        **Returns:** Default Stream duration as string value.\n        \"\"\"\n        identifiers = [\"Duration:\"]\n        stripped_data = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if stripped_data:\n            t_duration = re.findall(\n                r\"(?:[01]\\d|2[0123]):(?:[012345]\\d):(?:[012345]\\d+(?:\\.\\d+)?)\",\n                stripped_data[0],\n            )\n            if t_duration:\n                return (\n                    sum(\n                        float(x) * 60**i\n                        for i, x in enumerate(reversed(t_duration[0].split(\":\")))\n                    )\n                    if inseconds\n                    else t_duration\n                )\n        return 0\n

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.enumerate_devices","title":"enumerate_devices property readonly","text":"

A property object that enumerate all probed Camera Devices connected to your system names along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.

Returns: Probed Camera Devices as python dictionary.

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.__init__","title":"__init__(self, source, source_demuxer=None, custom_ffmpeg='', verbose=False, **sourcer_params) special","text":"

This constructor method initializes the object state and attributes of the Sourcer Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False sourcer_params dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/sourcer.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **sourcer_params,\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the Sourcer Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n    # checks if machine in-use is running windows os or not\n    self.__machine_OS = platform.system()\n\n    # define internal parameters\n    self.__verbose_logs = (  # enable verbose if specified\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # handle metadata received\n    self.__ffsp_output = None\n\n    # sanitize sourcer_params\n    self.__sourcer_params = {\n        str(k).strip(): str(v).strip()\n        if not isinstance(v, (dict, list, int, float, tuple))\n        else v\n        for k, v in sourcer_params.items()\n    }\n\n    # handle whether to force validate source\n    self.__forcevalidatesource = self.__sourcer_params.pop(\n        \"-force_validate_source\", False\n    )\n    if not isinstance(self.__forcevalidatesource, bool):\n        # reset improper values\n        self.__forcevalidatesource = False\n\n    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__sourcer_params.pop(\"-ffprefixes\", [])\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                
type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n\n    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n    __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n    if not isinstance(__ffmpeg_download_path, str):\n        # reset improper values\n        __ffmpeg_download_path = \"\"\n\n    # validate the FFmpeg assets and return location (also downloads static assets on windows)\n    self.__ffmpeg = get_valid_ffmpeg_path(\n        str(custom_ffmpeg),\n        True if self.__machine_OS == \"Windows\" else False,\n        ffmpeg_download_path=__ffmpeg_download_path,\n        verbose=self.__verbose_logs,\n    )\n\n    # check if valid FFmpeg path returned\n    if self.__ffmpeg:\n        self.__verbose_logs and logger.debug(\n            \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n        )\n    else:\n        # else raise error\n        raise RuntimeError(\n            \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n        )\n\n    # sanitize externally accessible parameters and assign them\n    # handles source demuxer\n    if source is None:\n        # first check if source value is empty\n        # raise error if true\n        raise ValueError(\"Input `source` parameter is empty!\")\n    elif isinstance(source_demuxer, str):\n        # assign if valid demuxer value\n        self.__source_demuxer = source_demuxer.strip().lower()\n        # assign if valid demuxer value\n        assert self.__source_demuxer != \"auto\" or validate_device_index(\n            source\n        ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. 
Aborting!\".format(\n            source\n        )\n    else:\n        # otherwise find valid default source demuxer value\n        # enforce \"auto\" if valid index device\n        self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n        # log if not valid index device and invalid type\n        self.__verbose_logs and not self.__source_demuxer in [\n            \"auto\",\n            None,\n        ] and logger.warning(\n            \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                type(source_demuxer).__name__\n            )\n        )\n        # log if not valid index device and invalid type\n        self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n            \"Given source `{}` is a valid device index. Enforcing 'auto' demuxer.\".format(\n                source\n            )\n        )\n\n    # handles source stream\n    self.__source = source\n\n    # creates shallow copy for further usage #TODO\n    self.__source_org = copy.copy(self.__source)\n    self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n    # handles all extracted devices names/paths list\n    # when source_demuxer = \"auto\"\n    self.__extracted_devices_list = []\n\n    # various source stream params\n    self.__default_video_resolution = \"\"  # handles stream resolution\n    self.__default_video_orientation = \"\"  # handles stream's video orientation\n    self.__default_video_framerate = \"\"  # handles stream framerate\n    self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n    self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n    self.__default_video_decoder = \"\"  # handles stream's video decoder\n    self.__default_source_duration = \"\"  # handles stream's video duration\n    self.__approx_video_nframes = \"\"  # handles approx stream frame number\n    self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n    
self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n    # handle various stream flags\n    self.__contains_video = False  # contains video\n    self.__contains_audio = False  # contains audio\n    self.__contains_images = False  # contains image-sequence\n\n    # handles output parameters through filters\n    self.__metadata_output = None  # handles output stream metadata\n    self.__output_frames_resolution = \"\"  # handles output stream resolution\n    self.__output_framerate = \"\"  # handles output stream framerate\n    self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n    self.__output_orientation = \"\"  # handles output frame orientation\n\n    # check whether metadata probed or not?\n    self.__metadata_probed = False\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.probe_stream","title":"probe_stream(self, default_stream_indexes=(0, 0))","text":"

This method Parses/Probes FFmpeg subprocess pipe's Standard Output for given input source and Populates the information in private class variables.

Parameters:

Name Type Description Default default_stream_indexes list, tuple

selects specific video and audio stream index in case of multiple ones. Value can be of format: (int,int). For example (0,1) is (\"0th video stream\", \"1st audio stream\").

(0, 0)

Returns: Reference to the instance object.

Source code in deffcode/sourcer.py
def probe_stream(self, default_stream_indexes=(0, 0)):\n    \"\"\"\n    This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n    Parameters:\n        default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n    **Returns:** Reference to the instance object.\n    \"\"\"\n    assert (\n        isinstance(default_stream_indexes, (list, tuple))\n        and len(default_stream_indexes) == 2\n        and all(isinstance(x, int) for x in default_stream_indexes)\n    ), \"Invalid default_stream_indexes value!\"\n    # validate source and extract metadata\n    self.__ffsp_output = self.__validate_source(\n        self.__source,\n        source_demuxer=self.__source_demuxer,\n        forced_validate=(\n            self.__forcevalidatesource if self.__source_demuxer is None else True\n        ),\n    )\n    # parse resolution and framerate\n    video_rfparams = self.__extract_resolution_framerate(\n        default_stream=default_stream_indexes[0]\n    )\n    if video_rfparams:\n        self.__default_video_resolution = video_rfparams[\"resolution\"]\n        self.__default_video_framerate = video_rfparams[\"framerate\"]\n        self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n    # parse output parameters through filters (if available)\n    if not (self.__metadata_output is None):\n        # parse output resolution and framerate\n        out_video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n        if out_video_rfparams:\n            self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n            self.__output_framerate = out_video_rfparams[\"framerate\"]\n            
self.__output_orientation = out_video_rfparams[\"orientation\"]\n        # parse output pixel-format\n        self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n\n    # parse pixel-format\n    self.__default_video_pixfmt = self.__extract_video_pixfmt(\n        default_stream=default_stream_indexes[0]\n    )\n\n    # parse video decoder\n    self.__default_video_decoder = self.__extract_video_decoder(\n        default_stream=default_stream_indexes[0]\n    )\n    # parse rest of metadata\n    if not self.__contains_images:\n        # parse video bitrate\n        self.__default_video_bitrate = self.__extract_video_bitrate(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse audio bitrate and samplerate\n        audio_params = self.__extract_audio_bitrate_nd_samplerate(\n            default_stream=default_stream_indexes[1]\n        )\n        if audio_params:\n            self.__default_audio_bitrate = audio_params[\"bitrate\"]\n            self.__default_audio_samplerate = audio_params[\"samplerate\"]\n        # parse video duration\n        self.__default_source_duration = self.__extract_duration()\n        # calculate all flags\n        if (\n            self.__default_video_bitrate\n            or (self.__default_video_framerate and self.__default_video_resolution)\n        ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n            self.__contains_video = True\n            self.__contains_audio = True\n        elif self.__default_video_bitrate or (\n            self.__default_video_framerate and self.__default_video_resolution\n        ):\n            self.__contains_video = True\n        elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n            self.__contains_audio = True\n        else:\n            raise ValueError(\n                \"Invalid source with no decodable audio or video stream 
provided. Aborting!\"\n            )\n    # calculate approximate number of video frame\n    if self.__default_video_framerate and self.__default_source_duration:\n        self.__approx_video_nframes = np.rint(\n            self.__default_video_framerate * self.__default_source_duration\n        ).astype(int, casting=\"unsafe\")\n\n    # signal metadata has been probed\n    self.__metadata_probed = True\n\n    # return reference to the instance object.\n    return self\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.retrieve_metadata","title":"retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False)","text":"

This method returns Parsed/Probed Metadata of the given source.

Parameters:

Name Type Description Default pretty_json bool

whether to return metadata as JSON string(if True) or Dictionary(if False) type?

False force_retrieve_output bool

whether to also return metadata missing in current Pipeline. This method returns (metadata, metadata_missing) tuple if force_retrieve_output=True instead of metadata.

required

Returns: metadata or (metadata, metadata_missing), formatted as JSON string or python dictionary.

Source code in deffcode/sourcer.py
def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n    \"\"\"\n    This method returns Parsed/Probed Metadata of the given source.\n\n    Parameters:\n        pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n        force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n    **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n    \"\"\"\n    # check if metadata has been probed or not\n    assert (\n        self.__metadata_probed\n    ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n    # log it\n    self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n    # create metadata dictionary from information populated in private class variables\n    metadata = {\n        \"ffmpeg_binary_path\": self.__ffmpeg,\n        \"source\": self.__source,\n    }\n    metadata_missing = {}\n    # Only either `source_demuxer` or `source_extension` attribute can be\n    # present in metadata.\n    if self.__source_demuxer is None:\n        metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n    else:\n        metadata.update({\"source_demuxer\": self.__source_demuxer})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n    # add source video metadata properties\n    metadata.update(\n        {\n            \"source_video_resolution\": self.__default_video_resolution,\n            \"source_video_pixfmt\": self.__default_video_pixfmt,\n            \"source_video_framerate\": self.__default_video_framerate,\n            \"source_video_orientation\": 
self.__default_video_orientation,\n            \"source_video_decoder\": self.__default_video_decoder,\n            \"source_duration_sec\": self.__default_source_duration,\n            \"approx_video_nframes\": (\n                int(self.__approx_video_nframes)\n                if self.__approx_video_nframes\n                and not any(\n                    \"loop\" in x for x in self.__ffmpeg_prefixes\n                )  # check if any loops in prefix\n                and not any(\n                    \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                )  # check if any loops in filters\n                else None\n            ),\n            \"source_video_bitrate\": self.__default_video_bitrate,\n            \"source_audio_bitrate\": self.__default_audio_bitrate,\n            \"source_audio_samplerate\": self.__default_audio_samplerate,\n            \"source_has_video\": self.__contains_video,\n            \"source_has_audio\": self.__contains_audio,\n            \"source_has_image_sequence\": self.__contains_images,\n        }\n    )\n    # add output metadata properties (if available)\n    if not (self.__metadata_output is None):\n        metadata.update(\n            {\n                \"output_frames_resolution\": self.__output_frames_resolution,\n                \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                \"output_framerate\": self.__output_framerate,\n                \"output_orientation\": self.__output_orientation,\n            }\n        )\n    else:\n        # since output stream metadata properties are only available when additional\n        # FFmpeg parameters(such as filters) are defined manually, thereby missing\n        # output stream properties are handled by assigning them counterpart source\n        # stream metadata property values\n        force_retrieve_missing and metadata_missing.update(\n            {\n                \"output_frames_resolution\": self.__default_video_resolution,\n       
         \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                \"output_framerate\": self.__default_video_framerate,\n                \"output_orientation\": self.__default_video_orientation,\n            }\n        )\n    # log it\n    self.__verbose_logs and logger.debug(\n        \"Metadata Extraction completed successfully!\"\n    )\n    # parse as JSON string(`json.dumps`), if defined\n    metadata = json.dumps(metadata, indent=2) if pretty_json else metadata\n    metadata_missing = (\n        json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n    )\n    # return `metadata` or `(metadata, metadata_missing)`\n    return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n
"},{"location":"reference/sourcer/params/","title":"Sourcer API Parameters","text":""},{"location":"reference/sourcer/params/#source","title":"source","text":"

This parameter defines the input source (-i) for probing.

Sourcer API will throw AssertionError if source provided is invalid or missing.

Sourcer API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize the sourcer and probe it\nsourcer = Sourcer('/home/foo.mp4').probe_stream()\n
  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nsourcer_params = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize the sourcer with define parameters\nsourcer = Sourcer('img%03d.png', verbose=True, **sourcer_params).probe_stream()\n
    # initialize the sourcer and probe it\nsourcer = Sourcer('img%03d.png', verbose=True).probe_stream()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img*.png', verbose=True, **sourcer_params).probe_stream()\n
    # define `-loop 1` for looping\nsourcer_params = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img.jpg', verbose=True, **sourcer_params).probe_stream()\n
  • Network Address: Valid (http(s), rtp, rstp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nsourcer_params = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **sourcer_params).probe_stream()\n
  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. For example, for using \"0\" index device as source on Windows, we can do as follows in Sourcer API:

    Requirement for using Camera Device as source in Sourcer API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

    # initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", verbose=True).probe_stream()\n
  • Video Capture Devices: Valid video probe device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in Sourcer API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in Sourcer API as follows:

      # initialize the sourcer with \"USB2.0 Camera\" source and probe it\nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nsourcer_params = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize the sourcer with \"Camera\" source and probe it\nsourcer = Sourcer(\"Camera\", source_demuxer=\"dshow\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all video capture devices such as from an USB webcam. You can refer following steps to identify and specify your probe video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is a index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l-ctl command.

      You can use sudo apt install v4l-utils APT command to install v4l-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in Sourcer API as follows:

      # initialize the sourcer with \"/dev/video0\" source and probe it\nsourcer = Sourcer(\"/dev/video0\", source_demuxer=\"v4l2\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in Sourcer API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize the sourcer with `1` index source and probe it\nsourcer = Sourcer(\"1\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to probe from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize the sourcer with \"Integrated iSight-camera\" source \nsourcer = Sourcer(\"Integrated\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps doesn't work for you then reach us out on Gitter \u27b6 Community channel

    # initialize the sourcer with \"USB2.0 Camera\" source \nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
  • Screen Capturing/Recording: Valid screen probe device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"0:\" indexed device with avfoundation source demuxer on MacOS, we can do as follows in Sourcer API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for probing:

    For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    # define framerate\nsourcer_params = {\"-framerate\": \"30\"}\n\n# initialize the sourcer with \"desktop\" source and probe it\nsourcer = Sourcer(\"desktop\", source_demuxer=\"gdigrab\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the x11grab to probe an X11 display. You can refer following steps to specify source for probing:

    # initialize the sourcer with \":0.0\" desktop source and probe it\nsourcer = Sourcer(\":0.0\", source_demuxer=\"x11grab\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index in Sourcer API:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    You can enumerate all the available input devices including screens ready to be probed using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n

    Then, you can specify and initialize your located screens in Sourcer API using its index shown:

    # initialize the sourcer with `0:` index desktop screen and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

    # initialize the sourcer with \"0:\" source and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n
  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and probing Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in Sourcer API:

    # initialize the sourcer with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate and probe it\nsourcer = Sourcer(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).probe_stream()\n

"},{"location":"reference/sourcer/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, as well as lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use ffmpeg -demuxers terminal command to lists all FFmpeg supported demuxers.

Specifying source_demuxer for using Camera Device Index as source in Sourcer API

For using Camera Device Index as source in Sourcer API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\").probe_stream()\n
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", source_demuxer=\"auto\").probe_stream()\n

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize the sourcer with `dshow` demuxer and probe it\nsourcer = Sourcer(\"foo.mp4\", source_demuxer=\"dshow\").probe_stream()\n

"},{"location":"reference/sourcer/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then Sourcer API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path exclusive parameter in Sourcer API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in Sourcer API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nsourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}\n\n# initialize the sourcer\nSourcer(\"foo.mp4\", verbose=True, **sourcer_params).probe_stream()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nSourcer(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").probe_stream()\n

"},{"location":"reference/sourcer/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize the sourcer with verbose logs\nSourcer(\"foo.mp4\", verbose=True).probe_stream()\n

"},{"location":"reference/sourcer/params/#sourcer_params","title":"sourcer_params","text":"

This dictionary parameter accepts all Exclusive Parameters formatted as its attributes:

Additional FFmpeg parameters

In addition to Exclusive Parameters, Sourcer API supports almost any FFmpeg parameter (supported by installed FFmpeg), and thereby can be passed as dictionary attributes in sourcer_params parameter.

Kindly read FFmpeg Docs carefully before passing any additional values to sourcer_params parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/sourcer/params/#exclusive-parameters","title":"Exclusive Parameters","text":"

Sourcer API supports few Exclusive Parameters to allow users to flexibly change its probing properties and handle some special FFmpeg parameters.

These parameters are discussed below:

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in Sourcer's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nsourcer_params = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -ffmpeg_download_path (string): sets the custom directory for downloading FFmpeg Static Binaries in Compression Mode, during the Auto-Installation on Windows Machines Only. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows:

    sourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"} # will be saved to \"C:/User/foo/foo1\"\n

  • -force_validate_source (bool): forcefully passes validation test for given source which is required for some special cases with unusual input. It can be used as follows:

    sourcer_params = {\"-force_validate_source\": True} # will pass validation test forcefully\n

"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

A cross-platform High-performance Video Frames Decoder that flexibly executes FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in just a few lines of python code

Highly Adaptive - DeFFcode APIs implement a standalone highly-extensible wrapper around FFmpeg multimedia framework. These APIs support a wide range of media streams as input source such as live USB/Virtual/IP camera feeds, regular multimedia files, screen recordings, image sequences, network protocols (such as HTTP(s), RTP/RTSP, etc.), so on and so forth.

Highly Flexible - DeFFcode APIs gain an edge over other Wrappers by providing complete control over the underlying pipeline including access to almost any FFmpeg specification thinkable such as specifying framerate, resolution, hardware decoder(s), filtergraph(s), and pixel-format(s) that are readily supported by all well known Computer Vision libraries.

Highly Convenient - FFmpeg has a steep learning curve especially for users unfamiliar with a command line interface. DeFFcode helps users by providing similar to OpenCV, Index based Camera Device Capturing and the same OpenCV-Python (Python API for OpenCV) coding syntax for its APIs, thereby making it even easier to learn, create, and develop FFmpeg based apps in Python.

"},{"location":"#key-features-of-deffcode","title":"Key features of DeFFcode","text":"

Here are some key features that stand out:

  • High-performance, low-overhead video frames decoding with robust error-handling.
  • Flexible API with access to almost any FFmpeg specification thinkable.
  • Supports a wide-range of media streams/devices/protocols as input source.
  • Curated list of well-documented recipes ranging from Basic to Advanced skill levels.
  • Hands down the easiest Index based Camera Device Capturing, similar to OpenCV.
  • Memory efficient Live Simple & Complex Filtergraphs. (Yes, You read it correctly \"Live\"!)
  • Lightning fast dedicated GPU-Accelerated Video Decoding & Transcoding.
  • Enables precise FFmpeg Frame Seeking with pinpoint accuracy.
  • Effortless Metadata Extraction from all streams available in the source.
  • Maintains the standard easy to learn OpenCV-Python coding syntax.
  • Out-of-the-box support for all prominent Computer Vision libraries.
  • Cross-platform, runs on Python 3.7+, and easy to install.
Still missing a key feature in DeFFcode?

Please review DeFFcode's Roadmap. If you still can't find the desired feature there, then you can request one simply by Commenting or Upvoting an existing comment on that issue.

"},{"location":"#getting-started","title":"Getting Started","text":"

In case you run into any problems, consult our Help section.

"},{"location":"#installation-notes","title":"Installation Notes","text":"

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode on your machine.

"},{"location":"#recipes-aka-examples","title":"Recipes a.k.a Examples","text":"

Once you have DeFFcode installed, checkout our Well-Documented Recipes for usage examples:

How to Begin?

If you\u2019re just starting, check out the Beginner Basic Recipes and as your confidence grows, move up to Advanced Recipes .

  • Basic Recipes : Recipes for beginners of any skill level to get started.
  • Advanced Recipes : Recipes to take your skills to the next level.
"},{"location":"#api-in-a-nutshell","title":"API in a nutshell","text":"

As a user, you just have to remember only two DeFFcode APIs, namely:

See API Reference for more in-depth information.

"},{"location":"#a-ffdecoder-api","title":"A. FFdecoder API","text":"

The primary function of FFdecoder API is to decode 24-bit RGB video frames from the given source:

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# formulate the decoder with suitable source\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").formulate()\n\n# grab RGB24(default) 3D frames from decoder\nfor frame in decoder.generateFrame():\n\n    # lets print its shape\n    print(frame.shape) # (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n
"},{"location":"#b-sourcer-api","title":"B. Sourcer API","text":"

The primary function of Sourcer API is to gather information from all multimedia streams available in the given source:

# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
The resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 60.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 10.0,\n  \"approx_video_nframes\": 600,\n  \"source_video_bitrate\": \"832k\",\n  \"source_audio_bitrate\": \"\",\n  \"source_audio_samplerate\": \"\",\n  \"source_has_video\": true,\n  \"source_has_audio\": false,\n  \"source_has_image_sequence\": false\n}\n

"},{"location":"#contribution-guidelines","title":"Contribution Guidelines","text":"

Contributions are welcome, and greatly appreciated!

Please read our Contribution Guidelines for more details.

"},{"location":"#community-channel","title":"Community Channel","text":"

If you've come up with some new idea, or are looking for the fastest way to troubleshoot your problems, please check out our Gitter community channel \u27b6

"},{"location":"#become-a-stargazer","title":"Become a Stargazer","text":"

You can be a Stargazer by starring us on Github, it helps us a lot and you're making it easier for others to find & trust this library. Thanks!

"},{"location":"#donations","title":"Donations","text":"

DeFFcode is free and open source and will always remain so.

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

"},{"location":"#citation","title":"Citation","text":"

Here is a Bibtex entry you can use to cite this project in a publication:

@software{deffcode,\n  author       = {Abhishek Singh Thakur},\n  title        = {abhiTronix/deffcode: v0.2.4},\n  month        = oct,\n  year         = 2022,\n  publisher    = {Zenodo},\n  version      = {v0.2.4},\n  doi          = {10.5281/zenodo.7155399},\n  url          = {https://doi.org/10.5281/zenodo.7155399}\n}\n

"},{"location":"changelog/","title":"Release Notes","text":""},{"location":"changelog/#v025-2023-01-11","title":"v0.2.5 (2023-01-11)","text":"New Features
  • FFdecoder:
    • Added OpenCV compatibility patch for YUV pixel-formats.
      • Implemented new patch for handling YUV pixel-formats(such as YUV420p, yuv444p, NV12, NV21 etc.) for exclusive compatibility with OpenCV APIs.
        • Note: Only YUV pixel-formats starting with YUV and NV are currently supported.
      • Added new -enforce_cv_patch boolean attribute for enabling OpenCV compatibility patch.
  • Sourcer:
    • Added Looping Video support.
      • Now raw-frame numbers revert to null(None) whenever any looping is defined through filter(such as -filter_complex \"loop=loop=3:size=75:start=25\") or prefix(\"-ffprefixes\":[\"-stream_loop\", \"3\"]).
  • Docs:
    • Added YUV frames example code for Capturing and Previewing BGR frames from a video file recipe.
    • Added YUV frames example code for Transcoding video using OpenCV VideoWriter API recipe.
    • Added YUV frames example code for Transcoding lossless video using WriteGear API recipe.
    • Added new CUVID-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Video Transcoding with OpenCV`s VideoWriter API recipe.
    • Added new CUDA-NVENC-accelerated Video Transcoding with WriteGear API recipe both for consuming BGR and NV12 frames.
    • Added new CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API recipe which is still WIP(\ud83d\udcacconfirmed with a GIF from tenor).
    • Added new Capturing and Previewing frames from a Looping Video recipe using -stream_loop option and loop filter.
    • Added docs for -enforce_cv_patch boolean attribute in ffparam dictionary parameter.
    • Added new python dependency block for recipes.
    • Reflected new OpenCV compatibility patch for YUV pixel-formats in code.
    • Added new content.code.copy and content.code.link features.
Updates/Improvements
  • FFhelper:
    • Replaced depreciating Retry API from requests.packages with requests.adapters.
  • Maintenance:
    • Replaced raw.github.com links with GitLab and GH links.
    • Removed unused code.
    • Updated log message.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
    • Updated test_frame_format test to include -enforce_cv_patch boolean attribute.
    • Updated test_source to test looping video support.
  • Setup:
    • Removed unused imports and patches.
    • Bumped version to 0.2.5.
  • Docs:
    • Updated Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing passage.
    • Updated and corrected docs hyperlinks in index.md and ReadMe.md
    • Update Zenodo Badge and BibTex entry.
    • Updated Readme.md banner image URLs.
    • Updated md-typeset text font size to .75rem.
    • Updated text and admonitions.
    • Updated recipe assumptions.
    • Updated Readme.md GIF URLs.
    • Updated abstract text in recipes.
    • Updated changelog.md.
    • Updated recipe code.
    • Removed old recipes.
Bug-fixes
  • FFdecoder API:
    • Fixed Zero division bug while calculating raw_bit_per_component.
  • FFhelper:
    • Fixed response.headers returning content-length as Nonetype since it may not necessarily have the Content-Length header set.
      • Reason: The response from gitlab.com contains a Transfer-Encoding field as 'Transfer-Encoding': 'chunked', which means data is sent in a series of chunks, so the Content-Length header is emitted. More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding#Directives
  • Docs:
    • Fixed https://github.com/badges/shields/issues/8671 badge issue in README.md
    • Removed depreciated text.
    • Fixed several typos in docs.
  • CI:
    • Added fix for codecov upload bug (https://github.com/codecov/codecov-action/issues/598).
      • Updated codecov-action workflow to `v3.
      • Added new CODECOV_TOKEN GitHub secret.
Pull Requests
  • PR #37
"},{"location":"changelog/#v024-2022-10-07","title":"v0.2.4 (2022-10-07)","text":"New Features
  • FFdecoder API:
    • Implemented new comprehensive support for both discarding key default FFmpeg parameters from Decoding pipeline simply by assigning them null string values, and concurrently using values extracted from Output Stream metadata properties (available only when FFmpeg filters are defined) for formulating pipelines.
      • Added null string value support to -framerate and -custom_resolution attributes, as well as frame_format parameter for easily discarding them.
      • Re-Implemented calculation of rawframe pixel-format.
        • Reconfigured default rawframe pixel-format, Now rawframe pixel-format will always default to source_video_pixfmt with frame_format=\"null\".
        • Now with frame_format parameter value either \"null\" or invalid or undefined, rawframe pixel-format value is taken from output_frames_pixfmt metadata property extracted from Output Stream (available only when filters are defined). If valid output_video_resolution metadata property is found then it defaults to default pixel-format(calculated variably).
        • With frame_format=\"null\", -pix_fmt FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of rawframe resolution value.
        • Now with -custom_resolution dictionary attribute value either \"null\" or invalid or undefined, rawframe resolution value is first taken from output_video_resolution metadata property extracted from Output Stream (available only when filters are defined), next from source_video_resolution metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_resolution valid metadata properties are found then RuntimeError is raised.
        • With -custom_resolution dictionary attribute value \"null\", -s/-size FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of output framerate value.
        • Now with -framerate dictionary attribute either null or invalid or undefined, output framerate value is first taken from output_video_framerate metadata property extracted from Output Stream (available only when filters are defined), next from source_video_framerate metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_framerate valid metadata properties are found then RuntimeError is raised.
        • With -framerate dictionary attribute value \"null\", -r/-framerate FFmpeg parameter will not be added to Decoding pipeline.
    • Implemented passing of simple -vf filters, complex -filter_complex filters, and pre-headers(via -ffprefixes) directly to Sourcer API's sourcer_params parameter for probing Output Stream metadata and filter values.
  • Sourcer API:
    • Implemented new comprehensive approach to handle source_demuxer parameter w.r.t different source parameter values.
      • The source_demuxer parameter now accepts \"auto\" as its value for enabling Index based Camera Device Capture feature in Sourcer API.
      • Sourcer API auto-enforces source_demuxer=\"auto\" by default, whenever a valid device index (uses validate_device_index method for validation) is provided as its source parameter value.
        • \u26a0\ufe0f Sourcer API will throw Assertion error if source_demuxer=\"auto\" is provided explicitly without a valid device index at its source parameter.
      • Source API now accepts all +ve and -ve device indexes (e.g. -1,0,1,2 etc.) to its source parameter, both as in integer and string of integer types as source in Index based Camera Device Capture feature.
        • Sourcer API imports and utilizes extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system.
          • \u26a0\ufe0f Sourcer API will throw RuntimeError on failure to identify any device.
        • Sourcer API auto verifies that the specified source device index is in range of the devices discovered.
          • \u26a0\ufe0f Sourcer API will raise ValueError if value goes out of valid range.
        • Sourcer API also automatically handle -ve indexes if specified within the valid range.
        • Implemented patch to auto-add video= suffix to selected device name before using it as video source on Windows OSes.
        • Added patch for handling dictionary of devices paths(with devices names as values) and log messages on Linux Oses.
        • Added copy import for shallow copying various class parameters.
      • Implemented new Support for additional FFmpeg parameters and Output metadata.
        • Added three new metadata properties: output_video_resolution, output_video_framerate, output_frames_pixfmt for handling extracted Output Stream values, whenever additional FFmpeg parameters(such as FFmpeg filters) are defined.
        • Added support for auto-handling additional FFmpeg parameters defined by sourcer_params dictionary parameters.
        • Implement new separate pipeline for parsing Output Stream metadata by decoding video source using null muxer for few microseconds whenever additional FFmpeg parameters(such as -vf filters) are defined by the user.
        • Included new metadata_output internal parameter for holding Output Stream metadata splitted from original Sourcer Metadata extracted from new pipeline.
        • Included new output_video_resolution, output_video_framerate, output_frames_pixfmt internal parameters for metadata properties, whenever Output Stream Metadata available.
        • Added new extract_output boolean parameter to extract_video_pixfmt and extract_resolution_framerate internal methods for extracting output pixel-format, framerate and resolution using Output Stream metadata instead of Sourcer Metadata, whenever available.
      • Added tuple datatype to sourcer_params exception.
      • Added dict2Args import.
    • Added enumerate_devices property object to enumerate the names of all probed Camera Devices connected to a system, along with their respective \"device indexes\" or \"camera indexes\", as a python dictionary.
    • Added new force_retrieve_missing parameter to retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata, when force_retrieve_missing=True.
    • Added various output stream metadata properties that are only available when additional FFmpeg parameters(such as filters) are defined manually, by assigning them counterpart source stream metadata property values
  • FFhelper:
    • Implemented new extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system and supported by valid OS specific FFmpeg demuxer.
      • Added support for three OS specific FFmpeg demuxers: namely dshow for Windows, v4l2 for Linux, and avfoundation for Darwin/Mac OSes.
      • Implemented separate code for parsing outputs of python subprocess module outputs provided with different commands for discovering all Video-Capture devices present on system.
        • Processed dshow (on Windows) and avfoundation (on Darwin) demuxers in FFmpeg commands with -list_devices true parameters using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names/indexes.
        • Used v4l2-ctl submodule command on Linux machines for listing all Video-Capture devices using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names and true system /dev/video paths.
          • Added patch for a single device with multiple /dev/video paths (each for metadata, video, controls), where it iterates on each path to find the exact path that contains valid video stream.
          • Added elaborated checks for catching all possible system errors that can occur while running v4l2-ctl submodule command.
          • The method will return discovered devices as list of dictionaries with device paths(/dev/video) as keys and respective device name as the values, instead of default list of device names.
          • Added patch for handling Linux specific log messages.
      • Added various logging messages to notify users about all discover devices names/paths w.r.t indexes.
      • \u26a0\ufe0f The extract_device_n_demuxer method will raise RuntimeError if it fails to identify any device.
      • Added various checks to assert invalid input parameters and unsupported OSes.
      • Added machine_OS parameter to specify OS running on the system, must be value of platform.system() module. If invalid the method will raise ValueError.
  • Utilities:
    • Added new validate_device_index() method to verify whether a given device index is valid or not.
      • Only Integers or String of integers are valid indexes.
      • Returns a boolean value, confirming whether valid(If true), or not(If False).
    • Added checks to support all +ve and -ve integers, both as integer and string types.
  • Docs:
    • Added new validate_device_index() method and its parameters description.
    • Added new extract_device_n_demuxer() method and its parameters description.
    • Added Decoding Camera Devices using Indexes support docs.
      • Added decode-camera-devices.md doc for Decoding Camera Devices using Indexes.
        • Added Enumerating all Camera Devices with Indexes example doc with code.
        • Added Capturing and Previewing frames from a Camera using Indexes example doc with code.
      • Added Camera Device Index support docs to FFdecoder and Sourcer API params.
  • CI:
    • Added check exception for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Added new test_discard_n_filter_params unittest for test recently added supported for both discarded parameters and filter values.
Updates/Improvements
  • FFdecoder API:
    • Extended range of supported output frame pixel-formats.
      • Added new pixel-formats to supported group by extending raw bits-per-component range.
    • Simplified raw frame dtype calculation based on selected pixel-format.
      • output_frames_pixfmt metadata property(if available) will be overridden to rgb24.
    • Replaced continue with break in generateFrame() method.
    • Improved handling of frame_format parameter.
  • Sourcer API:
    • Simplified JSON formatting and returning values logic.
    • Updated logging messages text and position.
    • Removed redundant variable definitions.
    • Changed related internal variable names w.r.t metadata property names.
    • Replaced os_windows internal parameter with machine_OS, and changed its input from os.name to more flexible platform.system().
    • Removed source_extension internal parameter and assigned values directly.
  • FFhelper:
    • Implemented more robust pattern matching for Linux machines.
    • Updated logs in check_sp_output() method for improving error output message.
    • Implemented \"Cannot open device\" v4l2-ctl command Error logs.
  • Maintenance:
    • Bumped version to 0.2.4.
    • Updated code comments.
  • CI:
    • Updated FFdecoder API's test_camera_capture unittest to test new Index based Camera Device Capturing on different platforms.
      • Added various parametrize source and source_demuxer parameter data to attain maximum coverage.
      • Added result field to fail and xfail unittest according to parametrize data provided on different platforms.
      • Removed pytest.mark.skipif to support all platforms.
    • Added and updated various parametrize test data to attain maximum coverage.
    • Limited range of extracted frames, for finishing tests faster.
    • Updated unittests to reflect recent name changes.
    • Disabled capturing of stdout/stderr with -s flag in pytest.
  • Setup:
    • Updated description metadata.
  • Bash Script:
    • Created undeleteable undelete.txt file for testing on Linux envs.
    • Updated undelete.txt file path.
    • Made FFmpeg output less verbose.
  • Docs:
    • Updated FFdecoder API params docs w.r.t recent changes and support for both discarded parameters and filter values.
      • Added new admonitions to explain handling of \"null\" and (special-case), undefined, or invalid type values in various parameters/attributes.
      • Added new footer reference explaining the handling of Default pixel-format for frame_format parameter.
      • Added missing docs for -default_stream_indexes ffparams attribute.
    • Added docs for recently added additional FFmpeg parameter in Sourcer API's sourcer_params parameter.
      • Removed unsupported -custom_resolution sourcer_params attributes from sourcer_params parameter docs.
      • Removed redundant -vcodec and -framerate attributes from sourcer_params parameter docs.
    • Updated both basic and advanced project Index hyperlinks.
    • Moved decoding-live-feed-devices.md doc from basic to advanced directory.
    • Updated page navigation in mkdocs.yml.
    • Update announcement bar to feature Index based Camera Device Capture support.
    • Updated Project description and Key features of DeFFcode.
    • Updated README.md with latest information.
    • Updated source and source_demuxer param doc.
    • Updated Hardware-Acceleration docs.
      • Updated Hardware-Accelerated Video Decoding and Transcoding docs to inform users about DeFFcode generated YUV frames not yet supported by OpenCV and its APIs.
    • Updated recipes docs to reflect recent changes in APIs.
    • Updated parameter docs to reflect recent name changes.
    • Updated parameters/attributes introductory descriptions.
    • Updated various parametrize data to attain maximum coverage.
    • Updated Zenodo badge and the BibTeX entry.
    • Updated method description texts and logging messages.
    • Update title headings, icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Breaking Updates/Changes
  • API:
    • Implemented new Index based Camera Device Capture feature (Similar to OpenCV), where the user just has to assign device index as integer (-n to n-1) in source parameter of DeFFcode APIs to directly access the given input device in few seconds.
  • FFdecoder API
    • Unsupported dtype pixel-format always defaults to rgb24.
  • Sourcer API:
    • Renamed output_video_resolution metadata property to output_frames_resolution.
    • Renamed output_video_framerate metadata property to output_framerate.
Bug-fixes
  • FFdecoder API:
    • Removed redundant dummy value for output_frames_pixfmt metadata property.
    • Fixed critical KeyError bug arises due to missing output metadata properties.
      • Enforced force_retrieve_missing parameter in Sourcer API's retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata.
      • Added new missing_prop internal class variable for handling metadata properties missing, received from Sourcer API.
      • Moved ffdecoder_operational_mode to missing metadata properties that cannot be updated but are read only.
      • Added missing metadata properties to metadata class property object for easy printing along with other metadata information.
      • Implemented missing metadata properties updation via. overridden metadata class property object.
        • Added counterpart_prop dict to handle all counterpart source properties for each missing output properties.
        • Implemented missing output properties auto-updation w.r.t counterpart source property.
        • Added separate case for handling only missing metadata properties and notifying user about counterpart source properties.
    • Fixed source metadata properties update bug causing non-existential missing metadata properties to be added to source metadata properties dictionary along with source metadata property.
      • Replaced update() calling on value dict directly with explicitly assigning values to source metadata properties dictionary.
      • Simplified missing_prop validation.
      • Removed unwanted continue in middle of loop.
    • Remove unusable exclusive yuv frames patch.
    • Fixed KeyError bug arises due to wrong variable placement.
    • Fixed approx_video_nframes metadata property check.
    • Fixed av_interleaved_write_frame(): broken pipe warning bug by switching process.terminate() with process.kill().
    • Fixed AttributeError bug caused due to typo in logger.
  • FFhelper:
    • Fixed check_sp_output() method returning Standard Error (stderr) even when Nonetype.
    • Fixed logger requiring utf-8 decoding.
    • Fixed missing force_retrieve_stderr argument to check_sp_output in extract_device_n_demuxer method on Linux platforms.
    • Fixed logger message bug.
  • Utils:
    • Fixed logger name typo.
  • Maintenance:
    • Fixed hyperlinks to new GitHub's form schemas.
    • Fixed typos in logs messages.
    • Removed redundant code.
    • Updated code comments.
  • Setup:
    • Rearranged long_description patches to address unused patch bug.
  • Bash Script:
    • Fixed chattr: No such file or directory bug.
  • CI:
    • Fixed missing lavfi demuxer for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Fixed missing ffparams parameter bug in test_discard_n_filter_params() unittest.
    • Fixed test_camera_capture test.
    • Removed redundant similar ValueError checks.
    • Fixed typo in pytest arguments.
    • Fixed missing arguments.
  • Docs:
    • Fixed invalid hyperlinks in ReadMe.md
    • Fixed bad formatting and context.
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #29
  • PR #32
"},{"location":"changelog/#v023-2022-08-11","title":"v0.2.3 (2022-08-11)","text":"New Features
  • Docs:
    • Added Zenodo Bibtex entry and badge in docs for easy citation.
    • Added new <div> tag bounding-box style to the Static FFmpeg binary download links in FFmpeg Installation Doc for better accessibility.
  • Maintenance:
    • Switched to new Issue GitHub's form schema using YAML:
      • Added new bug_report.yaml Issue GitHub's form schema for Bug Reports.
      • Added new idea.yaml Issue GitHub's form schema for new Ideas.
      • Added new question.yaml Issue GitHub's form schema for Questions.
      • Deleted old depreciated markdown(.md) files.
      • Polished forms.
Updates/Improvements
  • Maintenance:
    • Added new patterns to .gitignore to ignore vim files.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
  • Setup:
    • Added new patches for using README.md text as long_description metadata.
      • Implemented new patch to remove GitHub README UI specific text.
    • Simplified multiple str.replace to chained str.replace of better readability.
    • Bumped version to 0.2.3.
  • Docs:
    • Updated recipes to include with statement access method.
      • Updated existing recipes to include with statement access method in FFdecoder APIs.
      • Included new example code of accessing RGB frames using with statement access method.
      • Updated Recipe title to \"Accessing RGB frames from a video file\" across docs.
    • Included warning admonition for advising users to always use trim with reverse filter.
    • Updated docs text font to Libre Franklin.
    • Updated method description texts and logging messages.
    • Update icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Bug-fixes
  • FFdecoder API:
    • Fixed Context Manager methods.
      • Fixed __enter__ method returning class instance instead of formulating pipeline.
      • Fixed __exit__ method calling wrong non-existent method.
  • Setup:
    • Fixed missing comma(,) in keywords metadata.
    • Fixed bug in patch string.
  • Docs:
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #26
"},{"location":"changelog/#v022-2022-08-09","title":"v0.2.2 (2022-08-09)","text":"New Features
  • Sourcer API:
    • Added support for -ffprefixes attribute through Sourcer API's sourcer_params dictionary parameter (similar to FFdecoder API).
  • FFdecoder API:
    • Added new output_frames_pixfmt metadata property to preview and handle output frames pixel-format.
  • Docs:
    • Added separate \"Basic\" and \"Advanced\" Recipes markdowns files with self-explanatory text, related usage code, asset (such as images, diagrams, GIFs, etc.), and UI upgrades for bringing standard quality to visual design.
    • Added separate index.md for Basic and Advanced Recipes with introductory text and curated hyperlinks for quick references to various recipes (separated with sub-categories \"Decoding\", \"Transcoding\", and \"Extracting Video Metadata\").
    • Added related admonitions to specify python dependencies as well as other requirements and relevant information required for each of these recipes.
    • Added new Basic Decoding Recipes:
      • Added Decoding Video files with various pixel formats recipes.
      • Added Decoding Live Feed Devices recipes with source_demuxer FFdecoder API parameter.
      • Added Decoding Image sequences recipes supporting Sequential, Glob pattern , Single (looping) image.
      • Added Decoding Network Streams recipes.
    • Added new Basic Transcoding Recipes:
      • Added Transcoding Live frames recipes with OpenCV and WriteGear.
      • Added Transcoding Live Simple Filtergraphs recipes with OpenCV.
      • Added Saving Key-frames as Image recipes with different image processing libraries.
    • Added new Basic Extracting Video Metadata Recipes:
      • Added Extracting Video Metadata recipes with FFdecoder and Sourcer APIs.
    • Added new Advanced Decoding Recipes:
      • Added Hardware-Accelerated Video Decoding recipe using NVIDIA's H.264 CUVID Video-decoder(h264_cuvid).
      • Added Decoding Live Virtual Sources recipes with many test patterns using lavfi input virtual device.
    • Added new Advanced Decoding Recipes:
      • Added lossless Hardware-Accelerated Video Transcoding recipe with WriteGear API.
      • Added Transcoding Live Complex Filtergraphs recipes with WriteGear API.
      • Added Transcoding Video Art with Filtergraphs recipes with WriteGear API for creating real-time artistic generative video art using simple and complex filtergraphs.
    • Added new Advanced Updating Video Metadata Recipes:
      • Added Updating Video Metadata recipes with user-defined as well as source metadata in FFdecoder API.
    • Added new dark and light theme logo support.
    • Added new recipes GIF assets to gifs folder.
    • Added new dark logo deffcode-dark.png asset to images folder.
    • Added new ffdecoder.png and sourcer.png Image assets to images folder.
    • Added new navigation.tabs feature.
    • Added Material Announcement-Bar notifying recent changes.
Updates/Improvements
  • Sourcer API:
    • Implemented new validation checks to ensure given source has usable video stream available by checking availability of either video bitrate or both frame-size and framerate_ properties in the source metadata.
    • Improved extract_resolution_framerate method for making framerate extraction more robust by falling back to extracting TBR value when no framerate value available in the source metadata.
  • FFdecoder API:
    • Updated metadata property object to validate and override source metadata properties directly by overloading same property object before formulating Frames Decoder Pipeline:
      • Implemented validation checks to validate each manually assigned source metadata property against specific datatype before overriding.
      • Updated logging to notify invalid datatype values when assigned through metadata property object.
      • Added support for overriding source_video_resolution source metadata property to control frame-size directly through metadata.
      • Added support for overriding output_frames_pixfmt metadata attribute to be used as default pixel-format, when frame_format parameter value is None-type.
      • Improved handling of source metadata keys in metadata property object.
    • Updated metadata property object to handle and assign User-defined metadata directly by overloading the same property object:
      • Added new internal user_metadata class variable to handle all User-defined metadata information separately.
      • FFdecoder API's metadata property object now returns User-defined metadata information merged with Source Video metadata.
      • Added tuple value warning log to notify users json module converts Python tuples to JSON lists.
    • Improved logic to test validity of -custom_resolution attribute value through ffparams dictionary parameter.
    • Improved handling of FFmpeg pipeline framerate with both user-defined and metadata defined values.
    • Added tuple to exception in datatype check for ffparams dictionary parameter.
    • Added datatype validation check for frame_format parameter.
    • Improved handling of -framerate parameter.
  • Maintenance:
    • Reformatted all Core class and methods text descriptions:
      • Rewritten introductory each API class description.
      • Moved reference block from index.md to class description.
      • Fixed missing class and methods parameter description.
      • Fixed typos and context in texts.
      • Reformatted code comments.
    • Simplified for loop with if condition checking in metadata property object.
    • Updated logging comments.
  • Setup:
    • Updated project description in metadata.
    • Bumped version to 0.2.2.
  • Docs:
    • Updated Introduction doc:
      • Added new text sections such as \"Getting Started\", \"Installation Notes\", \"Recipes a.k.a Examples\" and \"API in a nutshell\".
      • Rewritten Introduction(index.md) with recent Information, redefined context, UI changes, updated recipe codes, curated hyperlinks to various recipes(separated with categories), and relatable GIFs.
      • Updated spacing in index.md using spacer class within <div> tag and &nbsp;.
      • Reformatted and centered DeFFcode Introductory description.
      • Reformatted FFmpeg Installation doc and Issue & PR guidelines.
      • Updated static FFmpeg binaries download URLs in FFmpeg Installation doc.
      • Refashioned text contexts, icons, and recipes codes.
      • Updated Key Features section with reflecting new features.
    • Updated README.md:
      • Updated README.md w.r.t recent changes in Introduction(index.md) doc.
      • Simplified and Reformatted text sections similar to Introduction doc.
      • Imported new \"Contributions\" and \"Donations\" sections from VidGear docs.
      • Added collapsible text and output section using <summary> and <detail> tags.
      • Added experimental note GitHub blockquote to simulate admonition in README.md.
      • Removed tag-line from README.md and related image asset.
      • Simplified and Grouped README URL hyperlinks.
      • Removed Roadmap section.
    • Updated Recipes docs:
      • Revamped DeFFcode Introduction index.md with new Information, Context and UI changes, Updated example codes and hyperlinks.
      • Updated Announcement Bar to fix announcement_link variable and text.
      • Updated footer note to notify users regarding tuple value warning in FFdecoder API.
      • Rewritten recipes w.r.t breaking changes in APIs.
    • Updated Reference docs:
      • Completely revamped API's parameter reference docs.
      • Added new Functional Block Diagrams to FFdecoder and Sourcer API References.
      • Rewritten and Reformatted FFdecoder and Sourcer API's parameter reference docs with new information w.r.t recent changes.
      • Implemented new admonitions explaining new changes, related warnings/errors, usage examples etc.
      • Removed redundant advanced.md and basic.md docs.
      • Added new abstracts to FFhelper and Utils docs.
    • Updated docs site navigation and titles:
      • Reformatted index.md and installation/index.md.
      • Renamed help/index.md to help/help.md.
      • Moved basic and advanced recipes from example to recipes folder.
      • Imported \"Donations\" sections from VidGear docs to help.md.
      • Added updated page-title and navigation hyperlinks in mkdocs.yml to new markdown files incorporated recently.
      • Updated internal navigation hyperlinks in docs and removed old redundant file links.
    • Updated docs UI:
      • Added custom spacer class in CSS for custom vertical spacing.
      • Imported new \"New\", \"Advance\", \"Alert\", \"Danger\" and \"Bug\" admonitions custom CSS UI patches from vidgear.
      • Updated all admonitions icons with new custom icon SVG+XML URLs.
      • Reformatted custom.css and added missing comments.
      • Updated docs fonts:
        • Updated text font to Heebo.
        • Updated code font to JetBrains Mono.
      • Updated primary and accent colors:
        • Updated primary light color to light green.
        • Updated primary dark color to amber.
        • Updated accent light color to green.
        • Updated accent dark color to lime.
      • Replaced admonitions with appropriate ones.
      • Changed Color palette toggle icons.
      • Updated icons in title headings.
    • Updated admonitions messages.
    • Updated changelog.md.
  • CI:
    • Pinned jinja2 version to <3.1.0, since jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799).
    • Updated unittests w.r.t recent changes in APIs:
      • Updated test_frame_format unittest to include manually assign output pixel-format via metadata property object.
      • Updated test_metadata unittest to include new checks parameter to decide whether to perform Assertion test on assigned metadata properties in FFdecoder API.
      • Added new parametrize attributes in test_metadata and test_seek_n_save unittests to cover every use-cases.
      • Replaced IOError with ValueError in Sourcer API unittests.
    • Updated test_metadata unittest to verify tuple value warning.
    • Updated unittests to increase code coverage significantly.
Breaking Updates/Changes
  • Sourcer API:
    • Sourcer API's retrieve_metadata() method now returns parsed metadata either as JSON string or dictionary type.
      • Added new pretty_json boolean parameter to retrieve_metadata(), that is when True, returns metadata formatted as JSON string instead of default python dictionary.
    • Changed IOError to ValueError in Sourcer API, raised when source with no decodable audio or video stream is provided.
  • FFdecoder API:
    • Rename extraparams dictionary parameter to ffparams in FFdecoder API.
    • The source metadata value cannot be altered through metadata property object in FFdecoder API.
    • Removed -ffpostfixes attribute support from ffparams dictionary parameter in FFdecoder API, since totally redundant in favor of similar -ffprefixes and -clones attributes.
Bug-fixes
  • FFdecoder API:
    • Fixed metadata property object unable to process user-defined keys when any source metadata keys are defined.
    • Fixed TypeError bug with string type -framerate parameter values.
  • Sourcer API:
    • Fixed Sourcer API throws IOError for videos containing streams without both source bitrate and framerate defined (such as from lavfi input virtual device).
    • Fixed AttributeError bug due to typo in variable name.
  • CI:
    • Fixed support for newer mkdocstring version in DeFFcode Docs Deployer workflow.
      • Added new mkdocstrings-python-legacy dependency.
      • Replaced rendering variable with options.
      • Removed pinned mkdocstrings==0.17.0 version.
      • Removed redundant variables.
    • Updated test_metadata unittest to fix AssertionError Bug.
  • Docs:
    • Fixed some admonitions icons not showing bug using !important rule in CSS.
    • Fixed 404.html static page not showing up.
    • Fixed invalid internal navigation hyperlinks and asset paths.
    • Removed quote/cite/summary admonition custom UI patches.
    • Removed redundant information texts.
    • Fixed typos in code comments.
    • Fixed typos in example code.
Pull Requests
  • PR #23
"},{"location":"changelog/#v021-2022-07-14","title":"v0.2.1 (2022-07-14)","text":"New Features
  • Sourcer API:
    • Implemented support for extracting metadata from live input devices/sources.
    • Added new source_demuxer and forced_validate parameters to validate_source internal method.
    • Implemented logic to validate source_demuxer value against FFmpeg supported demuxers.
    • Rearranged metadata dict.
    • Updated Code comments.
  • FFdecoder API:
    • Implemented functionality to supported live devices by allowing device path and respective demuxer into pipeline.
    • Included -f FFmpeg parameter into pipeline to specify source device demuxer.
    • Added special case for discarding -framerate value with Nonetype.
  • CI:
    • Added new unittest test_camera_capture() to test support for live Virtual Camera devices.
    • Added new v4l2loopback-dkms, v4l2loopback-utils and kernel related APT dependencies.
  • Bash Script:
    • Added new FFmpeg command to extract image datasets from given video on Linux envs.
    • Created live Virtual Camera devices through v4l2loopback library on Github Actions Linux envs.
      • Added v4l2loopback modprobe command to setup Virtual Camera named VCamera dynamically at /dev/video2.
      • Added v4l2-ctl --list-devices command for debugging.
      • Implemented FFmpeg command through nohup(no hangup) to feed video loop input to Virtual Camera in the background.
Updates/Improvements
  • Sourcer API:
    • Only either source_demuxer or source_extension attribute can be present in metadata.
    • Enforced forced_validate for live input devices/sources in validate_source internal method.
  • FFdecoder API:
    • Rearranged FFmpeg parameters in pipeline.
    • Removed redundant code.
    • Updated Code comments.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
  • CI:
    • Restricted test_camera_capture() unittest to Linux envs only.
    • Removed return_generated_frames_path() method support for Linux envs.
    • Pinned jinja2 3.1.0 or above breaking mkdocs.
      • jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799), therefore pinned jinja2 version to <3.1.0.
  • Bash Script:
    • Updated to latest FFmpeg Static Binaries links.
      • Updated download links to abhiTronix/ffmpeg-static-builds * hosting latest available versions.
      • Updated date/version tag to 12-07-2022.
      • Removed depreciated binaries download links and code.
  • Setup:
    • Bumped version to 0.2.1.
  • Docs:
    • Updated changelog.md.
Breaking Updates/Changes
  • Implement support for live input devices/sources.
    • source parameter now accepts device name or path.
    • Added source_demuxer parameter to specify demuxer for live input devices/sources.
    • Implemented Automated inserting of -f FFmpeg parameter whenever source_demuxer is specified by the user.
Bug-fixes
  • Sourcer API:
    • Fixed Nonetype value bug in source_demuxer assertion logic.
    • Fixed typos in parameter names.
    • Added missing import.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
    • Fixed bug with get_supported_demuxers not detecting name patterns with commas.
    • Removed redundant logging.
  • CI:
    • Fixed critical permission bug causing v4l2loopback to fail on Github Actions Linux envs.
      • Elevated privileges to root by adding sudo to all commands(including bash scripts and python commands).
      • Updated vidgear dependency to pip install from its git testing branch with recent bug fixes.
      • Replaced relative paths with absolute paths in unit tests.
    • Fixed WriteGear API unable to write frames due to permission errors.
    • Fixed test_source_playback() test failing on Darwin envs with OLD FFmpeg binaries.
      • Removed custom_ffmpeg value for Darwin envs.
    • Fixed various naming typos.
    • Fixed missing APT dependencies.
Pull Requests
  • PR #17
"},{"location":"changelog/#v020-2022-03-21","title":"v0.2.0 (2022-03-21)","text":"New Features
  • Sourcer API:
    • Added a new source_audio_samplerate metadata parameter:
      • Re-implemented __extract_audio_bitrate internal function from scratch as __extract_audio_bitrate_nd_samplerate.
        • Implemented new algorithm to extract both audio bitrate and samplerate from given source.
        • Updated regex patterns according to changes.
      • Updated __contains_video and __contains_audio logic to support new changes.
    • Added metadata extraction support:
      • Added retrieve_metadata class method to Sourcer API for extracting source metadata as python dictionary.
        • Populated private source member values in dictionary with distinct keys.
    • Added new -force_validate_source attribute to Sourcer API's sourcer_params dict parameter for special cases.
    • Implemented check whether probe_stream() called or not in Sourcer API.
  • FFdecoder API:
    • Added metadata extraction and updation support:
      • Added metadata property object function to FFdecoder API for retrieving source metadata from Sourcer API as dict and return it as JSON dump for pretty printing.
        • Added Operational Mode as read-only property in metadata.
      • Added metadata property object with setter() method for updating source metadata with user-defined dictionary.
        • Implemented way to manually alter metadata keys and values for custom results.
  • Docs:
    • Added new comprehensive documentation with Mkdocs:
      • Added new image assets:
        • Added new Deffcode banner image, logo and tagline
        • Added new icon ICO file with each layer of the favicon holds a different size of the image.
        • Added new png images for best compatibility with different web browsers.
      • Added new docs files:
        • Added new index.md with introduction to project.
        • Added new changelog.md.
        • Added license.md
        • Added new index.md with instructions for contributing in DeFFcode.
          • Added issue.md with Issue Contribution Guidelines.
          • Added PR.md with PR Contribution Guidelines.
        • Added new custom.js to add gitter sidecard support.
        • Added new custom.css that brings standard and quality visual design experience to DeFFcode docs.
          • Added new admonitions new and alert.
        • Added separate LICENSE(under CC creative commons) and README.md for assets.
        • Added new main.html extending base.html for defining custom site metadata.
        • Added deFFcode banner image to metadata.
        • Added twitter card and metadata.
        • Added version warning for displaying a warning when the user visits any other version.
        • Added footer sponsorship block.
        • Added gitter card official JS script dist.
        • Added new custom 404.html to handle HTTP status code 404 Not Found.
          • Implemented custom theming with new CSS style.
          • Added custom 404 image asset.
        • Added new index.md with DeFFcode Installation notes.
          • Added info about Supported Systems, Supported Python legacies, Prerequisites, Installation instructions.
          • Added Pip and Source Installation instructions.
        • Added new ffmpeg_install.md with machine-specific instructions for FFmpeg installation.
        • Added new index.md with different ways to help DeFFcode, other users, and the author.
          • Added info about Starring and Watching DeFFcode on GitHub, Helping with open issues etc.
          • Added Twitter intent used for tweeting #deffcode hashtags easily.
          • Added Kofi Donation link button.
          • Added author contact links and left align avatar image.
        • Added new get_help.md to get help with DeFFcode.
          • Added DeFFcode gitter community link.
          • Added other helpful links.
      • Added new assets folders.
      • Added Basic Recipes with basic.md
      • Added Advanced Recipes with advanced.md
      • Added all API References.
        • Added mkdocstrings automatic documentation from sources.
        • Added new index.md for FFdecoder API with its description and explaining its API.
        • Added new index.md for Sourcer API with its description and explaining its API.
        • Added ffhelper methods API references.
        • Added utils methods API references.
      • Added all API Parameters.
        • Added new params.md for FFdecoder API explaining all its parameters.
        • Added new params.md for Sourcer API explaining all its parameters.
        • Added Mkdocs support with mkdocs.yml
      • Implemented new mkdocs.yml with relevant parameters.
        • Added extended material theme with overridden parts.
        • Added site metadata with site_name, site_url, site_author, site_description, repo_name, repo_url, edit_uri, copyright etc.
        • Added navigation under sections for easily accessing each document.
        • Implemented Page tree for DeFFcode docs.
        • Added features like navigation.tracking, navigation.indexes, navigation.top, search.suggest, search.highlight, search.share, content.code.annotate.
        • Added separate palette [default]light(with primary:green accent: dark green) and [slate]dark(with primary:teal accent: light green) mode.
        • Added Color palette toggle switch with icon material/home-lightning-bolt.
        • Added support for all pymarkdown-extensions.
        • Added google fonts for text: Quicksand and code: Fira Code.
        • Added custom logo and icon for DeFFcode.
        • Added support for plugins like search, git-revision-date-localized, minify.
        • Added support for mkdocstrings plugin for auto-built API references.
          • Added python handler for parsing python source-code to mkdocstrings.
          • Improved source-code docs for compatibility with mkdocstrings.
        • Added support for extensions like admonition, attr_list, codehilite, def_list, footnotes, meta, and toc.
        • Added social icons and links.
        • Added custom extra_css and extra_javascript.
        • Added support for en (English) language.
      • Added new badges to README.md for displaying current status of CI jobs and coverage.
      • Added Roadmap to README.md
  • CI:
    • Automated CI support for different environments:
      • Implemented auto-handling of dependencies installation, unit testing, and coverage report uploading.
      • Added GitHub Action workflow for Linux envs:
        • Added and configured CIlinux.yml to enable GitHub Action workflow for Linux-based Testing Envs.
        • Added 3.7+ python-versions to build matrix.
        • Added code coverage through codecov/codecov-action@v2 workflow for measuring unit-tests effectiveness.
          • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
      • Added Appveyor workflow for Windows envs:
        • Add and configured appveyor.yml to enable Appveyor workflow for Windows-based Testing Envs.
        • Added 3.7+ 64-bit python-versions to build matrix.
        • Enabled fast_finish to exit immediately on error.
      • Added Azure-Pipelines workflow for MacOS envs:
        • Added and configured azure-pipelines.yml to enable Azure-Pipelines workflow for MacOS-based Testing Envs.
        • Added code coverage through codecov workflow for measuring unit-tests effectiveness.
          • Added online auto validation of codecov bash script using SHA256SUM and sig files as recommended.
        • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
        • Added 3.7+ python-versions to build matrix.
      • Added automated flake8 testing to discover any anomalies in code.
      • Added master branches for triggering CI.
    • Implement new automated Docs Building and Deployment on gh-pages through GitHub Actions workflow:
      • Added new workflow yaml docs_deployer.yml for automated docs deployment.
      • Added different jobs with ubuntu-latest environment to build matrix.
      • Added actions/checkout@v2 for repo checkout and actions/setup-python@v2 for python environment.
      • Pinned python version to 3.8 for python environment in docs building.
      • Added GIT_TOKEN, GIT_NAME, GIT_EMAIL environment variables through secrets.
      • Added Mkdocs Material theme related python dependencies and environments.
      • Added push on master and dev branch release with published as triggers.
      • Pinned mkdocstrings==0.17.0.
    • Added new Automated Docs Versioning:
      • Implemented Docs versioning through mike.
      • Separate new workflow steps to handle different versions.
      • Added step to auto-create RELEASE_NAME environment variable from DeFFcode version file.
      • Update docs deploy workflow to support latest, release and dev builds.
      • Added automatic release version extraction from GitHub events.
    • Added Skip Duplicate Actions Workflow to DeFFcode Docs Deployer:
      • Added Skip Duplicate Actions(fkirc/skip-duplicate-actions@master) Workflow to DeFFcode Docs Deployer to prevent redundant duplicate workflow-runs.
  • Maintenance:
    • New DeFFcode project issue and PR templates:
      • Added PR template:
        • Added a pull request template(PULL_REQUEST_TEMPLATE.md) for project contributors to automatically see the template's contents in the pull request body.
        • Added Brief Description, Requirements / Checklist, Related Issue, Context, Types of changes blocks.
      • Added Proposal, Bug-Report and Question templates:
        • Created an ISSUE_TEMPLATE subdirectory to contain multiple issue templates.
        • Added manually-created Proposal(proposal.md) and Question(question.md) issue templates for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Any Other Information like blocks.
        • Added a manually-created Bug Report(bug_report.md) issue template to ISSUE_TEMPLATE subdirectory for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Expected Behavior, Actual Behavior, Possible Fix, Steps to reproduce, Miscellaneous like blocks.
        • Added YAML frontmatter to each issue template to pre-fill the issue title, automatically add labels and assignees, and give the template a name and description.
        • Added a config.yml file to the .github/ISSUE_TEMPLATE folder to customize the issue template chooser that people see when creating a new issue.
        • Set blank_issues_enabled parameter to false to encourage contributors to use issue templates.
        • Added contact_links parameter with gitter community link to receive regular issues outside of GitHub.
      • Added new FUNDING.yml with ko-fi donation link.
      • Added .gitattributes for DeFFcode, that set the default behavior, in case people don't have core.autocrlf set.
      • Imported Codecov config(codecov.yml) from vidgear to modify coverage parameters.
  • Tests:
    • Added DeFFcode unit tests with pytest:
      • Added essential.py for defining all essential functions necessary for DeFFcode unit tests.
      • Added return_static_ffmpeg, remove_file_safe, return_testvideo_path, return_generated_frames_path, actual_frame_count_n_frame_size essential functions.
      • Added is_windows global variable.
      • Added related imports and logging.
      • Added __init__.py.
      • Moved all files to test folder.
      • Added DeFFcode's utils unit tests with pytest.
        • Added new test_loggerhandler and test_dict2Args tests.
      • Added DeFFcode's ffhelper unit tests with pytest.
        • Added new test_ffmpeg_binaries_download, test_validate_ffmpeg, test_get_valid_ffmpeg_path, test_check_sp_output, test_is_valid_url, test_is_valid_image_seq, and test_validate_imgseqdir parametrize tests.
      • Added DeFFcode's Sourcer API unit tests with pytest.
        • Added new test_source and test_probe_stream_n_retrieve_metadata parametrize tests.
      • Added DeFFcode's FFdecoder API unit tests with pytest.
        • Added new test_source_playback, test_frame_format, test_metadata, test_seek_n_save, and test_FFdecoder_params parametrize unit tests.
      • Added related imports and logging.
      • Added unit test for delete_file_safe utils function.
  • Bash:
    • \ud83d\udd27 Imported prepare_dataset.sh from vidgear for downloading pytest datasets to temp dir.
Updates/Improvements
  • FFdecoder API:
    • Removed redundant forcing -r FFmpeg parameter for image sequences as source.
    • Removed redundant checks on -vf FFmpeg parameter.
    • FFmpeg parameter -s will be discarded in favor of -custom_resolution attribute.
    • Replaced -constant_framerate with FFmpeg -framerate attribute.
    • Replaced -custom_source_params with correct -custom_sourcer_params attribute.
    • Renamed operational_mode metadata parameter to ffdecoder_operational_mode.
  • Sourcer API:
    • Converted all Sourcer API's publicly available variables into private ones for stability.
    • All of Sourcer's publicly accessed variable metadata values in FFdecoder were therefore replaced with their dictionary counterparts.
    • Moved FFmpeg path validation and handling to Sourcer from FFdecoder API.
    • Moved -ffmpeg_download_path dictionary attribute to Sourcer API's sourcer_params parameter.
    • Moved dependencies and related functions.
  • CI:
    • Excluded dev branch from triggering workflow on any environment.
      • Updated yaml files to exclude beta dev branch from triggering workflow on any environment.
      • Restricted codecov to use only master branch.
    • Re-implemented fkirc/skip-duplicate-actions@master to Skip individual deploy steps instead of Skip entire jobs
  • Docs:
    • Updated PR.md
      • Added instructions to download prepare_dataset.sh using curl.
      • Updated dependencies for pytest.
    • Updated advanced.md
      • Updated generating Video from Image sequence to save video using OpenCV writer instead of WriteGear API.
      • Added frame_format=\"bgr24\" and additional instructions regarding OpenCV writer.
      • Updated example codes with new changes.
      • Rearranged examples placement.
    • Updates to custom.css
      • Added donation sponsor link in page footer with heart animation.
      • Added bouncing heart animation through pure CSS.
      • Added Bold property to currently highlighted link in Navigation Bar.
      • Updated Navigation Bar title font size.
      • Updated version list text to uppercase and bold.
      • Updated icon for task list unchecked.
      • Added more top-padding to docs heading.
      • Updated Block quote symbol and theming.
      • Updated Custom Button theming to match docs.
      • Added new custom classes to create shadow effect in dark mode for better visibility.
      • Updated dark mode theme \"slate\" hue to 285.
    • Updated admonitions colors.
    • Updated gitter sidecard UI colors and properties.
    • Reflected recent changes in Sourcer and FFdecoder API's metadata.
    • Updated sample code formatting from sh to json.
    • Added missing docs for delete_file_safe utils function.
    • Updated Download Test Datasets instructions.
    • Updated contribution guidelines and installation docs with related changes.
    • Updated License Notice.
    • Updated code comments.
    • Updated logging messages.
    • Updated Deffcode Logo and Tagline to be dark-mode friendly.
    • Adjusted asset alignment.
    • Updated example code.
    • Updated Installation instructions, Requirements and Roadmap.
    • Corrected links to documents.
    • Updated project description.
    • Updated LICENSE.
    • Updated indentation and code comments
    • Re-aligned text and images in README.md
    • Adjusted image classes and width.
  • Maintenance:
    • Updated LICENSE notice to add vidgear notice.
    • Bumped version to 0.2.0
    • Added useful comments for convenience.
Breaking Updates/Changes
  • Sourcer API will now raise an Assertion error if probe_stream() is not called before calling retrieve_metadata().
  • Only -framerate values greater than 0.0 are now valid.
  • Renamed decode_stream to probe_stream in Sourcer API.
  • Any of video bitrate or video framerate are sufficient to validate if source contains valid video stream(s).
  • Any of audio bitrate or audio samplerate are sufficient to validate if source contains valid audio stream(s).
Bug-fixes
  • APIs:
    • Added missing delete_file_safe function in utils.
      • Imported delete_file_safe from vidgear to safely delete files at a given path.
    • Fixed forward slash bugs in regex patterns.
    • Fixed IndexError when no bitrate was discovered in given source.
    • Fixed FFmpeg subprocess pipeline not terminating gracefully in FFdecoder API.
    • Fixed __version__ not defined in DeFFcode's __init__.py that throws AttributeError: module 'deffcode' has no attribute '__version__' on query.
      • Added necessary import in __init__.py.
  • Docs:
    • Fixed missing \"-vcodec\": \"h264_cuvid\" value in example code.
    • Fixed typos in filenames in utils.py
    • Fixed internal missing or invalid hyperlinks.
    • Fixed improper docs context and typos.
    • Fixed \"year\" in license notice.
    • Fixed content spacing.
    • Fixed Gitter Community Link in Mkdocs.
    • Fixed typos in README.md.
    • Fixed typos in license notices.
    • Fixed typos in code comments.
    • Fixed typos in example code.
  • CI:
    • Fixed missing FFmpeg dependency bug in GitHub Actions.
    • Fixed typo in Docs Deployer yaml.
    • Fixed if condition skipping when need is skipping
  • Maintenance:
    • Added missing imports.
    • Fixed redundant conditional logics.
    • Removed or Replaced redundant conditions and definitions.
    • Fixed minor typos in templates.
Pull Requests
  • PR #5
  • PR #6
  • PR #8
  • PR #9
  • PR #11
  • PR #12
  • PR #13
  • PR #14
"},{"location":"changelog/#v010-2022-03-07","title":"v0.1.0 (2022-03-07)","text":"New Features
  • Open-Sourced DeFFcode under the Apache 2.0 License.
  • Added new Classes(APIs):
    • FFdecoder: Performant Real-time Video frames Generator for generating blazingly fast video frames(RGB ndarray by default).
    • Sourcer: Extracts source video metadata (bitrate, resolution, framerate, nframes etc.) using its subprocess FFmpeg output.
  • Added new Helper functions:
    • ffhelper: Backend FFmpeg Wrapper that handles all subprocess transactions and gather data.
    • utils: Handles all additional Utilizes required for functioning of DeFFcode.
  • First PyPi Release:
    • Released DeFFcode to Python Package Index (PyPI)
    • Added setup.py and related metadata.
    • Added version.py
  • Docs:
    • Added abstract and related information in README.md
    • Added installation instructions.
    • Added preliminary usage examples.
  • Maintenance:
    • Added LICENSE.
    • Added .gitignore
Updates/Improvements
  • Maintenance:
    • Bumped version to 0.1.0
    • Updated LICENSE notice to add vidgear code usage notice.
Breaking Updates/Changes
  • Fixed support for Python-3.7 and above legacies only.
Bug-fixes
  • Docs:
    • Fixed hyperlinks in README.
    • Fixed indentation and spacing.
    • Fixed typos and updated context.
    • Removed dead code.
"},{"location":"help/","title":"Helping Us","text":"

Liked DeFFcode? Would you like to help DeFFcode, other users, and the author?

There are many simple ways to help us:

"},{"location":"help/#star-deffcode-on-github","title":"Star DeFFcode on GitHub","text":"

You can star DeFFcode on GitHub:

It helps us a lot by making it easier for others to find & trust this library. Thanks!

"},{"location":"help/#help-others-with-issues-on-github","title":"Help others with issues on GitHub","text":"

You can see through any opened or pinned existing issues on our GitHub repository, and try helping others, wherever possible:

"},{"location":"help/#watch-the-github-repository","title":"Watch the GitHub repository","text":"

You can watch \ud83d\udc40 DeFFcode Activities on GitHub:

When you watch a repository, you will be notified of all conversations for that repository, including when someone creates a new issue, or pushes a new pull request.

You can try helping solving those issues, or give valuable feedback/review on new Pull Requests.

"},{"location":"help/#tweet-about-deffcode","title":"Tweet about DeFFcode","text":"

Tweet about DeFFcode and Spread the word \ud83d\udde3:

Tweet #deffcode

Let others know how you are using DeFFcode and why you like it!

"},{"location":"help/#helping-author","title":"Helping Author","text":"

Donations help keep DeFFcode's development alive and motivate me (as author).

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

Thanks a million!

"},{"location":"help/#connect-with-author","title":"Connect with Author","text":"

You can connect with me, the author \ud83d\udc4b:

  • Follow author on GitHub:
  • Follow author on Twitter: Follow @abhi_una12
  • Get in touch with author on Linkedin:

"},{"location":"license/","title":"License","text":"

This library is released under the Apache 2.0 License.

"},{"location":"license/#copyright-notice","title":"Copyright Notice","text":"
Copyright (c) 2021 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n
"},{"location":"contribution/","title":"Overview","text":""},{"location":"contribution/#contribution-overview","title":"Contribution Overview","text":"

Contributions are always welcomed

We'd love your contribution to DeFFcode in order to fix bugs or to implement new features!

"},{"location":"contribution/#submission-guidelines","title":"Submission Guidelines","text":"
  • Submitting an Issue Guidelines \u27b6
  • Submitting Pull Request(PR) Guidelines \u27b6
"},{"location":"contribution/#submission-contexts","title":"Submission Contexts","text":""},{"location":"contribution/#got-a-question-or-problem","title":"Got a question or problem?","text":"

For quick questions, please refrain from opening an issue, instead you can reach us on Gitter community channel.

"},{"location":"contribution/#found-a-typo","title":"Found a typo?","text":"

There's no need to contribute for some typos. Just reach us on Gitter \u27b6 community channel, We will correct them in (less than) no time.

"},{"location":"contribution/#found-a-bug","title":"Found a bug?","text":"

If you encountered a bug, you can help us by submitting an issue in our GitHub repository. Even better, you can submit a Pull Request(PR) with a fix, but make sure to read the guidelines \u27b6.

"},{"location":"contribution/#request-for-a-featureimprovement","title":"Request for a feature/improvement?","text":"Subscribe to Github Repository

You can subscribe our GitHub Repository to receive notifications through email for new pull requests, commits and issues that are created in DeFFcode. Learn more about it here \u27b6

You can request our GitHub Repository for a new feature/improvement based on the type of request:

Please submit an issue with a proposal template for your request to explain how it benefits everyone in the community.

  • Major Feature Requests: If you require a major feature for DeFFcode, then first open an issue and outline your proposal so that it can be discussed. This will also allow us to better coordinate our efforts, prevent duplication of work, and help you to craft the change so that it is successfully accepted into the project. The purposed feature, if accepted, may take time based on its complexity and availability/time-schedule of our maintainers, but once it's completed, you will be notified right away. Please be patient!

  • Minor Feature Requests: Small features and bugs resolved on priority. You just have to submit an issue to our GitHub Repository.

"},{"location":"contribution/PR/","title":"Submitting Pull Request(PR) Guidelines:","text":"

The following guidelines tell you how to submit a valid PR for DeFFcode:

Working on your first Pull Request for DeFFcode?

  • You can learn about \"How to contribute to an Open Source Project on GitHub\" from this doc \u27b6
  • If you're stuck at something, please join our Gitter community channel. We will help you get started!

"},{"location":"contribution/PR/#clone-branch-for-pr","title":"Clone branch for PR","text":"

You can clone your Forked remote git to local and create your PR working branch as a sub-branch of latest master branch as follows:

Make sure the master branch of your Forked repository is up-to-date with DeFFcode, before starting working on a Pull Request.

# clone your forked repository(change with your username) and get inside\ngit clone https://github.com/{YOUR USERNAME}/DeFFcode.git && cd DeFFcode\n\n# pull any recent updates\ngit pull\n\n# Now create your new branch with suitable name(such as \"subbranch_of_master\")\ngit checkout -b subbranch_of_master\n

Now after working with this newly created branch for your Pull Request, you can commit and push or merge it locally or remotely as usual.

"},{"location":"contribution/PR/#pr-submission-checklist","title":"PR Submission Checklist","text":"

There are some important checks you need to perform while submitting your Pull Request(s) for DeFFcode library:

  • Submit a Related Issue:

  • The first thing you do is submit an issue with a proposal template for your work first and then work on your Pull Request.

  • Submit a Draft Pull Request:

  • Submit the draft pull request from the first day of your development.

  • Add a brief but descriptive title for your PR.
  • Explain what the PR adds, fixes, or improves.
  • In case of bug fixes, add a new unit test case that would fail against your bug fix.
  • Provide output or screenshots, if you can.
  • Make sure your pull request passed all the CI checks (triggers automatically on pushing commits against master branch). If it's somehow failing, then ask the maintainer for a review.
  • Click \"ready for review\" when finished.

  • Test, Format & lint code locally:

  • Make sure to test, format, and lint the modified code locally before every commit. The details are discussed below \u27b6

  • Make sensible commit messages:

  • If your pull request fixes a separate issue number, remember to include \"resolves #issue_number\" in the commit message. Learn more about it here \u27b6.

  • Keep the commit message as concise as possible at every submit. You can make a supplement to the previous commit with the git commit --amend command.

  • Perform Integrity Checks:

    Any duplicate pull request will be Rejected!

  • Search GitHub if there's a similar open or closed PR that relates to your submission.

  • Check if your proposed code matches the overall direction of the DeFFcode APIs and improves it.
  • Retain copyright for your contributions, but also agree to license them for usage by the project and author(s) under the Apache 2.0 license \u27b6.

  • Link your Issues:

    For more information on Linking a pull request to an issue, See this doc\u27b6

  • Finally, when you're confident enough, make your pull request public.

  • You can link an issue to a pull request manually or using a supported keyword in the pull request description. It helps collaborators see that someone is working on the issue. For more information, see this doc\u27b6

"},{"location":"contribution/PR/#testing-formatting-linting","title":"Testing, Formatting & Linting","text":"

All Pull Request(s) must be tested, formatted & linted against our library standards as discussed below:

"},{"location":"contribution/PR/#requirements","title":"Requirements","text":"

Testing DeFFcode requires additional test dependencies and dataset, which can be handled manually as follows:

  • Install additional python libraries:

    You can easily install these dependencies via pip:

    # Install opencv(only if not installed previously)\n$ pip install opencv-python\n\n# install rest of dependencies\n$ pip install --upgrade flake8 black pytest vidgear[core]\n
  • Download Tests Dataset:

    To perform tests, you also need to download additional dataset (to your temp dir) by running prepare_dataset.sh bash script as follows:

    On Linux/MacOSOn Windows
    $ chmod +x scripts/bash/prepare_dataset.sh\n$ ./scripts/bash/prepare_dataset.sh\n
    $ sh scripts/bash/prepare_dataset.sh\n
"},{"location":"contribution/PR/#running-tests","title":"Running Tests","text":"

All tests can be run with pytest(in DeFFcode's root folder) as follows:

$ pytest -sv  #-sv for verbose output.\n
"},{"location":"contribution/PR/#formatting-linting","title":"Formatting & Linting","text":"

For formatting and linting, following libraries are used:

  • Flake8: You must run flake8 linting for checking the code base against the coding style (PEP8), programming errors and other cyclomatic complexity:

    $ flake8 {source_file_or_directory} --count --select=E9,F63,F7,F82 --show-source --statistics\n
  • Black: DeFFcode follows black formatting to make code review faster by producing the smallest diffs possible. You must run it with sensible defaults as follows:

    $ black {source_file_or_directory}\n

"},{"location":"contribution/PR/#frequently-asked-questions","title":"Frequently Asked Questions","text":"

Q1. Why do my changes taking so long to be Reviewed and/or Merged?

Submission Aftermaths

  • After your PR is merged, you can safely delete your branch and pull the changes from the main (upstream) repository.
  • The changes will remain in dev branch until next DeFFcode version is released, then it will be merged into master branch.
  • After a successful Merge, your newer contributions will be given priority over others.

Pull requests will be reviewed by the maintainers and the rationale behind the maintainer\u2019s decision to accept or deny the changes will be posted in the pull request. Please wait for our code review and approval, possibly enhancing your change on request.

Q2. Would you accept a huge Pull Request with Lots of Changes?

First, make sure that the changes are somewhat related. Otherwise, please create separate pull requests. Anyway, before submitting a huge change, it's probably a good idea to open an issue in the DeFFcode Github repository to ask the maintainers if they agree with your proposed changes. Otherwise, they could refuse your proposal after you put all that hard work into making the changes. We definitely don't want you to waste your time!

"},{"location":"contribution/issue/","title":"Submitting an Issue Guidelines","text":"

If you've found a new bug or you've come up with some new feature which can improve the quality of the DeFFcode, then related issues are welcomed! But, Before you do, please read the following guidelines:

First Issue on GitHub?

You can easily learn about it from creating an issue wiki.

Info

Please note that your issue will be fixed much faster if you spend about half an hour preparing it, including the exact reproduction steps and a demo. If you're in a hurry or don't feel confident, it's fine to report issues with less details, but this makes it less likely they'll get fixed soon.

"},{"location":"contribution/issue/#search-the-docs-and-previous-issues","title":"Search the Docs and Previous Issues","text":"
  • Remember to first search GitHub for an open or closed issue that relates to your submission or has already been reported. You may find related information and the discussion might inform you of workarounds that may help to resolve the issue.
  • For quick questions, please refrain from opening an issue, as you can reach us on Gitter community channel.
  • Also, go comprehensively through our dedicated FAQ & Troubleshooting section.
"},{"location":"contribution/issue/#gather-required-information","title":"Gather Required Information","text":"
  • All DeFFcode APIs provide a verbose boolean flag in parameters, to log debugged output to terminal. Kindly turn this parameter True in the respective API for getting debug output, and paste it with your Issue.
  • In order to reproduce bugs we will systematically ask you to provide a minimal reproduction code for your report.
  • Check and paste, exact DeFFcode version by running command python -c \"import deffcode; print(deffcode.__version__)\".
"},{"location":"contribution/issue/#follow-the-issue-template","title":"Follow the Issue Template","text":"
  • Please format your issue by choosing the appropriate template.
  • Any improper/insufficient reports will be marked Invalid \u26d4, and if we don't hear back from you we may close the issue.
"},{"location":"contribution/issue/#raise-the-issue","title":"Raise the Issue","text":"
  • Add a brief but descriptive title for your issue.
  • Keep the issue phrasing in context of the problem.
  • Attach source-code/screenshots if you have one.
  • Finally, raise it by choosing the appropriate Issue Template: Bug report \ud83d\udc1e, Idea \ud83d\udca1, Question \u2754.
"},{"location":"help/get_help/","title":"Getting Help","text":"Courtesy - tenor

Would you like to get help with DeFFcode?

There are several ways to get help with DeFFcode:

"},{"location":"help/get_help/#join-our-gitter-community-channel","title":"Join our Gitter Community channel","text":"

Have you come up with some new idea \ud83d\udca1 or are you looking for the fastest way to troubleshoot your problems

Join and chat on our Gitter Community channel:

There you can ask quick questions, swiftly troubleshoot your problems, help others, share ideas & information, etc.

"},{"location":"help/get_help/#this-is-what-you-do-when","title":"This is what you do when...","text":"
  • Got a question or problem?
  • Found a typo?
  • Found a bug?
  • Missing a feature/improvement?
"},{"location":"help/get_help/#reporting-an-issues","title":"Reporting an issues","text":"

Want to report a bug? Suggest a new feature?

Before you do, please read our guidelines \u27b6

"},{"location":"help/get_help/#preparing-a-pull-request","title":"Preparing a Pull Request","text":"

Interested in contributing to DeFFcode?

Before you do, please read our guidelines \u27b6

"},{"location":"installation/","title":"Overview","text":""},{"location":"installation/#installation-notes","title":"Installation Notes","text":""},{"location":"installation/#supported-systems","title":"Supported Systems","text":"

DeFFcode is well-tested and supported on the following systems(but not limited to), with python 3.7+ and pip installed:

Upgrade your pip

It is strongly advised to upgrade to the latest pip before installing deffcode to avoid any undesired installation error(s).

There are two mechanisms to upgrade pip:

pipensurepip

You can use existing pip to upgrade itself:

Install pip if not present
  • Download the script, from https://bootstrap.pypa.io/get-pip.py.
  • Open a terminal/command prompt, cd to the folder containing the get-pip.py file and run:
Linux/MacOSWindows
python get-pip.py\n
py get-pip.py\n

More details about this script can be found in pypa/get-pip\u2019s README.

Linux/MacOSWindows
python -m pip install pip --upgrade\n
py -m pip install pip --upgrade\n

Python also comes with an ensurepip module1, which can easily upgrade/install pip in any Python environment.

Linux/MacOSWindows
python -m ensurepip --upgrade\n
py -m ensurepip --upgrade\n
  • Any Linux distro released in 2016 or later
  • Windows 7 or later
  • MacOS 10.12.6 (Sierra) or later

"},{"location":"installation/#supported-python-legacies","title":"Supported Python legacies","text":"

Python 3.7+ are only supported legacies for installing DeFFcode v0.1.0 and above.

"},{"location":"installation/#prerequisites","title":"Prerequisites","text":"

DeFFcode APIs requires FFmpeg binaries to be installed for all of its core functionality.

"},{"location":"installation/#ffmpeg","title":"FFmpeg","text":"

When installing DeFFcode, FFmpeg is the only prerequisite you need to configure/install manually. You can easily do it by referring to the FFmpeg Installation doc.

"},{"location":"installation/#installation","title":"Installation","text":""},{"location":"installation/#a-installation-using-pip-recommended","title":"A. Installation using pip (Recommended)","text":"

Best option for easily getting stable DeFFcode installed.

Installation is as simple as:

Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest stable release\npython -m pip install -U deffcode\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest stable release\npython -m pip install --upgrade --user deffcode\n

Or, If you're using py as alias for installed python, then:

# Install latest stable release\npy -m pip install --upgrade --user deffcode\n
# Install latest stable release\npip install -U deffcode\n

And you can also download its wheel (.whl) package from our repository's releases section, thereby can be installed as follows:

# Install latest release\npip install deffcode-0.2.0-py3-none-any.whl\n

"},{"location":"installation/#b-installation-from-source","title":"B. Installation from Source","text":"

Best option for trying latest patches(maybe experimental), forking for Pull Requests, or automatically installing all prerequisites(with a few exceptions).

Installation using dev branch

If you're looking for latest work-in-progress enhancements or bug-fixes, then you want to checkout our beta dev branch with the following commands:

The beta dev branch at times can be very unstable or even unusable, User discretion is advised!

# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# checkout the dev beta branch\ngit checkout dev\n\n# Install it\npip install -U .\n
Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest beta branch\npython -m pip install -U .\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest beta branch\npython -m pip install --upgrade --user .\n

Or, If you're using py as alias for installed python, then:

# Install latest beta branch\npy -m pip install --upgrade --user .\n
# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# Install it\npip install -U .\n

  1. The ensurepip module is missing/disabled on Ubuntu. Use pip method only.\u00a0\u21a9

"},{"location":"installation/ffmpeg_install/","title":"FFmpeg Installation Doc","text":"

DeFFcode APIs requires FFmpeg binaries to be installed for all of its core functionality.

You can follow the machine-specific instructions for its configuration/installation:

DeFFcode APIs will throw RuntimeError, if they fail to detect valid FFmpeg executables on your system.

Enable verbose (verbose=True) for debugging FFmpeg validation process.

"},{"location":"installation/ffmpeg_install/#linux-ffmpeg-installation","title":"Linux FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on Linux OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection","title":"A. Auto-Detection","text":"

This is a recommended approach on Linux Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the Linux OS systems.

You can easily install official FFmpeg according to your Linux Distro by following this post \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Linux Static Binaries (based on your machine architecture) from the link below:

    Linux Static Binaries: http://johnvansickle.com/ffmpeg/

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#windows-ffmpeg-installation","title":"Windows FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Installation and Manual Configuration methods on Windows OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-installation","title":"A. Auto-Installation","text":"

This is a recommended approach on Windows Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-generate the required FFmpeg Static Binaries from our dedicated Github Server into the temporary directory(e.g. C:\\Temp) of your machine on the Windows OS systems.

Active Internet connection is required while downloading required FFmpeg Static Binaries from our dedicated Github Server onto your Windows machine.

Important Information regarding Auto-Installation
  • The files downloaded to a temporary directory (e.g. C:\TEMP), may get erased if your machine shuts down/restarts in some cases.

  • You can also provide a custom save path for auto-downloading FFmpeg Static Binaries through exclusive -ffmpeg_download_path attribute in Sourcer API.

    How to use -ffmpeg_download_path attribute in FFdecoder API?

    -ffmpeg_download_path is also available in FFdecoder API through the -custom_sourcer_params attribute of its ffparams dictionary parameter.

  • If binaries were found at the specified path, DeFFcode APIs automatically skips the Auto-Installation step.

  • If the required FFmpeg static binary fails to download, extract, or validate during Auto-Installation, then DeFFcode APIs will exit with RuntimeError!

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_1","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Windows Static Binaries (based on your machine arch(x86/x64)) from the link below:

    Windows Static Binaries: https://ffmpeg.org/download.html#build-windows

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'C:/foo/Downloads/ffmpeg/bin') or path of ffmpeg.exe executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#macos-ffmpeg-installation","title":"MacOS FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on MacOS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection_1","title":"A. Auto-Detection","text":"

This is a recommended approach on MacOS Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the MacOS systems.

You can easily install FFmpeg on your MacOS machine by following this tutorial \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_2","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest MacOS Static Binaries (only x64 Binaries) from the link below:

    MacOS Static Binaries: https://ffmpeg.org/download.html#build-mac

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"recipes/advanced/","title":"Advanced Recipes","text":"

The following challenging recipes will take your skills to the next level and will give access to new DeFFcode techniques, tricky examples, and advanced FFmpeg parameters:

Courtesy - tenor

Refer Basic Recipes first!

If you're just getting started, check out the Beginner's Basic Recipes first before trying these advanced recipes.

Any proficiency with OpenCV-Python will be Helpful

Any proficiency with OpenCV-Python (Python API for OpenCV) will surely help you with these recipes.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

"},{"location":"recipes/advanced/#advanced-decoding-recipes","title":"Advanced Decoding Recipes","text":"
  • Decoding Live Virtual Sources
    • Generate and Decode frames from Sierpinski pattern
    • Generate and Decode frames from Test Source pattern
    • Generate and Decode frames from Gradients with custom Text effect
    • Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms
    • Generate and Decode frames from Game of Life Visualization
  • Decoding Live Feed Devices
    • Capturing and Previewing frames from a Webcam using Custom Demuxer
    • Capturing and Previewing frames from your Desktop (Screen Recording)
  • Hardware-Accelerated Video Decoding
    • CUVID-accelerated Hardware-based Video Decoding and Previewing
    • CUDA-accelerated Hardware-based Video Decoding and Previewing
"},{"location":"recipes/advanced/#advanced-transcoding-recipes","title":"Advanced Transcoding Recipes","text":"
  • Transcoding Live Complex Filtergraphs
    • Transcoding video with Live Custom watermark image overlay
    • Transcoding video from sequence of Images with additional filtering
  • Transcoding Video Art with Filtergraphs
    • Transcoding video art with YUV Bitplane Visualization
    • Transcoding video art with Jetcolor effect
    • Transcoding video art with Ghosting effect
    • Transcoding video art with Pixelation effect
  • Hardware-Accelerated Video Transcoding
    • CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API
    • CUDA-NVENC-accelerated Video Transcoding with WriteGear API
    • CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API
"},{"location":"recipes/advanced/#advanced-metadata-recipes","title":"Advanced Metadata Recipes","text":"
  • Updating Video Metadata
    • Added new attributes to metadata in FFdecoder API
    • Overriding source video metadata in FFdecoder API
"},{"location":"recipes/advanced/decode-hw-acceleration/","title":"Hardware-Accelerated Video Decoding","text":"

FFmpeg offer access to dedicated GPU hardware with varying support on different platforms for performing a range of video-related tasks to be completed faster or using less of other resources (particularly CPU).

By default, DeFFcode's FFdecoder API uses the Input Source's video-decoder (extracted using Sourcer API) itself for decoding its input. However, you could easily change the video-decoder to your desired specific supported Video-Decoder using FFmpeg options by way of its ffparams dictionary parameter. This feature provides easy access to GPU Accelerated Hardware Decoder in FFdecoder API that will generate faster video frames while using little to no CPU power, as opposed to CPU intensive Software Decoders.

We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuvid-accelerated-hardware-based-video-decoding-and-previewing","title":"CUVID-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

  • Using h264_cuvid decoder: Remember to check if your FFmpeg compiled with H.264 CUVID decoder support by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    Verifying H.264 CUVID decoder support in FFmpeg
    $ ffmpeg  -hide_banner -decoders | grep cuvid\n\nV..... av1_cuvid            Nvidia CUVID AV1 decoder (codec av1)\nV..... h264_cuvid           Nvidia CUVID H264 decoder (codec h264)\nV..... hevc_cuvid           Nvidia CUVID HEVC decoder (codec hevc)\nV..... mjpeg_cuvid          Nvidia CUVID MJPEG decoder (codec mjpeg)\nV..... mpeg1_cuvid          Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)\nV..... mpeg2_cuvid          Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)\nV..... mpeg4_cuvid          Nvidia CUVID MPEG4 decoder (codec mpeg4)\nV..... vc1_cuvid            Nvidia CUVID VC1 decoder (codec vc1)\nV..... vp8_cuvid            Nvidia CUVID VP8 decoder (codec vp8)\nV..... vp9_cuvid            Nvidia CUVID VP9 decoder (codec vp9)\n

    You can also use any of above decoder in the similar way, if supported.

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's H.264 CUVID Video decoder in FFdecoder API to achieve GPU-accelerated hardware video decoding of YUV420p frames from a given Video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's CUVID can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": \"h264_cuvid\",  # use H.264 CUVID Video-decoder\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(YUV420p) frames\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"yuv420p\",  # use YUV420p frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the YUV420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuda-accelerated-hardware-based-video-decoding-and-previewing","title":"CUDA-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters), thereby achieving GPU-accelerated decoding of NV12 pixel-format frames from a given video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

NV12(for 4:2:0 input) and NV21(for 4:4:4 input) are the only supported pixel format. You cannot change pixel format to any other since NV-accelerated video codec supports only them.

NV12 is a biplanar format with a full sized Y plane followed by a single chroma plane with weaved U and V values. NV21 is the same but with weaved V and U values. The 12 in NV12 refers to 12 bits per pixel. NV12 has a half width and half height chroma channel, and therefore is a 420 subsampling. NV16 is 16 bits per pixel, with half width and full height. aka 422. NV24 is 24 bits per pixel with full sized chroma channel. aka 444. Most NV12 functions allow the destination Y pointer to be NULL.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's GPU Accelerated Decoding can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"fps=60.0,\"  # framerate 60.0fps in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the NV12 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/","title":"Decoding Live Feed Devices","text":"

DeFFcode's FFdecoder API provides effortless support for any Live Feed Devices using two parameters: source parameter which accepts device name or its path, and source_demuxer parameter to specify demuxer for the given input device.

We'll discuss the Live Feed Devices support using both these parameters briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer","title":"Capturing and Previewing frames from a Webcam using Custom Demuxer","text":"Example Assumptions

FFmpeg provides a set of specific Demuxers on different platforms to read the multimedia streams from a particular type of Video Capture source/device. Please note that following recipe explicitly assumes:

  • You're running Linux Machine with USB webcam connected to it at node/path /dev/video0.
  • You already have appropriate Linux video drivers and related softwares installed on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libv4l2 flag to support video4linux2, v4l2 demuxer. BTW, you can list all supported demuxers using the ffmpeg --list-demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode BGR24 video frames from a USB webcam device connected at path /dev/video0 on a Linux Machine with video4linux2 (or simply v4l2) demuxer, and preview them using OpenCV Library's cv2.imshow() method.

Identifying and Specifying Video Capture Device Name/Path/Index and suitable Demuxer on different OS platforms Windows Linux MacOS

Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

  • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

    c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
  • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

    # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

  • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

    You can use the sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

    $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
  • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

    # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

    You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

    # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

    Using device's indexUsing device's name
    # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

    # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

    # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop","title":"Capturing and Previewing frames from your Desktop","text":"Example Assumptions

Similar to Webcam capturing, FFmpeg provides a set of specific Demuxers on different platforms for capturing your desktop (Screen recording). Please note that following recipe explicitly assumes:

  • You're running Linux Machine with libxcb module installed properly on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libxcb flag to support x11grab demuxer. BTW, you can list all supported demuxers using the ffmpeg --list-demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode live BGR video frames from your complete screen as well as a region in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OS platforms Windows Linux MacOS

Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

    # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

If these steps don't work for you then reach us out on Gitter \u27b6 Community channel

Capturing entire desktopCapturing a region

For capturing all your displays as one big contiguous display in FFdecoder API:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

To limit capturing to a region and show the area being grabbed:

x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/","title":"Decoding Live Virtual Sources","text":"

Instead of using prerecorded video files as streams, DeFFcode's FFdecoder API with the help of powerful lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph, is also capable of creating virtual video frames out of thin air in real-time, which you might want to use as input for testing, compositing, and merging with other streams to obtain desired output on-the-fly.

We'll discuss the recipes for generating Live Fake Sources briefly below:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-sierpinski-pattern","title":"Generate and Decode frames from Sierpinski pattern","text":"

The sierpinski graph generates a Sierpinski carpet/triangle fractal, and randomly pans around by a single pixel each frame.

Sierpinski carpet fractal

In this example we will generate and decode 8 seconds of a Sierpinski carpet fractal pattern of 1280x720 frame size and 30 framerate using sierpinski graph source with lavfi input virtual device in FFdecoder API, and preview decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# playback time of 8 seconds\nffparams = {\"-ffprefixes\": [\"-t\", \"8\"]}\n\n# initialize and formulate the decoder with \"sierpinski\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"sierpinski=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        cv2.imwrite('foo_image.gif', frame)\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-test-source-pattern","title":"Generate and Decode frames from Test Source pattern","text":"

The testsrc graph generates a test video pattern showing a color pattern, a scrolling gradient, and a timestamp. This is useful for testing purposes.

Test Source pattern

In this example we will generate and decode 10 seconds of a Test Source pattern (1280x720 frame size & 30 framerate) using testsrc graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n}\n\n# initialize and formulate the decoder with \"testsrc\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"testsrc=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-gradients-with-custom-text-effect","title":"Generate and Decode frames from Gradients with custom Text effect","text":"

The gradients graph (as the name suggests) generates several random gradients.

Gradients pattern with real-time text output

In this example we will generate and decode 15 seconds of Gradients using gradients graph source with lavfi input virtual device and also draw real-time text output (format HH::MM::SS) scrolling upward direction on it using drawtext filter in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

This example assumes you're running Windows machine. If not, then change fontfile parameter path in drawtext video filtergraph definition accordingly.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"15\"],  # playback time of 15 seconds\n    \"-vf\": \"drawtext=\"  # draw text\n    + \"text='%{localtime\\:%X}':\"  # real time text (HH::MM::SS)\n    + \"fontfile='c\\:\\/windows\\/fonts\\/arial.ttf':\"  # fontfile path (Only Windows)\n    + \"x=(w-text_w)/2:y=h-40*t:\"  # scroll upward effect\n    + \"fontsize=50:\"  # font size 50\n    + \"fontcolor=white\",  # font color white\n}\n\n\n# initialize and formulate the decoder with \n# \"gradients\" source for BGR24 output\ndecoder = FFdecoder(\n    \"gradients=n=3\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms","title":"Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms","text":"

The mandelbrot graph generates a Mandelbrot set fractal that progressively zooms towards a specific point.

Mandelbrot pattern with a Vectorscope & two Waveforms

In this example we will generate and decode 20 seconds of a Mandelbrot test pattern (1280x720 frame size & 30 framerate) using mandelbrot graph source with lavfi input virtual device with a vectorscope (plots 2 color component values) & two waveforms (plots YUV color component intensity) stacked to it in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"20\"],  # playback time of 20 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"split=4[a][b][c][d],\" # split input into 4 identical outputs.\n    + \"[a]waveform[aa],\"  # apply waveform on first output\n    + \"[b][aa]vstack[V],\"  # vertical stack 2nd output with waveform [V]\n    + \"[c]waveform=m=0[cc],\"  # apply waveform on 3rd output\n    + \"[d]vectorscope=color4[dd],\"  # apply vectorscope on 4th output\n    + \"[cc][dd]vstack[V2],\"  # vertical stack waveform and vectorscope [V2]\n    + \"[V][V2]hstack\",  # horizontal stack [V] and [V2] vertical stacks\n}\n\n# initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-game-of-life-visualization","title":"Generate and Decode frames from Game of Life Visualization","text":"

The life graph generates a life pattern based on a generalization of John Conway\u2019s life game. The sourced input represents a life grid, each pixel represents a cell which can be in one of two possible states, alive or dead. Every cell interacts with its eight neighbours, which are the cells that are horizontally, vertically, or diagonally adjacent. At each interaction the grid evolves according to the adopted rule, which specifies the number of neighbor alive cells which will make a cell stay alive or born.

Game of Life Visualization

In this example we will generate and decode 25 seconds of Game of Life Visualization using life graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"25\"],  # playback time of 25 seconds\n}\n\n# initialize and formulate the decoder with \"life\" source for BGR24 output\ndecoder = FFdecoder(\n    \"life=\"  # life graph\n    + \"s=640x480:\"  # grid size (in pixels)\n    + \"mold=10:\"  # cell mold speed\n    + \"r=36:\"  # framerate\n    + \"ratio=0.5:\"  # random fill ratio for the initial random grid\n    + \"death_color=#39FF14:\"  # color of dead cells\n    + \"life_color=#1d1160\" # color of living (or new born) cells\n    + \",scale=640:480:\" # frame size\n    + \"flags=16\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/","title":"Transcoding Video Art with Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API unlocks the power of ffmpeg backend for creating real-time artistic generative video art using simple and complex filtergraphs, and decoding them into live video frames.

We'll discuss the Transcoding Video Art with Filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-yuv-bitplane-visualization","title":"Transcoding video art with YUV Bitplane Visualization","text":"

Based on the QCTools bitplane visualization, this video art has numerical values ranging between -1(no change) and 10(noisiest) for the Y (luminance), U and V (chroma or color difference) planes, yielding cool and different results for different values.

YUV Bitplane Visualization

This Video Art idea credits goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Bitplane Visualization by binding the bit position of the Y, U, and V planes of a video file (say foo.mp4) by using FFmpeg's lutyuv filter and assigning them random values (between -1(no change) and 10(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"lutyuv=\"  # use  lutyuv filter for binding bit position of the Y, U, and V planes\n    + \"y=if(eq({y}\\,-1)\\,512\\,if(eq({y}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{y}))*pow(2\\,{y}))):\".format(\n        y=3 # define `Y` (luminance) plane value (b/w -1 and 10)\n    )\n    + \"u=if(eq({u}\\,-1)\\,512\\,if(eq({u}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{u}))*pow(2\\,{u}))):\".format(\n        u=1 # define `U` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"v=if(eq({v}\\,-1)\\,512\\,if(eq({v}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{v}))*pow(2\\,{v}))),\".format(\n        v=3 # define `V` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"format=yuv422p10le\", # change output format to yuv422p10le\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-jetcolor-effect","title":"Transcoding video art with Jetcolor effect","text":"

This video art uses FFmpeg's pseudocolor filter to create a Jetcolor effect which is high contrast, high brightness, and high saturation colormap that ranges from blue to red, and passes through the colors cyan, yellow, and orange. The jet colormap is associated with an astrophysical fluid jet simulation from the National Center for Supercomputer Applications.

Jetcolor effect

This Video Art idea credits goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Jetcolor effect by changing frame colors of a video file (say foo.mp4) using FFmpeg's pseudocolor filter in different modes (values between 0 (cleaner) [default] and 2(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to `yuv444p`\n    + \"eq=brightness=0.40:saturation=8,\"  # default `brightness = 0.40` and `saturation=8`\n    + \"pseudocolor='\"  # dynamically controlled colors through `pseudocolor` filter\n    + \"if(between(val,0,85),lerp(45,159,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(159,177,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(177,70,(val-170)/(255-170))))):\"  # mode 0 (cleaner) [default]\n    + \"if(between(val,0,85),lerp(205,132,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(132,59,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(59,100,(val-170)/(255-170))))):\"  # mode 1\n    + \"if(between(val,0,85),lerp(110,59,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(59,127,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(127,202,(val-170)/(255-170))))):\"  # mode 2 (noisiest)\n    + \"i={mode}',\".format(\n        mode=0  # define mode value (b/w `0` and `2`) to control colors\n    )\n    + \"format=yuv422p10le\",  # change output format to `yuv422p10le`\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect","title":"Transcoding video art with Ghosting effect","text":"

This video art uses FFmpeg\u2019s lagfun filter to create a video echo/ghost/trailing effect.

Ghosting effect

This Video Art idea credits goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Ghosting effect using FFmpeg's lagfun filter on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-filter_complex\": \"format=yuv444p[formatted];\"  # change video input format to yuv444p\n    + \"[formatted]split[a][b];\"  # split input into 2 identical outputs\n    + \"[a]lagfun=decay=.99:planes=1[a];\"  # apply lagfun filter on first output\n    + \"[b]lagfun=decay=.98:planes=2[b];\"  # apply lagfun filter on 2nd output\n    + \"[a][b]blend=all_mode=screen:c0_opacity=.5:c1_opacity=.5,\"  # apply screen blend mode both outputs\n    + \"format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect","title":"Transcoding video art with Pixelation effect","text":"

This video art uses FFmpeg\u2019s overlay, smartblur and stacks of dilation filters to intentionally Pixelate your video in artistically cool looking ways such that each pixel becomes visible to the naked eye.

Pixelation effect

This Video Art idea credits goes to oioiiooixiii blogspot.

In this example we will generate 8 seconds of Pixelation effect using FFmpeg\u2019s smartblur and stacks of dilation filters overlayed on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to yuv444p\n    + \"split [out1][out2];\"  # split input into 2 identical outputs\n    + \"[out1][out2] overlay,smartblur,\"  # apply overlay,smartblur filter on both outputs\n    + \"dilation,dilation,dilation,dilation,dilation,\"  # apply stacks of dilation filters on both outputs\n    + \"eq=contrast=1.4:brightness=-0.09 [pixels];\"  # change brightness and contrast\n    + \"[pixels]format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/","title":"Hardware-Accelerated Video Transcoding","text":"What exactly is Transcoding?

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to process real-time video frames with immense flexibility. Both these APIs are capable of utilizing the potential of GPU backed fully-accelerated Hardware based video Decoding(FFdecoder API with hardware decoder) and Encoding (WriteGear API with hardware encoder), thus dramatically improving the transcoding performance. At same time, FFdecoder API Hardware-decoded frames are fully compatible with OpenCV's VideoWriter API for producing high-quality output video in real-time.

Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing

As we know, using the \u2013hwaccel cuda -hwaccel_output_format cuda flags in FFmpeg pipeline will keep video frames in GPU memory, and this ensures that the memory transfers (system memory to video memory and vice versa) are eliminated, and that transcoding is performed with the highest possible performance on the available GPU hardware.

General Memory Flow with Hardware Acceleration

But unfortunately, for processing real-time frames in our python script with FFdecoder and WriteGear APIs, we're bound to sacrifice this performance gain by explicitly copying raw decoded frames between System and GPU memory (via the PCIe bus), thereby creating self-made latency in transfer time and increasing PCIe bandwidth occupancy due to overheads in communication over the bus. Moreover, given PCIe bandwidth limits, copying uncompressed image data would quickly saturate the PCIe bus.

Memory Flow with Hardware Acceleration and Real-time Processing

On the bright side, however, GPU enabled Hardware based encoding/decoding is inherently faster and more efficient (do not use much CPU resources when frames in GPU) thus freeing up the CPU for other tasks, as compared to Software based encoding/decoding that is known to be completely CPU intensive. Plus scaling, de-interlacing, filtering, etc. tasks will be way faster and efficient than usual using these Hardware based decoders/encoders as oppose to Software ones.

As you can see the pros definitely outweigh the cons and you're getting to process video frames in the real-time with immense speed and flexibility, which is impossible to do otherwise.

We'll discuss its Hardware-Accelerated Video Transcoding capabilities using these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-accelerated-video-transcoding-with-opencvs-videowriter-api","title":"CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based your system platform and hardware settings only.

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with OpenCV's VideoWriter API.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparams dictionary parameter.

More information on Nvidia's NVENC Encoder can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\" # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. 
`output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since write() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely release writer\nwriter.release()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated Video Transcoding with WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to manipulate the real-time video frames with immense flexibility.

Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based your system platform and hardware settings only.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

Consuming BGR framesConsuming NV12 frames

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting patched NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as NV12 frames.
  4. Encoding NV12 frames directly with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\"  # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n    \"-input_pixfmt\": \"nv12\", # input frames pixel format as `NV12`\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the NV12 frame here}\n\n    # writing NV12 frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API","text":"

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API creates a High-performance Lossless FFmpeg Transcoding Pipeline

Courtesy - tenor"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/","title":"Transcoding Live Complex Filtergraphs","text":"What are Complex filtergraphs?

Before heading straight into recipes we will talk about Complex filtergraphs:

Complex filtergraphs are those which cannot be described as simply a linear processing chain applied to one stream.

Complex filtergraphs are configured with the -filter_complex global option.

The -lavfi option is equivalent to -filter_complex.

A trivial example of a complex filtergraph is the overlay filter, which has two video inputs and one video output, containing one video overlaid on top of the other.

DeFFcode's FFdecoder API seamlessly supports processing multiple input streams including real-time frames through multiple filter chains combined into a filtergraph (via. -filter_complex FFmpeg parameter), and use their outputs as inputs for other filter chains.

We'll discuss the transcoding of live complex filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-with-live-custom-watermark-image-overlay","title":"Transcoding video with Live Custom watermark image overlay","text":"Big Buck Bunny with custom watermark

In this example we will apply a watermark image (say watermark.png with transparent background) overlay to the 10 seconds of video file (say foo.mp4) using FFmpeg's overlay filter with some additional filtering, and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

You can use FFdecoder's metadata property object that dumps Source Metadata as JSON to retrieve source framerate and frame-size.

To learn about exclusive -ffprefixes & -clones parameter. See Exclusive Parameters \u27b6

Remember to replace watermark.png watermark image file-path with yours before using this recipe.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json, cv2\n\n# define the Complex Video Filter with additional `watermark.png` image input\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n    \"-clones\": [\n        \"-i\",\n        \"watermark.png\",  # !!! [WARNING] define your `watermark.png` here.\n    ],\n    \"-filter_complex\": \"[1]format=rgba,\"  # change 2nd(image) input format to rgba\n    + \"colorchannelmixer=aa=0.7[logo];\"  # apply colorchannelmixer to image for controlling alpha [logo]\n    + \"[0][logo]overlay=W-w-{pixel}:H-h-{pixel}:format=auto,\".format(  # apply overlay to 1st(video) with [logo]\n        pixel=5  # at 5 pixels from the bottom right corner of the input video\n    )\n    + \"format=bgr24\",  # change output format to `bgr24`\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-from-sequence-of-images-with-additional-filtering","title":"Transcoding video from sequence of Images with additional filtering","text":"Mandelbrot pattern blend with Fish school video Available blend mode options

Other blend mode options for blend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

In this example we will blend 10 seconds of Mandelbrot test pattern (generated using lavfi input virtual device) that serves as the \"top\" layer with 10 seconds of Image Sequence that serves as the \"bottom\" layer, using blend filter (with heat blend mode), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4 (restricted to 12 seconds):

$ ffmpeg -t 12 -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('/path/to/img%03d.png', verbose=True, **ffparams).formulate()\n

FFdecoder API also accepts Glob pattern(*.png) as well Single looping image as as input to its source parameter. See this Basic Recipe \u27b6 for more information.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define mandelbrot pattern generator\n# and the Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\n        \"-t\", \"10\", # playback time of 10 seconds for mandelbrot pattern\n        \"-f\", \"lavfi\", # use input virtual device\n        \"-i\", \"mandelbrot=rate=25\", # create mandelbrot pattern at 25 fps\n        \"-t\", \"10\", # playback time of 10 seconds for video\n    ],  \n    \"-custom_resolution\": (1280, 720), # resize to 1280x720\n    \"-filter_complex\":\"[1:v]format=yuv444p[v1];\" # change 2nd(video) input format to yuv444p\n        + \"[0:v]format=gbrp10le[v0];\" # change 1st(mandelbrot pattern) input format to gbrp10le\n        + \"[v1][v0]scale2ref[v1][v0];\" # resize the 1st(mandelbrot pattern), based on a 2nd(video).\n        + \"[v0][v1]blend=all_mode='heat',\" # apply heat blend mode to output\n        + \"format=yuv422p10le[v]\", # change output format to `yuv422p10le`\n    \"-map\": \"[v]\", # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"/path/to/image-%03d.png\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# define your parameters\n# [WARNING] framerate must match original source framerate !!!\noutput_params = {\n    \"-input_framerate\": 25,  # Default\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/update-metadata/","title":"Updating Video Metadata","text":"

In addition to using metadata property object in FFdecoder API for probing metadata information (only as JSON string) for each multimedia stream available in the given video source, you can also easily update the video metadata on-the-fly by assigning desired data as python dictionary to the same overloaded metadata property object. This feature can be used either for adding new custom properties to metadata, or to override source metadata properties used by FFdecoder API to formulate its default Decoder Pipeline for real-time video-frames generation.

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

This feature is not yet fully explored, but in the near future you'll be able to use it to dynamically override any Video frames Decoder Pipeline property (such as frame-size, pixel-format, etc.) in real-time like a pro. Stay tuned for more updates

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/update-metadata/#added-new-properties-to-metadata-in-ffdecoder-api","title":"Added new properties to metadata in FFdecoder API","text":"

In FFdecoder API, you can easily define any number of new properties for its metadata (formatted as python dictionary) with desired data of any datatype(s)1 , without affecting its default Video frames Decoder pipeline.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, thereby add new properties (formatted as python dictionary) with desired data of different datatype(s) through overloaded metadata property object, and then finally print it as JSON string using the same metadata property object in FFdecoder API.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\n\n# initialize the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# format your data as dictionary (with data of any [printable] datatype)\ndata = dict(\n    mystring=\"abcd\",  # string data\n    myint=1234,  # integers data\n    mylist=[1, \"Rohan\", [\"inner_list\"]],  # list data\n    mytuple=(1, \"John\", (\"inner_tuple\")),  # tuple data\n    mydict={\"anotherstring\": \"hello\"},  # dictionary data\n    myjson=json.loads('{\"name\": \"John\", \"age\": 30, \"city\": \"New York\"}'),  # json data\n)\n\n# assign your dictionary data\ndecoder.metadata = data\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"D:\\\\foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 29.97,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 21.03,\n  \"approx_video_nframes\": 630,\n  \"source_video_bitrate\": \"4937k\",\n  \"source_audio_bitrate\": \"256k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\",\n  \"mystring\": \"abcd\",\n  \"myint\": 1234,\n  \"mylist\": [\n    1,\n    \"Rohan\",\n    [\n      \"inner_list\"\n    ]\n  ],\n  \"mytuple\": [\n    1,\n    \"John\",\n    \"inner_tuple\"\n  ],\n  \"mydict\": {\n    \"anotherstring\": \"hello\"\n  },\n  \"myjson\": {\n    \"name\": \"John\",\n    \"age\": 30,\n    \"city\": \"New York\"\n  }\n}\n

"},{"location":"recipes/advanced/update-metadata/#overriding-source-video-metadata-in-ffdecoder-api","title":"Overriding source video metadata in FFdecoder API","text":"

In FFdecoder API, you can also use its metadata to manually override the source properties (as frame-size, frame pixel-format, video-framerate, video-decoder etc.) that directly affects its default Video frames Decoder pipeline that decodes real-time video-frames.

The \"source\" property in metadata cannot be altered in any manner.

Source Video metadata values must be handled carefully

Source Video metadata information is used by FFdecoder API to formulate its default Video frames Decoder pipeline, and any improper or invalid inputted source property could crash the pipeline with RuntimeError.

Therefore to safeguard against it, FFdecoder API discards any Source Video metadata dictionary keys, if its value's datatype fails to match the exact valid datatype defined in following table:

Only either source_demuxer or source_extension property can be present in source metadata.

Not all Source Video metadata properties directly affects the pipeline (as mentioned in the table). But this might change in future versions.

Source Video Metadata Keys Valid Value Datatype Effect on Pipeline \"source_extension\" string None \"source_demuxer\" string Direct \"source_video_resolution\" list of integers e.g. [1280,720] Direct \"source_video_framerate\" float Direct \"source_video_pixfmt\" string Direct \"source_video_decoder\" string Direct \"source_duration_sec\" float None \"approx_video_nframes\" integer Direct \"source_video_bitrate\" string None \"source_audio_bitrate\" string None \"source_audio_samplerate\" string None \"source_has_video\" bool Direct \"source_has_audio\" bool None \"source_has_image_sequence\" bool Direct \"ffdecoder_operational_mode\" str None \"output_frames_pixfmt\" str Direct

Hence for instance, if \"source_video_resolution\" is assigned \"1280x720\" (i.e. string datatype value instead of list), then it will be discarded.

In this example we will probe all metadata information available within foo.mp4 video file, and override frame size (originally 1920x1080) and pixel-format (originally rgb24) to our desired values through overloaded metadata property object in FFdecoder API, and thereby preview them using OpenCV Library's cv2.imshow() method.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

Once the formulate() method is called, the metadata information present in FFdecoder API is finalized and thereby used to formulate its default pipeline for decoding real-time video-frames. Therefore make all changes to video properties beforehand.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# override source metadata values\n# !!! [WARNING] Make sure each value datatype matches the table !!!\ndecoder.metadata = {\n    \"output_frames_pixfmt\": \"gray\",  # gray frame-pixfmt\n    \"source_video_resolution\": [1280, 720],  # 1280x720 frame-size\n}\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# [NOTE] uncomment following line to debug values\n# print(decoder.metadata)\n\n# let's grab the 1280x720 sized gray frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with gray frame here}\n\n    # Show gray frames in output window\n    cv2.imshow(\"Output gray\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

  1. There is no concept of tuple datatype in the JSON format. Thereby, Python's json module auto-converts all tuple python values into JSON list because that's the closest thing in JSON format to a tuple.\u00a0\u21a9

"},{"location":"recipes/basic/","title":"Basic Recipes","text":"

The following recipes should be reasonably accessible to beginners of any skill level to get started with DeFFcode APIs:

Courtesy - tenor

Refer Installation doc first!

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode with required prerequisites on your machine.

Any proficiency with OpenCV-Python will be Helpful

If you've any proficiency with OpenCV-Python (Python API for OpenCV), you will find these recipes really easy.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

Frames are actually 3D Numpy arrays

In python, \"Frames\" are actually three-dimensional NumPy ndarray composed of 3 nested levels of arrays, one for each dimension.

"},{"location":"recipes/basic/#basic-decoding-recipes","title":"Basic Decoding Recipes","text":"
  • Decoding Video files
    • Accessing RGB frames from a video file
    • Capturing and Previewing BGR frames from a video file (OpenCV Support)
    • Playing with any other FFmpeg pixel formats
    • Capturing and Previewing frames from a Looping Video
  • Decoding Camera Devices using Indexes
    • Enumerating all Camera Devices with Indexes
    • Capturing and Previewing frames from a Camera using Indexes
  • Decoding Network Streams
    • Capturing and Previewing frames from a HTTPs Stream
    • Capturing and Previewing frames from a RTSP/RTP Stream
  • Decoding Image sequences
    • Capturing and Previewing frames from Sequence of images
    • Capturing and Previewing frames from Single looping image
"},{"location":"recipes/basic/#basic-transcoding-recipes","title":"Basic Transcoding Recipes","text":"
  • Transcoding Live frames
    • Transcoding video using OpenCV VideoWriter API
    • Transcoding lossless video using WriteGear API
  • Transcoding Live Simple Filtergraphs
    • Transcoding Trimmed and Reversed video
    • Transcoding Cropped video
    • Transcoding Rotated video (with rotate filter)
    • Transcoding Rotated video (with transpose filter)
    • Transcoding Horizontally flipped and Scaled video
  • Saving Key-frames as Image (Image processing)
    • Extracting Key-frames as PNG image
    • Generating Thumbnail with a Fancy filter
"},{"location":"recipes/basic/#basic-metadata-recipes","title":"Basic Metadata Recipes","text":"
  • Extracting Video Metadata
    • Extracting video metadata using Sourcer API
    • Extracting video metadata using FFdecoder API
"},{"location":"recipes/basic/#whats-next","title":"What's next?","text":"

Done already! Let's checkout Advanced Recipes to level up your skills!

"},{"location":"recipes/basic/decode-camera-devices/","title":"Decoding Camera Devices using Indexes","text":"

With DeFFcode APIs, we are able to probe and enumerate all Camera Device names along with their respective \"device indexes\" or \"camera indexes\" no matter how many cameras are connected to your system. This makes Camera Devices decoding as simple as OpenCV, where one can effortlessly access a specific Camera Device just by specifying its matching index. These indexes are much easier to read, memorize, and type, and one doesn't have to remember long Device names or worry about their Demuxer.

We'll discuss the Decoding Camera Devices using Indexes briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes","title":"Enumerating all Camera Devices with Indexes","text":"

In Sourcer API, you can easily use its enumerate_devices property object to enumerate all probed Camera Devices (connected to your system) as dictionary object with device indexes as keys and device names as their respective values.

Requirement for Enumerating all Camera Devices in Sourcer API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will enumerate all probed Camera Devices connected on a Windows machine using enumerate_devices property object in Sourcer API, both as dictionary object and JSON string.

# import the necessary packages\nfrom deffcode import Sourcer\nimport json\n\n# initialize and formulate the decoder\nsourcer = Sourcer(\"0\").probe_stream()\n\n# enumerate probed devices as Dictionary object(`dict`)\nprint(sourcer.enumerate_devices)\n\n# enumerate probed devices as JSON string(`json.dump`)\nprint(json.dumps(sourcer.enumerate_devices,indent=2))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine: As Dictionary objectAs JSON string
{0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
{\n  \"0\": \"Integrated Camera\",\n  \"1\": \"USB2.0 Camera\",\n  \"2\": \"DroidCam Source\"\n}\n

"},{"location":"recipes/basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes","title":"Capturing and Previewing frames from a Camera using Indexes","text":"

After knowing the index of Camera Device with Sourcer API, One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value either as integer or string of integer type to its source parameter.

Requirement for Index based Camera Device Capturing in FFdecoder API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will decode BGR24 video frames from Integrated Camera at index 0 on a Windows Machine, and preview them using OpenCV Library's cv2.imshow() method.

Important Facts related to Camera Device Indexing
  • Camera Device indexes are 0-indexed. So the first device is at 0, the second is at 1, and so on. So if there are n devices, the last device is at n-1.
  • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
  • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
    • For example, If there are three devices:
      {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
    • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

      Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

Out of Index Camera Device index values will raise ValueError in FFdecoder API

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/","title":"Decoding Image sequences","text":"

DeFFcode's FFdecoder API supports a wide-ranging media streams as input to its source parameter, which also includes Image Sequences such as Sequential(img%03d.png) and Glob pattern(*.png) as well as Single looping image.

We'll discuss both briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-sequence-of-images","title":"Capturing and Previewing frames from Sequence of images","text":"

In this example we will capture video frames from a given Image Sequence using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

OpenCV expects BGR format frames in its cv2.imshow() method.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4:

$ ffmpeg -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

SequentialGlob pattern How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img%03d.png\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

The glob pattern is not available on Windows FFmpeg builds.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-pattern_type glob` for accepting glob pattern\nffparams = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img*.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-single-looping-image","title":"Capturing and Previewing frames from Single looping image","text":"

In this example we will capture video frames from a Single Looping image using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-loop 1` for infinite looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"img.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/","title":"Decoding Network Streams","text":"

Similar to decoding Video files, DeFFcode's FFdecoder API directly supports Network Streams with specific protocols (such as RTSP/RTP, HTTP(s), MPEG-TS, etc.) as input to its source parameter.

We'll discuss Network Streams support briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-https-stream","title":"Capturing and Previewing frames from a HTTPs Stream","text":"

In this example we will decode live BGR24 video frames from a HTTPs protocol Stream in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-rtsprtp-stream","title":"Capturing and Previewing frames from a RTSP/RTP Stream","text":"

In this example we will decode live BGR24 video frames from RTSP/RTP protocol Streams in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

This example assumes you already have an RTSP Server running at the specified RTSP address with syntax rtsp://[RTSP_ADDRESS]:[RTSP_PORT]/[RTSP_PATH] and video data already being published to it.

For creating your own RTSP Server locally and publishing video data to it, you can refer to this WriteGear API's bonus example \u27b6

Make sure to replace the RTSP address rtsp://localhost:8554/mystream with yours in the following code before running

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\"-rtsp_transport\": \"tcp\"}\n\n# initialize and formulate the decoder with RTSP protocol source for BGR24 output\n# [WARNING] Change your RSTP address `rtsp://localhost:8554/mystream` with yours!\ndecoder = FFdecoder(\"rtsp://localhost:8554/mystream\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/","title":"Decoding Video files","text":"

DeFFcode's FFdecoder API readily supports multimedia Video files path as input to its source parameter. And with its frame_format parameter, you can easily decode video frames in any pixel format(s) that are readily supported by all well known Computer Vision libraries (such as OpenCV).

We'll discuss its video files support and pixel format capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-video-files/#accessing-rgb-frames-from-a-video-file","title":"Accessing RGB frames from a video file","text":"

The default function of FFdecoder API is to decode 24-bit RGB video frames from the given source.

FFdecoder API's generateFrame() function can be used in multiple methods to access RGB frames from a given source, such as: as a Generator (Recommended Approach), calling with Statement, and as an Iterator.

In this example we will decode the default RGB24 video frames from a given Video file (say foo.mp4) using above mentioned accessing methods:

As a Generator (Recommended)Calling with StatementAs an Iterator

This is a recommended approach for faster and error-proof access of decoded frames. We'll use it throughout the recipes.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# grab RGB24(default) frame from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so you don't need to explicitly call them. See PEP343 -- The 'with' statement' for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder\nwith FFdecoder(\"foo.mp4\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # lets print its shape\n        print(frame.shape)  # for e.g. (1080, 1920, 3)\n

This Iterator Approach bears a close resemblance to OpenCV-Python (Python API for OpenCV) coding syntax, thereby making it easier to learn and remember.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# loop over frames\nwhile True:\n\n    # grab RGB24(default) frames from decoder\n    frame = next(decoder.generateFrame(), None)\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file","title":"Capturing and Previewing BGR frames from a video file","text":"

In this example we will decode OpenCV supported live BGR24 video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

By default, OpenCV expects BGR format frames in its cv2.imshow() method. We'll do this using two accessing methods.

As a Generator (Recommended)Calling with Statement
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so you don't need to explicitly call them. See PEP343 -- The 'with' statement' for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\nwith FFdecoder(\"foo.mp4\", frame_format=\"bgr24\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # Show output window\n        cv2.imshow(\"Output\", frame)\n\n        # check for 'q' key if pressed\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord(\"q\"):\n            break\n\n# close output window\ncv2.destroyAllWindows()\n

"},{"location":"recipes/basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats","title":"Playing with any other FFmpeg pixel formats","text":"

Similar to BGR, you can input any pixel format (supported by installed FFmpeg) by way of frame_format parameter of FFdecoder API for the desired video frame format.

In this example we will decode live Grayscale and YUV video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Use ffmpeg -pix_fmts terminal command to list all FFmpeg supported pixel formats.

Decode GrayscaleDecode YUV frames
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"input_foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# grab the GRAYSCALE frames from the decoder\nfor gray in decoder.generateFrame():\n\n    # check if frame is None\n    if gray is None:\n        break\n\n    # {do something with the gray frame here}\n\n    # Show output window\n    cv2.imshow(\"Gray Output\", gray)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try decoding YUV420p pixel-format frames in following python code:

You can also use other YUV pixel formats such as yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) etc. instead for higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# grab the YUV420p frames from the decoder\nfor yuv in decoder.generateFrame():\n\n    # check if frame is None\n    if yuv is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the bgr frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", bgr)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-frames-from-a-looping-video","title":"Capturing and Previewing frames from a Looping Video","text":"

In this example we will decode live BGR24 video frames from looping video using different means in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Using -stream_loop optionUsing loop filter

The recommended way to loop a video is to use the -stream_loop option via the -ffprefixes list attribute of the ffparam dictionary parameter in FFdecoder API. Possible values are integers: >0 for that number of loops, 0 for no loop, and -1 for an infinite loop.

Using -stream_loop 3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-stream_loop 3` for looping 4 times\nffparams = {\"-ffprefixes\":[\"-stream_loop\", \"3\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Another way to loop a video is to use the loop complex filter via the -filter_complex FFmpeg flag as an attribute of the ffparam dictionary parameter in FFdecoder API.

This filter places all frames into memory(RAM), so applying trim filter first is strongly recommended. Otherwise you might run out of memory.

Using loop filter for looping video

The filter accepts the following options:

  • loop: Sets the number of loops for integer values >0. Setting this value to -1 will result in infinite loops. Default is 0(no loops).
  • size: Sets maximal size in number of frames. Default is 0.
  • start: Sets first frame of loop. Default is 0.

Using loop=3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define loop 4 times, each loop is 15 frames, each loop skips the first 25 frames\nffparams = {\n    \"-filter_complex\": \"loop=loop=3:size=15:start=25\" # Or use: `loop=3:15:25`\n}  \n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/extract-video-metadata/","title":"Extracting Video Metadata","text":"

DeFFcode's Sourcer API acts as a Source Probing Utility for easily probing metadata information for each multimedia stream available in the given video source, and returns it in Human-readable (as JSON string) or Machine-readable (as Dictionary object) types with its retrieve_metadata() class method. Apart from this, you can also use metadata property object in FFdecoder API to extract this metadata information (only as JSON string).

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-sourcer-api","title":"Extracting video metadata using Sourcer API","text":"

This is the recommended way for extracting video metadata.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it in both Human-readable (as JSON string) and Machine-readable (as Dictionary object) types using retrieve_metadata() class method in Sourcer API:

The Sourcer API's retrieve_metadata() class method provides pretty_json boolean parameter to return metadata as JSON string (if True) and as Dictionary (if False).

As JSON stringAs Dictionary object
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n}\n
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `dict`\nprint(sourcer.retrieve_metadata())\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{'ffmpeg_binary_path': 'C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe', 'source': 'foo.mp4', 'source_extension': '.mp4', 'source_video_resolution': [1280, 720], 'source_video_framerate': 25.0, 'source_video_pixfmt': 'yuv420p', 'source_video_decoder': 'h264', 'source_duration_sec': 5.31, 'approx_video_nframes': 133, 'source_video_bitrate': '1205k', 'source_audio_bitrate': '384k', 'source_audio_samplerate': '48000 Hz', 'source_has_video': True, 'source_has_audio': True, 'source_has_image_sequence': False}\n

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-ffdecoder-api","title":"Extracting video metadata using FFdecoder API","text":"

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it as JSON string using metadata property object in FFdecoder API.

You can also update video's metadata by using the same overloaded metadata property object in FFdecoder API. More information can be found in this Advanced Recipe \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\"\n}\n

"},{"location":"recipes/basic/save-keyframe-image/","title":"Saving Key-frames as Image","text":"

DeFFcode's FFdecoder API provides effortless and precise Frame Seeking with the -ss FFmpeg parameter that enables us to save any frame from a specific part of our input source.

We'll discuss it briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for saving video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • Pillow: Pillow is a Imaging Library required for saving frame as Image. You can easily install it directly via pip:

    pip install Pillow     \n
  • Matplotlib: Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations, also required for saving frame as Image. You can easily install it directly via pip:

    pip install matplotlib   \n
  • Imageio: Imageio is a Library for reading and writing a wide range of image, video, scientific, and volumetric data formats, also required for saving frame as Image. You can easily install it directly via pip:

    pip install imageio      \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/save-keyframe-image/#extracting-key-frames-as-png-image","title":"Extracting Key-frames as PNG image","text":"

In this example we will seek to 00:00:01.45(or 1045msec) in time and decode one single frame in FFdecoder API, and thereby saving it as PNG image using few prominent Image processing python libraries by providing valid filename (e.g. foo_image.png).

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter:

  • Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS) format, such as in 01:23:45.678.
  • Fractional: such as in 02:30.05. This is interpreted as 2 minutes and 30.5 seconds, which would be the same as using 150.5 in seconds.
Using PillowUsing OpenCVUsing MatplotlibUsing Imageio

In Pillow, the fromarray() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as PNG\n    im.save(\"foo_image.png\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In OpenCV, the imwrite() function can export BGR frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder for BGR24 output with suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    cv2.imwrite('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Matplotlib, the imsave() function can save an RGB frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport matplotlib.pyplot as plt\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    plt.imsave('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Imageio, the imwrite() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport imageio\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our output\n    imageio.imwrite('foo_image.jpeg', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/save-keyframe-image/#generating-thumbnail-with-a-fancy-filter","title":"Generating Thumbnail with a Fancy filter","text":"fancy_thumbnail.jpg (Courtesy - BigBuckBunny)

In this example we first apply FFmpeg\u2019s tblend filter with an hardmix blend mode (cool stuff) and then seek to 00:00:25.917(or 25.917sec) in time to retrieve our single frame thumbnail, and thereby save it as JPEG image with valid filename (e.g. fancy_thumbnail.jpg) using Pillow library.

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter: - [x] Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS), such as in 01:23:45.678 - [x] Fractional: such as in 02:30.05, this is interpreted as 2 minutes and 30.5 seconds, which would be the same as using 150.5 in seconds.

Available blend mode options

Other blend mode options for tblend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to\nffparams = {\n    \"-vf\": \"tblend=all_mode='hardmix'\",  # apply tblend filter with hardmix blend mode\n    \"-ss\": \"00:00:25.917\",  # seek to 00:00:25.917(or 25s 917msec)\n    \"-frames:v\": 1,  # get one single frame\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"BigBuckBunny.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as JPEG\n    im.save(\"fancy_thumbnail.jpg\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/","title":"Transcoding Live Simple Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API handles a single chain of filtergraphs (through the -vf FFmpeg parameter) applied to real-time frames quite effortlessly.

We'll discuss the transcoding of live simple filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

OpenCV's VideoWriter() class lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-trimmed-and-reversed-video","title":"Transcoding Trimmed and Reversed video","text":"Big Buck Bunny Reversed

In this example we will take the first 5 seconds of a video clip (using trim filter) and reverse it (by applying reverse filter), and encode them using OpenCV Library's VideoWriter() method in real-time.

The reverse filter requires memory to buffer the entire clip, so applying trim filter first is strongly recommended. Otherwise you might run out of memory.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

By default, OpenCV expects BGR format frames in its write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# trim 5 sec from end and reverse\nffparams = {\n    \"-vf\": \"trim=end=5,reverse\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-cropped-video","title":"Transcoding Cropped video","text":"Big Buck Bunny Cropped

In this example we will crop real-time video frames by an area with size \u2154 of the input video (say foo.mp4) by applying crop filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using crop filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# cropped the central input area with size 2/3 of the input video\nffparams = {\n    \"-vf\": \"crop=2/3*in_w:2/3*in_h\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-rotate-filter","title":"Transcoding Rotated video (with rotate filter)","text":"

FFmpeg features Rotate Filter that is used to rotate videos by an arbitrary angle (expressed in radians).

Big Buck Bunny Rotated (with rotate filter)

In this example we will rotate real-time video frames at an arbitrary angle by applying rotate filter in FFdecoder API and also using green color to fill the output area not covered by the rotated image, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 0.35 rad and fill green\nffparams = {\n    \"-vf\": \"rotate=angle=-20*PI/180:fillcolor=green\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-transpose-filter","title":"Transcoding Rotated video (with transpose filter)","text":"

FFmpeg also features Transpose Filter that is used to rotate videos by 90 degrees clockwise and counter-clockwise direction as well as flip them vertically and horizontally.

Big Buck Bunny Rotated (with transpose filter)

In this example we will rotate real-time video frames by 90 degrees counterclockwise and preserve portrait geometry by applying transpose filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 90 degrees counter-clockwise and preserve portrait layout\nffparams = {\n    \"-vf\": \"transpose=dir=2:passthrough=portrait\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-horizontally-flipped-and-scaled-video","title":"Transcoding Horizontally flipped and Scaled video","text":"Big Buck Bunny Horizontally flipped and Scaled

In this example we will horizontally flip and scale real-time video frames to half its original size by applying hflip and scale filter one-by-one in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using scale filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# horizontally flip and scale to half its original size\nffparams = {\n    \"-vf\": \"hflip,scale=w=iw/2:h=ih/2\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/","title":"Transcoding Live frames","text":"What exactly is Transcoding?

Before heading directly into recipes we have to talk about Transcoding:

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

While decoding media into video frames is purely managed by DeFFcode's FFdecoder API, you can easily encode those video frames back into multimedia files using any well-known video processing library such as OpenCV and VidGear.

We'll discuss transcoding using both these libraries briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-video-using-opencv-videowriter-api","title":"Transcoding video using OpenCV VideoWriter API","text":"

OpenCV's VideoWriter() class can be used directly with DeFFcode's FFdecoder API to encode video frames into a multimedia video file, but it lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

BGR framesRGB framesGRAYSCALE framesYUV frames

By default, OpenCV expects BGR format frames in its VideoWriter's write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n     # let's also show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

Since OpenCV expects BGR format frames in its VideoWriter's write() method, we need to convert RGB frames into BGR before encoding as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for RGB24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the RGB24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # converting RGB24 to BGR24 frame\n    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n    # writing BGR24 frame to writer\n    writer.write(frame_bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

OpenCV also directly consumes GRAYSCALE frames in its VideoWriter's write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try encoding YUV420p pixel-format frames with OpenCV's write() method in following python code:

You can also use other YUV pixel-formats such as yuv422p (4:2:2 subsampling) or yuv444p (4:4:4 subsampling) instead for a higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the yuv420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since VideoWriter() write() method expects `BGR` frames\n    bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-lossless-video-using-writegear-api","title":"Transcoding lossless video using WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data, all while allowing us to manipulate the real-time video frames with immense flexibility.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them into lossless video file with controlled framerate using WriteGear API in real-time.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

BGR framesRGB framesGRAYSCALE framesYUV frames

WriteGear API by default expects BGR format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for BGR24 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In WriteGear API, you can use rgb_mode parameter in write() class method to write RGB format frames instead of default BGR as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing RGB24 frame to writer\n    writer.write(frame, rgb_mode=True)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes GRAYSCALE format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` parameter\n# for controlled output framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_gray.mp4`\nwriter = WriteGear(output_filename=\"output_foo_gray.mp4\", **output_params)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes YUV (or basically any other supported pixel format) frames in its write() class method with its -input_pixfmt attribute in compression mode. For its non-compression mode, see above example.

You can also use yuv422p (4:2:2 subsampling) or yuv444p (4:4:4 subsampling) instead for higher dynamic ranges.

In WriteGear API, the support for -input_pixfmt attribute in output_params dictionary parameter was added in v0.3.0.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for YUV420 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"yuv420p\").formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as \n# `-input_framerate` parameter for controlled framerate\n# and add input pixfmt as yuv420p also\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-input_pixfmt\": \"yuv420p\"\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_yuv.mp4`\nwriter = WriteGear(output_filename=\"output_foo_yuv.mp4\", logging=True, **output_params)\n\n# grab the YUV420 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing YUV420 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"reference/ffhelper/","title":"deffcode.ffhelper","text":"

Following methods are exclusively design to handle FFmpeg related tasks. These tasks includes validation of installed FFmpeg binaries, downloading of FFmpeg binaries(on Windows), and parsing of FFmpeg metadata into useful information using various pattern matching methods.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.download_ffmpeg_binaries--download_ffmpeg_binaries","title":"download_ffmpeg_binaries","text":"

Generates FFmpeg Static Binaries for windows(if not available)

Parameters:

Name Type Description Default path string

path for downloading custom FFmpeg executables

required os_windows boolean

is running on Windows OS?

False os_bit string

32-bit or 64-bit OS?

''

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def download_ffmpeg_binaries(path, os_windows=False, os_bit=\"\"):\n    \"\"\"\n    ## download_ffmpeg_binaries\n\n    Generates FFmpeg Static Binaries for windows(if not available)\n\n    Parameters:\n        path (string): path for downloading custom FFmpeg executables\n        os_windows (boolean): is running on Windows OS?\n        os_bit (string): 32-bit or 64-bit OS?\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if os_windows and os_bit:\n        # initialize with available FFmpeg Static Binaries GitHub Server\n        file_url = \"https://github.com/abhiTronix/FFmpeg-Builds/releases/latest/download/ffmpeg-static-{}-gpl.zip\".format(\n            os_bit\n        )\n\n        file_name = os.path.join(\n            os.path.abspath(path), \"ffmpeg-static-{}-gpl.zip\".format(os_bit)\n        )\n        file_path = os.path.join(\n            os.path.abspath(path),\n            \"ffmpeg-static-{}-gpl/bin/ffmpeg.exe\".format(os_bit),\n        )\n        base_path, _ = os.path.split(file_name)  # extract file base path\n        # check if file already exists\n        if os.path.isfile(file_path):\n            final_path += file_path  # skip download if does\n        else:\n            # import libs\n            import zipfile\n\n            # check if given path has write access\n            assert os.access(path, os.W_OK), (\n                \"[Helper:ERROR] :: Permission Denied, Cannot write binaries to directory = \"\n                + path\n            )\n            # remove leftovers if exists\n            os.path.isfile(file_name) and delete_file_safe(file_name)\n            # download and write file to the given path\n            with open(file_name, \"wb\") as f:\n                logger.debug(\n                    \"No Custom FFmpeg path provided. Auto-Installing FFmpeg static binaries from GitHub Mirror now. 
Please wait...\"\n                )\n                # create session\n                with requests.Session() as http:\n                    # setup retry strategy\n                    retries = Retry(\n                        total=3,\n                        backoff_factor=1,\n                        status_forcelist=[429, 500, 502, 503, 504],\n                    )\n                    # Mount it for https usage\n                    adapter = TimeoutHTTPAdapter(timeout=2.0, max_retries=retries)\n                    http.mount(\"https://\", adapter)\n                    response = http.get(file_url, stream=True)\n                    response.raise_for_status()\n                    total_length = (\n                        response.headers.get(\"content-length\")\n                        if \"content-length\" in response.headers\n                        else len(response.content)\n                    )\n                    assert not (\n                        total_length is None\n                    ), \"[Helper:ERROR] :: Failed to retrieve files, check your Internet connectivity!\"\n                    bar = tqdm(total=int(total_length), unit=\"B\", unit_scale=True)\n                    for data in response.iter_content(chunk_size=4096):\n                        f.write(data)\n                        len(data) > 0 and bar.update(len(data))\n                    bar.close()\n            logger.debug(\"Extracting executables.\")\n            with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n                zip_fname, _ = os.path.split(zip_ref.infolist()[0].filename)\n                zip_ref.extractall(base_path)\n            # perform cleaning\n            delete_file_safe(file_name)\n            logger.debug(\"FFmpeg binaries for Windows configured successfully!\")\n            final_path += file_path\n    # return final path\n    return final_path\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_ffmpeg--validate_ffmpeg","title":"validate_ffmpeg","text":"

Validate FFmpeg Binaries. Returns True if validity test passes successfully.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_ffmpeg(path, verbose=False):\n    \"\"\"\n    ## validate_ffmpeg\n\n    Validate FFmpeg Binaries. Returns `True` if validity test passes successfully.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    try:\n        # get the FFmpeg version\n        version = check_sp_output([path, \"-version\"])\n        firstline = version.split(b\"\\n\")[0]\n        version = firstline.split(b\" \")[2].strip()\n        if verbose:  # log if test are passed\n            logger.debug(\"FFmpeg validity Test Passed!\")\n            logger.debug(\n                \"Found valid FFmpeg Version: `{}` installed on this system\".format(\n                    version\n                )\n            )\n    except Exception as e:\n        # log if test are failed\n        if verbose:\n            logger.exception(str(e))\n            logger.warning(\"FFmpeg validity Test Failed!\")\n        return False\n    return True\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_pixfmts--get_supported_pixfmts","title":"get_supported_pixfmts","text":"

Find and returns all FFmpeg's supported pixel formats.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).

Source code in deffcode/ffhelper.py
def get_supported_pixfmts(path):\n    \"\"\"\n    ## get_supported_pixfmts\n\n    Find and returns all FFmpeg's supported pixel formats.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).\n    \"\"\"\n    pxfmts = check_sp_output([path, \"-hide_banner\", \"-pix_fmts\"])\n    splitted = pxfmts.split(b\"\\n\")\n    srtindex = [i for i, s in enumerate(splitted) if b\"-----\" in s]\n    # extract video encoders\n    supported_pxfmts = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[srtindex[0] + 1 :]\n        if x.decode(\"utf-8\").strip()\n    ]\n    # compile regex\n    finder = re.compile(r\"([A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*)(\\s+[0-4])(\\s+[0-9]+)\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_pxfmts))\n    # return output findings\n    return [\n        ([s for s in o[0].split(\" \")][-1], o[1].strip(), o[2].strip())\n        for o in outputs\n        if len(o) == 3\n    ]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_vdecoders--get_supported_vdecoders","title":"get_supported_vdecoders","text":"

Find and returns all FFmpeg's supported video decoders.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported decoders.

Source code in deffcode/ffhelper.py
def get_supported_vdecoders(path):\n    \"\"\"\n    ## get_supported_vdecoders\n\n    Find and returns all FFmpeg's supported video decoders.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported decoders.\n    \"\"\"\n    decoders = check_sp_output([path, \"-hide_banner\", \"-decoders\"])\n    splitted = decoders.split(b\"\\n\")\n    # extract video encoders\n    supported_vdecoders = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[2 : len(splitted) - 1]\n        if x.decode(\"utf-8\").strip().startswith(\"V\")\n    ]\n    # compile regex\n    finder = re.compile(r\"[A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_vdecoders))\n    # return output findings\n    return [[s for s in o.split(\" \")][-1] for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_demuxers--get_supported_demuxers","title":"get_supported_demuxers","text":"

Find and returns all FFmpeg's supported demuxers.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported demuxers.

Source code in deffcode/ffhelper.py
def get_supported_demuxers(path):\n    \"\"\"\n    ## get_supported_demuxers\n\n    Find and returns all FFmpeg's supported demuxers.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported demuxers.\n    \"\"\"\n    demuxers = check_sp_output([path, \"-hide_banner\", \"-demuxers\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in demuxers.split(b\"\\n\")]\n    split_index = [idx for idx, s in enumerate(splitted) if \"--\" in s][0]\n    supported_demuxers = splitted[split_index + 1 : len(splitted) - 1]\n    # compile regex\n    finder = re.compile(r\"\\s\\s[a-z0-9_,-]+\\s+\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_demuxers))\n    # return output findings\n    return [o.strip() if not (\",\" in o) else o.split(\",\")[-1].strip() for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_imgseqdir--validate_imgseqdir","title":"validate_imgseqdir","text":"

Validates Image Sequence by counting number of Image files.

Parameters:

Name Type Description Default source string

video source to be validated

required extension string

extension of image sequence.

'jpg'

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_imgseqdir(source, extension=\"jpg\", verbose=False):\n    \"\"\"\n    ## validate_imgseqdir\n\n    Validates Image Sequence by counting number of Image files.\n\n    Parameters:\n        source (string): video source to be validated\n        extension (string): extension of image sequence.\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    # check if path exists\n    dirpath = Path(source).parent\n    try:\n        if not (dirpath.exists() and dirpath.is_dir()):\n            verbose and logger.warning(\n                \"Specified path `{}` doesn't exists or valid.\".format(dirpath)\n            )\n            return False\n        else:\n            return (\n                True if len(list(dirpath.glob(\"*.{}\".format(extension)))) > 2 else False\n            )\n    except:\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_image_seq--is_valid_image_seq","title":"is_valid_image_seq","text":"

Checks Image sequence validity by testing its extension against FFmpeg's supported pipe formats and number of Image files.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required source string

video source to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_image_seq(path, source=None, verbose=False):\n    \"\"\"\n    ## is_valid_image_seq\n\n    Checks Image sequence validity by testing its extension against\n    FFmpeg's supported pipe formats and number of Image files.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        source (string): video source to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if source is None or not (source):\n        logger.error(\"Source is empty!\")\n        return False\n    # extract all FFmpeg supported protocols\n    formats = check_sp_output([path, \"-hide_banner\", \"-formats\"])\n    extract_formats = re.findall(r\"\\w+_pipe\", formats.decode(\"utf-8\").strip())\n    supported_image_formats = [\n        x.split(\"_\")[0] for x in extract_formats if x.endswith(\"_pipe\")\n    ]\n    filename, extension = os.path.splitext(source)\n    # Test and return result whether scheme is supported\n    if extension and source.endswith(tuple(supported_image_formats)):\n        if validate_imgseqdir(source, extension=extension[1:], verbose=verbose):\n            verbose and logger.debug(\n                \"A valid Image Sequence source of format `{}` found.\".format(extension)\n            )\n            return True\n        else:\n            ValueError(\n                \"Given Image Sequence source of format `{}` contains insignificant(invalid) sample size, Check the `source` parameter value again!\".format(\n                    source.split(\".\")[1]\n                )\n            )\n    else:\n        verbose and logger.warning(\"Source isn't a valid Image Sequence\")\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_url--is_valid_url","title":"is_valid_url","text":"

Checks URL validity by testing its scheme against FFmpeg's supported protocols.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required url string

URL to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_url(path, url=None, verbose=False):\n    \"\"\"\n    ## is_valid_url\n\n    Checks URL validity by testing its scheme against\n    FFmpeg's supported protocols.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        url (string): URL to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if url is None or not (url):\n        logger.warning(\"URL is empty!\")\n        return False\n    # extract URL scheme\n    extracted_scheme_url = url.split(\"://\", 1)[0]\n    # extract all FFmpeg supported protocols\n    protocols = check_sp_output([path, \"-hide_banner\", \"-protocols\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in protocols.split(b\"\\n\")]\n    supported_protocols = splitted[splitted.index(\"Output:\") + 1 : len(splitted) - 1]\n    # RTSP is a demuxer somehow\n    # support both RTSP and RTSPS(over SSL)\n    supported_protocols += (\n        [\"rtsp\", \"rtsps\"] if \"rtsp\" in get_supported_demuxers(path) else []\n    )\n    # Test and return result whether scheme is supported\n    if extracted_scheme_url and extracted_scheme_url in supported_protocols:\n        verbose and logger.debug(\n            \"URL scheme `{}` is supported by FFmpeg.\".format(extracted_scheme_url)\n        )\n        return True\n    else:\n        verbose and logger.warning(\n            \"URL scheme `{}` isn't supported by FFmpeg!\".format(extracted_scheme_url)\n        )\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.check_sp_output--check_sp_output","title":"check_sp_output","text":"

Returns FFmpeg stdout output from subprocess module.

Parameters:

Name Type Description Default args based on input

Non Keyword Arguments

() kwargs based on input

Keyword Arguments

{}

Returns: A string value.

Source code in deffcode/ffhelper.py
def check_sp_output(*args, **kwargs):\n    \"\"\"\n    ## check_sp_output\n\n    Returns FFmpeg `stdout` output from subprocess module.\n\n    Parameters:\n        args (based on input): Non Keyword Arguments\n        kwargs (based on input): Keyword Arguments\n\n    **Returns:** A string value.\n    \"\"\"\n    # workaround for python bug: https://bugs.python.org/issue37380\n    if platform.system() == \"Windows\":\n        # see comment https://bugs.python.org/msg370334\n        sp._cleanup = lambda: None\n    # handle additional params\n    retrieve_stderr = kwargs.pop(\"force_retrieve_stderr\", False)\n    # execute command in subprocess\n    process = sp.Popen(\n        stdout=sp.PIPE,\n        stderr=sp.DEVNULL if not (retrieve_stderr) else sp.PIPE,\n        *args,\n        **kwargs,\n    )\n    # communicate and poll process\n    output, stderr = process.communicate()\n    retcode = process.poll()\n    # handle return code\n    if retcode and not (retrieve_stderr):\n        logger.error(\"[Pipline-Error] :: {}\".format(output.decode(\"utf-8\")))\n        cmd = kwargs.get(\"args\")\n        if cmd is None:\n            cmd = args[0]\n        error = sp.CalledProcessError(retcode, cmd)\n        error.output = output\n        raise error\n    # raise error if no output\n    bool(output) or bool(stderr) or logger.error(\n        \"[Pipline-Error] :: Pipline failed to exact any data from command: {}!\".format(\n            args[0] if args else []\n        )\n    )\n    # return output otherwise\n    return stderr if retrieve_stderr and stderr else output\n
"},{"location":"reference/utils/","title":"deffcode.utils","text":"

Following are the helper methods required by the DeFFcode APIs.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/utils/#deffcode.utils.logger_handler--logger_handler","title":"logger_handler","text":"

Returns the logger handler

Returns: A logger handler

Source code in deffcode/utils.py
def logger_handler():\n    \"\"\"\n    ## logger_handler\n\n    Returns the logger handler\n\n    **Returns:** A logger handler\n    \"\"\"\n    # logging formatter\n    formatter = ColoredFormatter(\n        \"{green}{asctime}{reset} :: {bold_purple}{name:^13}{reset} :: {log_color}{levelname:^8}{reset} :: {bold_white}{message}\",\n        datefmt=\"%H:%M:%S\",\n        reset=True,\n        log_colors={\n            \"INFO\": \"bold_cyan\",\n            \"DEBUG\": \"bold_yellow\",\n            \"WARNING\": \"bold_red,fg_thin_yellow\",\n            \"ERROR\": \"bold_red\",\n            \"CRITICAL\": \"bold_red,bg_white\",\n        },\n        style=\"{\",\n    )\n    # check if FFdecoder_LOGFILE defined\n    file_mode = os.environ.get(\"DEFFCODE_LOGFILE\", False)\n    # define handler\n    handler = logging.StreamHandler()\n    if file_mode and isinstance(file_mode, str):\n        file_path = os.path.abspath(file_mode)\n        if (os.name == \"nt\" or os.access in os.supports_effective_ids) and os.access(\n            os.path.dirname(file_path), os.W_OK\n        ):\n            file_path = (\n                os.path.join(file_path, \"deffcode.log\")\n                if os.path.isdir(file_path)\n                else file_path\n            )\n            handler = logging.FileHandler(file_path, mode=\"a\")\n            formatter = logging.Formatter(\n                \"{asctime} :: {name} :: {levelname} :: {message}\",\n                datefmt=\"%H:%M:%S\",\n                style=\"{\",\n            )\n\n    handler.setFormatter(formatter)\n    return handler\n
"},{"location":"reference/utils/#deffcode.utils.dict2Args--dict2args","title":"dict2Args","text":"

Converts dictionary attributes to list(args)

Parameters:

Name Type Description Default param_dict dict

Parameters dictionary

required

Returns: Arguments list

Source code in deffcode/utils.py
def dict2Args(param_dict):\n    \"\"\"\n    ## dict2Args\n\n    Converts dictionary attributes to list(args)\n\n    Parameters:\n        param_dict (dict): Parameters dictionary\n\n    **Returns:** Arguments list\n    \"\"\"\n    args = []\n    for key in param_dict.keys():\n        if key in [\"-clones\"] or key.startswith(\"-core\"):\n            if isinstance(param_dict[key], list):\n                args.extend(param_dict[key])\n            else:\n                logger.warning(\n                    \"{} with invalid datatype:`{}`, Skipped!\".format(\n                        \"Core parameter\" if key.startswith(\"-core\") else \"Clone\",\n                        param_dict[key],\n                    )\n                )\n        else:\n            args.append(key)\n            args.append(str(param_dict[key]))\n    return args\n
"},{"location":"reference/utils/#deffcode.utils.delete_file_safe--delete_ext_safe","title":"delete_ext_safe","text":"

Safely deletes files at given path.

Parameters:

Name Type Description Default file_path string

path to the file

required Source code in deffcode/utils.py
def delete_file_safe(file_path):\n    \"\"\"\n    ## delete_ext_safe\n\n    Safely deletes files at given path.\n\n    Parameters:\n        file_path (string): path to the file\n    \"\"\"\n    try:\n        dfile = Path(file_path)\n        if sys.version_info >= (3, 8, 0):\n            dfile.unlink(missing_ok=True)\n        else:\n            dfile.exists() and dfile.unlink()\n    except Exception as e:\n        logger.exception(str(e))\n
"},{"location":"reference/ffdecoder/","title":"FFdecoder API","text":"

FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1

FFdecoder API implements a standalone highly-extensible wrapper around FFmpeg multimedia framework that provides complete control over the underline pipeline including access to almost any FFmpeg specification thinkable such as framerate, resolution, hardware decoder(s), complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.

FFdecoder API compiles its FFmpeg pipeline by processing input Video Source metadata and User-defined options, and runs it inside a subprocess pipe concurrently with the main thread, while extracting output dataframes(1D arrays) into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into 24-bit RGB (default) ndarray 3D frames that are readily available through its generateFrame() method.

FFdecoder API employs Sourcer API at its backend for gathering, processing, and validating metadata of all multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. This metadata information is also available as a JSON string with its metadata property object and can be updated as desired.

FFdecoder API supports a wide-ranging media stream as input source such as USB/Virtual/IP Camera Feed, Multimedia video file, Screen Capture, Image Sequence, Network protocols (such as HTTP(s), RTP/RSTP, etc.), so on and so forth.

Furthermore, FFdecoder API maintains the standard OpenCV-Python (Python API for OpenCV) coding syntax, thereby making it even easier to integrate this API in any Computer Vision application.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

FFdecoder API parameters are explained here \u27b6

Source code in deffcode/ffdecoder.py
class FFdecoder:\n    \"\"\"\n    > FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames\n    with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1\n\n    FFdecoder API implements a **standalone highly-extensible wrapper around [FFmpeg](https://ffmpeg.org/)** multimedia framework that provides complete\n    control over the underline pipeline including **access to almost any FFmpeg specification thinkable** such as framerate, resolution, hardware decoder(s),\n    complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.\n\n    FFdecoder API **compiles its FFmpeg pipeline** by processing input Video Source metadata and User-defined options, and **runs it inside a\n    [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe** concurrently with the main thread, while extracting output dataframes(1D arrays)\n    into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into ==[24-bit RGB](https://en.wikipedia.org/wiki/List_of_monochrome_and_RGB_color_formats#24-bit_RGB) _(default)_\n    [`ndarray`](https://numpy.org/doc/stable/reference/arrays.ndarray.html#the-n-dimensional-array-ndarray) 3D frames== that are readily available\n    through its [`generateFrame()`](#deffcode.ffdecoder.FFdecoder.generateFrame) method.\n\n    FFdecoder API **employs [Sourcer API](../../reference/sourcer) at its backend** for gathering, processing, and validating metadata of all\n    multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. 
This metadata information is also\n    available as a JSON string with its [`metadata`](#deffcode.ffdecoder.FFdecoder.metadata) property object and can be updated as desired.\n\n    FFdecoder API **supports a wide-ranging media stream** as input source such as USB/Virtual/IP Camera Feed, Multimedia video file,\n    Screen Capture, Image Sequence, Network protocols _(such as HTTP(s), RTP/RSTP, etc.)_, so on and so forth.\n\n    Furthermore, FFdecoder API maintains the **standard [OpenCV-Python](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html) _(Python API for OpenCV)_ coding syntax**, thereby making it even easier to\n    integrate this API in any Computer Vision application.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! info \"FFdecoder API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        frame_format=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **ffparams\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n\n        # enable verbose if specified\n        self.__verbose_logs = (\n            verbose if (verbose and isinstance(verbose, bool)) else False\n    
    )\n\n        # define whether initializing\n        self.__initializing = True\n\n        # define frame pixel-format for decoded frames\n        self.__frame_format = (\n            frame_format.lower().strip() if isinstance(frame_format, str) else None\n        )\n\n        # handles user-defined parameters\n        self.__extra_params = {}\n\n        # handle process to be frames written\n        self.__process = None\n\n        # handle exclusive metadata\n        self.__ff_pixfmt_metadata = None  # metadata\n        self.__raw_frame_num = None  # raw-frame number\n        self.__raw_frame_pixfmt = None  # raw-frame pixformat\n        self.__raw_frame_dtype = None  # raw-frame dtype\n        self.__raw_frame_depth = None  # raw-frame depth\n        self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n        # define supported mode of operation\n        self.__supported_opmodes = {\n            \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n            \"vo\": \"Video-Only\",\n            \"imgseq\": \"Image-Sequence\",\n            # \"ao\":\"Audio-Only\", # reserved for future\n        }\n        # operation mode variable\n        self.__opmode = None\n\n        # handle termination\n        self.__terminate_stream = False\n\n        # cleans and reformat user-defined parameters\n        self.__extra_params = {\n            str(k).strip(): str(v).strip()\n            if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n            else v\n            for k, v in ffparams.items()\n        }\n\n        # handle custom Sourcer API params\n        sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n        # reset improper values\n        sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n        # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        self.__ffmpeg_prefixes = 
self.__extra_params.pop(\"-ffprefixes\", [])\n        # check if not valid type\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n        else:\n            # also pass valid ffmpeg pre-headers to Sourcer API\n            sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n        # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n        # assets on Windows(if specified)\n        sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n            \"-ffmpeg_download_path\", \"\"\n        )\n\n        # handle video and audio stream indexes in case of multiple ones.\n        default_stream_indexes = self.__extra_params.pop(\n            \"-default_stream_indexes\", (0, 0)\n        )\n        # reset improper values\n        default_stream_indexes = (\n            (0, 0)\n            if not isinstance(default_stream_indexes, (list, tuple))\n            else default_stream_indexes\n        )\n\n        # pass FFmpeg filter to Sourcer API params for processing\n        if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n            key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n            sourcer_params[key] = self.__extra_params[key]\n\n        # define dict to store user-defined parameters\n        self.__user_metadata = {}\n        # extract and assign source metadata as dict\n        (self.__sourcer_metadata, self.__missing_prop) = (\n            Sourcer(\n                source=source,\n                source_demuxer=source_demuxer,\n                verbose=verbose,\n                custom_ffmpeg=custom_ffmpeg if 
isinstance(custom_ffmpeg, str) else \"\",\n                **sourcer_params\n            )\n            .probe_stream(default_stream_indexes=default_stream_indexes)\n            .retrieve_metadata(force_retrieve_missing=True)\n        )\n\n        # handle valid FFmpeg assets location\n        self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n        # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n        # patch for compatibility with OpenCV APIs.\n        self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n        if not (isinstance(self.__cv_patch, bool)):\n            self.__cv_patch = False\n            self.__verbose_logs and logger.critical(\n                \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n            )\n\n        # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n        self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n        if not (isinstance(self.__passthrough_mode, bool)):\n            self.__passthrough_mode = False\n\n        # handle mode of operation\n        if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n            # image-sequence mode\n            self.__opmode = \"imgseq\"\n        elif (\n            self.__sourcer_metadata[\n                \"source_has_video\"\n            ]  # audio is only for pass-through, not really for audio decoding yet.\n            and self.__sourcer_metadata[\"source_has_audio\"]\n            and self.__passthrough_mode  # [TODO]\n        ):\n            self.__opmode = \"av\"\n        # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n        #    self.__opmode = \"ao\"\n        elif self.__sourcer_metadata[\"source_has_video\"]:\n            # video-only mode\n            self.__opmode = \"vo\"\n        else:\n            # raise if unknown mode\n            raise ValueError(\n                \"Unable to find any 
usable video stream in the given source!\"\n            )\n        # store as metadata\n        self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n            self.__opmode\n        ]\n\n        # handle user-defined output framerate\n        __framerate = self.__extra_params.pop(\"-framerate\", None)\n        if (\n            isinstance(__framerate, str)\n            and __framerate\n            == \"null\"  # special mode to discard `-framerate/-r` parameter\n        ):\n            self.__inputframerate = __framerate\n        elif isinstance(__framerate, (float, int)):\n            self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n        else:\n            # warn if wrong type\n            not (__framerate is None) and logger.warning(\n                \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                    type(__framerate).__name__\n                )\n            )\n            # reset to default\n            self.__inputframerate = 0.0\n\n        # handle user defined decoded frame resolution\n        self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n        if (\n            isinstance(self.__custom_resolution, str)\n            and self.__custom_resolution\n            == \"null\"  # special mode to discard `-size/-s` parameter\n        ) or (\n            isinstance(self.__custom_resolution, (list, tuple))\n            and len(self.__custom_resolution)\n            == 2  # valid resolution(must be a tuple or list)\n        ):\n            # log it\n            self.__verbose_logs and not isinstance(\n                self.__custom_resolution, str\n            ) and logger.debug(\n                \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n            )\n        else:\n            # log it\n            not (self.__custom_resolution is None) and logger.warning(\n                \"Discarding invalid 
`-custom_resolution` value: `{}`!\".format(\n                    self.__custom_resolution\n                )\n            )\n            # reset improper values\n            self.__custom_resolution = None\n\n    def formulate(self):\n\n        \"\"\"\n        This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n        **Returns:** A reference to the FFdecoder class object.\n        \"\"\"\n        # assign values to class variables on first run\n        if self.__initializing:\n            # prepare parameter dict\n            input_params = OrderedDict()\n            output_params = OrderedDict()\n\n            # dynamically pre-assign a default video-decoder (if not assigned by user).\n            supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n            default_vdecodec = (\n                self.__sourcer_metadata[\"source_video_decoder\"]\n                if self.__sourcer_metadata[\"source_video_decoder\"]\n                in supported_vdecodecs\n                else \"unknown\"\n            )\n            if \"-c:v\" in self.__extra_params:\n                self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-c:v\", default_vdecodec\n                )\n            # handle image sequence separately\n            if self.__opmode == \"imgseq\":\n                # -vcodec is discarded by default\n                # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n                self.__extra_params.pop(\"-vcodec\", None)\n            elif (\n                \"-vcodec\" in self.__extra_params\n                and self.__extra_params[\"-vcodec\"] is None\n            ):\n                # special case when -vcodec is not needed intentionally\n                self.__extra_params.pop(\"-vcodec\", None)\n            else:\n                # assign video decoder selected here.\n                if not \"-vcodec\" in 
self.__extra_params:\n                    input_params[\"-vcodec\"] = default_vdecodec\n                else:\n                    input_params[\"-vcodec\"] = self.__extra_params.pop(\n                        \"-vcodec\", default_vdecodec\n                    )\n                if (\n                    default_vdecodec != \"unknown\"\n                    and not input_params[\"-vcodec\"] in supported_vdecodecs\n                ):\n                    # reset to default if not supported\n                    logger.warning(\n                        \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                            input_params[\"-vcodec\"], default_vdecodec\n                        )\n                    )\n                    input_params[\"-vcodec\"] = default_vdecodec\n                # raise error if not valid decoder found\n                if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                    raise RuntimeError(\n                        \"Provided FFmpeg does not support any known usable video-decoders.\"\n                        \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                    )\n\n            # handle user-defined number of frames.\n            if \"-vframes\" in self.__extra_params:\n                self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                    \"-vframes\", None\n                )\n            if \"-frames:v\" in self.__extra_params:\n                value = self.__extra_params.pop(\"-frames:v\", None)\n                if not (value is None) and value > 0:\n                    output_params[\"-frames:v\"] = value\n\n            # dynamically calculate default raw-frames pixel format(if not assigned by user).\n            # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n            if \"-pix_fmt\" in self.__extra_params:\n                
logger.warning(\n                    \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n                )\n                self.__extra_params.pop(\"-pix_fmt\", None)\n            # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n            self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n            supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n            # calculate default pixel-format\n            # Check special case  - `frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n            self.__frame_format == \"null\" and logger.critical(\n                \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n            )\n            # choose between rgb24(if available) or source pixel-format\n            # otherwise, only source pixel-format for special case\n            default_pixfmt = (\n                \"rgb24\"\n                if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n                else self.__sourcer_metadata[\"source_video_pixfmt\"]\n            )\n            # assign output raw-frames pixel format\n            rawframe_pixfmt = None\n            if (\n                not (self.__frame_format is None)\n                and self.__frame_format in supported_pixfmts\n            ):\n                # check if valid and supported `frame_format` parameter assigned\n                rawframe_pixfmt = self.__frame_format.strip()\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                        rawframe_pixfmt\n                    )\n                )\n            elif (\n                \"output_frames_pixfmt\"\n                in self.__sourcer_metadata  # means `format` filter is defined\n                and self.__sourcer_metadata[\"output_frames_pixfmt\"] 
in supported_pixfmts\n            ):\n                # assign if valid and supported\n                rawframe_pixfmt = self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ].strip()\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n                )\n            else:\n                # reset to default if not supported\n                rawframe_pixfmt = default_pixfmt\n                # log it accordingly\n                if self.__frame_format is None:\n                    logger.info(\n                        \"Using default `{}` pixel-format for this pipeline.\".format(\n                            default_pixfmt\n                        )\n                    )\n                else:\n                    logger.warning(\n                        \"{} Switching to default `{}` pixel-format!\".format(\n                            \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                                self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                                if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                                else self.__frame_format\n                            )\n                            if self.__frame_format != \"null\"\n                            else \"No usable pixel-format defined.\",\n                            default_pixfmt,\n                        )\n                    )\n\n            # dynamically calculate raw-frame datatype based on pixel-format selected\n            (self.__raw_frame_depth, rawframesbpp) = [\n                (int(x[1]), int(x[2]))\n                for x in self.__ff_pixfmt_metadata\n                if x[0] == rawframe_pixfmt\n            ][0]\n            raw_bit_per_component = (\n                rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n            )\n     
       if 4 <= raw_bit_per_component <= 8:\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n            elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n                (\"le\", \"be\")\n            ):\n                if rawframe_pixfmt.endswith(\"le\"):\n                    self.__raw_frame_dtype = np.dtype(\"<u2\")\n                else:\n                    self.__raw_frame_dtype = np.dtype(\">u2\")\n            else:\n                # reset to both pixel-format and datatype to default if not supported\n                not (self.__frame_format is None) and logger.warning(\n                    \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. Switching to default `rgb24` pixel-format!\".format(\n                        rawframe_pixfmt\n                    )\n                )\n                rawframe_pixfmt = \"rgb24\"\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n\n            # Check if not special case\n            if self.__frame_format != \"null\":\n                # assign to FFmpeg pipeline otherwise\n                output_params[\"-pix_fmt\"] = rawframe_pixfmt\n            # assign to global parameter further usage\n            self.__raw_frame_pixfmt = rawframe_pixfmt\n            # also override as metadata(if available)\n            if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n                self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ] = self.__raw_frame_pixfmt\n\n            # handle raw-frame resolution\n            # notify FFmpeg `-s` parameter cannot be assigned directly\n            if \"-s\" in self.__extra_params:\n                logger.warning(\n                    \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n                )\n                self.__extra_params.pop(\"-s\", None)\n            # assign output rawframe resolution\n            if not (self.__custom_resolution is None) and not isinstance(\n                self.__custom_resolution, str\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                self.__raw_frame_resolution = self.__custom_resolution\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                        self.__raw_frame_resolution\n                    )\n                )\n            elif (\n                \"output_frames_resolution\"\n                in self.__sourcer_metadata  # means `scale` filter is defined\n                and self.__sourcer_metadata[\"output_frames_resolution\"]\n                and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on output.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"output_frames_resolution\"\n                ]\n            elif (\n                self.__sourcer_metadata[\"source_video_resolution\"]\n                and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on source.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"source_video_resolution\"\n                ]\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n            # special mode to discard `-size/-s` FFmpeg parameter completely\n            if isinstance(self.__custom_resolution, str):\n                logger.critical(\n                    \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # add to pipeline\n                dimensions = \"{}x{}\".format(\n                    self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n                )\n                output_params[\"-s\"] = str(dimensions)\n            # log if filters or default source is used\n            self.__verbose_logs and (\n                self.__custom_resolution is None\n                or isinstance(self.__custom_resolution, str)\n            ) and logger.info(\n                \"{} for this pipeline for defining output resolution.\".format(\n                    \"FFmpeg filter values will be used\"\n                    if \"output_frames_resolution\" in self.__sourcer_metadata\n                    else \"Default source resolution will be used\"\n                )\n            )\n\n            # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n            if (\n                not isinstance(self.__inputframerate, str)\n                and self.__inputframerate > 0.0\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                output_params[\"-framerate\"] = str(self.__inputframerate)\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                        str(self.__inputframerate)\n                    )\n                )\n            elif (\n                
\"output_framerate\"\n                in self.__sourcer_metadata  # means `fps` filter is defined\n                and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n            ):\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on output\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"output_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n                )\n            elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on source\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"source_video_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"Default source framerate will be used for this pipeline for defining output framerate.\"\n                )\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n\n            # add rest to output parameters\n            output_params.update(self.__extra_params)\n\n            # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n            # TODO Added support for `-re -stream_loop` and `-loop`\n            if \"-frames:v\" in input_params:\n                self.__raw_frame_num = input_params[\"-frames:v\"]\n            elif (\n                not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n                and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n            ):\n                self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n            else:\n                self.__raw_frame_num = None\n                # log that number of frames are unknown\n                self.__verbose_logs and logger.info(\n                    \"Number of frames in given source are unknown. 
Live/Network/Looping stream detected!\"\n                )\n\n            # log Mode of Operation\n            self.__verbose_logs and logger.critical(\n                \"Activating {} Mode of Operation.\".format(\n                    self.__supported_opmodes[self.__opmode]\n                )\n            )\n\n            # compose the Pipeline using formulated FFmpeg parameters\n            self.__launch_FFdecoderline(input_params, output_params)\n\n            # inform the initialization is completed\n            self.__initializing = False\n        else:\n            # warn if pipeline is recreated\n            logger.error(\"This pipeline is already created and running!\")\n        return self\n\n    def __fetchNextfromPipeline(self):\n        \"\"\"\n        This Internal method to fetch next dataframes(1D arrays) from `subprocess` pipe's standard output(`stdout`) into a Numpy buffer.\n        \"\"\"\n        assert not (\n            self.__process is None\n        ), \"Pipeline is not running! 
You must call `formulate()` method first.\"\n\n        # formulated raw frame size and apply YUV pixel formats patch(if applicable)\n        raw_frame_size = (\n            (self.__raw_frame_resolution[0] * (self.__raw_frame_resolution[1] * 3 // 2))\n            if self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch\n            else (\n                self.__raw_frame_depth\n                * self.__raw_frame_resolution[0]\n                * self.__raw_frame_resolution[1]\n            )\n        )\n        # next dataframe as numpy ndarray\n        nparray = None\n        try:\n            # read bytes frames from buffer\n            nparray = np.frombuffer(\n                self.__process.stdout.read(\n                    raw_frame_size * self.__raw_frame_dtype.itemsize\n                ),\n                dtype=self.__raw_frame_dtype,\n            )\n        except Exception as e:\n            raise RuntimeError(\"Frame buffering failed with error: {}\".format(str(e)))\n        return (\n            nparray\n            if not (nparray is None) and len(nparray) == raw_frame_size\n            else None\n        )\n\n    def __fetchNextFrame(self):\n        \"\"\"\n        This Internal method grabs and decodes next 3D `ndarray` video-frame from the buffer.\n        \"\"\"\n        # Read next and reconstruct as numpy array\n        frame = self.__fetchNextfromPipeline()\n        # check if empty\n        if frame is None:\n            return frame\n        elif self.__raw_frame_pixfmt.startswith(\"gray\"):\n            # reconstruct exclusive `gray` frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )[:, :, 0]\n        elif self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch:\n            # reconstruct exclusive YUV formats 
frames for OpenCV APIs\n            frame = frame.reshape(\n                self.__raw_frame_resolution[1] * 3 // 2,\n                self.__raw_frame_resolution[0],\n            )\n        else:\n            # reconstruct default frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )\n        # return frame\n        return frame\n\n    def generateFrame(self):\n        \"\"\"\n        This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n        _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n        \"\"\"\n        if self.__raw_frame_num is None or not self.__raw_frame_num:\n            while not self.__terminate_stream:  # infinite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n        else:\n            for _ in range(self.__raw_frame_num):  # finite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n\n    def __enter__(self):\n        \"\"\"\n        Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n        **Returns:** Output of `formulate()` method.\n        \"\"\"\n        return self.formulate()\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"\n        Handles exit with the `with` statement. 
See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n        \"\"\"\n        self.terminate()\n\n    @property\n    def metadata(self):\n        \"\"\"\n        A property object that dumps metadata information as JSON string.\n\n        **Returns:** Metadata as JSON string.\n        \"\"\"\n        # import dependency\n        import json\n\n        # return complete metadata information as JSON string\n        return json.dumps(\n            {\n                **self.__sourcer_metadata,  # source video\n                **self.__missing_prop,  # missing properties\n                **self.__user_metadata,  # user-defined\n            },\n            indent=2,\n        )\n\n    @metadata.setter\n    def metadata(self, value):\n        \"\"\"\n        A property object that updates metadata information with user-defined dictionary.\n\n        Parameters:\n            value (dict): User-defined dictionary.\n        \"\"\"\n        # check if value dict type\n        if value and isinstance(value, dict):\n            # log it\n            self.__verbose_logs and logger.info(\"Updating Metadata...\")\n            # extract any source and output internal metadata keys\n            default_keys = set(value).intersection(\n                {**self.__sourcer_metadata, **self.__missing_prop}\n            )\n            # counterpart source properties for each output properties\n            counterpart_prop = {\n                \"output_frames_resolution\": \"source_video_resolution\",\n                \"output_frames_pixfmt\": \"source_video_pixfmt\",\n                \"output_framerate\": \"source_video_framerate\",\n            }\n            # iterate over source metadata keys and sanitize it\n            for key in default_keys or []:\n                if key == \"source\":\n                    # metadata properties that cannot be altered\n                    logger.warning(\n                        \"`{}` metadata property value cannot be altered. 
Discarding!\".format(\n                            key\n                        )\n                    )\n                elif key in self.__missing_prop:\n                    # missing metadata properties are unavailable and read-only\n                    # notify user about alternative counterpart property (if available)\n                    logger.warning(\n                        \"`{}` metadata property is read-only\".format(key)\n                        + (\n                            \". Try updating `{}` property instead!\".format(\n                                counterpart_prop[key]\n                            )\n                            if key in counterpart_prop.keys()\n                            else \" and cannot be updated!\"\n                        )\n                    )\n                elif isinstance(value[key], type(self.__sourcer_metadata[key])):\n                    # check if correct datatype as original\n                    self.__verbose_logs and logger.info(\n                        \"Updating `{}`{} metadata property to `{}`.\".format(\n                            key,\n                            \" and its counterpart\"\n                            if key in counterpart_prop.values()\n                            else \"\",\n                            value[key],\n                        )\n                    )\n                    # update source metadata if valid\n                    self.__sourcer_metadata[key] = value[key]\n                    # also update missing counterpart property (if available)\n                    counter_key = next(\n                        (k for k, v in counterpart_prop.items() if v == key), \"\"\n                    )\n                    if counter_key:\n                        self.__missing_prop[counter_key] = value[key]\n                else:\n                    # otherwise discard and log it\n                    logger.warning(\n                        \"Manually assigned `{}` metadata 
property value is of invalid type. Discarding!\"\n                    ).format(key)\n                # delete invalid key\n                del value[key]\n            # There is no concept of a tuple in the JSON format.\n            # Python's `json` module converts Python tuples to JSON lists\n            # because that's the closest thing in JSON to a tuple.\n            any(isinstance(value[x], tuple) for x in value) and logger.warning(\n                \"All TUPLE metadata properties will be converted to LIST datatype. Read docs for more details.\"\n            )\n            # update user-defined metadata\n            self.__user_metadata.update(value)\n        else:\n            # otherwise raise error\n            raise ValueError(\"Invalid datatype metadata assigned. Aborting!\")\n\n    def __launch_FFdecoderline(self, input_params, output_params):\n\n        \"\"\"\n        This Internal method executes FFmpeg pipeline arguments inside a `subprocess` pipe in a new process.\n\n        Parameters:\n            input_params (dict): Input FFmpeg parameters\n            output_params (dict): Output FFmpeg parameters\n        \"\"\"\n        # convert input parameters to list\n        input_parameters = dict2Args(input_params)\n\n        # convert output parameters to list\n        output_parameters = dict2Args(output_params)\n\n        # format command\n        cmd = (\n            [self.__ffmpeg]\n            + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n            + self.__ffmpeg_prefixes\n            + input_parameters\n            + (\n                [\"-f\", self.__sourcer_metadata[\"source_demuxer\"]]\n                if (\"source_demuxer\" in self.__sourcer_metadata.keys())\n                else []\n            )\n            + [\"-i\", self.__sourcer_metadata[\"source\"]]\n            + output_parameters\n            + [\"-f\", \"rawvideo\", \"-\"]\n        )\n        # compose the FFmpeg process\n        if self.__verbose_logs:\n         
   logger.debug(\"Executing FFmpeg command: `{}`\".format(\" \".join(cmd)))\n            # In debugging mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=None\n            )\n        else:\n            # In silent mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=sp.DEVNULL\n            )\n\n    def terminate(self):\n        \"\"\"\n        Safely terminates all processes.\n        \"\"\"\n\n        # signal we are closing\n        self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n        self.__terminate_stream = True\n        # check if no process was initiated at first place\n        if self.__process is None or not (self.__process.poll() is None):\n            logger.info(\"Pipeline already terminated.\")\n            return\n        # Attempt to close pipeline.\n        # close `stdin` output\n        self.__process.stdin and self.__process.stdin.close()\n        # close `stdout` output\n        self.__process.stdout and self.__process.stdout.close()\n        # terminate/kill process if still processing\n        if self.__process.poll() is None:\n            # demuxers prefer kill\n            self.__process.kill()\n        # wait if not exiting\n        self.__process.wait()\n        self.__process = None\n        logger.info(\"Pipeline terminated successfully.\")\n

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata","title":"metadata property writable","text":"

A property object that dumps metadata information as JSON string.

Returns: Metadata as JSON string.

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__enter__","title":"__enter__(self) special","text":"

Handles entry with the with statement. See PEP343 -- The 'with' statement'.

Returns: Output of formulate() method.

Source code in deffcode/ffdecoder.py
def __enter__(self):\n    \"\"\"\n    Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n    **Returns:** Output of `formulate()` method.\n    \"\"\"\n    return self.formulate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__exit__","title":"__exit__(self, exc_type, exc_val, exc_tb) special","text":"

Handles exit with the with statement. See PEP343 -- The 'with' statement'.

Source code in deffcode/ffdecoder.py
def __exit__(self, exc_type, exc_val, exc_tb):\n    \"\"\"\n    Handles exit with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n    \"\"\"\n    self.terminate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__init__","title":"__init__(self, source, source_demuxer=None, frame_format=None, custom_ffmpeg='', verbose=False, **ffparams) special","text":"

This constructor method initializes the object state and attributes of the FFdecoder Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None frame_format str

sets pixel format(-pix_fmt) of the decoded frames.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False ffparams dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/ffdecoder.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    frame_format=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **ffparams\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n\n    # enable verbose if specified\n    self.__verbose_logs = (\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # define whether initializing\n    self.__initializing = True\n\n    # define frame pixel-format for decoded frames\n    self.__frame_format = (\n        frame_format.lower().strip() if isinstance(frame_format, str) else None\n    )\n\n    # handles user-defined parameters\n    self.__extra_params = {}\n\n    # handle process to be frames written\n    self.__process = None\n\n    # handle exclusive metadata\n    self.__ff_pixfmt_metadata = None  # metadata\n    self.__raw_frame_num = None  # raw-frame number\n    self.__raw_frame_pixfmt = None  # raw-frame pixformat\n    self.__raw_frame_dtype = None  # raw-frame dtype\n    self.__raw_frame_depth = None  # raw-frame depth\n    self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n    # define supported mode of operation\n    self.__supported_opmodes = {\n        \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n        \"vo\": \"Video-Only\",\n        \"imgseq\": \"Image-Sequence\",\n        # 
\"ao\":\"Audio-Only\", # reserved for future\n    }\n    # operation mode variable\n    self.__opmode = None\n\n    # handle termination\n    self.__terminate_stream = False\n\n    # cleans and reformat user-defined parameters\n    self.__extra_params = {\n        str(k).strip(): str(v).strip()\n        if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n        else v\n        for k, v in ffparams.items()\n    }\n\n    # handle custom Sourcer API params\n    sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n    # reset improper values\n    sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n    # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__extra_params.pop(\"-ffprefixes\", [])\n    # check if not valid type\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n    else:\n        # also pass valid ffmpeg pre-headers to Sourcer API\n        sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n    # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n    # assets on Windows(if specified)\n    sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n        \"-ffmpeg_download_path\", \"\"\n    )\n\n    # handle video and audio stream indexes in case of multiple ones.\n    default_stream_indexes = self.__extra_params.pop(\n        \"-default_stream_indexes\", (0, 0)\n    )\n    # reset improper values\n    default_stream_indexes = (\n        (0, 0)\n        if not isinstance(default_stream_indexes, (list, tuple))\n        else default_stream_indexes\n    )\n\n    # pass 
FFmpeg filter to Sourcer API params for processing\n    if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n        key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n        sourcer_params[key] = self.__extra_params[key]\n\n    # define dict to store user-defined parameters\n    self.__user_metadata = {}\n    # extract and assign source metadata as dict\n    (self.__sourcer_metadata, self.__missing_prop) = (\n        Sourcer(\n            source=source,\n            source_demuxer=source_demuxer,\n            verbose=verbose,\n            custom_ffmpeg=custom_ffmpeg if isinstance(custom_ffmpeg, str) else \"\",\n            **sourcer_params\n        )\n        .probe_stream(default_stream_indexes=default_stream_indexes)\n        .retrieve_metadata(force_retrieve_missing=True)\n    )\n\n    # handle valid FFmpeg assets location\n    self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n    # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n    # patch for compatibility with OpenCV APIs.\n    self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n    if not (isinstance(self.__cv_patch, bool)):\n        self.__cv_patch = False\n        self.__verbose_logs and logger.critical(\n            \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n        )\n\n    # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n    self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n    if not (isinstance(self.__passthrough_mode, bool)):\n        self.__passthrough_mode = False\n\n    # handle mode of operation\n    if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n        # image-sequence mode\n        self.__opmode = \"imgseq\"\n    elif (\n        self.__sourcer_metadata[\n            \"source_has_video\"\n        ]  # audio is only for pass-through, not really for audio decoding yet.\n        and 
self.__sourcer_metadata[\"source_has_audio\"]\n        and self.__passthrough_mode  # [TODO]\n    ):\n        self.__opmode = \"av\"\n    # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n    #    self.__opmode = \"ao\"\n    elif self.__sourcer_metadata[\"source_has_video\"]:\n        # video-only mode\n        self.__opmode = \"vo\"\n    else:\n        # raise if unknown mode\n        raise ValueError(\n            \"Unable to find any usable video stream in the given source!\"\n        )\n    # store as metadata\n    self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n        self.__opmode\n    ]\n\n    # handle user-defined output framerate\n    __framerate = self.__extra_params.pop(\"-framerate\", None)\n    if (\n        isinstance(__framerate, str)\n        and __framerate\n        == \"null\"  # special mode to discard `-framerate/-r` parameter\n    ):\n        self.__inputframerate = __framerate\n    elif isinstance(__framerate, (float, int)):\n        self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n    else:\n        # warn if wrong type\n        not (__framerate is None) and logger.warning(\n            \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                type(__framerate).__name__\n            )\n        )\n        # reset to default\n        self.__inputframerate = 0.0\n\n    # handle user defined decoded frame resolution\n    self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n    if (\n        isinstance(self.__custom_resolution, str)\n        and self.__custom_resolution\n        == \"null\"  # special mode to discard `-size/-s` parameter\n    ) or (\n        isinstance(self.__custom_resolution, (list, tuple))\n        and len(self.__custom_resolution)\n        == 2  # valid resolution(must be a tuple or list)\n    ):\n        # log it\n        self.__verbose_logs and not isinstance(\n            
self.__custom_resolution, str\n        ) and logger.debug(\n            \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n        )\n    else:\n        # log it\n        not (self.__custom_resolution is None) and logger.warning(\n            \"Discarding invalid `-custom_resolution` value: `{}`!\".format(\n                self.__custom_resolution\n            )\n        )\n        # reset improper values\n        self.__custom_resolution = None\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.formulate","title":"formulate(self)","text":"

This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg subprocess pipe.

Returns: A reference to the FFdecoder class object.

Source code in deffcode/ffdecoder.py
def formulate(self):\n\n    \"\"\"\n    This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n    **Returns:** A reference to the FFdecoder class object.\n    \"\"\"\n    # assign values to class variables on first run\n    if self.__initializing:\n        # prepare parameter dict\n        input_params = OrderedDict()\n        output_params = OrderedDict()\n\n        # dynamically pre-assign a default video-decoder (if not assigned by user).\n        supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n        default_vdecodec = (\n            self.__sourcer_metadata[\"source_video_decoder\"]\n            if self.__sourcer_metadata[\"source_video_decoder\"]\n            in supported_vdecodecs\n            else \"unknown\"\n        )\n        if \"-c:v\" in self.__extra_params:\n            self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                \"-c:v\", default_vdecodec\n            )\n        # handle image sequence separately\n        if self.__opmode == \"imgseq\":\n            # -vcodec is discarded by default\n            # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n            self.__extra_params.pop(\"-vcodec\", None)\n        elif (\n            \"-vcodec\" in self.__extra_params\n            and self.__extra_params[\"-vcodec\"] is None\n        ):\n            # special case when -vcodec is not needed intentionally\n            self.__extra_params.pop(\"-vcodec\", None)\n        else:\n            # assign video decoder selected here.\n            if not \"-vcodec\" in self.__extra_params:\n                input_params[\"-vcodec\"] = default_vdecodec\n            else:\n                input_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-vcodec\", default_vdecodec\n                )\n            if (\n                default_vdecodec != \"unknown\"\n                and not input_params[\"-vcodec\"] in 
supported_vdecodecs\n            ):\n                # reset to default if not supported\n                logger.warning(\n                    \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                        input_params[\"-vcodec\"], default_vdecodec\n                    )\n                )\n                input_params[\"-vcodec\"] = default_vdecodec\n            # raise error if not valid decoder found\n            if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                raise RuntimeError(\n                    \"Provided FFmpeg does not support any known usable video-decoders.\"\n                    \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                )\n\n        # handle user-defined number of frames.\n        if \"-vframes\" in self.__extra_params:\n            self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                \"-vframes\", None\n            )\n        if \"-frames:v\" in self.__extra_params:\n            value = self.__extra_params.pop(\"-frames:v\", None)\n            if not (value is None) and value > 0:\n                output_params[\"-frames:v\"] = value\n\n        # dynamically calculate default raw-frames pixel format(if not assigned by user).\n        # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n        if \"-pix_fmt\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n            )\n            self.__extra_params.pop(\"-pix_fmt\", None)\n        # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n        self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n        supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n        # calculate default pixel-format\n        # Check special case  - 
`frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n        self.__frame_format == \"null\" and logger.critical(\n            \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n        )\n        # choose between rgb24(if available) or source pixel-format\n        # otherwise, only source pixel-format for special case\n        default_pixfmt = (\n            \"rgb24\"\n            if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n            else self.__sourcer_metadata[\"source_video_pixfmt\"]\n        )\n        # assign output raw-frames pixel format\n        rawframe_pixfmt = None\n        if (\n            not (self.__frame_format is None)\n            and self.__frame_format in supported_pixfmts\n        ):\n            # check if valid and supported `frame_format` parameter assigned\n            rawframe_pixfmt = self.__frame_format.strip()\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                    rawframe_pixfmt\n                )\n            )\n        elif (\n            \"output_frames_pixfmt\"\n            in self.__sourcer_metadata  # means `format` filter is defined\n            and self.__sourcer_metadata[\"output_frames_pixfmt\"] in supported_pixfmts\n        ):\n            # assign if valid and supported\n            rawframe_pixfmt = self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ].strip()\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n            )\n        else:\n            # reset to default if not supported\n            rawframe_pixfmt = default_pixfmt\n            # log it accordingly\n            if self.__frame_format is None:\n                logger.info(\n                    \"Using default `{}` pixel-format for this 
pipeline.\".format(\n                        default_pixfmt\n                    )\n                )\n            else:\n                logger.warning(\n                    \"{} Switching to default `{}` pixel-format!\".format(\n                        \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                            self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                            if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                            else self.__frame_format\n                        )\n                        if self.__frame_format != \"null\"\n                        else \"No usable pixel-format defined.\",\n                        default_pixfmt,\n                    )\n                )\n\n        # dynamically calculate raw-frame datatype based on pixel-format selected\n        (self.__raw_frame_depth, rawframesbpp) = [\n            (int(x[1]), int(x[2]))\n            for x in self.__ff_pixfmt_metadata\n            if x[0] == rawframe_pixfmt\n        ][0]\n        raw_bit_per_component = (\n            rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n        )\n        if 4 <= raw_bit_per_component <= 8:\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n        elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n            (\"le\", \"be\")\n        ):\n            if rawframe_pixfmt.endswith(\"le\"):\n                self.__raw_frame_dtype = np.dtype(\"<u2\")\n            else:\n                self.__raw_frame_dtype = np.dtype(\">u2\")\n        else:\n            # reset to both pixel-format and datatype to default if not supported\n            not (self.__frame_format is None) and logger.warning(\n                \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. 
Switching to default `rgb24` pixel-format!\".format(\n                    rawframe_pixfmt\n                )\n            )\n            rawframe_pixfmt = \"rgb24\"\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n\n        # Check if not special case\n        if self.__frame_format != \"null\":\n            # assign to FFmpeg pipeline otherwise\n            output_params[\"-pix_fmt\"] = rawframe_pixfmt\n        # assign to global parameter further usage\n        self.__raw_frame_pixfmt = rawframe_pixfmt\n        # also override as metadata(if available)\n        if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n            self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ] = self.__raw_frame_pixfmt\n\n        # handle raw-frame resolution\n        # notify FFmpeg `-s` parameter cannot be assigned directly\n        if \"-s\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n            )\n            self.__extra_params.pop(\"-s\", None)\n        # assign output rawframe resolution\n        if not (self.__custom_resolution is None) and not isinstance(\n            self.__custom_resolution, str\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            self.__raw_frame_resolution = self.__custom_resolution\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                    self.__raw_frame_resolution\n                )\n            )\n        elif (\n            \"output_frames_resolution\"\n            in self.__sourcer_metadata  # means `scale` filter is defined\n            and self.__sourcer_metadata[\"output_frames_resolution\"]\n            and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on output.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"output_frames_resolution\"\n            ]\n        elif (\n            self.__sourcer_metadata[\"source_video_resolution\"]\n            and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on source.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"source_video_resolution\"\n            ]\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n        # special mode to discard `-size/-s` FFmpeg parameter completely\n        if isinstance(self.__custom_resolution, str):\n            logger.critical(\n                \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n            )\n        else:\n            # add to pipeline\n            dimensions = \"{}x{}\".format(\n                self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n            )\n            output_params[\"-s\"] = str(dimensions)\n        # log if filters or default source is used\n        self.__verbose_logs and (\n            self.__custom_resolution is None\n            or isinstance(self.__custom_resolution, str)\n        ) and logger.info(\n            \"{} for this pipeline for defining output resolution.\".format(\n                \"FFmpeg filter values will be used\"\n                if \"output_frames_resolution\" in self.__sourcer_metadata\n                else \"Default source resolution will be used\"\n            )\n        )\n\n        # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n        if (\n            not isinstance(self.__inputframerate, str)\n            and self.__inputframerate > 0.0\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            output_params[\"-framerate\"] = str(self.__inputframerate)\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                    str(self.__inputframerate)\n                )\n            )\n        elif (\n            \"output_framerate\"\n            in self.__sourcer_metadata  # means `fps` filter is defined\n            and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n        
):\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on output\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"output_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n            )\n        elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on source\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"source_video_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"Default source framerate will be used for this pipeline for defining output framerate.\"\n            )\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n\n        # add rest to output parameters\n        output_params.update(self.__extra_params)\n\n        # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n        # TODO Added support for `-re -stream_loop` and `-loop`\n        if \"-frames:v\" in input_params:\n            self.__raw_frame_num = input_params[\"-frames:v\"]\n        elif (\n            not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n            and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n        ):\n            self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n        else:\n            self.__raw_frame_num = None\n            # log that number of frames are unknown\n            self.__verbose_logs and logger.info(\n                \"Number of frames in given source are unknown. Live/Network/Looping stream detected!\"\n            )\n\n        # log Mode of Operation\n        self.__verbose_logs and logger.critical(\n            \"Activating {} Mode of Operation.\".format(\n                self.__supported_opmodes[self.__opmode]\n            )\n        )\n\n        # compose the Pipeline using formulated FFmpeg parameters\n        self.__launch_FFdecoderline(input_params, output_params)\n\n        # inform the initialization is completed\n        self.__initializing = False\n    else:\n        # warn if pipeline is recreated\n        logger.error(\"This pipeline is already created and running!\")\n    return self\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.generateFrame","title":"generateFrame(self)","text":"

This method returns a Generator function (also an Iterator using next()) of video frames, grabbed continuously from the buffer.

Source code in deffcode/ffdecoder.py
def generateFrame(self):\n    \"\"\"\n    This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n    _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n    \"\"\"\n    if self.__raw_frame_num is None or not self.__raw_frame_num:\n        while not self.__terminate_stream:  # infinite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n    else:\n        for _ in range(self.__raw_frame_num):  # finite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.terminate","title":"terminate(self)","text":"

Safely terminates all processes.

Source code in deffcode/ffdecoder.py
def terminate(self):\n    \"\"\"\n    Safely terminates all processes.\n    \"\"\"\n\n    # signal we are closing\n    self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n    self.__terminate_stream = True\n    # check if no process was initiated at first place\n    if self.__process is None or not (self.__process.poll() is None):\n        logger.info(\"Pipeline already terminated.\")\n        return\n    # Attempt to close pipeline.\n    # close `stdin` output\n    self.__process.stdin and self.__process.stdin.close()\n    # close `stdout` output\n    self.__process.stdout and self.__process.stdout.close()\n    # terminate/kill process if still processing\n    if self.__process.poll() is None:\n        # demuxers prefer kill\n        self.__process.kill()\n    # wait if not exiting\n    self.__process.wait()\n    self.__process = None\n    logger.info(\"Pipeline terminated successfully.\")\n
"},{"location":"reference/ffdecoder/params/","title":"FFdecoder API Parameters","text":""},{"location":"reference/ffdecoder/params/#source","title":"source","text":"

This parameter defines the input source (-i) for decoding real-time frames.

FFdecoder API will throw Assertion if source provided is invalid or missing.

FFdecoder API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder('/home/foo.mp4').formulate()\n

    Related usage recipes can be found here \u27b6

  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
    # initialize and formulate the decoder\ndecoder = FFdecoder('img%03d.png').formulate()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img*.png', verbose=True, **sourcer_params).formulate()\n
    # define `-loop 1` for looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img.jpg', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Network Address: Valid (http(s), rtp, rtsp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nffparams = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value (use Sourcer API's enumerate_devices to list them) either as integer or string of integer type to its source parameter. For example, for capturing \"0\" index device on Windows, we can do as follows in FFdecoder API:

    Requirement for Index based Camera Device Capturing in FFdecoder API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".
    Important Facts related to Camera Device Indexing
    • Camera Device indexes are 0-indexed. So the first device is at 0, second is at 1, and so on. So if there are n devices, the last device is at n-1.
    • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
    • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
      • For example, If there are three devices:
        {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
      • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

        Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

    Out of Index Camera Device index values will raise ValueError in FFdecoder API

    # initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipes can be found here \u27b6

  • Video Capture Device Name/Path: Valid video capture device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for capturing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in FFdecoder API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

      # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. a USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated with the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

      You can use the sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

      # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

      You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

      # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

      # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipe can be found here \u27b6

  • Screen Capturing/Recording: Valid screen capture device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. You can also specify additional specifications (such as limiting capture area to a region, setting capturing coordinates, whether to capture mouse pointer and clicks etc.). For example, for capturing \"0:\" indexed device with avfoundation source demuxer on MacOS along with mouse pointer and clicks, we can do as follows in FFdecoder API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

    For Windows OS users, dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

    For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for stream capturing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for stream capturing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

      # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

      # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Related usage recipe can be found here \u27b6

  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and decoding Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in FFdecoder API:

    # initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).formulate()\n

    Related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, as well as lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph, and more.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use ffmpeg -demuxers terminal command to lists all FFmpeg supported demuxers.

Specifying source_demuxer for Index based Camera Device Capturing in FFdecoder API

For enabling Index based Camera Device Capturing in FFdecoder API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\").formulate()\n
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", source_demuxer=\"auto\", frame_format=\"bgr24\").formulate()\n

Related usage recipes can be found here \u27b6

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize and formulate the decoder with `dshow` demuxer\ndecoder = FFdecoder(\"foo.mp4\", source_demuxer=\"dshow\").formulate()\n

"},{"location":"reference/ffdecoder/params/#frame_format","title":"frame_format","text":"

This parameter selects the pixel format for output video frames (such as gray for grayscale output).

Any invalid or unsupported value to frame_format parameter will be discarded!

For any improper frame_format parameter value (i.e. either null(special-case), undefined, or invalid type), the -pix_fmt FFmpeg parameter value in the Decoding pipeline uses the output_frames_pixfmt metadata property extracted from Output Stream. Thereby, in case no valid output_frames_pixfmt metadata property is found, the API finally defaults to Default pixel-format1 (calculated variably).

Use frame_format=\"null\" to manually discard -pix_fmt FFmpeg parameter entirely from Decoding pipeline.

This feature allows users to manually skip -pix_fmt FFmpeg parameter in Decoding pipeline, essentially for using only format ffmpeg filter values instead, or even better let FFmpeg itself choose the best available output frame pixel-format for the given source.

Data-Type: String

Default Value: Its default value is Default pixel-format1 (calculated variably).

Usage:

# initialize and formulate the decoder for grayscale frames\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\").formulate()\n

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Various Pixel formats related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then FFdecoder API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path (via. -custom_sourcer_params) exclusive parameter in FFdecoder API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in FFdecoder API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n\n# initialize and formulate the decoder\nFFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nFFdecoder(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").formulate()\n

"},{"location":"reference/ffdecoder/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize and formulate decoder with verbose logs\nFFdecoder(\"foo.mp4\", verbose=True).formulate()\n

"},{"location":"reference/ffdecoder/params/#ffparams","title":"ffparams","text":"

This dictionary parameter accepts all supported parameters formatted as its attributes:

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/ffdecoder/params/#supported-parameters","title":"Supported Parameters","text":""},{"location":"reference/ffdecoder/params/#a-ffmpeg-parameters","title":"A. FFmpeg Parameters","text":"

Almost any FFmpeg parameter (supported by installed FFmpeg) can be passed as dictionary attributes in ffparams parameter.

Let's assume we want to seek to 00:00:01.45 (or 1450msec) in time and decode one single frame from the given source (say foo.mp4) in FFdecoder API; then we can assign the required FFmpeg parameters as dictionary attributes as follows:

Kindly read FFmpeg Docs carefully before passing any additional values to ffparams parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate decoder with suitable source and FFmpeg params\ndecoder = FFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

"},{"location":"reference/ffdecoder/params/#b-exclusive-parameters","title":"B. Exclusive Parameters","text":"

In addition to FFmpeg parameters, FFdecoder API also supports few Exclusive Parameters to allow users to flexibly change its internal pipeline, properties, and handle some special FFmpeg parameters (such as repeated map) that cannot be assigned via. python dictionary.

These parameters are discussed below:

  • -vcodec (str) : This attribute works similar to -vcodec FFmpeg parameter for specifying supported decoders that are compiled with FFmpeg in use. If not specified, it's value is derived from source video metadata. Its usage is as follows:

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

    Use {\"-vcodec\":None} in ffparams to discard -vcodec FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -vcodec FFmpeg parameter in Decoding pipeline, for letting FFmpeg itself choose the best available video decoder for the given source.

    # define suitable parameter\nffparams = {\"-vcodec\": \"h264\"} # set decoder to `h264`\n

  • -framerate (float/int) : This attribute works similar to -framerate FFmpeg parameter for generating video-frames at specified framerate. If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -framerate attribute will be discarded!

    The output_frames_framerate metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -framerate parameter value (i.e. either null(special-case), undefined, or invalid type) , then -framerate/-r FFmpeg parameter value in Decoding pipeline uses output_frames_framerate metadata property extracted from Output Stream. Thereby, in case if no valid output_framerate metadata property is found, then API finally defaults to source_video_framerate metadata property extracted from Input Source Stream.

    In case neither output_framerate nor source_video_framerate valid metadata properties are found, then RuntimeError is raised.

    Use {\"-framerate\":\"null\"} in ffparams to discard -framerate/-r FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -framerate/-r FFmpeg parameter in Decoding pipeline, essentially for using only fps filter values, or even better, let FFmpeg itself choose the best available output framerate for the given source.

    # define suitable parameter\nffparams = {\"-framerate\": 60.0} # set input video source framerate to 60fps\n

  • -custom_resolution (tuple/list) : This attribute sets the custom resolution/size of the output frames. Its value can either be a tuple ((width,height)) or a list ([width, height]). If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -custom_resolution attribute will be discarded!

    The output_frames_resolution metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -custom_resolution parameter value (i.e. either null(special-case), undefined, or invalid type) , then -s/-size FFmpeg parameter value in Decoding pipeline uses output_frames_resolution metadata property extracted from Output Stream. Thereby, in case if no valid output_frames_resolution metadata property is found, then API finally defaults to source_video_resolution metadata property extracted from Input Source Stream.

    In case neither output_frames_resolution nor source_video_resolution valid metadata properties are found, then RuntimeError is raised.

    Use {\"-custom_resolution\":\"null\"} in ffparams to discard -size/-s FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -size/-s FFmpeg parameter in Decoding pipeline, essentially for using only fps filter values, or even better, let FFmpeg itself choose the best available output frames resolution for the given source.

    # define suitable parameter\nffparams = {\"-output_dimensions\": (1280,720)} # to produce a 1280x720 resolution/scale output video\n

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Difference from -clones parameter

    The -clones and -ffprefixes parameters, even though they fundamentally work the same, are meant to serve at different positions in the FFmpeg command. Normally, FFdecoder API pipeline looks something like following with these parameters in place:

    ffmpeg {{-ffprefixes FFmpeg params}} -vcodec h264 -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 {{-clones FFmpeg params}} -f rawvideo -\n

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -clones (list): This attribute sets the special FFmpeg parameters that are repeated more than once or occur in a specific order (that cannot be altered) in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-clones\": ['-map', '0:v:0', '-map', '1:a?']} \n\n# NOTE: Will be format as `ffmpeg -vcodec -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 -map 0:v:0 -map 1:a -f rawvideo -`\n

  • -custom_sourcer_params (dict) : This attribute assigns all Exclusive Parameter meant for Sourcer API's sourcer_params dictionary parameter directly through FFdecoder API. Its usage is as follows:

    # define suitable parameter meant for `sourcer_params`\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n

  • -default_stream_indexes (list/tuple) : This attribute assigns values directly to default_stream_indexes parameter in Sourcer API's probe_stream() method for selecting specific video and audio stream index in case of multiple ones. Value can be of format: (int,int) or [int,int] as follows:

    # define suitable parameter meant for `probe_stream()` method\nffparams = {\"-default_stream_indexes\": (0,1)} # (\"0th video stream\", \"1st audio stream\")\n

  • -enforce_cv_patch (bool) : This attribute can be enabled(True) for patching YUV pixel-formats (such as YUV420p, yuv444p, NV12, NV21 etc.) frames for seamless compatibility with OpenCV APIs such as imshow(), write() etc. It can be used as follows:

    As of now, YUV pixel-formats starting with YUV and NV are only supported.

    # define suitable parameter\nffparams = {\"-enforce_cv_patch\": True} # enables OpenCV patch for YUV frames\n

    YUV pixel-formats usage recipe can be found here \u27b6

  • -passthrough_audio (bool/list) : (Yet to be supported)

  1. Default pixel-format is calculated variably in FFdecoder API:

    • If frame_format != \"null\":
      • If frame_format parameter is valid and supported: Default pixel-format is frame_format parameter value.
      • If frame_format parameter is NOT valid or supported:
        • If output_frame_pixfmt metadata is available: Default pixel-format is output_frame_pixfmt metadata value.
        • If output_frame_pixfmt metadata is NOT available: Default pixel-format is rgb24 if supported otherwise source_video_pixfmt metadata value.
    • If frame_format == \"null\": Default pixel-format is source_video_pixfmt metadata value

    \u21a9\u21a9

"},{"location":"reference/sourcer/","title":"Sourcer API","text":"

Sourcer API acts as Source Probing Utility that, unlike other FFmpeg Wrappers which mostly use ffprobe module, attempts to open the given Input Source directly with FFmpeg inside a subprocess pipe, and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each media stream contained in it.

Sourcer API primarily acts as a backend for FFdecoder API for gathering, processing, and validating all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in formulating its default FFmpeg pipeline parameters for real-time video-frames generation.

Sourcer API is designed as a standalone Metadata Extraction API for easily parsing information from multimedia streams available in the given Input Source and returns it in either Human-readable (JSON string) or Machine-readable (Dictionary object) type with its retrieve_metadata() method.

All metadata attributes available with Sourcer API(On Windows) are discussed here \u27b6.

Furthermore, Sourcer's sourcer_params dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

Sourcer API parameters are explained here \u27b6

Source code in deffcode/sourcer.py
class Sourcer:\n    \"\"\"\n    > Sourcer API acts as **Source Probing Utility** that unlike other FFmpeg Wrappers which mostly uses [`ffprobe`](https://ffmpeg.org/ffprobe.html) module,\n    attempts to open the given Input Source directly with [**FFmpeg**](https://ffmpeg.org/) inside a [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe,\n    and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each\n    media stream contained in it.\n\n    Sourcer API primarily acts as a **backend for [FFdecoder API](../../reference/ffdecoder)** for gathering, processing, and validating\n    all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in\n    formulating its default FFmpeg pipeline parameters for real-time video-frames generation.\n\n    Sourcer API is design as a standalone **Metadata Extraction API** for easily parsing information from multimedia streams available in the\n    given Input Source and returns it in either Human-readable _(JSON string)_ or Machine-readable _(Dictionary object)_ type with its\n    [`retrieve_metadata()`](#deffcode.sourcer.Sourcer.retrieve_metadata) method.\n\n    !!! info \"All metadata attributes available with Sourcer API(On :fontawesome-brands-windows: Windows) are discussed [here \u27b6](../../recipes/basic/#display-source-video-metadata).\"\n\n    Furthermore, Sourcer's [`sourcer_params`](params/#sourcer_params) dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! 
info \"Sourcer API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **sourcer_params,\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the Sourcer Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n        # checks if machine in-use is running windows os or not\n        self.__machine_OS = platform.system()\n\n        # define internal parameters\n        self.__verbose_logs = (  # enable verbose if specified\n            verbose if (verbose and isinstance(verbose, bool)) else False\n        )\n\n        # handle metadata received\n        self.__ffsp_output = None\n\n        # sanitize sourcer_params\n        self.__sourcer_params = {\n            str(k).strip(): (\n                str(v).strip()\n                if not isinstance(v, (dict, list, int, float, tuple))\n                else v\n            )\n            for k, v in sourcer_params.items()\n        }\n\n        # handle whether to force validate source\n        self.__forcevalidatesource = self.__sourcer_params.pop(\n            \"-force_validate_source\", False\n        )\n        if not isinstance(self.__forcevalidatesource, bool):\n            # reset improper values\n            self.__forcevalidatesource = False\n\n        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        
self.__ffmpeg_prefixes = self.__sourcer_params.pop(\"-ffprefixes\", [])\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n\n        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n        __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n        if not isinstance(__ffmpeg_download_path, str):\n            # reset improper values\n            __ffmpeg_download_path = \"\"\n\n        # validate the FFmpeg assets and return location (also downloads static assets on windows)\n        self.__ffmpeg = get_valid_ffmpeg_path(\n            str(custom_ffmpeg),\n            True if self.__machine_OS == \"Windows\" else False,\n            ffmpeg_download_path=__ffmpeg_download_path,\n            verbose=self.__verbose_logs,\n        )\n\n        # check if valid FFmpeg path returned\n        if self.__ffmpeg:\n            self.__verbose_logs and logger.debug(\n                \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n            )\n        else:\n            # else raise error\n            raise RuntimeError(\n                \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. 
Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n            )\n\n        # sanitize externally accessible parameters and assign them\n        # handles source demuxer\n        if source is None:\n            # first check if source value is empty\n            # raise error if true\n            raise ValueError(\"Input `source` parameter is empty!\")\n        elif isinstance(source_demuxer, str):\n            # assign if valid demuxer value\n            self.__source_demuxer = source_demuxer.strip().lower()\n            # assign if valid demuxer value\n            assert self.__source_demuxer != \"auto\" or validate_device_index(\n                source\n            ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!\".format(\n                source\n            )\n        else:\n            # otherwise find valid default source demuxer value\n            # enforce \"auto\" if valid index device\n            self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n            # log if not valid index device and invalid type\n            self.__verbose_logs and not self.__source_demuxer in [\n                \"auto\",\n                None,\n            ] and logger.warning(\n                \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                    type(source_demuxer).__name__\n                )\n            )\n            # log if not valid index device and invalid type\n            self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n                \"Given source `{}` is a valid device index. 
Enforcing 'auto' demuxer.\".format(\n                    source\n                )\n            )\n\n        # handles source stream\n        self.__source = source\n\n        # creates shallow copy for further usage #TODO\n        self.__source_org = copy.copy(self.__source)\n        self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n        # handles all extracted devices names/paths list\n        # when source_demuxer = \"auto\"\n        self.__extracted_devices_list = []\n\n        # various source stream params\n        self.__default_video_resolution = \"\"  # handles stream resolution\n        self.__default_video_orientation = \"\"  # handles stream's video orientation\n        self.__default_video_framerate = \"\"  # handles stream framerate\n        self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n        self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n        self.__default_video_decoder = \"\"  # handles stream's video decoder\n        self.__default_source_duration = \"\"  # handles stream's video duration\n        self.__approx_video_nframes = \"\"  # handles approx stream frame number\n        self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n        self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n        # handle various stream flags\n        self.__contains_video = False  # contains video\n        self.__contains_audio = False  # contains audio\n        self.__contains_images = False  # contains image-sequence\n\n        # handles output parameters through filters\n        self.__metadata_output = None  # handles output stream metadata\n        self.__output_frames_resolution = \"\"  # handles output stream resolution\n        self.__output_framerate = \"\"  # handles output stream framerate\n        self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n        self.__output_orientation = \"\"  # handles output frame 
orientation\n\n        # check whether metadata probed or not?\n        self.__metadata_probed = False\n\n    def probe_stream(self, default_stream_indexes=(0, 0)):\n        \"\"\"\n        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n        Parameters:\n            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n        **Returns:** Reference to the instance object.\n        \"\"\"\n        assert (\n            isinstance(default_stream_indexes, (list, tuple))\n            and len(default_stream_indexes) == 2\n            and all(isinstance(x, int) for x in default_stream_indexes)\n        ), \"Invalid default_stream_indexes value!\"\n        # validate source and extract metadata\n        self.__ffsp_output = self.__validate_source(\n            self.__source,\n            source_demuxer=self.__source_demuxer,\n            forced_validate=(\n                self.__forcevalidatesource if self.__source_demuxer is None else True\n            ),\n        )\n        # parse resolution and framerate\n        video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0]\n        )\n        if video_rfparams:\n            self.__default_video_resolution = video_rfparams[\"resolution\"]\n            self.__default_video_framerate = video_rfparams[\"framerate\"]\n            self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n        # parse output parameters through filters (if available)\n        if not (self.__metadata_output is None):\n            # parse output resolution and framerate\n            out_video_rfparams = self.__extract_resolution_framerate(\n                default_stream=default_stream_indexes[0], 
extract_output=True\n            )\n            if out_video_rfparams:\n                self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n                self.__output_framerate = out_video_rfparams[\"framerate\"]\n                self.__output_orientation = out_video_rfparams[\"orientation\"]\n            # parse output pixel-format\n            self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n                default_stream=default_stream_indexes[0], extract_output=True\n            )\n\n        # parse pixel-format\n        self.__default_video_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0]\n        )\n\n        # parse video decoder\n        self.__default_video_decoder = self.__extract_video_decoder(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse rest of metadata\n        if not self.__contains_images:\n            # parse video bitrate\n            self.__default_video_bitrate = self.__extract_video_bitrate(\n                default_stream=default_stream_indexes[0]\n            )\n            # parse audio bitrate and samplerate\n            audio_params = self.__extract_audio_bitrate_nd_samplerate(\n                default_stream=default_stream_indexes[1]\n            )\n            if audio_params:\n                self.__default_audio_bitrate = audio_params[\"bitrate\"]\n                self.__default_audio_samplerate = audio_params[\"samplerate\"]\n            # parse video duration\n            self.__default_source_duration = self.__extract_duration()\n            # calculate all flags\n            if (\n                self.__default_video_bitrate\n                or (self.__default_video_framerate and self.__default_video_resolution)\n            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n                self.__contains_video = True\n                self.__contains_audio = True\n            elif 
self.__default_video_bitrate or (\n                self.__default_video_framerate and self.__default_video_resolution\n            ):\n                self.__contains_video = True\n            elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n                self.__contains_audio = True\n            else:\n                raise ValueError(\n                    \"Invalid source with no decodable audio or video stream provided. Aborting!\"\n                )\n        # calculate approximate number of video frame\n        if self.__default_video_framerate and self.__default_source_duration:\n            self.__approx_video_nframes = np.rint(\n                self.__default_video_framerate * self.__default_source_duration\n            ).astype(int, casting=\"unsafe\")\n\n        # signal metadata has been probed\n        self.__metadata_probed = True\n\n        # return reference to the instance object.\n        return self\n\n    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n        \"\"\"\n        This method returns Parsed/Probed Metadata of the given source.\n\n        Parameters:\n            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! 
Check if you called `probe_stream()` method.\"\n        # log it\n        self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n        # create metadata dictionary from information populated in private class variables\n        metadata = {\n            \"ffmpeg_binary_path\": self.__ffmpeg,\n            \"source\": self.__source,\n        }\n        metadata_missing = {}\n        # Only either `source_demuxer` or `source_extension` attribute can be\n        # present in metadata.\n        if self.__source_demuxer is None:\n            metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n        else:\n            metadata.update({\"source_demuxer\": self.__source_demuxer})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n        # add source video metadata properties\n        metadata.update(\n            {\n                \"source_video_resolution\": self.__default_video_resolution,\n                \"source_video_pixfmt\": self.__default_video_pixfmt,\n                \"source_video_framerate\": self.__default_video_framerate,\n                \"source_video_orientation\": self.__default_video_orientation,\n                \"source_video_decoder\": self.__default_video_decoder,\n                \"source_duration_sec\": self.__default_source_duration,\n                \"approx_video_nframes\": (\n                    int(self.__approx_video_nframes)\n                    if self.__approx_video_nframes\n                    and not any(\n                        \"loop\" in x for x in self.__ffmpeg_prefixes\n                    )  # check if any loops in prefix\n                    and not any(\n                        \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                    )  # check if any loops in filters\n       
             else None\n                ),\n                \"source_video_bitrate\": self.__default_video_bitrate,\n                \"source_audio_bitrate\": self.__default_audio_bitrate,\n                \"source_audio_samplerate\": self.__default_audio_samplerate,\n                \"source_has_video\": self.__contains_video,\n                \"source_has_audio\": self.__contains_audio,\n                \"source_has_image_sequence\": self.__contains_images,\n            }\n        )\n        # add output metadata properties (if available)\n        if not (self.__metadata_output is None):\n            metadata.update(\n                {\n                    \"output_frames_resolution\": self.__output_frames_resolution,\n                    \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                    \"output_framerate\": self.__output_framerate,\n                    \"output_orientation\": self.__output_orientation,\n                }\n            )\n        else:\n            # since output stream metadata properties are only available when additional\n            # FFmpeg parameters(such as filters) are defined manually, thereby missing\n            # output stream properties are handled by assigning them counterpart source\n            # stream metadata property values\n            force_retrieve_missing and metadata_missing.update(\n                {\n                    \"output_frames_resolution\": self.__default_video_resolution,\n                    \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                    \"output_framerate\": self.__default_video_framerate,\n                    \"output_orientation\": self.__default_video_orientation,\n                }\n            )\n        # log it\n        self.__verbose_logs and logger.debug(\n            \"Metadata Extraction completed successfully!\"\n        )\n        # parse as JSON string(`json.dumps`), if defined\n        metadata = json.dumps(metadata, indent=2) if 
pretty_json else metadata\n        metadata_missing = (\n            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n        )\n        # return `metadata` or `(metadata, metadata_missing)`\n        return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n\n    @property\n    def enumerate_devices(self):\n        \"\"\"\n        A property object that enumerate all probed Camera Devices connected to your system names\n        along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.\n\n        **Returns:** Probed Camera Devices as python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n\n        # log if specified\n        self.__verbose_logs and logger.debug(\"Enumerating all probed Camera Devices.\")\n\n        # return probed Camera Devices as python dictionary.\n        return {\n            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)\n        }\n\n    def __validate_source(self, source, source_demuxer=None, forced_validate=False):\n        \"\"\"\n        This Internal method validates source and extracts its metadata.\n\n        Parameters:\n            source_demuxer(str): specifies the demuxer(`-f`) for the input source.\n            forced_validate (bool): whether to skip validation tests or not?\n\n        **Returns:** `True` if passed tests else `False`.\n        \"\"\"\n        logger.critical(\n            \"{} :: {} :: {}\".format(\n                source_demuxer,\n                source_demuxer in get_supported_demuxers(self.__ffmpeg),\n                get_supported_demuxers(self.__ffmpeg),\n            )\n        )\n        # validate source demuxer(if defined)\n        if not (source_demuxer is None):\n            # check if \"auto\" demuxer is 
specified\n            if source_demuxer == \"auto\":\n                # integerise source to get index\n                index = int(source)\n                # extract devices list and actual demuxer value\n                (\n                    self.__extracted_devices_list,\n                    source_demuxer,\n                ) = extract_device_n_demuxer(\n                    self.__ffmpeg,\n                    machine_OS=self.__machine_OS,\n                    verbose=self.__verbose_logs,\n                )\n                # valid indexes range\n                valid_indexes = [\n                    x\n                    for x in range(\n                        -len(self.__extracted_devices_list),\n                        len(self.__extracted_devices_list),\n                    )\n                ]\n                # check index is within valid range\n                if self.__extracted_devices_list and index in valid_indexes:\n                    # overwrite actual source device name/path/index\n                    if self.__machine_OS == \"Windows\":\n                        # Windows OS requires \"video=\" suffix\n                        self.__source = source = \"video={}\".format(\n                            self.__extracted_devices_list[index]\n                        )\n                    elif self.__machine_OS == \"Darwin\":\n                        # Darwin OS requires only device indexes\n                        self.__source = source = (\n                            str(index)\n                            if index >= 0\n                            else str(len(self.__extracted_devices_list) + index)\n                        )\n                    else:\n                        # Linux OS require /dev/video format\n                        self.__source = source = next(\n                            iter(self.__extracted_devices_list[index].keys())\n                        )\n                    # overwrite source_demuxer global variable\n           
         self.__source_demuxer = source_demuxer\n                    self.__verbose_logs and logger.debug(\n                        \"Successfully configured device `{}` at index `{}` with demuxer `{}`.\".format(\n                            (\n                                self.__extracted_devices_list[index]\n                                if self.__machine_OS != \"Linux\"\n                                else next(\n                                    iter(self.__extracted_devices_list[index].values())\n                                )[0]\n                            ),\n                            (\n                                index\n                                if index >= 0\n                                else len(self.__extracted_devices_list) + index\n                            ),\n                            self.__source_demuxer,\n                        )\n                    )\n                else:\n                    # raise error otherwise\n                    raise ValueError(\n                        \"Given source `{}` is not a valid device index. Possible values index values can be: {}\".format(\n                            source,\n                            \",\".join(f\"{x}\" for x in valid_indexes),\n                        )\n                    )\n            # otherwise validate against supported demuxers\n            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):\n                # raise if fails\n                raise ValueError(\n                    \"Installed FFmpeg failed to recognize `{}` demuxer. 
Check `source_demuxer` parameter value again!\".format(\n                        source_demuxer\n                    )\n                )\n            else:\n                pass\n\n        # assert if valid source\n        assert source and isinstance(\n            source, str\n        ), \"Input `source` parameter is of invalid type!\"\n\n        # Differentiate input\n        if forced_validate:\n            source_demuxer is None and logger.critical(\n                \"Forcefully passing validation test for given source!\"\n            )\n            self.__source = source\n        elif os.path.isfile(source):\n            self.__source = os.path.abspath(source)\n        elif is_valid_image_seq(\n            self.__ffmpeg, source=source, verbose=self.__verbose_logs\n        ):\n            self.__source = source\n            self.__contains_images = True\n        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):\n            self.__source = source\n        else:\n            logger.error(\"`source` value is unusable or unsupported!\")\n            # discard the value otherwise\n            raise ValueError(\"Input source is invalid. 
Aborting!\")\n        # format command\n        if self.__sourcer_params:\n            # handle additional params separately\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + [\"-t\", \"0.0001\"]\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n                + dict2Args(self.__sourcer_params)\n                + [\"-f\", \"null\", \"-\"]\n            )\n        else:\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n            )\n        # extract metadata, decode, and filter\n        metadata = (\n            check_sp_output(\n                meta_cmd,\n                force_retrieve_stderr=True,\n            )\n            .decode(\"utf-8\")\n            .strip()\n        )\n        # separate input and output metadata (if available)\n        if \"Output #\" in metadata:\n            (metadata, self.__metadata_output) = metadata.split(\"Output #\")\n        # return metadata based on params\n        return metadata\n\n    def __extract_video_bitrate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream bitrate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video bitrate as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        video_bitrate_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if video_bitrate_text:\n            
selected_stream = video_bitrate_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(video_bitrate_text)\n                    else 0\n                )\n            ]\n            filtered_bitrate = re.findall(\n                r\",\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            if len(filtered_bitrate):\n                default_video_bitrate = filtered_bitrate[0].split(\" \")[1:3]\n                final_bitrate = \"{}{}\".format(\n                    int(default_video_bitrate[0].strip()),\n                    \"k\" if (default_video_bitrate[1].strip().startswith(\"k\")) else \"M\",\n                )\n                return final_bitrate\n        return \"\"\n\n    def __extract_video_decoder(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream decoder from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video decoder as string value.\n        \"\"\"\n        assert isinstance(default_stream, int), \"Invalid input!\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if meta_text:\n            selected_stream = meta_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n            filtered_pixfmt = re.findall(\n                r\"Video:\\s[a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):\n  
      \"\"\"\n        This Internal method parses default video-stream pixel-format from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video pixel-format as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        if meta_text:\n            selected_stream = meta_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n            filtered_pixfmt = re.findall(\n                r\",\\s[a-z][a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default audio-stream bitrate and sample-rate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n\n        **Returns:** Default Audio-stream bitrate and sample-rate as string value.\n        \"\"\"\n        identifiers = [\"Audio:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n     
           (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n            # filter data\n            filtered_audio_bitrate = re.findall(\n                r\"fltp,\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            filtered_audio_samplerate = re.findall(\n                r\",\\s[0-9]+\\sHz\", selected_stream.strip()\n            )\n            # get audio bitrate metadata\n            if filtered_audio_bitrate:\n                filtered = filtered_audio_bitrate[0].split(\" \")[1:3]\n                result[\"bitrate\"] = \"{}{}\".format(\n                    int(filtered[0].strip()),\n                    \"k\" if (filtered[1].strip().startswith(\"k\")) else \"M\",\n                )\n            else:\n                result[\"bitrate\"] = \"\"\n            # get audio samplerate metadata\n            result[\"samplerate\"] = (\n                filtered_audio_samplerate[0].split(\", \")[1]\n                if filtered_audio_samplerate\n                else \"\"\n            )\n        return result if result and (len(result) == 2) else {}\n\n    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):\n        \"\"\"\n        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?\n\n        **Returns:** Default Video resolution and framerate as dictionary value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        # use output metadata if available\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in 
line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        # extract video orientation metadata if available\n        identifiers_orientation = [\"displaymatrix:\", \"rotation\"]\n        meta_text_orientation = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n        )\n        # use metadata if available\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n\n            # filter data\n            filtered_resolution = re.findall(\n                r\"([1-9]\\d+)x([1-9]\\d+)\", selected_stream.strip()\n            )\n            filtered_framerate = re.findall(\n                r\"\\d+(?:\\.\\d+)?\\sfps\", selected_stream.strip()\n            )\n            filtered_tbr = re.findall(r\"\\d+(?:\\.\\d+)?\\stbr\", selected_stream.strip())\n\n            # extract framerate metadata\n            if filtered_framerate:\n                # calculate actual framerate\n                result[\"framerate\"] = float(\n                    re.findall(r\"[\\d\\.\\d]+\", filtered_framerate[0])[0]\n                )\n            elif filtered_tbr:\n                # guess from TBR(if fps unavailable)\n                result[\"framerate\"] = float(\n             
       re.findall(r\"[\\d\\.\\d]+\", filtered_tbr[0])[0]\n                )\n\n            # extract resolution metadata\n            if filtered_resolution:\n                result[\"resolution\"] = [int(x) for x in filtered_resolution[0]]\n\n            # extract video orientation metadata\n            if meta_text_orientation:\n                selected_stream = meta_text_orientation[\n                    (\n                        default_stream\n                        if default_stream > 0 and default_stream < len(meta_text)\n                        else 0\n                    )\n                ]\n                filtered_orientation = re.findall(\n                    r\"[-]?\\d+\\.\\d+\", selected_stream.strip()\n                )\n                result[\"orientation\"] = float(filtered_orientation[0])\n            else:\n                result[\"orientation\"] = 0.0\n\n        return result if result and (len(result) == 3) else {}\n\n    def __extract_duration(self, inseconds=True):\n        \"\"\"\n        This Internal method parses stream duration from metadata.\n\n        Parameters:\n            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?\n\n        **Returns:** Default Stream duration as string value.\n        \"\"\"\n        identifiers = [\"Duration:\"]\n        stripped_data = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if stripped_data:\n            t_duration = re.findall(\n                r\"(?:[01]\\d|2[0123]):(?:[012345]\\d):(?:[012345]\\d+(?:\\.\\d+)?)\",\n                stripped_data[0],\n            )\n            if t_duration:\n                return (\n                    sum(\n                        float(x) * 60**i\n                        for i, x in enumerate(reversed(t_duration[0].split(\":\")))\n                    )\n                    if inseconds\n                    else t_duration\n   
             )\n        return 0\n

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.enumerate_devices","title":"enumerate_devices property readonly","text":"

A property object that enumerates all probed Camera Devices connected to your system by name, along with their respective \"device indexes\" or \"camera indexes\", as a python dictionary.

Returns: Probed Camera Devices as python dictionary.

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.__init__","title":"__init__(self, source, source_demuxer=None, custom_ffmpeg='', verbose=False, **sourcer_params) special","text":"

This constructor method initializes the object state and attributes of the Sourcer Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False sourcer_params dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/sourcer.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **sourcer_params,\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the Sourcer Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n    # checks if machine in-use is running windows os or not\n    self.__machine_OS = platform.system()\n\n    # define internal parameters\n    self.__verbose_logs = (  # enable verbose if specified\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # handle metadata received\n    self.__ffsp_output = None\n\n    # sanitize sourcer_params\n    self.__sourcer_params = {\n        str(k).strip(): (\n            str(v).strip()\n            if not isinstance(v, (dict, list, int, float, tuple))\n            else v\n        )\n        for k, v in sourcer_params.items()\n    }\n\n    # handle whether to force validate source\n    self.__forcevalidatesource = self.__sourcer_params.pop(\n        \"-force_validate_source\", False\n    )\n    if not isinstance(self.__forcevalidatesource, bool):\n        # reset improper values\n        self.__forcevalidatesource = False\n\n    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__sourcer_params.pop(\"-ffprefixes\", [])\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                
type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n\n    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n    __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n    if not isinstance(__ffmpeg_download_path, str):\n        # reset improper values\n        __ffmpeg_download_path = \"\"\n\n    # validate the FFmpeg assets and return location (also downloads static assets on windows)\n    self.__ffmpeg = get_valid_ffmpeg_path(\n        str(custom_ffmpeg),\n        True if self.__machine_OS == \"Windows\" else False,\n        ffmpeg_download_path=__ffmpeg_download_path,\n        verbose=self.__verbose_logs,\n    )\n\n    # check if valid FFmpeg path returned\n    if self.__ffmpeg:\n        self.__verbose_logs and logger.debug(\n            \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n        )\n    else:\n        # else raise error\n        raise RuntimeError(\n            \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n        )\n\n    # sanitize externally accessible parameters and assign them\n    # handles source demuxer\n    if source is None:\n        # first check if source value is empty\n        # raise error if true\n        raise ValueError(\"Input `source` parameter is empty!\")\n    elif isinstance(source_demuxer, str):\n        # assign if valid demuxer value\n        self.__source_demuxer = source_demuxer.strip().lower()\n        # assign if valid demuxer value\n        assert self.__source_demuxer != \"auto\" or validate_device_index(\n            source\n        ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. 
Aborting!\".format(\n            source\n        )\n    else:\n        # otherwise find valid default source demuxer value\n        # enforce \"auto\" if valid index device\n        self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n        # log if not valid index device and invalid type\n        self.__verbose_logs and not self.__source_demuxer in [\n            \"auto\",\n            None,\n        ] and logger.warning(\n            \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                type(source_demuxer).__name__\n            )\n        )\n        # log if not valid index device and invalid type\n        self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n            \"Given source `{}` is a valid device index. Enforcing 'auto' demuxer.\".format(\n                source\n            )\n        )\n\n    # handles source stream\n    self.__source = source\n\n    # creates shallow copy for further usage #TODO\n    self.__source_org = copy.copy(self.__source)\n    self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n    # handles all extracted devices names/paths list\n    # when source_demuxer = \"auto\"\n    self.__extracted_devices_list = []\n\n    # various source stream params\n    self.__default_video_resolution = \"\"  # handles stream resolution\n    self.__default_video_orientation = \"\"  # handles stream's video orientation\n    self.__default_video_framerate = \"\"  # handles stream framerate\n    self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n    self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n    self.__default_video_decoder = \"\"  # handles stream's video decoder\n    self.__default_source_duration = \"\"  # handles stream's video duration\n    self.__approx_video_nframes = \"\"  # handles approx stream frame number\n    self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n    
self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n    # handle various stream flags\n    self.__contains_video = False  # contains video\n    self.__contains_audio = False  # contains audio\n    self.__contains_images = False  # contains image-sequence\n\n    # handles output parameters through filters\n    self.__metadata_output = None  # handles output stream metadata\n    self.__output_frames_resolution = \"\"  # handles output stream resolution\n    self.__output_framerate = \"\"  # handles output stream framerate\n    self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n    self.__output_orientation = \"\"  # handles output frame orientation\n\n    # check whether metadata probed or not?\n    self.__metadata_probed = False\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.probe_stream","title":"probe_stream(self, default_stream_indexes=(0, 0))","text":"

This method Parses/Probes FFmpeg subprocess pipe's Standard Output for given input source and Populates the information in private class variables.

Parameters:

Name Type Description Default default_stream_indexes list, tuple

selects specific video and audio stream index in case of multiple ones. Value can be of format: (int,int). For example (0,1) is (\"0th video stream\", \"1st audio stream\").

(0, 0)

Returns: Reference to the instance object.

Source code in deffcode/sourcer.py
def probe_stream(self, default_stream_indexes=(0, 0)):\n    \"\"\"\n    This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n    Parameters:\n        default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n    **Returns:** Reference to the instance object.\n    \"\"\"\n    assert (\n        isinstance(default_stream_indexes, (list, tuple))\n        and len(default_stream_indexes) == 2\n        and all(isinstance(x, int) for x in default_stream_indexes)\n    ), \"Invalid default_stream_indexes value!\"\n    # validate source and extract metadata\n    self.__ffsp_output = self.__validate_source(\n        self.__source,\n        source_demuxer=self.__source_demuxer,\n        forced_validate=(\n            self.__forcevalidatesource if self.__source_demuxer is None else True\n        ),\n    )\n    # parse resolution and framerate\n    video_rfparams = self.__extract_resolution_framerate(\n        default_stream=default_stream_indexes[0]\n    )\n    if video_rfparams:\n        self.__default_video_resolution = video_rfparams[\"resolution\"]\n        self.__default_video_framerate = video_rfparams[\"framerate\"]\n        self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n    # parse output parameters through filters (if available)\n    if not (self.__metadata_output is None):\n        # parse output resolution and framerate\n        out_video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n        if out_video_rfparams:\n            self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n            self.__output_framerate = out_video_rfparams[\"framerate\"]\n            
self.__output_orientation = out_video_rfparams[\"orientation\"]\n        # parse output pixel-format\n        self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n\n    # parse pixel-format\n    self.__default_video_pixfmt = self.__extract_video_pixfmt(\n        default_stream=default_stream_indexes[0]\n    )\n\n    # parse video decoder\n    self.__default_video_decoder = self.__extract_video_decoder(\n        default_stream=default_stream_indexes[0]\n    )\n    # parse rest of metadata\n    if not self.__contains_images:\n        # parse video bitrate\n        self.__default_video_bitrate = self.__extract_video_bitrate(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse audio bitrate and samplerate\n        audio_params = self.__extract_audio_bitrate_nd_samplerate(\n            default_stream=default_stream_indexes[1]\n        )\n        if audio_params:\n            self.__default_audio_bitrate = audio_params[\"bitrate\"]\n            self.__default_audio_samplerate = audio_params[\"samplerate\"]\n        # parse video duration\n        self.__default_source_duration = self.__extract_duration()\n        # calculate all flags\n        if (\n            self.__default_video_bitrate\n            or (self.__default_video_framerate and self.__default_video_resolution)\n        ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n            self.__contains_video = True\n            self.__contains_audio = True\n        elif self.__default_video_bitrate or (\n            self.__default_video_framerate and self.__default_video_resolution\n        ):\n            self.__contains_video = True\n        elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n            self.__contains_audio = True\n        else:\n            raise ValueError(\n                \"Invalid source with no decodable audio or video stream 
provided. Aborting!\"\n            )\n    # calculate approximate number of video frame\n    if self.__default_video_framerate and self.__default_source_duration:\n        self.__approx_video_nframes = np.rint(\n            self.__default_video_framerate * self.__default_source_duration\n        ).astype(int, casting=\"unsafe\")\n\n    # signal metadata has been probed\n    self.__metadata_probed = True\n\n    # return reference to the instance object.\n    return self\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.retrieve_metadata","title":"retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False)","text":"

This method returns Parsed/Probed Metadata of the given source.

Parameters:

Name Type Description Default pretty_json bool

whether to return metadata as JSON string(if True) or Dictionary(if False) type?

False force_retrieve_missing bool

whether to also return metadata missing in current Pipeline. This method returns (metadata, metadata_missing) tuple if force_retrieve_missing=True instead of metadata.

required

Returns: metadata or (metadata, metadata_missing), formatted as JSON string or python dictionary.

Source code in deffcode/sourcer.py
def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n    \"\"\"\n    This method returns Parsed/Probed Metadata of the given source.\n\n    Parameters:\n        pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n        force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n    **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n    \"\"\"\n    # check if metadata has been probed or not\n    assert (\n        self.__metadata_probed\n    ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n    # log it\n    self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n    # create metadata dictionary from information populated in private class variables\n    metadata = {\n        \"ffmpeg_binary_path\": self.__ffmpeg,\n        \"source\": self.__source,\n    }\n    metadata_missing = {}\n    # Only either `source_demuxer` or `source_extension` attribute can be\n    # present in metadata.\n    if self.__source_demuxer is None:\n        metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n    else:\n        metadata.update({\"source_demuxer\": self.__source_demuxer})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n    # add source video metadata properties\n    metadata.update(\n        {\n            \"source_video_resolution\": self.__default_video_resolution,\n            \"source_video_pixfmt\": self.__default_video_pixfmt,\n            \"source_video_framerate\": self.__default_video_framerate,\n            \"source_video_orientation\": 
self.__default_video_orientation,\n            \"source_video_decoder\": self.__default_video_decoder,\n            \"source_duration_sec\": self.__default_source_duration,\n            \"approx_video_nframes\": (\n                int(self.__approx_video_nframes)\n                if self.__approx_video_nframes\n                and not any(\n                    \"loop\" in x for x in self.__ffmpeg_prefixes\n                )  # check if any loops in prefix\n                and not any(\n                    \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                )  # check if any loops in filters\n                else None\n            ),\n            \"source_video_bitrate\": self.__default_video_bitrate,\n            \"source_audio_bitrate\": self.__default_audio_bitrate,\n            \"source_audio_samplerate\": self.__default_audio_samplerate,\n            \"source_has_video\": self.__contains_video,\n            \"source_has_audio\": self.__contains_audio,\n            \"source_has_image_sequence\": self.__contains_images,\n        }\n    )\n    # add output metadata properties (if available)\n    if not (self.__metadata_output is None):\n        metadata.update(\n            {\n                \"output_frames_resolution\": self.__output_frames_resolution,\n                \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                \"output_framerate\": self.__output_framerate,\n                \"output_orientation\": self.__output_orientation,\n            }\n        )\n    else:\n        # since output stream metadata properties are only available when additional\n        # FFmpeg parameters(such as filters) are defined manually, thereby missing\n        # output stream properties are handled by assigning them counterpart source\n        # stream metadata property values\n        force_retrieve_missing and metadata_missing.update(\n            {\n                \"output_frames_resolution\": self.__default_video_resolution,\n       
         \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                \"output_framerate\": self.__default_video_framerate,\n                \"output_orientation\": self.__default_video_orientation,\n            }\n        )\n    # log it\n    self.__verbose_logs and logger.debug(\n        \"Metadata Extraction completed successfully!\"\n    )\n    # parse as JSON string(`json.dumps`), if defined\n    metadata = json.dumps(metadata, indent=2) if pretty_json else metadata\n    metadata_missing = (\n        json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n    )\n    # return `metadata` or `(metadata, metadata_missing)`\n    return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n
"},{"location":"reference/sourcer/params/","title":"Sourcer API Parameters","text":""},{"location":"reference/sourcer/params/#source","title":"source","text":"

This parameter defines the input source (-i) for probing.

Sourcer API will throw AssertionError if source provided is invalid or missing.

Sourcer API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize the sourcer and probe it\nsourcer = Sourcer('/home/foo.mp4').probe_stream()\n
  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nsourcer_params = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize the sourcer with define parameters\nsourcer = Sourcer('img%03d.png', verbose=True, **sourcer_params).probe_stream()\n
    # initialize the sourcer and probe it\nsourcer = Sourcer('img%03d.png', verbose=True).probe_stream()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img*.png', verbose=True, **sourcer_params).probe_stream()\n
    # define `-loop 1` for looping\nsourcer_params = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img.jpg', verbose=True, **sourcer_params).probe_stream()\n
  • Network Address: Valid (http(s), rtp, rtsp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nsourcer_params = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **sourcer_params).probe_stream()\n
  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. For example, for using \"0\" index device as source on Windows, we can do as follows in Sourcer API:

    Requirement for using Camera Device as source in Sourcer API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieve Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

    # initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", verbose=True).probe_stream()\n
  • Video Capture Devices: Valid video probe device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in Sourcer API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in Sourcer API as follows:

      # initialize the sourcer with \"USB2.0 Camera\" source and probe it\nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nsourcer_params = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize the sourcer with \"Camera\" source and probe it\nsourcer = Sourcer(\"Camera\", source_demuxer=\"dshow\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all video capture devices such as from an USB webcam. You can refer following steps to identify and specify your probe video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is a index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

      You can use sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in Sourcer API as follows:

      # initialize the sourcer with \"/dev/video0\" source and probe it\nsourcer = Sourcer(\"/dev/video0\", source_demuxer=\"v4l2\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in Sourcer API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize the sourcer with `1` index source and probe it\nsourcer = Sourcer(\"1\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to probe from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize the sourcer with \"Integrated iSight-camera\" source \nsourcer = Sourcer(\"Integrated\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

    # initialize the sourcer with \"USB2.0 Camera\" source \nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
  • Screen Capturing/Recording: Valid screen probe device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"0:\" indexed device with avfoundation source demuxer on MacOS, we can do as follows in Sourcer API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for probing:

    For Windows OS users, dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    # define framerate\nsourcer_params = {\"-framerate\": \"30\"}\n\n# initialize the sourcer with \"desktop\" source and probe it\nsourcer = Sourcer(\"desktop\", source_demuxer=\"gdigrab\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the x11grab to probe an X11 display. You can refer following steps to specify source for probing:

    # initialize the sourcer with \":0.0\" desktop source and probe it\nsourcer = Sourcer(\":0.0\", source_demuxer=\"x11grab\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index in Sourcer API:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    You can enumerate all the available input devices including screens ready to be probed using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n

    Then, you can specify and initialize your located screens in Sourcer API using its index shown:

    # initialize the sourcer with `0:` index desktop screen and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

    # initialize the sourcer with \"0:\" source and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n
  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and probing Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in Sourcer API:

    # initialize the sourcer with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate and probe it\nsourcer = Sourcer(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).probe_stream()\n

"},{"location":"reference/sourcer/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, as well as lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph.

Any invalid or unsupported value passed to the source_demuxer parameter will raise an Assertion error!

Use the ffmpeg -demuxers terminal command to list all FFmpeg supported demuxers.

Specifying source_demuxer for using Camera Device Index as source in Sourcer API

For using Camera Device Index as source in Sourcer API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\").probe_stream()\n
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", source_demuxer=\"auto\").probe_stream()\n

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize the sourcer with `dshow` demuxer and probe it\nsourcer = Sourcer(\"foo.mp4\", source_demuxer=\"dshow\").probe_stream()\n

"},{"location":"reference/sourcer/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then Sourcer API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path exclusive parameter in Sourcer API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in Sourcer API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nsourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}\n\n# initialize the sourcer\nSourcer(\"foo.mp4\", verbose=True, **sourcer_params).probe_stream()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nSourcer(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").probe_stream()\n

"},{"location":"reference/sourcer/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize the sourcer with verbose logs\nSourcer(\"foo.mp4\", verbose=True).probe_stream()\n

"},{"location":"reference/sourcer/params/#sourcer_params","title":"sourcer_params","text":"

This dictionary parameter accepts all Exclusive Parameters formatted as its attributes:

Additional FFmpeg parameters

In addition to Exclusive Parameters, Sourcer API supports almost any FFmpeg parameter (supported by installed FFmpeg), and thereby can be passed as dictionary attributes in sourcer_params parameter.

Kindly read FFmpeg Docs carefully before passing any additional values to sourcer_params parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/sourcer/params/#exclusive-parameters","title":"Exclusive Parameters","text":"

Sourcer API supports few Exclusive Parameters to allow users to flexibly change its probing properties and handle some special FFmpeg parameters.

These parameters are discussed below:

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before the input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in Sourcer's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nsourcer_params = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -ffmpeg_download_path (string): sets the custom directory for downloading FFmpeg Static Binaries in Compression Mode, during the Auto-Installation on Windows Machines Only. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows:

    sourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"} # will be saved to \"C:/User/foo/foo1\"\n

  • -force_validate_source (bool): forcefully passes validation test for given source which is required for some special cases with unusual input. It can be used as follows:

    sourcer_params = {\"-force_validate_source\": True} # will pass validation test forcefully\n

"}]} \ No newline at end of file diff --git a/v0.2.6-dev/reference/ffhelper/index.html b/v0.2.6-dev/reference/ffhelper/index.html index efb61bc..dd26d31 100644 --- a/v0.2.6-dev/reference/ffhelper/index.html +++ b/v0.2.6-dev/reference/ffhelper/index.html @@ -461,21 +461,20 @@ supported_protocols = splitted[splitted.index("Output:") + 1 : len(splitted) - 1] # RTSP is a demuxer somehow # support both RTSP and RTSPS(over SSL) - logger.critical(get_supported_demuxers(path)) - supported_protocols += ( - ["rtsp", "rtsps"] if "rtsp" in get_supported_demuxers(path) else [] - ) - # Test and return result whether scheme is supported - if extracted_scheme_url and extracted_scheme_url in supported_protocols: - verbose and logger.debug( - "URL scheme `{}` is supported by FFmpeg.".format(extracted_scheme_url) - ) - return True - else: - verbose and logger.warning( - "URL scheme `{}` isn't supported by FFmpeg!".format(extracted_scheme_url) - ) - return False + supported_protocols += ( + ["rtsp", "rtsps"] if "rtsp" in get_supported_demuxers(path) else [] + ) + # Test and return result whether scheme is supported + if extracted_scheme_url and extracted_scheme_url in supported_protocols: + verbose and logger.debug( + "URL scheme `{}` is supported by FFmpeg.".format(extracted_scheme_url) + ) + return True + else: + verbose and logger.warning( + "URL scheme `{}` isn't supported by FFmpeg!".format(extracted_scheme_url) + ) + return False

 

check_sp_output

Returns FFmpeg stdout output from subprocess module.

Parameters:

Name Type Description Default
args based on input

Non Keyword Arguments

()
kwargs based on input

Keyword Arguments

{}

Returns: A string value.

Source code in deffcode/ffhelper.py
def check_sp_output(*args, **kwargs):
     """
     ## check_sp_output
diff --git a/v0.2.6-dev/reference/sourcer/index.html b/v0.2.6-dev/reference/sourcer/index.html
index 1f292f6..98fc885 100644
--- a/v0.2.6-dev/reference/sourcer/index.html
+++ b/v0.2.6-dev/reference/sourcer/index.html
@@ -53,756 +53,781 @@
 
         # sanitize sourcer_params
         self.__sourcer_params = {
-            str(k).strip(): str(v).strip()
-            if not isinstance(v, (dict, list, int, float, tuple))
-            else v
-            for k, v in sourcer_params.items()
-        }
-
-        # handle whether to force validate source
-        self.__forcevalidatesource = self.__sourcer_params.pop(
-            "-force_validate_source", False
-        )
-        if not isinstance(self.__forcevalidatesource, bool):
-            # reset improper values
-            self.__forcevalidatesource = False
-
-        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
-        self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
-        if not isinstance(self.__ffmpeg_prefixes, list):
-            # log it
-            logger.warning(
-                "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
-                    type(self.__ffmpeg_prefixes).__name__
-                )
-            )
-            # reset improper values
-            self.__ffmpeg_prefixes = []
-
-        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
-        __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
-        if not isinstance(__ffmpeg_download_path, str):
-            # reset improper values
-            __ffmpeg_download_path = ""
-
-        # validate the FFmpeg assets and return location (also downloads static assets on windows)
-        self.__ffmpeg = get_valid_ffmpeg_path(
-            str(custom_ffmpeg),
-            True if self.__machine_OS == "Windows" else False,
-            ffmpeg_download_path=__ffmpeg_download_path,
-            verbose=self.__verbose_logs,
-        )
-
-        # check if valid FFmpeg path returned
-        if self.__ffmpeg:
-            self.__verbose_logs and logger.debug(
-                "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
-            )
-        else:
-            # else raise error
-            raise RuntimeError(
-                "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
-            )
-
-        # sanitize externally accessible parameters and assign them
-        # handles source demuxer
-        if source is None:
-            # first check if source value is empty
-            # raise error if true
-            raise ValueError("Input `source` parameter is empty!")
-        elif isinstance(source_demuxer, str):
-            # assign if valid demuxer value
-            self.__source_demuxer = source_demuxer.strip().lower()
+            str(k).strip(): (
+                str(v).strip()
+                if not isinstance(v, (dict, list, int, float, tuple))
+                else v
+            )
+            for k, v in sourcer_params.items()
+        }
+
+        # handle whether to force validate source
+        self.__forcevalidatesource = self.__sourcer_params.pop(
+            "-force_validate_source", False
+        )
+        if not isinstance(self.__forcevalidatesource, bool):
+            # reset improper values
+            self.__forcevalidatesource = False
+
+        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
+        self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
+        if not isinstance(self.__ffmpeg_prefixes, list):
+            # log it
+            logger.warning(
+                "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
+                    type(self.__ffmpeg_prefixes).__name__
+                )
+            )
+            # reset improper values
+            self.__ffmpeg_prefixes = []
+
+        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
+        __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
+        if not isinstance(__ffmpeg_download_path, str):
+            # reset improper values
+            __ffmpeg_download_path = ""
+
+        # validate the FFmpeg assets and return location (also downloads static assets on windows)
+        self.__ffmpeg = get_valid_ffmpeg_path(
+            str(custom_ffmpeg),
+            True if self.__machine_OS == "Windows" else False,
+            ffmpeg_download_path=__ffmpeg_download_path,
+            verbose=self.__verbose_logs,
+        )
+
+        # check if valid FFmpeg path returned
+        if self.__ffmpeg:
+            self.__verbose_logs and logger.debug(
+                "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
+            )
+        else:
+            # else raise error
+            raise RuntimeError(
+                "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
+            )
+
+        # sanitize externally accessible parameters and assign them
+        # handles source demuxer
+        if source is None:
+            # first check if source value is empty
+            # raise error if true
+            raise ValueError("Input `source` parameter is empty!")
+        elif isinstance(source_demuxer, str):
             # assign if valid demuxer value
-            assert self.__source_demuxer != "auto" or validate_device_index(
-                source
-            ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+            self.__source_demuxer = source_demuxer.strip().lower()
+            # assign if valid demuxer value
+            assert self.__source_demuxer != "auto" or validate_device_index(
                 source
-            )
-        else:
-            # otherwise find valid default source demuxer value
-            # enforce "auto" if valid index device
-            self.__source_demuxer = "auto" if validate_device_index(source) else None
-            # log if not valid index device and invalid type
-            self.__verbose_logs and not self.__source_demuxer in [
-                "auto",
-                None,
-            ] and logger.warning(
-                "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
-                    type(source_demuxer).__name__
-                )
-            )
-            # log if not valid index device and invalid type
-            self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
-                "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
-                    source
-                )
-            )
-
-        # handles source stream
-        self.__source = source
-
-        # creates shallow copy for further usage #TODO
-        self.__source_org = copy.copy(self.__source)
-        self.__source_demuxer_org = copy.copy(self.__source_demuxer)
-
-        # handles all extracted devices names/paths list
-        # when source_demuxer = "auto"
-        self.__extracted_devices_list = []
-
-        # various source stream params
-        self.__default_video_resolution = ""  # handles stream resolution
-        self.__default_video_orientation = ""  # handles stream's video orientation
-        self.__default_video_framerate = ""  # handles stream framerate
-        self.__default_video_bitrate = ""  # handles stream's video bitrate
-        self.__default_video_pixfmt = ""  # handles stream's video pixfmt
-        self.__default_video_decoder = ""  # handles stream's video decoder
-        self.__default_source_duration = ""  # handles stream's video duration
-        self.__approx_video_nframes = ""  # handles approx stream frame number
-        self.__default_audio_bitrate = ""  # handles stream's audio bitrate
-        self.__default_audio_samplerate = ""  # handles stream's audio samplerate
-
-        # handle various stream flags
-        self.__contains_video = False  # contains video
-        self.__contains_audio = False  # contains audio
-        self.__contains_images = False  # contains image-sequence
-
-        # handles output parameters through filters
-        self.__metadata_output = None  # handles output stream metadata
-        self.__output_frames_resolution = ""  # handles output stream resolution
-        self.__output_framerate = ""  # handles output stream framerate
-        self.__output_frames_pixfmt = ""  # handles output frame pixel format
-        self.__output_orientation = ""  # handles output frame orientation
-
-        # check whether metadata probed or not?
-        self.__metadata_probed = False
-
-    def probe_stream(self, default_stream_indexes=(0, 0)):
-        """
-        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.
-
-        Parameters:
-            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is ("0th video stream", "1st audio stream").
-
-        **Returns:** Reference to the instance object.
-        """
-        assert (
-            isinstance(default_stream_indexes, (list, tuple))
-            and len(default_stream_indexes) == 2
-            and all(isinstance(x, int) for x in default_stream_indexes)
-        ), "Invalid default_stream_indexes value!"
-        # validate source and extract metadata
-        self.__ffsp_output = self.__validate_source(
-            self.__source,
-            source_demuxer=self.__source_demuxer,
-            forced_validate=(
-                self.__forcevalidatesource if self.__source_demuxer is None else True
-            ),
-        )
-        # parse resolution and framerate
-        video_rfparams = self.__extract_resolution_framerate(
-            default_stream=default_stream_indexes[0]
-        )
-        if video_rfparams:
-            self.__default_video_resolution = video_rfparams["resolution"]
-            self.__default_video_framerate = video_rfparams["framerate"]
-            self.__default_video_orientation = video_rfparams["orientation"]
-
-        # parse output parameters through filters (if available)
-        if not (self.__metadata_output is None):
-            # parse output resolution and framerate
-            out_video_rfparams = self.__extract_resolution_framerate(
-                default_stream=default_stream_indexes[0], extract_output=True
-            )
-            if out_video_rfparams:
-                self.__output_frames_resolution = out_video_rfparams["resolution"]
-                self.__output_framerate = out_video_rfparams["framerate"]
-                self.__output_orientation = out_video_rfparams["orientation"]
-            # parse output pixel-format
-            self.__output_frames_pixfmt = self.__extract_video_pixfmt(
-                default_stream=default_stream_indexes[0], extract_output=True
-            )
-
-        # parse pixel-format
-        self.__default_video_pixfmt = self.__extract_video_pixfmt(
-            default_stream=default_stream_indexes[0]
-        )
-
-        # parse video decoder
-        self.__default_video_decoder = self.__extract_video_decoder(
-            default_stream=default_stream_indexes[0]
-        )
-        # parse rest of metadata
-        if not self.__contains_images:
-            # parse video bitrate
-            self.__default_video_bitrate = self.__extract_video_bitrate(
-                default_stream=default_stream_indexes[0]
-            )
-            # parse audio bitrate and samplerate
-            audio_params = self.__extract_audio_bitrate_nd_samplerate(
-                default_stream=default_stream_indexes[1]
-            )
-            if audio_params:
-                self.__default_audio_bitrate = audio_params["bitrate"]
-                self.__default_audio_samplerate = audio_params["samplerate"]
-            # parse video duration
-            self.__default_source_duration = self.__extract_duration()
-            # calculate all flags
-            if (
-                self.__default_video_bitrate
-                or (self.__default_video_framerate and self.__default_video_resolution)
-            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):
-                self.__contains_video = True
-                self.__contains_audio = True
-            elif self.__default_video_bitrate or (
-                self.__default_video_framerate and self.__default_video_resolution
-            ):
-                self.__contains_video = True
-            elif self.__default_audio_bitrate or self.__default_audio_samplerate:
-                self.__contains_audio = True
-            else:
-                raise ValueError(
-                    "Invalid source with no decodable audio or video stream provided. Aborting!"
-                )
-        # calculate approximate number of video frame
-        if self.__default_video_framerate and self.__default_source_duration:
-            self.__approx_video_nframes = np.rint(
-                self.__default_video_framerate * self.__default_source_duration
-            ).astype(int, casting="unsafe")
-
-        # signal metadata has been probed
-        self.__metadata_probed = True
-
-        # return reference to the instance object.
-        return self
-
-    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):
-        """
-        This method returns Parsed/Probed Metadata of the given source.
-
-        Parameters:
-            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?
-            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.
-
-        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.
-        """
-        # check if metadata has been probed or not
-        assert (
-            self.__metadata_probed
-        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
-        # log it
-        self.__verbose_logs and logger.debug("Extracting Metadata...")
-        # create metadata dictionary from information populated in private class variables
-        metadata = {
-            "ffmpeg_binary_path": self.__ffmpeg,
-            "source": self.__source,
-        }
-        metadata_missing = {}
-        # Only either `source_demuxer` or `source_extension` attribute can be
-        # present in metadata.
-        if self.__source_demuxer is None:
-            metadata.update({"source_extension": os.path.splitext(self.__source)[-1]})
-            # update missing
-            force_retrieve_missing and metadata_missing.update({"source_demuxer": ""})
-        else:
-            metadata.update({"source_demuxer": self.__source_demuxer})
-            # update missing
-            force_retrieve_missing and metadata_missing.update({"source_extension": ""})
-        # add source video metadata properties
-        metadata.update(
-            {
-                "source_video_resolution": self.__default_video_resolution,
-                "source_video_pixfmt": self.__default_video_pixfmt,
-                "source_video_framerate": self.__default_video_framerate,
-                "source_video_orientation": self.__default_video_orientation,
-                "source_video_decoder": self.__default_video_decoder,
-                "source_duration_sec": self.__default_source_duration,
-                "approx_video_nframes": (
-                    int(self.__approx_video_nframes)
-                    if self.__approx_video_nframes
-                    and not any(
-                        "loop" in x for x in self.__ffmpeg_prefixes
-                    )  # check if any loops in prefix
-                    and not any(
-                        "loop" in x for x in dict2Args(self.__sourcer_params)
-                    )  # check if any loops in filters
-                    else None
-                ),
-                "source_video_bitrate": self.__default_video_bitrate,
-                "source_audio_bitrate": self.__default_audio_bitrate,
-                "source_audio_samplerate": self.__default_audio_samplerate,
-                "source_has_video": self.__contains_video,
-                "source_has_audio": self.__contains_audio,
-                "source_has_image_sequence": self.__contains_images,
-            }
-        )
-        # add output metadata properties (if available)
-        if not (self.__metadata_output is None):
-            metadata.update(
-                {
-                    "output_frames_resolution": self.__output_frames_resolution,
-                    "output_frames_pixfmt": self.__output_frames_pixfmt,
-                    "output_framerate": self.__output_framerate,
-                    "output_orientation": self.__output_orientation,
-                }
-            )
-        else:
-            # since output stream metadata properties are only available when additional
-            # FFmpeg parameters(such as filters) are defined manually, thereby missing
-            # output stream properties are handled by assigning them counterpart source
-            # stream metadata property values
-            force_retrieve_missing and metadata_missing.update(
-                {
-                    "output_frames_resolution": self.__default_video_resolution,
-                    "output_frames_pixfmt": self.__default_video_pixfmt,
-                    "output_framerate": self.__default_video_framerate,
-                    "output_orientation": self.__default_video_orientation,
-                }
-            )
-        # log it
-        self.__verbose_logs and logger.debug(
-            "Metadata Extraction completed successfully!"
-        )
-        # parse as JSON string(`json.dumps`), if defined
-        metadata = json.dumps(metadata, indent=2) if pretty_json else metadata
-        metadata_missing = (
-            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing
-        )
-        # return `metadata` or `(metadata, metadata_missing)`
-        return metadata if not force_retrieve_missing else (metadata, metadata_missing)
-
-    @property
-    def enumerate_devices(self):
-        """
-        A property object that enumerate all probed Camera Devices connected to your system names
-        along with their respective "device indexes" or "camera indexes" as python dictionary.
-
-        **Returns:** Probed Camera Devices as python dictionary.
-        """
-        # check if metadata has been probed or not
-        assert (
-            self.__metadata_probed
-        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
-
-        # log if specified
-        self.__verbose_logs and logger.debug("Enumerating all probed Camera Devices.")
-
-        # return probed Camera Devices as python dictionary.
-        return {
-            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)
-        }
-
-    def __validate_source(self, source, source_demuxer=None, forced_validate=False):
-        """
-        This Internal method validates source and extracts its metadata.
-
-        Parameters:
-            source_demuxer(str): specifies the demuxer(`-f`) for the input source.
-            forced_validate (bool): whether to skip validation tests or not?
-
-        **Returns:** `True` if passed tests else `False`.
-        """
-        # validate source demuxer(if defined)
-        if not (source_demuxer is None):
-            # check if "auto" demuxer is specified
-            if source_demuxer == "auto":
-                # integerise source to get index
-                index = int(source)
-                # extract devices list and actual demuxer value
-                (
-                    self.__extracted_devices_list,
-                    source_demuxer,
-                ) = extract_device_n_demuxer(
-                    self.__ffmpeg,
-                    machine_OS=self.__machine_OS,
-                    verbose=self.__verbose_logs,
-                )
-                # valid indexes range
-                valid_indexes = [
-                    x
-                    for x in range(
-                        -len(self.__extracted_devices_list),
-                        len(self.__extracted_devices_list),
-                    )
-                ]
-                # check index is within valid range
-                if self.__extracted_devices_list and index in valid_indexes:
-                    # overwrite actual source device name/path/index
-                    if self.__machine_OS == "Windows":
-                        # Windows OS requires "video=" suffix
-                        self.__source = source = "video={}".format(
-                            self.__extracted_devices_list[index]
-                        )
-                    elif self.__machine_OS == "Darwin":
-                        # Darwin OS requires only device indexes
-                        self.__source = source = (
-                            str(index)
-                            if index >= 0
-                            else str(len(self.__extracted_devices_list) + index)
-                        )
-                    else:
-                        # Linux OS require /dev/video format
-                        self.__source = source = next(
-                            iter(self.__extracted_devices_list[index].keys())
-                        )
-                    # overwrite source_demuxer global variable
-                    self.__source_demuxer = source_demuxer
-                    self.__verbose_logs and logger.debug(
-                        "Successfully configured device `{}` at index `{}` with demuxer `{}`.".format(
-                            self.__extracted_devices_list[index]
-                            if self.__machine_OS != "Linux"
-                            else next(
-                                iter(self.__extracted_devices_list[index].values())
-                            )[0],
-                            index
-                            if index >= 0
-                            else len(self.__extracted_devices_list) + index,
-                            self.__source_demuxer,
-                        )
-                    )
-                else:
-                    # raise error otherwise
-                    raise ValueError(
-                        "Given source `{}` is not a valid device index. Possible values index values can be: {}".format(
-                            source,
-                            ",".join(f"{x}" for x in valid_indexes),
-                        )
-                    )
-            # otherwise validate against supported demuxers
-            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):
-                # raise if fails
-                raise ValueError(
-                    "Installed FFmpeg failed to recognize `{}` demuxer. Check `source_demuxer` parameter value again!".format(
-                        source_demuxer
-                    )
-                )
-            else:
-                pass
-
-        # assert if valid source
-        assert source and isinstance(
-            source, str
-        ), "Input `source` parameter is of invalid type!"
-
-        # Differentiate input
-        if forced_validate:
-            source_demuxer is None and logger.critical(
-                "Forcefully passing validation test for given source!"
-            )
-            self.__source = source
-        elif os.path.isfile(source):
-            self.__source = os.path.abspath(source)
-        elif is_valid_image_seq(
-            self.__ffmpeg, source=source, verbose=self.__verbose_logs
-        ):
-            self.__source = source
-            self.__contains_images = True
-        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):
-            self.__source = source
-        else:
-            logger.error("`source` value is unusable or unsupported!")
-            # discard the value otherwise
-            raise ValueError("Input source is invalid. Aborting!")
-        # format command
-        if self.__sourcer_params:
-            # handle additional params separately
-            meta_cmd = (
-                [self.__ffmpeg]
-                + (["-hide_banner"] if not self.__verbose_logs else [])
-                + ["-t", "0.0001"]
-                + self.__ffmpeg_prefixes
-                + (["-f", source_demuxer] if source_demuxer else [])
-                + ["-i", source]
-                + dict2Args(self.__sourcer_params)
-                + ["-f", "null", "-"]
-            )
-        else:
-            meta_cmd = (
-                [self.__ffmpeg]
-                + (["-hide_banner"] if not self.__verbose_logs else [])
-                + self.__ffmpeg_prefixes
-                + (["-f", source_demuxer] if source_demuxer else [])
-                + ["-i", source]
-            )
-        # extract metadata, decode, and filter
-        metadata = (
-            check_sp_output(
-                meta_cmd,
-                force_retrieve_stderr=True,
-            )
-            .decode("utf-8")
-            .strip()
-        )
-        # separate input and output metadata (if available)
-        if "Output #" in metadata:
-            (metadata, self.__metadata_output) = metadata.split("Output #")
-        # return metadata based on params
-        return metadata
-
-    def __extract_video_bitrate(self, default_stream=0):
-        """
-        This Internal method parses default video-stream bitrate from metadata.
-
-        Parameters:
-            default_stream (int): selects specific video-stream in case of multiple ones.
-
-        **Returns:** Default Video bitrate as string value.
-        """
-        identifiers = ["Video:", "Stream #"]
-        video_bitrate_text = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        if video_bitrate_text:
-            selected_stream = video_bitrate_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(video_bitrate_text)
-                else 0
-            ]
-            filtered_bitrate = re.findall(
-                r",\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
-            )
-            if len(filtered_bitrate):
-                default_video_bitrate = filtered_bitrate[0].split(" ")[1:3]
-                final_bitrate = "{}{}".format(
-                    int(default_video_bitrate[0].strip()),
-                    "k" if (default_video_bitrate[1].strip().startswith("k")) else "M",
-                )
-                return final_bitrate
-        return ""
-
-    def __extract_video_decoder(self, default_stream=0):
-        """
-        This Internal method parses default video-stream decoder from metadata.
-
-        Parameters:
-            default_stream (int): selects specific video-stream in case of multiple ones.
-
-        **Returns:** Default Video decoder as string value.
-        """
-        assert isinstance(default_stream, int), "Invalid input!"
-        identifiers = ["Video:", "Stream #"]
-        meta_text = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-            filtered_pixfmt = re.findall(
-                r"Video:\s[a-z0-9_-]*", selected_stream.strip()
-            )
-            if filtered_pixfmt:
-                return filtered_pixfmt[0].split(" ")[-1]
-        return ""
-
-    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):
-        """
-        This Internal method parses default video-stream pixel-format from metadata.
-
-        Parameters:
-            default_stream (int): selects specific video-stream in case of multiple ones.
-
-        **Returns:** Default Video pixel-format as string value.
-        """
-        identifiers = ["Video:", "Stream #"]
-        meta_text = (
-            [
-                line.strip()
-                for line in self.__ffsp_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-            if not extract_output
-            else [
-                line.strip()
-                for line in self.__metadata_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-        )
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-            filtered_pixfmt = re.findall(
-                r",\s[a-z][a-z0-9_-]*", selected_stream.strip()
-            )
-            if filtered_pixfmt:
-                return filtered_pixfmt[0].split(" ")[-1]
-        return ""
-
-    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):
-        """
-        This Internal method parses default audio-stream bitrate and sample-rate from metadata.
-
-        Parameters:
-            default_stream (int): selects specific audio-stream in case of multiple ones.
-
-        **Returns:** Default Audio-stream bitrate and sample-rate as string value.
-        """
-        identifiers = ["Audio:", "Stream #"]
-        meta_text = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        result = {}
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-            # filter data
-            filtered_audio_bitrate = re.findall(
-                r"fltp,\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
-            )
-            filtered_audio_samplerate = re.findall(
-                r",\s[0-9]+\sHz", selected_stream.strip()
-            )
-            # get audio bitrate metadata
-            if filtered_audio_bitrate:
-                filtered = filtered_audio_bitrate[0].split(" ")[1:3]
-                result["bitrate"] = "{}{}".format(
-                    int(filtered[0].strip()),
-                    "k" if (filtered[1].strip().startswith("k")) else "M",
-                )
-            else:
-                result["bitrate"] = ""
-            # get audio samplerate metadata
-            result["samplerate"] = (
-                filtered_audio_samplerate[0].split(", ")[1]
-                if filtered_audio_samplerate
-                else ""
-            )
-        return result if result and (len(result) == 2) else {}
-
-    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):
-        """
-        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.
-
-        Parameters:
-            default_stream (int): selects specific audio-stream in case of multiple ones.
-            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?
-
-        **Returns:** Default Video resolution and framerate as dictionary value.
-        """
-        identifiers = ["Video:", "Stream #"]
-        # use output metadata if available
-        meta_text = (
-            [
-                line.strip()
-                for line in self.__ffsp_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-            if not extract_output
-            else [
-                line.strip()
-                for line in self.__metadata_output.split("\n")
-                if all(x in line for x in identifiers)
-            ]
-        )
-        # extract video orientation metadata if available
-        identifiers_orientation = ["displaymatrix:", "rotation"]
-        meta_text_orientation = (
-            [
-                line.strip()
-                for line in self.__ffsp_output.split("\n")
-                if all(x in line for x in identifiers_orientation)
-            ]
-            if not extract_output
-            else [
+            ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+                source
+            )
+        else:
+            # otherwise find valid default source demuxer value
+            # enforce "auto" if valid index device
+            self.__source_demuxer = "auto" if validate_device_index(source) else None
+            # log if not valid index device and invalid type
+            self.__verbose_logs and not self.__source_demuxer in [
+                "auto",
+                None,
+            ] and logger.warning(
+                "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
+                    type(source_demuxer).__name__
+                )
+            )
+            # log if not valid index device and invalid type
+            self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
+                "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
+                    source
+                )
+            )
+
+        # handles source stream
+        self.__source = source
+
+        # creates shallow copy for further usage #TODO
+        self.__source_org = copy.copy(self.__source)
+        self.__source_demuxer_org = copy.copy(self.__source_demuxer)
+
+        # handles all extracted devices names/paths list
+        # when source_demuxer = "auto"
+        self.__extracted_devices_list = []
+
+        # various source stream params
+        self.__default_video_resolution = ""  # handles stream resolution
+        self.__default_video_orientation = ""  # handles stream's video orientation
+        self.__default_video_framerate = ""  # handles stream framerate
+        self.__default_video_bitrate = ""  # handles stream's video bitrate
+        self.__default_video_pixfmt = ""  # handles stream's video pixfmt
+        self.__default_video_decoder = ""  # handles stream's video decoder
+        self.__default_source_duration = ""  # handles stream's video duration
+        self.__approx_video_nframes = ""  # handles approx stream frame number
+        self.__default_audio_bitrate = ""  # handles stream's audio bitrate
+        self.__default_audio_samplerate = ""  # handles stream's audio samplerate
+
+        # handle various stream flags
+        self.__contains_video = False  # contains video
+        self.__contains_audio = False  # contains audio
+        self.__contains_images = False  # contains image-sequence
+
+        # handles output parameters through filters
+        self.__metadata_output = None  # handles output stream metadata
+        self.__output_frames_resolution = ""  # handles output stream resolution
+        self.__output_framerate = ""  # handles output stream framerate
+        self.__output_frames_pixfmt = ""  # handles output frame pixel format
+        self.__output_orientation = ""  # handles output frame orientation
+
+        # check whether metadata probed or not?
+        self.__metadata_probed = False
+
+    def probe_stream(self, default_stream_indexes=(0, 0)):
+        """
+        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.
+
+        Parameters:
+            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is ("0th video stream", "1st audio stream").
+
+        **Returns:** Reference to the instance object.
+        """
+        assert (
+            isinstance(default_stream_indexes, (list, tuple))
+            and len(default_stream_indexes) == 2
+            and all(isinstance(x, int) for x in default_stream_indexes)
+        ), "Invalid default_stream_indexes value!"
+        # validate source and extract metadata
+        self.__ffsp_output = self.__validate_source(
+            self.__source,
+            source_demuxer=self.__source_demuxer,
+            forced_validate=(
+                self.__forcevalidatesource if self.__source_demuxer is None else True
+            ),
+        )
+        # parse resolution and framerate
+        video_rfparams = self.__extract_resolution_framerate(
+            default_stream=default_stream_indexes[0]
+        )
+        if video_rfparams:
+            self.__default_video_resolution = video_rfparams["resolution"]
+            self.__default_video_framerate = video_rfparams["framerate"]
+            self.__default_video_orientation = video_rfparams["orientation"]
+
+        # parse output parameters through filters (if available)
+        if not (self.__metadata_output is None):
+            # parse output resolution and framerate
+            out_video_rfparams = self.__extract_resolution_framerate(
+                default_stream=default_stream_indexes[0], extract_output=True
+            )
+            if out_video_rfparams:
+                self.__output_frames_resolution = out_video_rfparams["resolution"]
+                self.__output_framerate = out_video_rfparams["framerate"]
+                self.__output_orientation = out_video_rfparams["orientation"]
+            # parse output pixel-format
+            self.__output_frames_pixfmt = self.__extract_video_pixfmt(
+                default_stream=default_stream_indexes[0], extract_output=True
+            )
+
+        # parse pixel-format
+        self.__default_video_pixfmt = self.__extract_video_pixfmt(
+            default_stream=default_stream_indexes[0]
+        )
+
+        # parse video decoder
+        self.__default_video_decoder = self.__extract_video_decoder(
+            default_stream=default_stream_indexes[0]
+        )
+        # parse rest of metadata
+        if not self.__contains_images:
+            # parse video bitrate
+            self.__default_video_bitrate = self.__extract_video_bitrate(
+                default_stream=default_stream_indexes[0]
+            )
+            # parse audio bitrate and samplerate
+            audio_params = self.__extract_audio_bitrate_nd_samplerate(
+                default_stream=default_stream_indexes[1]
+            )
+            if audio_params:
+                self.__default_audio_bitrate = audio_params["bitrate"]
+                self.__default_audio_samplerate = audio_params["samplerate"]
+            # parse video duration
+            self.__default_source_duration = self.__extract_duration()
+            # calculate all flags
+            if (
+                self.__default_video_bitrate
+                or (self.__default_video_framerate and self.__default_video_resolution)
+            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):
+                self.__contains_video = True
+                self.__contains_audio = True
+            elif self.__default_video_bitrate or (
+                self.__default_video_framerate and self.__default_video_resolution
+            ):
+                self.__contains_video = True
+            elif self.__default_audio_bitrate or self.__default_audio_samplerate:
+                self.__contains_audio = True
+            else:
+                raise ValueError(
+                    "Invalid source with no decodable audio or video stream provided. Aborting!"
+                )
+        # calculate approximate number of video frame
+        if self.__default_video_framerate and self.__default_source_duration:
+            self.__approx_video_nframes = np.rint(
+                self.__default_video_framerate * self.__default_source_duration
+            ).astype(int, casting="unsafe")
+
+        # signal metadata has been probed
+        self.__metadata_probed = True
+
+        # return reference to the instance object.
+        return self
+
+    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):
+        """
+        This method returns Parsed/Probed Metadata of the given source.
+
+        Parameters:
+            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?
+            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.
+
+        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.
+        """
+        # check if metadata has been probed or not
+        assert (
+            self.__metadata_probed
+        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
+        # log it
+        self.__verbose_logs and logger.debug("Extracting Metadata...")
+        # create metadata dictionary from information populated in private class variables
+        metadata = {
+            "ffmpeg_binary_path": self.__ffmpeg,
+            "source": self.__source,
+        }
+        metadata_missing = {}
+        # Only either `source_demuxer` or `source_extension` attribute can be
+        # present in metadata.
+        if self.__source_demuxer is None:
+            metadata.update({"source_extension": os.path.splitext(self.__source)[-1]})
+            # update missing
+            force_retrieve_missing and metadata_missing.update({"source_demuxer": ""})
+        else:
+            metadata.update({"source_demuxer": self.__source_demuxer})
+            # update missing
+            force_retrieve_missing and metadata_missing.update({"source_extension": ""})
+        # add source video metadata properties
+        metadata.update(
+            {
+                "source_video_resolution": self.__default_video_resolution,
+                "source_video_pixfmt": self.__default_video_pixfmt,
+                "source_video_framerate": self.__default_video_framerate,
+                "source_video_orientation": self.__default_video_orientation,
+                "source_video_decoder": self.__default_video_decoder,
+                "source_duration_sec": self.__default_source_duration,
+                "approx_video_nframes": (
+                    int(self.__approx_video_nframes)
+                    if self.__approx_video_nframes
+                    and not any(
+                        "loop" in x for x in self.__ffmpeg_prefixes
+                    )  # check if any loops in prefix
+                    and not any(
+                        "loop" in x for x in dict2Args(self.__sourcer_params)
+                    )  # check if any loops in filters
+                    else None
+                ),
+                "source_video_bitrate": self.__default_video_bitrate,
+                "source_audio_bitrate": self.__default_audio_bitrate,
+                "source_audio_samplerate": self.__default_audio_samplerate,
+                "source_has_video": self.__contains_video,
+                "source_has_audio": self.__contains_audio,
+                "source_has_image_sequence": self.__contains_images,
+            }
+        )
+        # add output metadata properties (if available)
+        if not (self.__metadata_output is None):
+            metadata.update(
+                {
+                    "output_frames_resolution": self.__output_frames_resolution,
+                    "output_frames_pixfmt": self.__output_frames_pixfmt,
+                    "output_framerate": self.__output_framerate,
+                    "output_orientation": self.__output_orientation,
+                }
+            )
+        else:
+            # since output stream metadata properties are only available when additional
+            # FFmpeg parameters(such as filters) are defined manually, thereby missing
+            # output stream properties are handled by assigning them counterpart source
+            # stream metadata property values
+            force_retrieve_missing and metadata_missing.update(
+                {
+                    "output_frames_resolution": self.__default_video_resolution,
+                    "output_frames_pixfmt": self.__default_video_pixfmt,
+                    "output_framerate": self.__default_video_framerate,
+                    "output_orientation": self.__default_video_orientation,
+                }
+            )
+        # log it
+        self.__verbose_logs and logger.debug(
+            "Metadata Extraction completed successfully!"
+        )
+        # parse as JSON string(`json.dumps`), if defined
+        metadata = json.dumps(metadata, indent=2) if pretty_json else metadata
+        metadata_missing = (
+            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing
+        )
+        # return `metadata` or `(metadata, metadata_missing)`
+        return metadata if not force_retrieve_missing else (metadata, metadata_missing)
+
+    @property
+    def enumerate_devices(self):
+        """
+        A property object that enumerate all probed Camera Devices connected to your system names
+        along with their respective "device indexes" or "camera indexes" as python dictionary.
+
+        **Returns:** Probed Camera Devices as python dictionary.
+        """
+        # check if metadata has been probed or not
+        assert (
+            self.__metadata_probed
+        ), "Source Metadata not been probed yet! Check if you called `probe_stream()` method."
+
+        # log if specified
+        self.__verbose_logs and logger.debug("Enumerating all probed Camera Devices.")
+
+        # return probed Camera Devices as python dictionary.
+        return {
+            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)
+        }
+
+    def __validate_source(self, source, source_demuxer=None, forced_validate=False):
+        """
+        This Internal method validates source and extracts its metadata.
+
+        Parameters:
+            source_demuxer(str): specifies the demuxer(`-f`) for the input source.
+            forced_validate (bool): whether to skip validation tests or not?
+
+        **Returns:** `True` if passed tests else `False`.
+        """
+        logger.critical(
+            "{} :: {} :: {}".format(
+                source_demuxer,
+                source_demuxer in get_supported_demuxers(self.__ffmpeg),
+                get_supported_demuxers(self.__ffmpeg),
+            )
+        )
+        # validate source demuxer(if defined)
+        if not (source_demuxer is None):
+            # check if "auto" demuxer is specified
+            if source_demuxer == "auto":
+                # integerise source to get index
+                index = int(source)
+                # extract devices list and actual demuxer value
+                (
+                    self.__extracted_devices_list,
+                    source_demuxer,
+                ) = extract_device_n_demuxer(
+                    self.__ffmpeg,
+                    machine_OS=self.__machine_OS,
+                    verbose=self.__verbose_logs,
+                )
+                # valid indexes range
+                valid_indexes = [
+                    x
+                    for x in range(
+                        -len(self.__extracted_devices_list),
+                        len(self.__extracted_devices_list),
+                    )
+                ]
+                # check index is within valid range
+                if self.__extracted_devices_list and index in valid_indexes:
+                    # overwrite actual source device name/path/index
+                    if self.__machine_OS == "Windows":
+                        # Windows OS requires "video=" suffix
+                        self.__source = source = "video={}".format(
+                            self.__extracted_devices_list[index]
+                        )
+                    elif self.__machine_OS == "Darwin":
+                        # Darwin OS requires only device indexes
+                        self.__source = source = (
+                            str(index)
+                            if index >= 0
+                            else str(len(self.__extracted_devices_list) + index)
+                        )
+                    else:
+                        # Linux OS require /dev/video format
+                        self.__source = source = next(
+                            iter(self.__extracted_devices_list[index].keys())
+                        )
+                    # overwrite source_demuxer global variable
+                    self.__source_demuxer = source_demuxer
+                    self.__verbose_logs and logger.debug(
+                        "Successfully configured device `{}` at index `{}` with demuxer `{}`.".format(
+                            (
+                                self.__extracted_devices_list[index]
+                                if self.__machine_OS != "Linux"
+                                else next(
+                                    iter(self.__extracted_devices_list[index].values())
+                                )[0]
+                            ),
+                            (
+                                index
+                                if index >= 0
+                                else len(self.__extracted_devices_list) + index
+                            ),
+                            self.__source_demuxer,
+                        )
+                    )
+                else:
+                    # raise error otherwise
+                    raise ValueError(
+                        "Given source `{}` is not a valid device index. Possible values index values can be: {}".format(
+                            source,
+                            ",".join(f"{x}" for x in valid_indexes),
+                        )
+                    )
+            # otherwise validate against supported demuxers
+            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):
+                # raise if fails
+                raise ValueError(
+                    "Installed FFmpeg failed to recognize `{}` demuxer. Check `source_demuxer` parameter value again!".format(
+                        source_demuxer
+                    )
+                )
+            else:
+                pass
+
+        # assert if valid source
+        assert source and isinstance(
+            source, str
+        ), "Input `source` parameter is of invalid type!"
+
+        # Differentiate input
+        if forced_validate:
+            source_demuxer is None and logger.critical(
+                "Forcefully passing validation test for given source!"
+            )
+            self.__source = source
+        elif os.path.isfile(source):
+            self.__source = os.path.abspath(source)
+        elif is_valid_image_seq(
+            self.__ffmpeg, source=source, verbose=self.__verbose_logs
+        ):
+            self.__source = source
+            self.__contains_images = True
+        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):
+            self.__source = source
+        else:
+            logger.error("`source` value is unusable or unsupported!")
+            # discard the value otherwise
+            raise ValueError("Input source is invalid. Aborting!")
+        # format command
+        if self.__sourcer_params:
+            # handle additional params separately
+            meta_cmd = (
+                [self.__ffmpeg]
+                + (["-hide_banner"] if not self.__verbose_logs else [])
+                + ["-t", "0.0001"]
+                + self.__ffmpeg_prefixes
+                + (["-f", source_demuxer] if source_demuxer else [])
+                + ["-i", source]
+                + dict2Args(self.__sourcer_params)
+                + ["-f", "null", "-"]
+            )
+        else:
+            meta_cmd = (
+                [self.__ffmpeg]
+                + (["-hide_banner"] if not self.__verbose_logs else [])
+                + self.__ffmpeg_prefixes
+                + (["-f", source_demuxer] if source_demuxer else [])
+                + ["-i", source]
+            )
+        # extract metadata, decode, and filter
+        metadata = (
+            check_sp_output(
+                meta_cmd,
+                force_retrieve_stderr=True,
+            )
+            .decode("utf-8")
+            .strip()
+        )
+        # separate input and output metadata (if available)
+        if "Output #" in metadata:
+            (metadata, self.__metadata_output) = metadata.split("Output #")
+        # return metadata based on params
+        return metadata
+
+    def __extract_video_bitrate(self, default_stream=0):
+        """
+        This Internal method parses default video-stream bitrate from metadata.
+
+        Parameters:
+            default_stream (int): selects specific video-stream in case of multiple ones.
+
+        **Returns:** Default Video bitrate as string value.
+        """
+        identifiers = ["Video:", "Stream #"]
+        video_bitrate_text = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        if video_bitrate_text:
+            selected_stream = video_bitrate_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(video_bitrate_text)
+                    else 0
+                )
+            ]
+            filtered_bitrate = re.findall(
+                r",\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
+            )
+            if len(filtered_bitrate):
+                default_video_bitrate = filtered_bitrate[0].split(" ")[1:3]
+                final_bitrate = "{}{}".format(
+                    int(default_video_bitrate[0].strip()),
+                    "k" if (default_video_bitrate[1].strip().startswith("k")) else "M",
+                )
+                return final_bitrate
+        return ""
+
+    def __extract_video_decoder(self, default_stream=0):
+        """
+        This Internal method parses default video-stream decoder from metadata.
+
+        Parameters:
+            default_stream (int): selects specific video-stream in case of multiple ones.
+
+        **Returns:** Default Video decoder as string value.
+        """
+        assert isinstance(default_stream, int), "Invalid input!"
+        identifiers = ["Video:", "Stream #"]
+        meta_text = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+            filtered_pixfmt = re.findall(
+                r"Video:\s[a-z0-9_-]*", selected_stream.strip()
+            )
+            if filtered_pixfmt:
+                return filtered_pixfmt[0].split(" ")[-1]
+        return ""
+
+    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):
+        """
+        This Internal method parses default video-stream pixel-format from metadata.
+
+        Parameters:
+            default_stream (int): selects specific video-stream in case of multiple ones.
+
+        **Returns:** Default Video pixel-format as string value.
+        """
+        identifiers = ["Video:", "Stream #"]
+        meta_text = (
+            [
+                line.strip()
+                for line in self.__ffsp_output.split("\n")
+                if all(x in line for x in identifiers)
+            ]
+            if not extract_output
+            else [
+                line.strip()
+                for line in self.__metadata_output.split("\n")
+                if all(x in line for x in identifiers)
+            ]
+        )
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+            filtered_pixfmt = re.findall(
+                r",\s[a-z][a-z0-9_-]*", selected_stream.strip()
+            )
+            if filtered_pixfmt:
+                return filtered_pixfmt[0].split(" ")[-1]
+        return ""
+
+    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):
+        """
+        This Internal method parses default audio-stream bitrate and sample-rate from metadata.
+
+        Parameters:
+            default_stream (int): selects specific audio-stream in case of multiple ones.
+
+        **Returns:** Default Audio-stream bitrate and sample-rate as string value.
+        """
+        identifiers = ["Audio:", "Stream #"]
+        meta_text = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        result = {}
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+            # filter data
+            filtered_audio_bitrate = re.findall(
+                r"fltp,\s[0-9]+\s\w\w[\/]s", selected_stream.strip()
+            )
+            filtered_audio_samplerate = re.findall(
+                r",\s[0-9]+\sHz", selected_stream.strip()
+            )
+            # get audio bitrate metadata
+            if filtered_audio_bitrate:
+                filtered = filtered_audio_bitrate[0].split(" ")[1:3]
+                result["bitrate"] = "{}{}".format(
+                    int(filtered[0].strip()),
+                    "k" if (filtered[1].strip().startswith("k")) else "M",
+                )
+            else:
+                result["bitrate"] = ""
+            # get audio samplerate metadata
+            result["samplerate"] = (
+                filtered_audio_samplerate[0].split(", ")[1]
+                if filtered_audio_samplerate
+                else ""
+            )
+        return result if result and (len(result) == 2) else {}
+
+    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):
+        """
+        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.
+
+        Parameters:
+            default_stream (int): selects specific audio-stream in case of multiple ones.
+            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?
+
+        **Returns:** Default Video resolution and framerate as dictionary value.
+        """
+        identifiers = ["Video:", "Stream #"]
+        # use output metadata if available
+        meta_text = (
+            [
                 line.strip()
-                for line in self.__metadata_output.split("\n")
-                if all(x in line for x in identifiers_orientation)
+                for line in self.__ffsp_output.split("\n")
+                if all(x in line for x in identifiers)
             ]
-        )
-        # use metadata if available
-        result = {}
-        if meta_text:
-            selected_stream = meta_text[
-                default_stream
-                if default_stream > 0 and default_stream < len(meta_text)
-                else 0
-            ]
-
-            # filter data
-            filtered_resolution = re.findall(
-                r"([1-9]\d+)x([1-9]\d+)", selected_stream.strip()
-            )
-            filtered_framerate = re.findall(
-                r"\d+(?:\.\d+)?\sfps", selected_stream.strip()
-            )
-            filtered_tbr = re.findall(r"\d+(?:\.\d+)?\stbr", selected_stream.strip())
-
-            # extract framerate metadata
-            if filtered_framerate:
-                # calculate actual framerate
-                result["framerate"] = float(
-                    re.findall(r"[\d\.\d]+", filtered_framerate[0])[0]
-                )
-            elif filtered_tbr:
-                # guess from TBR(if fps unavailable)
-                result["framerate"] = float(
-                    re.findall(r"[\d\.\d]+", filtered_tbr[0])[0]
-                )
-
-            # extract resolution metadata
-            if filtered_resolution:
-                result["resolution"] = [int(x) for x in filtered_resolution[0]]
-
-            # extract video orientation metadata
-            if meta_text_orientation:
-                selected_stream = meta_text_orientation[
-                    default_stream
-                    if default_stream > 0 and default_stream < len(meta_text)
-                    else 0
-                ]
-                filtered_orientation = re.findall(
-                    r"[-]?\d+\.\d+", selected_stream.strip()
-                )
-                result["orientation"] = float(filtered_orientation[0])
-            else:
-                result["orientation"] = 0.0
-
-        return result if result and (len(result) == 3) else {}
-
-    def __extract_duration(self, inseconds=True):
-        """
-        This Internal method parses stream duration from metadata.
-
-        Parameters:
-            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?
+            if not extract_output
+            else [
+                line.strip()
+                for line in self.__metadata_output.split("\n")
+                if all(x in line for x in identifiers)
+            ]
+        )
+        # extract video orientation metadata if available
+        identifiers_orientation = ["displaymatrix:", "rotation"]
+        meta_text_orientation = (
+            [
+                line.strip()
+                for line in self.__ffsp_output.split("\n")
+                if all(x in line for x in identifiers_orientation)
+            ]
+            if not extract_output
+            else [
+                line.strip()
+                for line in self.__metadata_output.split("\n")
+                if all(x in line for x in identifiers_orientation)
+            ]
+        )
+        # use metadata if available
+        result = {}
+        if meta_text:
+            selected_stream = meta_text[
+                (
+                    default_stream
+                    if default_stream > 0 and default_stream < len(meta_text)
+                    else 0
+                )
+            ]
+
+            # filter data
+            filtered_resolution = re.findall(
+                r"([1-9]\d+)x([1-9]\d+)", selected_stream.strip()
+            )
+            filtered_framerate = re.findall(
+                r"\d+(?:\.\d+)?\sfps", selected_stream.strip()
+            )
+            filtered_tbr = re.findall(r"\d+(?:\.\d+)?\stbr", selected_stream.strip())
+
+            # extract framerate metadata
+            if filtered_framerate:
+                # calculate actual framerate
+                result["framerate"] = float(
+                    re.findall(r"[\d\.\d]+", filtered_framerate[0])[0]
+                )
+            elif filtered_tbr:
+                # guess from TBR(if fps unavailable)
+                result["framerate"] = float(
+                    re.findall(r"[\d\.\d]+", filtered_tbr[0])[0]
+                )
+
+            # extract resolution metadata
+            if filtered_resolution:
+                result["resolution"] = [int(x) for x in filtered_resolution[0]]
 
-        **Returns:** Default Stream duration as string value.
-        """
-        identifiers = ["Duration:"]
-        stripped_data = [
-            line.strip()
-            for line in self.__ffsp_output.split("\n")
-            if all(x in line for x in identifiers)
-        ]
-        if stripped_data:
-            t_duration = re.findall(
-                r"(?:[01]\d|2[0123]):(?:[012345]\d):(?:[012345]\d+(?:\.\d+)?)",
-                stripped_data[0],
-            )
-            if t_duration:
-                return (
-                    sum(
-                        float(x) * 60**i
-                        for i, x in enumerate(reversed(t_duration[0].split(":")))
-                    )
-                    if inseconds
-                    else t_duration
-                )
-        return 0
+            # extract video orientation metadata
+            if meta_text_orientation:
+                selected_stream = meta_text_orientation[
+                    (
+                        default_stream
+                        if default_stream > 0 and default_stream < len(meta_text)
+                        else 0
+                    )
+                ]
+                filtered_orientation = re.findall(
+                    r"[-]?\d+\.\d+", selected_stream.strip()
+                )
+                result["orientation"] = float(filtered_orientation[0])
+            else:
+                result["orientation"] = 0.0
+
+        return result if result and (len(result) == 3) else {}
+
+    def __extract_duration(self, inseconds=True):
+        """
+        This Internal method parses stream duration from metadata.
+
+        Parameters:
+            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?
+
+        **Returns:** Default Stream duration as string value.
+        """
+        identifiers = ["Duration:"]
+        stripped_data = [
+            line.strip()
+            for line in self.__ffsp_output.split("\n")
+            if all(x in line for x in identifiers)
+        ]
+        if stripped_data:
+            t_duration = re.findall(
+                r"(?:[01]\d|2[0123]):(?:[012345]\d):(?:[012345]\d+(?:\.\d+)?)",
+                stripped_data[0],
+            )
+            if t_duration:
+                return (
+                    sum(
+                        float(x) * 60**i
+                        for i, x in enumerate(reversed(t_duration[0].split(":")))
+                    )
+                    if inseconds
+                    else t_duration
+                )
+        return 0
 

enumerate_devices property readonly

A property object that enumerate all probed Camera Devices connected to your system names along with their respective "device indexes" or "camera indexes" as python dictionary.

Returns: Probed Camera Devices as python dictionary.

__init__(self, source, source_demuxer=None, custom_ffmpeg='', verbose=False, **sourcer_params) special

This constructor method initializes the object state and attributes of the Sourcer Class.

Parameters:

Name Type Description Default
source str

defines the input(-i) source filename/URL/device-name/device-path.

required
source_demuxer str

specifies the demuxer(-f) for the input source.

None
custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

''
verbose bool

enables/disables verbose.

False
sourcer_params dict

provides the flexibility to control supported internal and FFmpeg parameters.

{}
Source code in deffcode/sourcer.py
def __init__(
     self,
     source,
@@ -834,129 +859,131 @@
 
     # sanitize sourcer_params
     self.__sourcer_params = {
-        str(k).strip(): str(v).strip()
-        if not isinstance(v, (dict, list, int, float, tuple))
-        else v
-        for k, v in sourcer_params.items()
-    }
-
-    # handle whether to force validate source
-    self.__forcevalidatesource = self.__sourcer_params.pop(
-        "-force_validate_source", False
-    )
-    if not isinstance(self.__forcevalidatesource, bool):
-        # reset improper values
-        self.__forcevalidatesource = False
-
-    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
-    self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
-    if not isinstance(self.__ffmpeg_prefixes, list):
-        # log it
-        logger.warning(
-            "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
-                type(self.__ffmpeg_prefixes).__name__
-            )
-        )
-        # reset improper values
-        self.__ffmpeg_prefixes = []
-
-    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
-    __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
-    if not isinstance(__ffmpeg_download_path, str):
-        # reset improper values
-        __ffmpeg_download_path = ""
-
-    # validate the FFmpeg assets and return location (also downloads static assets on windows)
-    self.__ffmpeg = get_valid_ffmpeg_path(
-        str(custom_ffmpeg),
-        True if self.__machine_OS == "Windows" else False,
-        ffmpeg_download_path=__ffmpeg_download_path,
-        verbose=self.__verbose_logs,
-    )
-
-    # check if valid FFmpeg path returned
-    if self.__ffmpeg:
-        self.__verbose_logs and logger.debug(
-            "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
-        )
-    else:
-        # else raise error
-        raise RuntimeError(
-            "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
-        )
-
-    # sanitize externally accessible parameters and assign them
-    # handles source demuxer
-    if source is None:
-        # first check if source value is empty
-        # raise error if true
-        raise ValueError("Input `source` parameter is empty!")
-    elif isinstance(source_demuxer, str):
-        # assign if valid demuxer value
-        self.__source_demuxer = source_demuxer.strip().lower()
+        str(k).strip(): (
+            str(v).strip()
+            if not isinstance(v, (dict, list, int, float, tuple))
+            else v
+        )
+        for k, v in sourcer_params.items()
+    }
+
+    # handle whether to force validate source
+    self.__forcevalidatesource = self.__sourcer_params.pop(
+        "-force_validate_source", False
+    )
+    if not isinstance(self.__forcevalidatesource, bool):
+        # reset improper values
+        self.__forcevalidatesource = False
+
+    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)
+    self.__ffmpeg_prefixes = self.__sourcer_params.pop("-ffprefixes", [])
+    if not isinstance(self.__ffmpeg_prefixes, list):
+        # log it
+        logger.warning(
+            "Discarding invalid `-ffprefixes` value of wrong type `{}`!".format(
+                type(self.__ffmpeg_prefixes).__name__
+            )
+        )
+        # reset improper values
+        self.__ffmpeg_prefixes = []
+
+    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)
+    __ffmpeg_download_path = self.__sourcer_params.pop("-ffmpeg_download_path", "")
+    if not isinstance(__ffmpeg_download_path, str):
+        # reset improper values
+        __ffmpeg_download_path = ""
+
+    # validate the FFmpeg assets and return location (also downloads static assets on windows)
+    self.__ffmpeg = get_valid_ffmpeg_path(
+        str(custom_ffmpeg),
+        True if self.__machine_OS == "Windows" else False,
+        ffmpeg_download_path=__ffmpeg_download_path,
+        verbose=self.__verbose_logs,
+    )
+
+    # check if valid FFmpeg path returned
+    if self.__ffmpeg:
+        self.__verbose_logs and logger.debug(
+            "Found valid FFmpeg executable: `{}`.".format(self.__ffmpeg)
+        )
+    else:
+        # else raise error
+        raise RuntimeError(
+            "[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!"
+        )
+
+    # sanitize externally accessible parameters and assign them
+    # handles source demuxer
+    if source is None:
+        # first check if source value is empty
+        # raise error if true
+        raise ValueError("Input `source` parameter is empty!")
+    elif isinstance(source_demuxer, str):
         # assign if valid demuxer value
-        assert self.__source_demuxer != "auto" or validate_device_index(
-            source
-        ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+        self.__source_demuxer = source_demuxer.strip().lower()
+        # assign if valid demuxer value
+        assert self.__source_demuxer != "auto" or validate_device_index(
             source
-        )
-    else:
-        # otherwise find valid default source demuxer value
-        # enforce "auto" if valid index device
-        self.__source_demuxer = "auto" if validate_device_index(source) else None
-        # log if not valid index device and invalid type
-        self.__verbose_logs and not self.__source_demuxer in [
-            "auto",
-            None,
-        ] and logger.warning(
-            "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
-                type(source_demuxer).__name__
-            )
-        )
-        # log if not valid index device and invalid type
-        self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
-            "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
-                source
-            )
-        )
-
-    # handles source stream
-    self.__source = source
-
-    # creates shallow copy for further usage #TODO
-    self.__source_org = copy.copy(self.__source)
-    self.__source_demuxer_org = copy.copy(self.__source_demuxer)
-
-    # handles all extracted devices names/paths list
-    # when source_demuxer = "auto"
-    self.__extracted_devices_list = []
-
-    # various source stream params
-    self.__default_video_resolution = ""  # handles stream resolution
-    self.__default_video_orientation = ""  # handles stream's video orientation
-    self.__default_video_framerate = ""  # handles stream framerate
-    self.__default_video_bitrate = ""  # handles stream's video bitrate
-    self.__default_video_pixfmt = ""  # handles stream's video pixfmt
-    self.__default_video_decoder = ""  # handles stream's video decoder
-    self.__default_source_duration = ""  # handles stream's video duration
-    self.__approx_video_nframes = ""  # handles approx stream frame number
-    self.__default_audio_bitrate = ""  # handles stream's audio bitrate
-    self.__default_audio_samplerate = ""  # handles stream's audio samplerate
-
-    # handle various stream flags
-    self.__contains_video = False  # contains video
-    self.__contains_audio = False  # contains audio
-    self.__contains_images = False  # contains image-sequence
-
-    # handles output parameters through filters
-    self.__metadata_output = None  # handles output stream metadata
-    self.__output_frames_resolution = ""  # handles output stream resolution
-    self.__output_framerate = ""  # handles output stream framerate
-    self.__output_frames_pixfmt = ""  # handles output frame pixel format
-    self.__output_orientation = ""  # handles output frame orientation
-
-    # check whether metadata probed or not?
-    self.__metadata_probed = False
+        ), "Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!".format(
+            source
+        )
+    else:
+        # otherwise find valid default source demuxer value
+        # enforce "auto" if valid index device
+        self.__source_demuxer = "auto" if validate_device_index(source) else None
+        # log if not valid index device and invalid type
+        self.__verbose_logs and not self.__source_demuxer in [
+            "auto",
+            None,
+        ] and logger.warning(
+            "Discarding invalid `source_demuxer` parameter value of wrong type: `{}`".format(
+                type(source_demuxer).__name__
+            )
+        )
+        # log if not valid index device and invalid type
+        self.__verbose_logs and self.__source_demuxer == "auto" and logger.critical(
+            "Given source `{}` is a valid device index. Enforcing 'auto' demuxer.".format(
+                source
+            )
+        )
+
+    # handles source stream
+    self.__source = source
+
+    # creates shallow copy for further usage #TODO
+    self.__source_org = copy.copy(self.__source)
+    self.__source_demuxer_org = copy.copy(self.__source_demuxer)
+
+    # handles all extracted devices names/paths list
+    # when source_demuxer = "auto"
+    self.__extracted_devices_list = []
+
+    # various source stream params
+    self.__default_video_resolution = ""  # handles stream resolution
+    self.__default_video_orientation = ""  # handles stream's video orientation
+    self.__default_video_framerate = ""  # handles stream framerate
+    self.__default_video_bitrate = ""  # handles stream's video bitrate
+    self.__default_video_pixfmt = ""  # handles stream's video pixfmt
+    self.__default_video_decoder = ""  # handles stream's video decoder
+    self.__default_source_duration = ""  # handles stream's video duration
+    self.__approx_video_nframes = ""  # handles approx stream frame number
+    self.__default_audio_bitrate = ""  # handles stream's audio bitrate
+    self.__default_audio_samplerate = ""  # handles stream's audio samplerate
+
+    # handle various stream flags
+    self.__contains_video = False  # contains video
+    self.__contains_audio = False  # contains audio
+    self.__contains_images = False  # contains image-sequence
+
+    # handles output parameters through filters
+    self.__metadata_output = None  # handles output stream metadata
+    self.__output_frames_resolution = ""  # handles output stream resolution
+    self.__output_framerate = ""  # handles output stream framerate
+    self.__output_frames_pixfmt = ""  # handles output frame pixel format
+    self.__output_orientation = ""  # handles output frame orientation
+
+    # check whether metadata probed or not?
+    self.__metadata_probed = False
 

probe_stream(self, default_stream_indexes=(0, 0))

This method Parses/Probes FFmpeg subprocess pipe's Standard Output for given input source and Populates the information in private class variables.

Parameters:

Name Type Description Default
default_stream_indexes list, tuple

selects specific video and audio stream index in case of multiple ones. Value can be of format: (int,int). For example (0,1) is ("0th video stream", "1st audio stream").

(0, 0)

Returns: Reference to the instance object.

Source code in deffcode/sourcer.py
def probe_stream(self, default_stream_indexes=(0, 0)):
     """
     This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.
diff --git a/v0.2.6-dev/search/search_index.json b/v0.2.6-dev/search/search_index.json
index e45fff8..b5498d7 100644
--- a/v0.2.6-dev/search/search_index.json
+++ b/v0.2.6-dev/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

A cross-platform High-performance Video Frames Decoder that flexibly executes FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in just a few lines of python code

Highly Adaptive - DeFFcode APIs implement a standalone, highly-extensible wrapper around the FFmpeg multimedia framework. These APIs support a wide range of media streams as input sources, such as live USB/Virtual/IP camera feeds, regular multimedia files, screen recordings, image sequences, network protocols (such as HTTP(s), RTP/RTSP, etc.), and so on.

Highly Flexible - DeFFcode APIs gain an edge over other wrappers by providing complete control over the underlying pipeline, including access to almost any FFmpeg specification thinkable, such as specifying framerate, resolution, hardware decoder(s), filtergraph(s), and pixel-format(s) that are readily supported by all well-known Computer Vision libraries.

Highly Convenient - FFmpeg has a steep learning curve especially for users unfamiliar with a command line interface. DeFFcode helps users by providing similar to OpenCV, Index based Camera Device Capturing and the same OpenCV-Python (Python API for OpenCV) coding syntax for its APIs, thereby making it even easier to learn, create, and develop FFmpeg based apps in Python.

"},{"location":"#key-features-of-deffcode","title":"Key features of DeFFcode","text":"

Here are some key features that stand out:

  • High-performance, low-overhead video frames decoding with robust error-handling.
  • Flexible API with access to almost any FFmpeg specification thinkable.
  • Supports a wide-range of media streams/devices/protocols as input source.
  • Curated list of well-documented recipes ranging from Basic to Advanced skill levels.
  • Hands down the easiest Index based Camera Device Capturing, similar to OpenCV.
  • Memory efficient Live Simple & Complex Filtergraphs. (Yes, You read it correctly \"Live\"!)
  • Lightning fast dedicated GPU-Accelerated Video Decoding & Transcoding.
  • Enables precise FFmpeg Frame Seeking with pinpoint accuracy.
  • Effortless Metadata Extraction from all streams available in the source.
  • Maintains the standard easy to learn OpenCV-Python coding syntax.
  • Out-of-the-box support for all prominent Computer Vision libraries.
  • Cross-platform, runs on Python 3.7+, and easy to install.
Still missing a key feature in DeFFcode?

Please review DeFFcode's Roadmap. If you still can't find the desired feature there, then you can request one simply by Commenting or Upvoting an existing comment on that issue.

"},{"location":"#getting-started","title":"Getting Started","text":"

In case you run into any problems, consult our Help section.

"},{"location":"#installation-notes","title":"Installation Notes","text":"

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode on your machine.

"},{"location":"#recipes-aka-examples","title":"Recipes a.k.a Examples","text":"

Once you have DeFFcode installed, checkout our Well-Documented Recipes for usage examples:

How to Begin?

If you\u2019re just starting, check out the Beginner Basic Recipes and as your confidence grows, move up to Advanced Recipes .

  • Basic Recipes : Recipes for beginners of any skill level to get started.
  • Advanced Recipes : Recipes to take your skills to the next level.
"},{"location":"#api-in-a-nutshell","title":"API in a nutshell","text":"

As a user, you just have to remember only two DeFFcode APIs, namely:

See API Reference for more in-depth information.

"},{"location":"#a-ffdecoder-api","title":"A. FFdecoder API","text":"

The primary function of FFdecoder API is to decode 24-bit RGB video frames from the given source:

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# formulate the decoder with suitable source\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").formulate()\n\n# grab RGB24(default) 3D frames from decoder\nfor frame in decoder.generateFrame():\n\n    # lets print its shape\n    print(frame.shape) # (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n
"},{"location":"#b-sourcer-api","title":"B. Sourcer API","text":"

The primary function of Sourcer API is to gather information from all multimedia streams available in the given source:

# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
The resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 60.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 10.0,\n  \"approx_video_nframes\": 600,\n  \"source_video_bitrate\": \"832k\",\n  \"source_audio_bitrate\": \"\",\n  \"source_audio_samplerate\": \"\",\n  \"source_has_video\": true,\n  \"source_has_audio\": false,\n  \"source_has_image_sequence\": false\n}\n

"},{"location":"#contribution-guidelines","title":"Contribution Guidelines","text":"

Contributions are welcome, and greatly appreciated!

Please read our Contribution Guidelines for more details.

"},{"location":"#community-channel","title":"Community Channel","text":"

If you've come up with some new idea, or are looking for the fastest way to troubleshoot your problems, please check out our Gitter community channel ➶

"},{"location":"#become-a-stargazer","title":"Become a Stargazer","text":"

You can be a Stargazer by starring us on Github, it helps us a lot and you're making it easier for others to find & trust this library. Thanks!

"},{"location":"#donations","title":"Donations","text":"

DeFFcode is free and open source and will always remain so.

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

"},{"location":"#citation","title":"Citation","text":"

Here is a Bibtex entry you can use to cite this project in a publication:

@software{deffcode,\n  author       = {Abhishek Singh Thakur},\n  title        = {abhiTronix/deffcode: v0.2.4},\n  month        = oct,\n  year         = 2022,\n  publisher    = {Zenodo},\n  version      = {v0.2.4},\n  doi          = {10.5281/zenodo.7155399},\n  url          = {https://doi.org/10.5281/zenodo.7155399}\n}\n

"},{"location":"changelog/","title":"Release Notes","text":""},{"location":"changelog/#v025-2023-01-11","title":"v0.2.5 (2023-01-11)","text":"New Features
  • FFdecoder:
    • Added OpenCV compatibility patch for YUV pixel-formats.
      • Implemented new patch for handling YUV pixel-formats(such as YUV420p, yuv444p, NV12, NV21 etc.) for exclusive compatibility with OpenCV APIs.
        • Note: Only YUV pixel-formats starting with YUV and NV are currently supported.
      • Added new -enforce_cv_patch boolean attribute for enabling OpenCV compatibility patch.
  • Sourcer:
    • Added Looping Video support.
      • Now raw-frame numbers revert to null(None) whenever any looping is defined through filter(such as -filter_complex \"loop=loop=3:size=75:start=25\") or prefix(\"-ffprefixes\":[\"-stream_loop\", \"3\"]).
  • Docs:
    • Added YUV frames example code for Capturing and Previewing BGR frames from a video file recipe.
    • Added YUV frames example code for Transcoding video using OpenCV VideoWriter API recipe.
    • Added YUV frames example code for Transcoding lossless video using WriteGear API recipe.
    • Added new CUVID-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Video Transcoding with OpenCV`s VideoWriter API recipe.
    • Added new CUDA-NVENC-accelerated Video Transcoding with WriteGear API recipe both for consuming BGR and NV12 frames.
    • Added new CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API recipe which is still WIP(\ud83d\udcacconfirmed with a GIF from tenor).
    • Added new Capturing and Previewing frames from a Looping Video recipe using -stream_loop option and loop filter.
    • Added docs for -enforce_cv_patch boolean attribute in ffparam dictionary parameter.
    • Added new python dependency block for recipes.
    • Reflected new OpenCV compatibility patch for YUV pixel-formats in code.
    • Added new content.code.copy and content.code.link features.
Updates/Improvements
  • FFhelper:
    • Replaced deprecating Retry API from requests.packages with requests.adapters.
  • Maintenance:
    • Replaced raw.github.com links with GitLab and GH links.
    • Removed unused code.
    • Updated log message.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
    • Updated test_frame_format test to include -enforce_cv_patch boolean attribute.
    • Updated test_source to test looping video support.
  • Setup:
    • Removed unused imports and patches.
    • Bumped version to 0.2.5.
  • Docs:
    • Updated Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing passage.
    • Updated and corrected docs hyperlinks in index.md and ReadMe.md
    • Update Zenodo Badge and BibTex entry.
    • Updated Readme.md banner image URLs.
    • Updated md-typeset text font size to .75rem.
    • Updated text and admonitions.
    • Updated recipe assumptions.
    • Updated Readme.md GIF URLs.
    • Updated abstract text in recipes.
    • Updated changelog.md.
    • Updated recipe code.
    • Removed old recipes.
Bug-fixes
  • FFdecoder API:
    • Fixed Zero division bug while calculating raw_bit_per_component.
  • FFhelper:
    • Fixed response.headers returning content-length as Nonetype since it may not necessarily have the Content-Length header set.
      • Reason: The response from gitlab.com contains a Transfer-Encoding field as 'Transfer-Encoding': 'chunked', which means data is sent in a series of chunks, so the Content-Length header is emitted. More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding#Directives
  • Docs:
    • Fixed https://github.com/badges/shields/issues/8671 badge issue in README.md
    • Removed deprecated text.
    • Fixed several typos in docs.
  • CI:
    • Added fix for codecov upload bug (https://github.com/codecov/codecov-action/issues/598).
      • Updated codecov-action workflow to v3.
      • Added new CODECOV_TOKEN GitHub secret.
Pull Requests
  • PR #37
"},{"location":"changelog/#v024-2022-10-07","title":"v0.2.4 (2022-10-07)","text":"New Features
  • FFdecoder API:
    • Implemented new comprehensive support for both discarding key default FFmpeg parameters from Decoding pipeline simply by assigning them null string values, and concurrently using values extracted from Output Stream metadata properties (available only when FFmpeg filters are defined) for formulating pipelines.
      • Added null string value support to -framerate and -custom_resolution attributes, as well as frame_format parameter for easily discarding them.
      • Re-Implemented calculation of rawframe pixel-format.
        • Reconfigured default rawframe pixel-format, Now rawframe pixel-format will always default to source_video_pixfmt with frame_format=\"null\".
        • Now with frame_format parameter value either \"null\" or invalid or undefined, rawframe pixel-format value is taken from output_frames_pixfmt metadata property extracted from Output Stream (available only when filters are defined). If valid output_video_resolution metadata property is found then it defaults to default pixel-format(calculated variably).
        • With frame_format=\"null\", -pix_fmt FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of rawframe resolution value.
        • Now with -custom_resolution dictionary attribute value either \"null\" or invalid or undefined, rawframe resolution value is first taken from output_video_resolution metadata property extracted from Output Stream (available only when filters are defined), next from source_video_resolution metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_resolution valid metadata properties are found then RuntimeError is raised.
        • With -custom_resolution dictionary attribute value \"null\", -s/-size FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of output framerate value.
        • Now with -framerate dictionary attribute either null or invalid or undefined, output framerate value is first taken from output_video_framerate metadata property extracted from Output Stream (available only when filters are defined), next from source_video_framerate metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_framerate valid metadata properties are found then RuntimeError is raised.
        • With -framerate dictionary attribute value \"null\", -r/-framerate FFmpeg parameter will not be added to Decoding pipeline.
    • Implemented passing of simple -vf filters, complex -filter_complex filters, and pre-headers(via -ffprefixes) directly to Sourcer API's sourcer_params parameter for probing Output Stream metadata and filter values.
  • Sourcer API:
    • Implemented new comprehensive approach to handle source_demuxer parameter w.r.t different source parameter values.
      • The source_demuxer parameter now accepts \"auto\" as its value for enabling Index based Camera Device Capture feature in Sourcer API.
      • Sourcer API auto-enforces source_demuxer=\"auto\" by default, whenever a valid device index (uses validate_device_index method for validation) is provided as its source parameter value.
        • \u26a0\ufe0f Sourcer API will throw Assertion error if source_demuxer=\"auto\" is provided explicitly without a valid device index at its source parameter.
      • Source API now accepts all +ve and -ve device indexes (e.g. -1,0,1,2 etc.) to its source parameter, both as in integer and string of integer types as source in Index based Camera Device Capture feature.
        • Sourcer API imports and utilizes extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system.
          • \u26a0\ufe0f Sourcer API will throw RuntimeError on failure to identify any device.
        • Sourcer API auto verifies that the specified source device index is in range of the devices discovered.
          • \u26a0\ufe0f Sourcer API will raise ValueError if value goes out of valid range.
        • Sourcer API also automatically handle -ve indexes if specified within the valid range.
        • Implemented patch to auto-add video= suffix to selected device name before using it as video source on Windows OSes.
        • Added patch for handling dictionary of devices paths(with devices names as values) and log messages on Linux Oses.
        • Added copy import for shallow copying various class parameters.
      • Implemented new Support for additional FFmpeg parameters and Output metadata.
        • Added three new metadata properties: output_video_resolution, output_video_framerate, output_frames_pixfmt for handling extracted Output Stream values, whenever additional FFmpeg parameters(such as FFmpeg filters) are defined.
        • Added support for auto-handling additional FFmpeg parameters defined by sourcer_params dictionary parameters.
        • Implement new separate pipeline for parsing Output Stream metadata by decoding video source using null muxer for few microseconds whenever additional FFmpeg parameters(such as -vf filters) are defined by the user.
        • Included new metadata_output internal parameter for holding Output Stream metadata splitted from original Sourcer Metadata extracted from new pipeline.
        • Included new output_video_resolution, output_video_framerate, output_frames_pixfmt internal parameters for metadata properties, whenever Output Stream Metadata available.
        • Added new extract_output boolean parameter to extract_video_pixfmt and extract_resolution_framerate internal methods for extracting output pixel-format, framerate and resolution using Output Stream metadata instead of Sourcer Metadata, whenever available.
      • Added tuple datatype to sourcer_params exception.
      • Added dict2Args import.
    • Added enumerate_devices property object to enumerate all probed Camera Devices connected to a system names along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.
    • Added new force_retrieve_missing parameter to retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata, when force_retrieve_missing=True.
    • Added various output stream metadata properties that are only available when additional FFmpeg parameters(such as filters) are defined manually, by assigning them counterpart source stream metadata property values
  • FFhelper:
    • Implemented new extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system and supported by valid OS specific FFmpeg demuxer.
      • Added support for three OS specific FFmpeg demuxers: namely dshow for Windows, v4l2 for Linux, and avfoundation for Darwin/Mac OSes.
      • Implemented separate code for parsing outputs of python subprocess module outputs provided with different commands for discovering all Video-Capture devices present on system.
        • Processed dshow (on Windows) and avfoundation (on Darwin) demuxers in FFmpeg commands with -list_devices true parameters using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names/indexes.
        • Used v4l2-ctl submodule command on Linux machines for listing all Video-Capture devices using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names and true system /dev/video paths.
          • Added patch for a single device with multiple /dev/video paths (each for metadata, video, controls), where it iterates on each path to find the exact path that contains valid video stream.
          • Added elaborated checks for catching all possible system errors that can occur while running v4l2-ctl submodule command.
          • The method will return discovered devices as list of dictionaries with device paths(/dev/video) as keys and respective device name as the values, instead of default list of device names.
          • Added patch for handling Linux specific log messages.
      • Added various logging messages to notify users about all discover devices names/paths w.r.t indexes.
      • \u26a0\ufe0f The extract_device_n_demuxer method will raise RuntimeError if it fails to identify any device.
      • Added various checks to assert invalid input parameters and unsupported OSes.
      • Added machine_OS parameter to specify OS running on the system, must be value of platform.system() module. If invalid the method will raise ValueError.
  • Utilities:
    • Added new validate_device_index() method to verify whether a given device index is valid or not.
      • Only Integers or String of integers are valid indexes.
      • Returns a boolean value, confirming whether valid(If true), or not(If False).
    • Added checks to support all +ve and -ve integers, both as integer and string types.
  • Docs:
    • Added new validate_device_index() method and its parameters description.
    • Added new extract_device_n_demuxer() method and its parameters description.
    • Added Decoding Camera Devices using Indexes support docs.
      • Added decode-camera-devices.md doc for Decoding Camera Devices using Indexes.
        • Added Enumerating all Camera Devices with Indexes example doc with code.
        • Added Capturing and Previewing frames from a Camera using Indexes example doc with code.
      • Added Camera Device Index support docs to FFdecoder and Sourcer API params.
  • CI:
    • Added check exception for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Added new test_discard_n_filter_params unittest for test recently added supported for both discarded parameters and filter values.
Updates/Improvements
  • FFdecoder API:
    • Extended range of supported output frame pixel-formats.
      • Added new pixel-formats to supported group by extending raw bits-per-component range.
    • Simplified raw frame dtype calculation based on selected pixel-format.
      • output_frames_pixfmt metadata property(if available) will be overridden to rgb24.
    • Replaced continue with break in generateFrame() method.
    • Improved handling of frame_format parameter.
  • Sourcer API:
    • Simplified JSON formatting and returning values logic.
    • Updated logging messages text and position.
    • Removed redundant variable definitions.
    • Changed related internal variable names w.r.t metadata property names.
    • Replaced os_windows internal parameter with machine_OS, and changed its input from os.name to more flexible platform.system().
    • Removed source_extension internal parameter and assigned values directly.
  • FFhelper:
    • Implemented more robust pattern matching for Linux machines.
    • Updated logs in check_sp_output() method for improving error output message.
    • Implemented \"Cannot open device\" v4l2-ctl command Error logs.
  • Maintenance:
    • Bumped version to 0.2.4.
    • Updated code comments.
  • CI:
    • Updated FFdecoder API's test_camera_capture unittest to test new Index based Camera Device Capturing on different platforms.
      • Added various parametrize source and source_demuxer parameter data to attain maximum coverage.
      • Added result field to fail and xfail unittest according to parametrize data provided on different platforms.
      • Removed pytest.mark.skipif to support all platforms.
    • Added and updated various parametrize test data to attain maximum coverage.
    • Limited range of extracted frames, for finishing tests faster.
    • Updated unittests to reflect recent name changes.
    • Disabled capturing of stdout/stderr with -s flag in pytest.
  • Setup:
    • Updated description metadata.
  • Bash Script:
    • Created undeleteable undelete.txt file for testing on Linux envs.
    • Updated undelete.txt file path.
    • Made FFmpeg output less verbose.
  • Docs:
    • Updated FFdecoder API params docs w.r.t recent changes and supported for both discarded parameters and filter values.
      • Added new admonitions to explain handling of \"null\" and (special-case), undefined, or invalid type values in various parameters/attributes.
      • Added new footer reference explaining the handling of Default pixel-format for frame_format parameter.
      • Added missing docs for -default_stream_indexes ffparams attribute.
    • Added docs for recently added additional FFmpeg parameter in Sourcer API's sourcer_params parameter.
      • Removed unsupported -custom_resolution sourcer_params attributes from sourcer_params parameter docs.
      • Removed redundant -vcodec and -framerate attributes from sourcer_params parameter docs.
    • Updated both basic and advanced project Index hyperlinks.
    • Moved decoding-live-feed-devices.md doc from basic to advanced directory.
    • Updated page navigation in mkdocs.yml.
    • Update announcement bar to feature Index based Camera Device Capture support.
    • Updated Project description and Key features of DeFFcode.
    • Updated README.md with latest information.
    • Updated source and source_demuxer param doc.
    • Updated Hardware-Acceleration docs.
      • Updated Hardware-Accelerated Video Decoding and Transcoding docs to inform users about DeFFcode generated YUV frames not yet supported by OpenCV and its APIs.
    • Updated recipes docs to reflect recent changes in APIs.
    • Updated parameter docs to reflect recent name changes.
    • Updated parameters/attributes introductory descriptions.
    • Updated various parametrize data to attain maximum coverage.
    • Updated Zenodo badge and the BibTeX entry.
    • Updated method description texts and logging messages.
    • Update title headings, icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Breaking Updates/Changes
  • API:
    • Implemented new Index based Camera Device Capture feature (Similar to OpenCV), where the user just have to assign device index as integer (-n to n-1) in source parameter of DeFFcode APIs to directly access the given input device in few seconds.
  • FFdecoder API
    • Unsupported dtype pixel-format always defaults to rgb24.
  • Sourcer API:
    • Renamed output_video_resolution metadata property to output_frames_resolution.
    • Renamed output_video_framerate metadata property to output_framerate.
Bug-fixes
  • FFdecoder API:
    • Removed redundant dummy value for output_frames_pixfmt metadata property.
    • Fixed critical KeyError bug arises due to missing output metadata properties.
      • Enforced force_retrieve_missing parameter in Sourcer API's retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata.
      • Added new missing_prop internal class variable for handling metadata properties missing, received from Sourcer API.
      • Moved ffdecoder_operational_mode to missing metadata properties that cannot be updated but are read only.
      • Added missing metadata properties to metadata class property object for easy printing along with other metadata information.
      • Implemented missing metadata properties updates via overridden metadata class property object.
        • Added counterpart_prop dict to handle all counterpart source properties for each missing output properties.
        • Implemented missing output properties auto-updation w.r.t counterpart source property.
        • Added separate case for handling only missing metadata properties and notifying user about counterpart source properties.
    • Fixed source metadata properties update bug causing non-existential missing metadata properties to be added to source metadata properties dictionary along with source metadata property.
      • Replaced update() calling on value dict directly with explicitly assigning values to source metadata properties dictionary.
      • Simplified missing_prop validation.
      • Removed unwanted continue in middle of loop.
    • Remove unusable exclusive yuv frames patch.
    • Fixed KeyError bug arises due to wrong variable placement.
    • Fixed approx_video_nframes metadata property check.
    • Fixed av_interleaved_write_frame(): broken pipe warning bug by switching process.terminate() with process.kill().
    • Fixed AttributeError bug caused due to typo in logger.
  • FFhelper:
    • Fixed check_sp_output() method returning Standard Error (stderr) even when Nonetype.
    • Fixed logger requiring utf-8 decoding.
    • Fixed missing force_retrieve_stderr argument to check_sp_output in extract_device_n_demuxer method on Linux platforms.
    • Fixed logger message bug.
  • Utils:
    • Fixed logger name typo.
  • Maintenance:
    • Fixed hyperlinks to new GitHub's form schemas.
    • Fixed typos in logs messages.
    • Removed redundant code.
    • Updated code comments.
  • Setup:
    • Rearranged long_description patches to address unused patch bug.
  • Bash Script:
    • Fixed chattr: No such file or directory bug.
  • CI:
    • Fixed missing lavfi demuxer for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Fixed missing ffparams parameter bug in test_discard_n_filter_params() unittest.
    • Fixed test_camera_capture test.
    • Removed redundant similar ValueError checks.
    • Fixed typo in pytest arguments.
    • Fixed missing arguments.
  • Docs:
    • Fixed invalid hyperlinks in ReadMe.md
    • Fixed bad formatting and context.
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #29
  • PR #32
"},{"location":"changelog/#v023-2022-08-11","title":"v0.2.3 (2022-08-11)","text":"New Features
  • Docs:
    • Added Zenodo Bibtex entry and badge in docs for easy citation.
    • Added new <div> tag bounding-box style to the Static FFmpeg binary download links in FFmpeg Installation Doc for better accessibility.
  • Maintenance:
    • Switched to new Issue GitHub's form schema using YAML:
      • Added new bug_report.yaml Issue GitHub's form schema for Bug Reports.
      • Added new idea.yaml Issue GitHub's form schema for new Ideas.
      • Added new question.yaml Issue GitHub's form schema for Questions.
      • Deleted old deprecated markdown(.md) files.
      • Polished forms.
Updates/Improvements
  • Maintenance:
    • Added new patterns to .gitignore to ignore vim files.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
  • Setup:
    • Added new patches for using README.md text as long_description metadata.
      • Implemented new patch to remove GitHub README UI specific text.
    • Simplified multiple str.replace to chained str.replace of better readability.
    • Bumped version to 0.2.3.
  • Docs:
    • Updated recipes to include with statement access method.
      • Updated existing recipes to include with statement access method in FFdecoder APIs.
      • Included new example code of accessing RGB frames using with statement access method.
      • Updated Recipe title to \"Accessing RGB frames from a video file\" across docs.
    • Included warning admonition for advising users to always use trim with reverse filter.
    • Updated docs text font to Libre Franklin.
    • Updated method description texts and logging messages.
    • Update icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Bug-fixes
  • FFdecoder API:
    • Fixed Context Manager methods.
      • Fixed __enter__ method returning class instance instead of formulating pipeline.
      • Fixed __exit__ method calling wrong non-existent method.
  • Setup:
    • Fixed missing comma(,) in keywords metadata.
    • Fixed bug in patch string.
  • Docs:
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #26
"},{"location":"changelog/#v022-2022-08-09","title":"v0.2.2 (2022-08-09)","text":"New Features
  • Sourcer API:
    • Added support for -ffprefixes attribute through Sourcer API's sourcer_param dictionary parameter (similar to FFdecoder API).
  • FFdecoder API:
    • Added new output_frames_pixfmt metadata property to preview and handle output frames pixel-format.
  • Docs:
    • Added separate \"Basic\" and \"Advanced\" Recipes markdowns files with self-explanatory text, related usage code, asset (such as images, diagrams, GIFs, etc.), and UI upgrades for bringing standard quality to visual design.
    • Added separate index.md for Basic and Advanced Recipes with introductory text and curated hyperlinks for quick references to various recipes (separated with sub-categories \"Decoding\", \"Transcoding\", and \"Extracting Video Metadata\").
    • Added related admonitions to specify python dependencies as well as other requirements and relevant information required for each of these recipes.
    • Added new Basic Decoding Recipes:
      • Added Decoding Video files with various pixel formats recipes.
      • Added Decoding Live Feed Devices recipes with source_demuxer FFdecoder API parameter.
      • Added Decoding Image sequences recipes supporting Sequential, Glob pattern , Single (looping) image.
      • Added Decoding Network Streams recipes.
    • Added new Basic Transcoding Recipes:
      • Added Transcoding Live frames recipes with OpenCV and WriteGear.
      • Added Transcoding Live Simple Filtergraphs recipes with OpenCV.
      • Added Saving Key-frames as Image recipes with different image processing libraries.
    • Added new Basic Extracting Video Metadata Recipes:
      • Added Extracting Video Metadata recipes with FFdecoder and Sourcer APIs.
    • Added new Advanced Decoding Recipes:
      • Added Hardware-Accelerated Video Decoding recipe using NVIDIA's H.264 CUVID Video-decoder(h264_cuvid).
      • Added Decoding Live Virtual Sources recipes with many test patterns using lavfi input virtual device.
    • Added new Advanced Decoding Recipes:
      • Added lossless Hardware-Accelerated Video Transcoding recipe with WriteGear API.
      • Added Transcoding Live Complex Filtergraphs recipes with WriteGear API.
      • Added Transcoding Video Art with Filtergraphs recipes with WriteGear API for creating real-time artistic generative video art using simple and complex filtergraphs.
    • Added new Advanced Updating Video Metadata Recipes:
      • Added Updating Video Metadata recipes with user-defined as well as source metadata in FFdecoder API.
    • Added new dark and light theme logo support.
    • Added new recipes GIF assets to gifs folder.
    • Added new dark logo deffcode-dark.png asset to images folder.
    • Added new ffdecoder.png and sourcer.png Image assets to images folder.
    • Added new navigation.tabs feature.
    • Added Material Announcement-Bar notifying recent changes.
Updates/Improvements
  • Sourcer API:
    • Implemented new validation checks to ensure given source has usable video stream available by checking availability of either video bitrate or both frame-size and framerate_ properties in the source metadata.
    • Improved extract_resolution_framerate method for making framerate extraction more robust by falling back to extracting TBR value when no framerate value available in the source metadata.
  • FFdecoder API:
    • Updated metadata property object to validate and override source metadata properties directly by overloading same property object before formulating Frames Decoder Pipeline:
      • Implemented validation checks to verify each validate manually assigned source metadata property against specific datatype before overriding.
      • Updated logging to notify invalid datatype values when assigned through metadata property object.
      • Added support for overriding source_video_resolution source metadata property to control frame-size directly through metadata.
      • Added support for overriding output_frames_pixfmt metadata attribute to be used as default pixel-format, when frame_format parameter value is None-type.
      • Improved handling of source metadata keys in metadata property object.
    • Updated metadata property object to handle and assign User-defined metadata directly by overloading the same property object:
      • Added new internal user_metadata class variable to handle all User-defined metadata information separately.
      • FFdecoder API's metadata property object now returns User-defined metadata information merged with Source Video metadata.
      • Added tuple value warning log to notify users json module converts Python tuples to JSON lists.
    • Improved logic to test validity of -custom_resolution attribute value through ffparams dictionary parameter.
    • Improved handling of FFmpeg pipeline framerate with both user-defined and metadata defined values.
    • Added tuple to exception in datatype check for ffparams dictionary parameter.
    • Added datatype validation check for frame_format parameter.
    • Improved handling of -framerate parameter.
  • Maintenance:
    • Reformatted all Core class and methods text descriptions:
      • Rewritten introductory each API class description.
      • Moved reference block from index.md to class description.
      • Fixed missing class and methods parameter description.
      • Fixed typos and context in texts.
      • Reformatted code comments.
    • Simplified for loop with if condition checking in metadata property object.
    • Updated logging comments.
  • Setup:
    • Updated project description in metadata.
    • Bumped version to 0.2.2.
  • Docs:
    • Updated Introduction doc:
      • Added new text sections such as \"Getting Started\", \"Installation Notes\", \"Recipes a.k.a Examples\" and \"API in a nutshell\".
      • Rewritten Introduction(index.md) with recent Information, redefined context, UI changes, updated recipe codes, curated hyperlinks to various recipes(separated with categories), and relatable GIFs.
      • Updated spacing in index.md using spacer class within <div> tag and &nbsp;.
      • Reformatted and centered DeFFcode Introductory description.
      • Reformatted FFmpeg Installation doc and Issue & PR guidelines.
      • Updated static FFmpeg binaries download URLs in FFmpeg Installation doc.
      • Refashioned text contexts, icons, and recipes codes.
      • Updated Key Features section with reflecting new features.
    • Updated README.md:
      • Updated README.md w.r.t recent changes in Introduction(index.md) doc.
      • Simplified and Reformatted text sections similar to Introduction doc.
      • Imported new \"Contributions\" and \"Donations\" sections from VidGear docs.
      • Added collapsible text and output section using <summary> and <detail> tags.
      • Added experimental note GitHub blockquote to simulate admonition in README.md.
      • Removed tag-line from README.md and related image asset.
      • Simplified and Grouped README URL hyperlinks.
      • Removed Roadmap section.
    • Updated Recipes docs:
      • Revamped DeFFcode Introduction index.md with new Information, Context and UI changes, Updated example codes and hyperlinks.
      • Updated Announcement Bar to fix announcement_link variable and text.
      • Updated footer note to notify users regarding tuple value warning in FFdecoder API.
      • Rewritten recipes w.r.t breaking changes in APIs.
    • Updated Reference docs:
      • Completely revamped API's parameter reference docs.
      • Added new Functional Block Diagrams to FFdecoder and Sourcer API References.
      • Rewritten and Reformatted FFdecoder and Sourcer API's parameter reference docs with new information w.r.t recent changes.
      • Implemented new admonitions explaining new changes, related warnings/errors, usage examples etc.
      • Removed redundant advanced.md and basic.md docs.
      • Added new abstracts to FFhelper and Utils docs.
    • Updated docs site navigation and titles:
      • Reformatted index.md and installation/index.md.
      • Renamed help/index.md to help/help.md.
      • Moved basic and advanced recipes from example to recipes folder.
      • Imported \"Donations\" sections from VidGear docs to help.md.
      • Added updated page-title and navigation hyperlinks in mkdocs.yml to new markdown files incorporated recently.
      • Updated internal navigation hyperlinks in docs and removed old redundant file links.
    • Updated docs UI:
      • Added custom spacer class in CSS for custom vertical spacing.
      • Imported new \"New\", \"Advance\", \"Alert\", \"Danger\" and \"Bug\" admonitions custom CSS UI patches from vidgear.
      • Updated all admonitions icons with new custom icon SVG+XML URLs.
      • Reformatted custom.css and added missing comments.
      • Updated docs fonts:
        • Updated text font to Heebo.
        • Updated code font to JetBrains Mono.
      • Updated primary and accent colors:
        • Updated primary light color to light green.
        • Updated primary dark color to amber.
        • Updated accent light color to green.
        • Updated accent dark color to lime.
      • Replaced admonitions with appropriate ones.
      • Changed Color palette toggle icons.
      • Updated icons in title headings.
    • Updated admonitions messages.
    • Updated changelog.md.
  • CI:
    • Pinned jinja2 version to <3.1.0, since jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799).
    • Updated unittests w.r.t recent changes in APIs:
      • Updated test_frame_format unittest to include manually assign output pixel-format via metadata property object.
      • Updated test_metadata unittest to include new checks parameter to decide whether to perform Assertion test on assigned metadata properties in FFdecoder API.
      • Added new parametrize attributes in test_metadata and test_seek_n_save unittests to cover every use-cases.
      • Replaced IOError with ValueError in Sourcer API unittests.
    • Updated test_metadata unittest to verify tuple value warning.
    • Updated unittests to increase code coverage significantly.
Breaking Updates/Changes
  • Sourcer API:
    • Sourcer API's retrieve_metadata() method now returns parsed metadata either as JSON string or dictionary type.
      • Added new pretty_json boolean parameter to retrieve_metadata(), that is when True, returns metadata formatted as JSON string instead of default python dictionary.
    • Changed IOError to ValueError in Sourcer API, raised when source with no decodable audio or video stream is provided.
  • FFdecoder API:
    • Rename extraparams dictionary parameter to ffparams in FFdecoder API.
    • The source metadata value cannot be altered through metadata property object in FFdecoder API.
    • Removed -ffpostfixes attribute support from ffparams dictionary parameter in FFdecoder API, since totally redundant in favor of similar -ffprefixes and -clones attributes.
Bug-fixes
  • FFdecoder API:
    • Fixed metadata property object unable to process user-defined keys when any source metadata keys are defined.
    • Fixed TypeError bug with string type -framerate parameter values.
  • Sourcer API:
    • Fixed Sourcer API throws IOError for videos containing streams without both source bitrate and framerate defined (such as from lavfi input virtual device).
    • Fixed AttributeError bug due to typo in variable name.
  • CI:
    • Fixed support for newer mkdocstring version in DeFFcode Docs Deployer workflow.
      • Added new mkdocstrings-python-legacy dependency.
      • Replaced rendering variable with options.
      • Removed pinned mkdocstrings==0.17.0 version.
      • Removed redundant variables.
    • Updated test_metadata unittest to fix AssertionError Bug.
  • Docs:
    • Fixed some admonitions icons not showing bug using !important rule in CSS.
    • Fixed 404.html static page not showing up.
    • Fixed invalid internal navigation hyperlinks and asset paths.
    • Removed quote/cite/summary admonition custom UI patches.
    • Removed redundant information texts.
    • Fixed typos in code comments.
    • Fixed typos in example code.
Pull Requests
  • PR #23
"},{"location":"changelog/#v021-2022-07-14","title":"v0.2.1 (2022-07-14)","text":"New Features
  • Sourcer API:
    • Implemented support for extracting metadata from live input devices/sources.
    • Added new source_demuxer and forced_validate parameters to validate_source internal method.
    • Implemented logic to validate source_demuxer value against FFmpeg supported demuxers.
    • Rearranged metadata dict.
    • Updated Code comments.
  • FFdecoder API:
    • Implemented functionality to supported live devices by allowing device path and respective demuxer into pipeline.
    • Included -f FFmpeg parameter into pipeline to specify source device demuxer.
    • Added special case for discarding -framerate value with Nonetype.
  • CI:
    • Added new unittest test_camera_capture() to test support for live Virtual Camera devices.
    • Added new v4l2loopback-dkms, v4l2loopback-utils and kernel related APT dependencies.
  • Bash Script:
    • Added new FFmpeg command to extract image datasets from given video on Linux envs.
    • Created live Virtual Camera devices through v4l2loopback library on Github Actions Linux envs.
      • Added v4l2loopback modprobe command to setup Virtual Camera named VCamera dynamically at /dev/video2.
      • Added v4l2-ctl --list-devices command for debugging.
      • Implemented FFmpeg command through nohup(no hangup) to feed video loop input to Virtual Camera in the background.
Updates/Improvements
  • Sourcer API:
    • Only either source_demuxer or source_extension attribute can be present in metadata.
    • Enforced forced_validate for live input devices/sources in validate_source internal method.
  • FFdecoder API:
    • Rearranged FFmpeg parameters in pipeline.
    • Removed redundant code.
    • Updated Code comments.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
  • CI:
    • Restricted test_camera_capture() unittest to Linux envs only.
    • Removed return_generated_frames_path() method support for Linux envs.
    • Pinned jinja2 version, since jinja2 3.1.0 or above breaks mkdocs.
      • jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799), therefore pinned jinja2 version to <3.1.0.
  • Bash Script:
    • Updated to latest FFmpeg Static Binaries links.
      • Updated download links to abhiTronix/ffmpeg-static-builds hosting latest available versions.
      • Updated date/version tag to 12-07-2022.
      • Removed depreciated binaries download links and code.
  • Setup:
    • Bumped version to 0.2.1.
  • Docs:
    • Updated changelog.md.
Breaking Updates/Changes
  • Implement support for live input devices/sources.
    • source parameter now accepts device name or path.
    • Added source_demuxer parameter to specify demuxer for live input devices/sources.
    • Implemented Automated inserting of -f FFmpeg parameter whenever source_demuxer is specified by the user.
Bug-fixes
  • Sourcer API:
    • Fixed Nonetype value bug in source_demuxer assertion logic.
    • Fixed typos in parameter names.
    • Added missing import.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
    • Fixed bug with get_supported_demuxers not detecting name patterns with commas.
    • Removed redundant logging.
  • CI:
    • Fixed critical permission bug causing v4l2loopback to fail on Github Actions Linux envs.
      • Elevated privileges to root by adding sudo to all commands(including bash scripts and python commands).
      • Updated vidgear dependency to pip install from its git testing branch with recent bug fixes.
      • Replaced relative paths with absolute paths in unit tests.
    • Fixed WriteGear API unable to write frames due to permission errors.
    • Fixed test_source_playback() test failing on Darwin envs with OLD FFmpeg binaries.
      • Removed custom_ffmpeg value for Darwin envs.
    • Fixed various naming typos.
    • Fixed missing APT dependencies.
Pull Requests
  • PR #17
"},{"location":"changelog/#v020-2022-03-21","title":"v0.2.0 (2022-03-21)","text":"New Features
  • Sourcer API:
    • Added a new source_audio_samplerate metadata parameter:
      • Re-implemented __extract_audio_bitrate internal function from scratch as __extract_audio_bitrate_nd_samplerate.
        • Implemented new algorithm to extract both audio bitrate and samplerate from given source.
        • Updated regex patterns according to changes.
      • Updated __contains_video and __contains_audio logic to support new changes.
    • Added metadata extraction support:
      • Added retrieve_metadata class method to Sourcer API for extracting source metadata as python dictionary.
        • Populated private source member values in dictionary with distinct keys.
    • Added new -force_validate_source attribute to Sourcer API's sourcer_params dict parameter for special cases.
    • Implemented check whether probe_stream() called or not in Sourcer API.
  • FFdecoder API:
    • Added metadata extraction and updation support:
      • Added metadata property object function to FFdecoder API for retrieving source metadata from Sourcer API as dict and return it as JSON dump for pretty printing.
        • Added Operational Mode as read-only property in metadata.
      • Added metadata property object with setter() method for updating source metadata with user-defined dictionary.
        • Implemented way to manually alter metadata keys and values for custom results.
  • Docs:
    • Added new comprehensive documentation with Mkdocs:
      • Added new image assets:
        • Added new Deffcode banner image, logo and tagline
        • Added new icon ICO file with each layer of the favicon holds a different size of the image.
        • Added new png images for best compatibility with different web browsers.
      • Added new docs files:
        • Added new index.md with introduction to project.
        • Added new changelog.md.
        • Added license.md
        • Added new index.md with instructions for contributing in DeFFcode.
          • Added issue.md with Issue Contribution Guidelines.
          • Added PR.md with PR Contribution Guidelines.
        • Added new custom.js to add gitter sidecard support.
        • Added new custom.css that brings standard and quality visual design experience to DeFFcode docs.
          • Added new admonitions new and alert.
        • Added separate LICENSE(under CC creative commons) and README.md for assets.
        • Added new main.html extending base.html for defining custom site metadata.
        • Added deFFcode banner image to metadata.
        • Added twitter card and metadata.
        • Added version warning for displaying a warning when the user visits any other version.
        • Added footer sponsorship block.
        • Added gitter card official JS script dist.
        • Added new custom 404.html to handle HTTP status code 404 Not Found.
          • Implemented custom theming with new CSS style.
          • Added custom 404 image asset.
        • Added new index.md with DeFFcode Installation notes.
          • Added info about Supported Systems, Supported Python legacies, Prerequisites, Installation instructions.
          • Added Pip and Source Installation instructions.
        • Added new ffmpeg_install.md with machine-specific instructions for FFmpeg installation.
        • Added new index.md with different ways to help DeFFcode, other users, and the author.
          • Added info about Starring and Watching DeFFcode on GitHub, Helping with open issues etc.
          • Added Twitter intent used for tweeting #deffcode hashtags easily.
          • Added Kofi Donation link button.
          • Added author contact links and left align avatar image.
        • Added new get_help.md to get help with DeFFcode.
          • Added DeFFcode gitter community link.
          • Added other helpful links.
      • Added new assets folders.
      • Added Basic Recipes with basic.md
      • Added Advanced Recipes with advanced.md
      • Added all API References.
        • Added mkdocstrings automatic documentation from sources.
        • Added new index.md for FFdecoder API with its description and explaining its API.
        • Added new index.md for Sourcer API with its description and explaining its API.
        • Added ffhelper methods API references.
        • Added utils methods API references.
      • Added all API Parameters.
        • Added new params.md for FFdecoder API explaining all its parameters.
        • Added new params.md for Sourcer API explaining all its parameters.
        • Added Mkdocs support with mkdocs.yml
      • Implemented new mkdocs.yml with relevant parameters.
        • Added extended material theme with overridden parts.
        • Added site metadata with site_name, site_url, site_author, site_description, repo_name, repo_url, edit_uri, copyright etc.
        • Added navigation under sections for easily accessing each document.
        • Implemented Page tree for DeFFcode docs.
        • Added features like navigation.tracking, navigation.indexes, navigation.top, search.suggest, search.highlight, search.share, content.code.annotate.
        • Added separate palette [default]light(with primary:green accent: dark green) and [slate]dark(with primary:teal accent: light green) mode.
        • Added Color palette toggle switch with icon material/home-lightning-bolt.
        • Added support for all pymarkdown-extensions.
        • Added google fonts for text: Quicksand and code: Fira Code.
        • Added custom logo and icon for DeFFcode.
        • Added support for plugins like search, git-revision-date-localized, minify.
        • Added support for mkdocstrings plugin for auto-built API references.
          • Added python handler for parsing python source-code to mkdocstrings.
          • Improved source-code docs for compatibility with mkdocstrings.
        • Added support for extensions like admonition, attr_list, codehilite, def_list, footnotes, meta, and toc.
        • Added social icons and links.
        • Added custom extra_css and extra_javascript.
        • Added support for en (English) language.
      • Added new badges to README.md for displaying current status of CI jobs and coverage.
      • Added Roadmap to README.md
  • CI:
    • Automated CI support for different environments:
      • Implemented auto-handling of dependencies installation, unit testing, and coverage report uploading.
      • Added GitHub Action workflow for Linux envs:
        • Added and configured CIlinux.yml to enable GitHub Action workflow for Linux-based Testing Envs.
        • Added 3.7+ python-versions to build matrix.
        • Added code coverage through codecov/codecov-action@v2 workflow for measuring unit-tests effectiveness.
          • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
      • Added Appveyor workflow for Windows envs:
        • Add and configured appveyor.yml to enable Appveyor workflow for Windows-based Testing Envs.
        • Added 3.7+ 64-bit python-versions to build matrix.
        • Enabled fast_finish to exit immediately on error.
      • Added Azure-Pipelines workflow for MacOS envs:
        • Add and configured azure-pipelines.yml to enable Azure-Pipelines workflow for MacOS-based Testing Envs.
        • Added code coverage through codecov workflow for measuring unit-tests effectiveness.
          • Added online auto validation of codecov bash script using SH256SUM and sig files as recommended.
        • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
        • Added 3.7+ python-versions to build matrix.
      • Added automated flake8 testing to discover any anomalies in code.
      • Added master branches for triggering CI.
    • Implement new automated Docs Building and Deployment on gh-pages through GitHub Actions workflow:
      • Added new workflow yaml docs_deployer.yml for automated docs deployment.
      • Added different jobs with ubuntu-latest environment to build matrix.
      • Added actions/checkout@v2 for repo checkout and actions/setup-python@v2 for python environment.
      • Pinned python version to 3.8 for python environment in docs building.
      • Added GIT_TOKEN, GIT_NAME, GIT_EMAIL environment variables through secrets.
      • Added Mkdocs Material theme related python dependencies and environments.
      • Added push on master and dev branch release with published as triggers.
      • Pinned mkdocstrings==0.17.0.
    • Added new Automated Docs Versioning:
      • Implemented Docs versioning through mike.
      • Separate new workflow steps to handle different versions.
      • Added step to auto-create RELEASE_NAME environment variable from DeFFcode version file.
      • Update docs deploy workflow to support latest, release and dev builds.
      • Added automatic release version extraction from GitHub events.
    • Added Skip Duplicate Actions Workflow to DeFFcode Docs Deployer:
      • Added Skip Duplicate Actions(fkirc/skip-duplicate-actions@master) Workflow to DeFFcode Docs Deployer to prevent redundant duplicate workflow-runs.
  • Maintenance:
    • New DeFFcode project issue and PR templates:
      • Added PR template:
        • Added a pull request template(PULL_REQUEST_TEMPLATE.md) for project contributors to automatically see the template's contents in the pull request body.
        • Added Brief Description, Requirements / Checklist, Related Issue, Context, Types of changes blocks.
      • Added Proposal, Bug-Report and Question templates:
        • Created an ISSUE_TEMPLATE subdirectory to contain multiple issue templates.
        • Added manually-created Proposal(proposal.md) and Question(question.md) issue templates for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Any Other Information like blocks.
        • Added a manually-created Bug Report(bug_report.md) issue template to ISSUE_TEMPLATE subdirectory for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Expected Behavior, Actual Behavior, Possible Fix, Steps to reproduce, Miscellaneous like blocks.
        • Added YAML frontmatter to each issue template to pre-fill the issue title, automatically add labels and assignees, and give the template a name and description.
        • Added a config.yml file to the .github/ISSUE_TEMPLATE folder to customize the issue template chooser that people see when creating a new issue.
        • Set blank_issues_enabled parameter to false to encourage contributors to use issue templates.
        • Added contact_links parameter with gitter community link to receive regular issues outside of GitHub.
      • Added new FUNDING.yml with ko-fi donation link.
      • Added .gitattributes for DeFFcode, that set the default behavior, in case people don't have core.autocrlf set.
      • Imported Codecov config(codecov.yml) from vidgear to modify coverage parameters.
  • Tests:
    • Added DeFFcode unit tests with pytest:
      • Added essential.py for defining all essential functions necessary for DeFFcode unit tests.
      • Added return_static_ffmpeg, remove_file_safe, return_testvideo_path, return_generated_frames_path, actual_frame_count_n_frame_size essential functions.
      • Added is_windows global variable.
      • Added related imports and logging.
      • Added __init__.py.
      • Moved all files to test folder.
      • Added DeFFcode's utils unit tests with pytest.
        • Added new test_loggerhandler and test_dict2Args tests.
      • Added DeFFcode's ffhelper unit tests with pytest.
        • Added new test_ffmpeg_binaries_download, test_validate_ffmpeg, test_get_valid_ffmpeg_path, test_check_sp_output, test_is_valid_url, test_is_valid_image_seq, and test_validate_imgseqdir parametrize tests.
      • Added DeFFcode's Sourcer API unit tests with pytest.
        • Added new test_source and test_probe_stream_n_retrieve_metadata parametrize tests.
      • Added DeFFcode's FFdecoder API unit tests with pytest.
        • Added new test_source_playback, test_frame_format, test_metadata, test_seek_n_save, and test_FFdecoder_params parametrize unit tests.
      • Added related imports and logging.
      • Added unit test for delete_file_safe utils function.
  • Bash:
    • \ud83d\udd27 Imported prepare_dataset.sh from vidgear for downloading pytest datasets to temp dir.
Updates/Improvements
  • FFdecoder API:
    • Removed redundant forcing -r FFmpeg parameter for image sequences as source.
    • Removed redundant checks on -vf FFmpeg parameter.
    • FFmpeg parameter -s will be discarded in favor of -custom_resolution attribute.
    • Replaced -constant_framerate with FFmpeg -framerate attribute.
    • Replaced -custom_source_params with correct -custom_sourcer_params attribute.
    • Renamed operational_mode metadata parameter to ffdecoder_operational_mode.
  • Sourcer API:
    • Converted all Sourcer APIs public available variables into private ones for stability.
    • All of Sourcer's publicly accessed variables were used as metadata values in FFdecoder, therefore replaced with their dictionary counterparts.
    • Moved FFmpeg path validation and handling to Sourcer from FFdecoder API.
    • Moved -ffmpeg_download_path dictionary attribute to Sourcer API's sourcer_params parameter.
    • Moved dependencies and related functions.
  • CI:
    • Excluded dev branch from triggering workflow on any environment.
      • Updated yaml files to exclude beta dev branch from triggering workflow on any environment.
      • Restricted codecov to use only master branch.
    • Re-implemented fkirc/skip-duplicate-actions@master to Skip individual deploy steps instead of Skip entire jobs
  • Docs:
    • Updated PR.md
      • Added instructions to download prepare_dataset.sh using curl.
      • Updated dependencies for pytest.
    • Updated advanced.md
      • Updated generating Video from Image sequence to save video using OpenCV writer instead of WriteGear API.
      • Added frame_format=\"bgr24\"and additional instructions regarding OpenCV writer.
      • Updated example codes with new changes.
      • Rearranged examples placement.
    • Updates to custom.css
      • Added donation sponsor link in page footer with heart animation.
      • Added bouncing heart animation through pure CSS.
      • Added Bold property to currently highlighted link in Navigation Bar.
      • Updated Navigation Bar title font size.
      • Updated version list text to uppercase and bold.
      • Updated icon for task list unchecked.
      • Added more top-padding to docs heading.
      • Updated Block quote symbol and theming.
      • Updated Custom Button theming to match docs.
      • Added new custom classes to create shadow effect in dark mode for better visibility.
      • Updated dark mode theme \"slate\" hue to 285.
    • Updated admonitions colors.
    • Updated gitter sidecard UI colors and properties.
    • Reflected recent changes in Sourcer and FFdecoder API's metadata.
    • Updated sample code formatting from sh to json.
    • Added missing docs for delete_file_safe utils function.
    • Updated Download Test Datasets instructions.
    • Updated contribution guidelines and installation docs with related changes.
    • Updated License Notice.
    • Updated code comments.
    • Updated logging messages.
    • Updated Deffcode Logo and Tagline to be dark-mode friendly.
    • Adjusted asset alignment.
    • Updated example code.
    • Updated Installation instructions, Requirements and Roadmap.
    • Corrected links to documents.
    • Updated project description.
    • Updated LICENSE.
    • Updated indentation and code comments
    • Re-aligned text and images in README.md
    • Adjusted image classes and width.
  • Maintenance:
    • Updated LICENSE notice to add vidgear notice.
    • Bumped version to 0.2.0
    • Added useful comments for convenience.
Breaking Updates/Changes
  • Sourcer API will now raise an AssertionError if probe_stream() is not called before calling retrieve_metadata().
  • Only -framerate values greater than 0.0 are now valid.
  • Renamed decode_stream to probe_stream in Sourcer API.
  • Any of video bitrate or video framerate are sufficient to validate if source contains valid video stream(s).
  • Any of audio bitrate or audio samplerate are sufficient to validate if source contains valid audio stream(s).
Bug-fixes
  • APIs:
    • Added missing delete_file_safe function in utils.
      • Imported delete_file_safe from vidgear to safely deletes files at given path.
    • Fixed forward slash bugs in regex patterns.
    • Fixed IndexError when no bitrate was discovered in given source.
    • Fixed FFmpeg subprocess pipeline not terminating gracefully in FFdecoder API.
    • Fixed __version__ not defined in DeFFcode's __init__.py that throws AttributeError: module 'deffcode' has no attribute '__version__' on query.
      • Added necessary import in __init__.py.
  • Docs:
    • Fixed missing \"-vcodec\": \"h264_cuvid\" value in example code.
    • Fixed typos in filenames in utils.py
    • Fixed internal missing or invalid hyperlinks.
    • Fixed improper docs context and typos.
    • Fixed \"year\" in license notice.
    • Fixed content spacing.
    • Fixed Gitter Community Link in Mkdocs.
    • Fixed typos in README.md.
    • Fixed typos in license notices.
    • Fixed typos in code comments.
    • Fixed typos in example code.
  • CI:
    • Fixed missing FFmpeg dependency bug in GitHub Actions.
    • Fixes typo in Docs Deployer yaml.
    • Fixed if condition skipping when need is skipping
  • Maintenance:
    • Added missing imports.
    • Fixed redundant conditional logics.
    • Removed or Replaced redundant conditions and definitions.
    • Fixed minor typos in templates.
Pull Requests
  • PR #5
  • PR #6
  • PR #8
  • PR #9
  • PR #11
  • PR #12
  • PR #13
  • PR #14
"},{"location":"changelog/#v010-2022-03-07","title":"v0.1.0 (2022-03-07)","text":"New Features
  • Open-Sourced DeFFcode under the Apache 2.0 License.
  • Added new Classes(APIs):
    • FFdecoder: Performant Real-time Video frames Generator for generating blazingly fast video frames(RGB ndarray by default).
    • Sourcer: Extracts source video metadata (bitrate, resolution, framerate, nframes etc.) using its subprocess FFmpeg output.
  • Added new Helper functions:
    • ffhelper: Backend FFmpeg Wrapper that handles all subprocess transactions and gather data.
    • utils: Handles all additional Utilizes required for functioning of DeFFcode.
  • First PyPi Release:
    • Released DeFFcode to Python Package Index (PyPI)
    • Added setup.py and related metadata.
    • Added version.py
  • Docs:
    • Added abstract and related information in README.md
    • Added installation instructions.
    • Added preliminary usage examples.
  • Maintenance:
    • Added LICENSE.
    • Added .gitignore
Updates/Improvements
  • Maintenance:
    • Bumped version to 0.1.0
    • Updated LICENSE notice to add vidgear code usage notice.
Breaking Updates/Changes
  • Fixed support for Python-3.7 and above legacies only.
Bug-fixes
  • Docs:
    • Fixed hyperlinks in README.
    • Fixed indentation and spacing.
    • Fixed typos and updated context.
    • Removed dead code.
"},{"location":"help/","title":"Helping Us","text":"

Liked DeFFcode? Would you like to help DeFFcode, other users, and the author?

There are many simple ways to help us:

"},{"location":"help/#star-deffcode-on-github","title":"Star DeFFcode on GitHub","text":"

You can star DeFFcode on GitHub:

It helps us a lot by making it easier for others to find & trust this library. Thanks!

"},{"location":"help/#help-others-with-issues-on-github","title":"Help others with issues on GitHub","text":"

You can see through any opened or pinned existing issues on our GitHub repository, and try helping others, wherever possible:

"},{"location":"help/#watch-the-github-repository","title":"Watch the GitHub repository","text":"

You can watch \ud83d\udc40 DeFFcode Activities on GitHub:

When you watch a repository, you will be notified of all conversations for that repository, including when someone creates a new issue, or pushes a new pull request.

You can try helping solving those issues, or give valuable feedback/review on new Pull Requests.

"},{"location":"help/#tweet-about-deffcode","title":"Tweet about DeFFcode","text":"

Tweet about DeFFcode and Spread the word \ud83d\udde3:

Tweet #deffcode

Let others know how you are using DeFFcode and why you like it!

"},{"location":"help/#helping-author","title":"Helping Author","text":"

Donations help keep DeFFcode's development alive and motivate me (as author).

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

Thanks a million!

"},{"location":"help/#connect-with-author","title":"Connect with Author","text":"

You can connect with me, the author \ud83d\udc4b:

  • Follow author on GitHub:
  • Follow author on Twitter: Follow @abhi_una12
  • Get in touch with author on Linkedin:

"},{"location":"license/","title":"License","text":"

This library is released under the Apache 2.0 License.

"},{"location":"license/#copyright-notice","title":"Copyright Notice","text":"
Copyright (c) 2021 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n
"},{"location":"contribution/","title":"Overview","text":""},{"location":"contribution/#contribution-overview","title":"Contribution Overview","text":"

Contributions are always welcomed

We'd love your contribution to DeFFcode in order to fix bugs or to implement new features!

"},{"location":"contribution/#submission-guidelines","title":"Submission Guidelines","text":"
  • Submitting an Issue Guidelines \u27b6
  • Submitting Pull Request(PR) Guidelines \u27b6
"},{"location":"contribution/#submission-contexts","title":"Submission Contexts","text":""},{"location":"contribution/#got-a-question-or-problem","title":"Got a question or problem?","text":"

For quick questions, please refrain from opening an issue, instead you can reach us on Gitter community channel.

"},{"location":"contribution/#found-a-typo","title":"Found a typo?","text":"

There's no need to contribute for some typos. Just reach us on Gitter \u27b6 community channel, We will correct them in (less than) no time.

"},{"location":"contribution/#found-a-bug","title":"Found a bug?","text":"

If you encountered a bug, you can help us by submitting an issue in our GitHub repository. Even better, you can submit a Pull Request(PR) with a fix, but make sure to read the guidelines \u27b6.

"},{"location":"contribution/#request-for-a-featureimprovement","title":"Request for a feature/improvement?","text":"Subscribe to Github Repository

You can subscribe our GitHub Repository to receive notifications through email for new pull requests, commits and issues that are created in DeFFcode. Learn more about it here \u27b6

You can request our GitHub Repository for a new feature/improvement based on the type of request:

Please submit an issue with a proposal template for your request to explain how it benefits everyone in the community.

  • Major Feature Requests: If you require a major feature for DeFFcode, then first open an issue and outline your proposal so that it can be discussed. This will also allow us to better coordinate our efforts, prevent duplication of work, and help you to craft the change so that it is successfully accepted into the project. The proposed feature, if accepted, may take time based on its complexity and availability/time-schedule of our maintainers, but once it's completed, you will be notified right away. Please be patient!

  • Minor Feature Requests: Small features and bugs resolved on priority. You just have to submit an issue to our GitHub Repository.

"},{"location":"contribution/PR/","title":"Submitting Pull Request(PR) Guidelines:","text":"

The following guidelines tell you how to submit a valid PR for DeFFcode:

Working on your first Pull Request for DeFFcode?

  • You can learn about \"How to contribute to an Open Source Project on GitHub\" from this doc \u27b6
  • If you're stuck at something, please join our Gitter community channel. We will help you get started!

"},{"location":"contribution/PR/#clone-branch-for-pr","title":"Clone branch for PR","text":"

You can clone your Forked remote git to local and create your PR working branch as a sub-branch of latest master branch as follows:

Make sure the master branch of your Forked repository is up-to-date with DeFFcode, before you start working on a Pull Request.

# clone your forked repository(change with your username) and get inside\ngit clone https://github.com/{YOUR USERNAME}/DeFFcode.git && cd DeFFcode\n\n# pull any recent updates\ngit pull\n\n# Now create your new branch with suitable name(such as \"subbranch_of_master\")\ngit checkout -b subbranch_of_master\n

Now after working with this newly created branch for your Pull Request, you can commit and push or merge it locally or remotely as usual.

"},{"location":"contribution/PR/#pr-submission-checklist","title":"PR Submission Checklist","text":"

There are some important checks you need to perform while submitting your Pull Request(s) for DeFFcode library:

  • Submit a Related Issue:

  • The first thing you do is submit an issue with a proposal template for your work first and then work on your Pull Request.

  • Submit a Draft Pull Request:

  • Submit the draft pull request from the first day of your development.

  • Add a brief but descriptive title for your PR.
  • Explain what the PR adds, fixes, or improves.
  • In case of bug fixes, add a new unit test case that would fail against your bug fix.
  • Provide output or screenshots, if you can.
  • Make sure your pull request passed all the CI checks (triggers automatically on pushing commits against master branch). If it's somehow failing, then ask the maintainer for a review.
  • Click \"ready for review\" when finished.

  • Test, Format & lint code locally:

  • Make sure to test, format, and lint the modified code locally before every commit. The details are discussed below \u27b6

  • Make sensible commit messages:

  • If your pull request fixes a separate issue number, remember to include \"resolves #issue_number\" in the commit message. Learn more about it here \u27b6.

  • Keep the commit message as concise as possible at every submit. You can make a supplement to the previous commit with the git commit --amend command.

  • Perform Integrity Checks:

    Any duplicate pull request will be Rejected!

  • Search GitHub if there's a similar open or closed PR that relates to your submission.

  • Check if your proposed code matches the overall direction of the DeFFcode APIs and improves it.
  • Retain copyright for your contributions, but also agree to license them for usage by the project and author(s) under the Apache 2.0 license \u27b6.

  • Link your Issues:

    For more information on Linking a pull request to an issue, See this doc\u27b6

  • Finally, when you're confident enough, make your pull request public.

  • You can link an issue to a pull request manually or using a supported keyword in the pull request description. It helps collaborators see that someone is working on the issue. For more information, see this doc\u27b6

"},{"location":"contribution/PR/#testing-formatting-linting","title":"Testing, Formatting & Linting","text":"

All Pull Request(s) must be tested, formatted & linted against our library standards as discussed below:

"},{"location":"contribution/PR/#requirements","title":"Requirements","text":"

Testing DeFFcode requires additional test dependencies and dataset, which can be handled manually as follows:

  • Install additional python libraries:

    You can easily install these dependencies via pip:

    # Install opencv(only if not installed previously)\n$ pip install opencv-python\n\n# install rest of dependencies\n$ pip install --upgrade flake8 black pytest vidgear[core]\n
  • Download Tests Dataset:

    To perform tests, you also need to download additional dataset (to your temp dir) by running prepare_dataset.sh bash script as follows:

    On Linux/MacOSOn Windows
    $ chmod +x scripts/bash/prepare_dataset.sh\n$ ./scripts/bash/prepare_dataset.sh\n
    $ sh scripts/bash/prepare_dataset.sh\n
"},{"location":"contribution/PR/#running-tests","title":"Running Tests","text":"

All tests can be run with pytest(in DeFFcode's root folder) as follows:

$ pytest -sv  #-sv for verbose output.\n
"},{"location":"contribution/PR/#formatting-linting","title":"Formatting & Linting","text":"

For formatting and linting, following libraries are used:

  • Flake8: You must run flake8 linting for checking the code base against the coding style (PEP8), programming errors and other cyclomatic complexity:

    $ flake8 {source_file_or_directory} --count --select=E9,F63,F7,F82 --show-source --statistics\n
  • Black: DeFFcode follows black formatting to make code review faster by producing the smallest diffs possible. You must run it with sensible defaults as follows:

    $ black {source_file_or_directory}\n

"},{"location":"contribution/PR/#frequently-asked-questions","title":"Frequently Asked Questions","text":"

Q1. Why are my changes taking so long to be Reviewed and/or Merged?

Submission Aftermaths

  • After your PR is merged, you can safely delete your branch and pull the changes from the main (upstream) repository.
  • The changes will remain in dev branch until next DeFFcode version is released, then it will be merged into master branch.
  • After a successful Merge, your newer contributions will be given priority over others.

Pull requests will be reviewed by the maintainers and the rationale behind the maintainer\u2019s decision to accept or deny the changes will be posted in the pull request. Please wait for our code review and approval, possibly enhancing your change on request.

Q2. Would you accept a huge Pull Request with Lots of Changes?

First, make sure that the changes are somewhat related. Otherwise, please create separate pull requests. Anyway, before submitting a huge change, it's probably a good idea to open an issue in the DeFFcode Github repository to ask the maintainers if they agree with your proposed changes. Otherwise, they could refuse your proposal after you put all that hard work into making the changes. We definitely don't want you to waste your time!

"},{"location":"contribution/issue/","title":"Submitting an Issue Guidelines","text":"

If you've found a new bug or you've come up with some new feature which can improve the quality of the DeFFcode, then related issues are welcomed! But, Before you do, please read the following guidelines:

First Issue on GitHub?

You can easily learn about it from creating an issue wiki.

Info

Please note that your issue will be fixed much faster if you spend about half an hour preparing it, including the exact reproduction steps and a demo. If you're in a hurry or don't feel confident, it's fine to report issues with less details, but this makes it less likely they'll get fixed soon.

"},{"location":"contribution/issue/#search-the-docs-and-previous-issues","title":"Search the Docs and Previous Issues","text":"
  • Remember to first search GitHub for an open or closed issue that relates to your submission or has already been reported. You may find related information and the discussion might inform you of workarounds that may help to resolve the issue.
  • For quick questions, please refrain from opening an issue, as you can reach us on Gitter community channel.
  • Also, go comprehensively through our dedicated FAQ & Troubleshooting section.
"},{"location":"contribution/issue/#gather-required-information","title":"Gather Required Information","text":"
  • All DeFFcode APIs provide a verbose boolean flag in parameters, to log debugged output to terminal. Kindly turn this parameter True in the respective API for getting debug output, and paste it with your Issue.
  • In order to reproduce bugs we will systematically ask you to provide a minimal reproduction code for your report.
  • Check and paste, exact DeFFcode version by running command python -c \"import deffcode; print(deffcode.__version__)\".
"},{"location":"contribution/issue/#follow-the-issue-template","title":"Follow the Issue Template","text":"
  • Please format your issue by choosing the appropriate template.
  • Any improper/insufficient reports will be marked Invalid \u26d4, and if we don't hear back from you we may close the issue.
"},{"location":"contribution/issue/#raise-the-issue","title":"Raise the Issue","text":"
  • Add a brief but descriptive title for your issue.
  • Keep the issue phrasing in context of the problem.
  • Attach source-code/screenshots if you have one.
  • Finally, raise it by choosing the appropriate Issue Template: Bug report \ud83d\udc1e, Idea \ud83d\udca1, Question \u2754.
"},{"location":"help/get_help/","title":"Getting Help","text":"Courtesy - tenor

Would you like to get help with DeFFcode?

There are several ways to get help with DeFFcode:

"},{"location":"help/get_help/#join-our-gitter-community-channel","title":"Join our Gitter Community channel","text":"

Have you come up with some new idea \ud83d\udca1 or looking for the fastest way to troubleshoot your problems

Join and chat on our Gitter Community channel:

There you can ask quick questions, swiftly troubleshoot your problems, help others, share ideas & information, etc.

"},{"location":"help/get_help/#this-is-what-you-do-when","title":"This is what you do when...","text":"
  • Got a question or problem?
  • Found a typo?
  • Found a bug?
  • Missing a feature/improvement?
"},{"location":"help/get_help/#reporting-an-issues","title":"Reporting an issues","text":"

Want to report a bug? Suggest a new feature?

Before you do, please read our guidelines \u27b6

"},{"location":"help/get_help/#preparing-a-pull-request","title":"Preparing a Pull Request","text":"

Interested in contributing to DeFFcode?

Before you do, please read our guidelines \u27b6

"},{"location":"installation/","title":"Overview","text":""},{"location":"installation/#installation-notes","title":"Installation Notes","text":""},{"location":"installation/#supported-systems","title":"Supported Systems","text":"

DeFFcode is well-tested and supported on the following systems(but not limited to), with python 3.7+ and pip installed:

Upgrade your pip

It is strongly advised to upgrade to latest pip before installing deffcode to avoid any undesired installation error(s).

There are two mechanisms to upgrade pip:

pipensurepip

You can use existing pip to upgrade itself:

Install pip if not present
  • Download the script, from https://bootstrap.pypa.io/get-pip.py.
  • Open a terminal/command prompt, cd to the folder containing the get-pip.py file and run:
Linux/MacOSWindows
python get-pip.py\n
py get-pip.py\n

More details about this script can be found in pypa/get-pip\u2019s README.

Linux/MacOSWindows
python -m pip install pip --upgrade\n
py -m pip install pip --upgrade\n

Python also comes with an ensurepip module1, which can easily upgrade/install pip in any Python environment.

Linux/MacOSWindows
python -m ensurepip --upgrade\n
py -m ensurepip --upgrade\n
  • Any Linux distro released in 2016 or later
  • Windows 7 or later
  • MacOS 10.12.6 (Sierra) or later

"},{"location":"installation/#supported-python-legacies","title":"Supported Python legacies","text":"

Python 3.7+ are only supported legacies for installing DeFFcode v0.1.0 and above.

"},{"location":"installation/#prerequisites","title":"Prerequisites","text":"

DeFFcode APIs requires FFmpeg binaries to be installed for all of its core functionality.

"},{"location":"installation/#ffmpeg","title":"FFmpeg","text":"

When installing DeFFcode, FFmpeg is the only prerequisite you need to configure/install manually. You could easily do it by referring to the FFmpeg Installation doc.

"},{"location":"installation/#installation","title":"Installation","text":""},{"location":"installation/#a-installation-using-pip-recommended","title":"A. Installation using pip (Recommended)","text":"

Best option for easily getting stable DeFFcode installed.

Installation is as simple as:

Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest stable release\npython -m pip install -U deffcode\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest stable release\npython -m pip install --upgrade --user deffcode\n

Or, If you're using py as alias for installed python, then:

# Install latest stable release\npy -m pip install --upgrade --user deffcode\n
# Install latest stable release\npip install -U deffcode\n

And you can also download its wheel (.whl) package from our repository's releases section, thereby can be installed as follows:

# Install latest release\npip install deffcode-0.2.0-py3-none-any.whl\n

"},{"location":"installation/#b-installation-from-source","title":"B. Installation from Source","text":"

Best option for trying latest patches(maybe experimental), forking for Pull Requests, or automatically installing all prerequisites(with a few exceptions).

Installation using dev branch

If you're looking for latest work-in-progress enhancements or bug-fixes, then you want to checkout our beta dev branch with the following commands:

The beta dev branch at times can be very unstable or even unusable, User discretion is advised!

# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# checkout the dev beta branch\ngit checkout dev\n\n# Install it\npip install -U .\n
Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest beta branch\npython -m pip install -U .\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest beta branch\npython -m pip install --upgrade --user .\n

Or, If you're using py as alias for installed python, then:

# Install latest beta branch\npy -m pip install --upgrade --user .\n
# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# Install it\npip install -U .\n

  1. The ensurepip module is missing/disabled on Ubuntu. Use pip method only.\u00a0\u21a9

"},{"location":"installation/ffmpeg_install/","title":"FFmpeg Installation Doc","text":"

DeFFcode APIs requires FFmpeg binaries to be installed for all of its core functionality.

You can follow machine-specific instructions for its configuration/installation:

DeFFcode APIs will throw RuntimeError, if they fail to detect valid FFmpeg executables on your system.

Enable verbose (verbose=True) for debugging FFmpeg validation process.

"},{"location":"installation/ffmpeg_install/#linux-ffmpeg-installation","title":"Linux FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on a Linux OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection","title":"A. Auto-Detection","text":"

This is a recommended approach on Linux Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the Linux OS systems.

You can easily install official FFmpeg according to your Linux Distro by following this post \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Linux Static Binaries (based on your machine architecture) from the link below:

    Linux Static Binaries: http://johnvansickle.com/ffmpeg/

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#windows-ffmpeg-installation","title":"Windows FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Installation and Manual Configuration methods on Windows OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-installation","title":"A. Auto-Installation","text":"

This is a recommended approach on Windows Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-generate the required FFmpeg Static Binaries from our dedicated Github Server into the temporary directory(e.g. C:\\Temp) of your machine on the Windows OS systems.

Active Internet connection is required while downloading required FFmpeg Static Binaries from our dedicated Github Server onto your Windows machine.

Important Information regarding Auto-Installation
  • The files downloaded to a temporary directory (e.g. C:\\TEMP), may get erased if your machine shutdowns/restarts in some cases.

  • You can also provide a custom save path for auto-downloading FFmpeg Static Binaries through exclusive -ffmpeg_download_path attribute in Sourcer API.

    How to use -ffmpeg_download_path attribute in FFdecoder API?

    -ffmpeg_download_path is also available in FFdecoder API through the -custom_sourcer_params attribute of its ffparams dictionary parameter.

  • If binaries were found at the specified path, DeFFcode APIs automatically skips the Auto-Installation step.

  • If the required FFmpeg static binary fails to download, extract, or validate during Auto-Installation, then DeFFcode APIs will exit with RuntimeError!

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_1","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Windows Static Binaries (based on your machine arch(x86/x64)) from the link below:

    Windows Static Binaries: https://ffmpeg.org/download.html#build-windows

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'C:/foo/Downloads/ffmpeg/bin') or path of ffmpeg.exe executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#macos-ffmpeg-installation","title":"MacOS FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on MacOS OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection_1","title":"A. Auto-Detection","text":"

This is a recommended approach on MacOS Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the MacOS systems.

You can easily install FFmpeg on your MacOS machine by following this tutorial \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_2","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest MacOS Static Binaries (only x64 Binaries) from the link below:

    MacOS Static Binaries: https://ffmpeg.org/download.html#build-mac

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"recipes/advanced/","title":"Advanced Recipes","text":"

The following challenging recipes will take your skills to the next level and will give access to new DeFFcode techniques, tricky examples, and advanced FFmpeg parameters:

Courtesy - tenor

Refer Basic Recipes first!

If you're just getting started, check out the Beginner's Basic Recipes first before trying these advanced recipes.

Any proficiency with OpenCV-Python will be Helpful

Any proficiency with OpenCV-Python (Python API for OpenCV) surely help you with these recipes.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

"},{"location":"recipes/advanced/#advanced-decoding-recipes","title":"Advanced Decoding Recipes","text":"
  • Decoding Live Virtual Sources
    • Generate and Decode frames from Sierpinski pattern
    • Generate and Decode frames from Test Source pattern
    • Generate and Decode frames from Gradients with custom Text effect
    • Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms
    • Generate and Decode frames from Game of Life Visualization
  • Decoding Live Feed Devices
    • Capturing and Previewing frames from a Webcam using Custom Demuxer
    • Capturing and Previewing frames from your Desktop (Screen Recording)
  • Hardware-Accelerated Video Decoding
    • CUVID-accelerated Hardware-based Video Decoding and Previewing
    • CUDA-accelerated Hardware-based Video Decoding and Previewing
"},{"location":"recipes/advanced/#advanced-transcoding-recipes","title":"Advanced Transcoding Recipes","text":"
  • Transcoding Live Complex Filtergraphs
    • Transcoding video with Live Custom watermark image overlay
    • Transcoding video from sequence of Images with additional filtering
  • Transcoding Video Art with Filtergraphs
    • Transcoding video art with YUV Bitplane Visualization
    • Transcoding video art with Jetcolor effect
    • Transcoding video art with Ghosting effect
    • Transcoding video art with Pixelation effect
  • Hardware-Accelerated Video Transcoding
    • CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API
    • CUDA-NVENC-accelerated Video Transcoding with WriteGear API
    • CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API
"},{"location":"recipes/advanced/#advanced-metadata-recipes","title":"Advanced Metadata Recipes","text":"
  • Updating Video Metadata
    • Added new attributes to metadata in FFdecoder API
    • Overriding source video metadata in FFdecoder API
"},{"location":"recipes/advanced/decode-hw-acceleration/","title":"Hardware-Accelerated Video Decoding","text":"

FFmpeg offer access to dedicated GPU hardware with varying support on different platforms for performing a range of video-related tasks to be completed faster or using less of other resources (particularly CPU).

By default, DeFFcode's FFdecoder API uses the Input Source's video-decoder (extracted using Sourcer API) itself for decoding its input. However, you could easily change the video-decoder to your desired specific supported Video-Decoder using FFmpeg options by way of its ffparams dictionary parameter. This feature provides easy access to GPU Accelerated Hardware Decoder in FFdecoder API that will generate faster video frames while using little to no CPU power, as opposed to CPU intensive Software Decoders.

We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST requires valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuvid-accelerated-hardware-based-video-decoding-and-previewing","title":"CUVID-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

  • Using h264_cuvid decoder: Remember to check if your FFmpeg compiled with H.264 CUVID decoder support by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    Verifying H.264 CUVID decoder support in FFmpeg
    $ ffmpeg  -hide_banner -decoders | grep cuvid\n\nV..... av1_cuvid            Nvidia CUVID AV1 decoder (codec av1)\nV..... h264_cuvid           Nvidia CUVID H264 decoder (codec h264)\nV..... hevc_cuvid           Nvidia CUVID HEVC decoder (codec hevc)\nV..... mjpeg_cuvid          Nvidia CUVID MJPEG decoder (codec mjpeg)\nV..... mpeg1_cuvid          Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)\nV..... mpeg2_cuvid          Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)\nV..... mpeg4_cuvid          Nvidia CUVID MPEG4 decoder (codec mpeg4)\nV..... vc1_cuvid            Nvidia CUVID VC1 decoder (codec vc1)\nV..... vp8_cuvid            Nvidia CUVID VP8 decoder (codec vp8)\nV..... vp9_cuvid            Nvidia CUVID VP9 decoder (codec vp9)\n

    You can also use any of above decoder in the similar way, if supported.

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

  • You already have appropriate Nvidia video drivers and related software installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's H.264 CUVID Video decoder in FFdecoder API to achieve GPU-accelerated hardware video decoding of YUV420p frames from a given Video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's CUVID can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": \"h264_cuvid\",  # use H.264 CUVID Video-decoder\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(YUV420p) frames\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"yuv420p\",  # use YUV420p frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the YUV420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuda-accelerated-hardware-based-video-decoding-and-previewing","title":"CUDA-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters), thereby achieving GPU-accelerated decoding of NV12 pixel-format frames from a given video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

NV12(for 4:2:0 input) and NV21(for 4:4:4 input) are the only supported pixel formats. You cannot change pixel format to any other since NV-accelerated video codec supports only them.

NV12 is a biplanar format with a full sized Y plane followed by a single chroma plane with weaved U and V values. NV21 is the same but with weaved V and U values. The 12 in NV12 refers to 12 bits per pixel. NV12 has a half width and half height chroma channel, and therefore is a 420 subsampling. NV16 is 16 bits per pixel, with half width and full height. aka 422. NV24 is 24 bits per pixel with full sized chroma channel. aka 444. Most NV12 functions allow the destination Y pointer to be NULL.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's GPU Accelerated Decoding can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg choose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"fps=60.0,\"  # framerate 60.0fps in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the NV12 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/","title":"Decoding Live Feed Devices","text":"

DeFFcode's FFdecoder API provide effortless support for any Live Feed Devices using two parameters: source parameter which accepts device name or its path, and source_demuxer parameter to specify demuxer for the given input device.

We'll discuss the Live Feed Devices support using both these parameters briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer","title":"Capturing and Previewing frames from a Webcam using Custom Demuxer","text":"Example Assumptions

FFmpeg provide set of specific Demuxers on different platforms to read the multimedia streams from a particular type of Video Capture source/device. Please note that following recipe explicitly assumes:

  • You're running Linux Machine with USB webcam connected to it at node/path /dev/video0.
  • You already have appropriate Linux video drivers and related softwares installed on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libv4l2 flag to support video4linux2, v4l2 demuxer. BTW, you can list all supported demuxers using the ffmpeg -demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode BGR24 video frames from a USB webcam device connected at path /dev/video0 on a Linux Machine with video4linux2 (or simply v4l2) demuxer, and preview them using OpenCV Library's cv2.imshow() method.

Identifying and Specifying Video Capture Device Name/Path/Index and suitable Demuxer on different OS platforms Windows Linux MacOS

Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

  • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

    c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
  • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

    # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

  • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

    You can use sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

    $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
  • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

    # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

    You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

    # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

    Using device's indexUsing device's name
    # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

    # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

    # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

If these steps don't work for you then reach us out on Gitter \u27b6 Community channel

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop","title":"Capturing and Previewing frames from your Desktop","text":"Example Assumptions

Similar to Webcam capturing, FFmpeg provide set of specific Demuxers on different platforms for capturing your desktop (Screen recording). Please note that following recipe explicitly assumes:

  • You're running Linux Machine with libxcb module installed properly on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libxcb flag to support x11grab demuxer. BTW, you can list all supported demuxers using the ffmpeg -demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode live BGR video frames from your complete screen as well as a region in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OS platforms Windows Linux MacOS

Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and don't works most of the times.

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

    # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

If these steps don't work for you then reach us out on Gitter \u27b6 Community channel

Capturing entire desktopCapturing a region

For capturing all your displays as one big contiguous display in FFdecoder API:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

For limit capturing to a region, and show the area being grabbed:

x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/","title":"Decoding Live Virtual Sources","text":"

Instead of using prerecorded video files as streams, DeFFcode's FFdecoder API with the help of powerful lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph, is also capable of creating virtual video frames out of thin air in real-time, which you might want to use as input for testing, compositing, and merging with other streams to obtain desired output on-the-fly.

We'll discuss the recipes for generating Live Fake Sources briefly below:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-sierpinski-pattern","title":"Generate and Decode frames from Sierpinski pattern","text":"

The sierpinski graph generates a Sierpinski carpet/triangle fractal, and randomly pan around by a single pixel each frame.

Sierpinski carpet fractal

In this example we will generate and decode 8 seconds of a Sierpinski carpet fractal pattern of 1280x720 frame size and 30 framerate using sierpinski graph source with lavfi input virtual device in FFdecoder API, and preview decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# playback time of 8 seconds\nffparams = {\"-ffprefixes\": [\"-t\", \"8\"]}\n\n# initialize and formulate the decoder with \"sierpinski\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"sierpinski=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        cv2.imwrite('foo_image.gif', frame)\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-test-source-pattern","title":"Generate and Decode frames from Test Source pattern","text":"

The testsrc graph generates a test video pattern showing a color pattern, a scrolling gradient, and a timestamp. This is useful for testing purposes.

Test Source pattern

In this example we will generate and decode 10 seconds of a Test Source pattern (1280x720 frame size & 30 framerate) using testsrc graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n}\n\n# initialize and formulate the decoder with \"testsrc\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"testsrc=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-gradients-with-custom-text-effect","title":"Generate and Decode frames from Gradients with custom Text effect","text":"

The gradients graph (as name suggests) generates several random gradients.

Gradients pattern with real-time text output

In this example we will generate and decode 15 seconds of Gradients using gradients graph source with lavfi input virtual device and also draw real-time text output (format HH::MM::SS) scrolling upward direction on it using drawtext filter in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

This example assumes you're running Windows machine. If not, then change fontfile parameter path in drawtext video filtergraph definition accordingly.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"15\"],  # playback time of 15 seconds\n    \"-vf\": \"drawtext=\"  # draw text\n    + \"text='%{localtime\\:%X}':\"  # real time text (HH::MM::SS)\n    + \"fontfile='c\\:\\/windows\\/fonts\\/arial.ttf':\"  # fontfile path (Only Windows)\n    + \"x=(w-text_w)/2:y=h-40*t:\"  # scroll upward effect\n    + \"fontsize=50:\"  # font size 50\n    + \"fontcolor=white\",  # font color white\n}\n\n\n# initialize and formulate the decoder with \n# \"gradients\" source for BGR24 output\ndecoder = FFdecoder(\n    \"gradients=n=3\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms","title":"Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms","text":"

The mandelbrot graph generates a Mandelbrot set fractal that progressively zooms towards a specific point.

Mandelbrot pattern with a Vectorscope & two Waveforms

In this example we will generate and decode 20 seconds of a Mandelbrot test pattern (1280x720 frame size & 30 framerate) using mandelbrot graph source with lavfi input virtual device with a vectorscope (plots 2 color component values) & two waveforms (plots YUV color component intensity) stacked to it in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"20\"],  # playback time of 20 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"split=4[a][b][c][d],\" # split input into 4 identical outputs.\n    + \"[a]waveform[aa],\"  # apply waveform on first output\n    + \"[b][aa]vstack[V],\"  # vertical stack 2nd output with waveform [V]\n    + \"[c]waveform=m=0[cc],\"  # apply waveform on 3rd output\n    + \"[d]vectorscope=color4[dd],\"  # apply vectorscope on 4th output\n    + \"[cc][dd]vstack[V2],\"  # vertical stack waveform and vectorscope [V2]\n    + \"[V][V2]hstack\",  # horizontal stack [V] and [V2] vertical stacks\n}\n\n# initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-game-of-life-visualization","title":"Generate and Decode frames from Game of Life Visualization","text":"

The life graph generates a life pattern based on a generalization of John Conway\u2019s life game. The sourced input represents a life grid, each pixel represents a cell which can be in one of two possible states, alive or dead. Every cell interacts with its eight neighbours, which are the cells that are horizontally, vertically, or diagonally adjacent. At each interaction the grid evolves according to the adopted rule, which specifies the number of neighbor alive cells which will make a cell stay alive or born.

Game of Life Visualization

In this example we will generate and decode 25 seconds of Game of Life Visualization using life graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"25\"],  # playback time of 25 seconds\n}\n\n# initialize and formulate the decoder with \"life\" source for BGR24 output\ndecoder = FFdecoder(\n    \"life=\"  # life graph\n    + \"s=640x480:\"  # grid size (in pixels)\n    + \"mold=10:\"  # cell mold speed\n    + \"r=36:\"  # framerate\n    + \"ratio=0.5:\"  # random fill ratio for the initial random grid\n    + \"death_color=#39FF14:\"  # color of dead cells\n    + \"life_color=#1d1160\" # color of living (or new born) cells\n    + \",scale=640:480:\" # frame size\n    + \"flags=16\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/","title":"Transcoding Video Art with Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API unlocks the power of ffmpeg backend for creating real-time artistic generative video art using simple and complex filtergraphs, and decoding them into live video frames.

We'll discuss the Transcoding Video Art with Filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-yuv-bitplane-visualization","title":"Transcoding video art with YUV Bitplane Visualization","text":"

Based on the QCTools bitplane visualization, this video art has numerical values ranging between -1(no change) and 10(noisiest) for the Y (luminance), U and V (chroma or color difference) planes, yielding cool and different results for different values.

YUV Bitplane Visualization

This Video Art idea credit goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Bitplane Visualization by binding the bit position of the Y, U, and V planes of a video file (say foo.mp4) by using FFmpeg's lutyuv filter and assigning them random values (between -1(no change) and 10(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"lutyuv=\"  # use  lutyuv filter for binding bit position of the Y, U, and V planes\n    + \"y=if(eq({y}\\,-1)\\,512\\,if(eq({y}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{y}))*pow(2\\,{y}))):\".format(\n        y=3 # define `Y` (luminance) plane value (b/w -1 and 10)\n    )\n    + \"u=if(eq({u}\\,-1)\\,512\\,if(eq({u}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{u}))*pow(2\\,{u}))):\".format(\n        u=1 # define `U` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"v=if(eq({v}\\,-1)\\,512\\,if(eq({v}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{v}))*pow(2\\,{v}))),\".format(\n        v=3 # define `V` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"format=yuv422p10le\", # change output format to yuv422p10le\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-jetcolor-effect","title":"Transcoding video art with Jetcolor effect","text":"

This video art uses FFmpeg's pseudocolor filter to create a Jetcolor effect which is high contrast, high brightness, and high saturation colormap that ranges from blue to red, and passes through the colors cyan, yellow, and orange. The jet colormap is associated with an astrophysical fluid jet simulation from the National Center for Supercomputer Applications.

Jetcolor effect

This Video Art idea credit goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Jetcolor effect by changing frame colors of a video file (say foo.mp4) using FFmpeg's pseudocolor filter in different modes (values between 0 (cleaner) [default] and 2(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to `yuv444p`\n    + \"eq=brightness=0.40:saturation=8,\"  # default `brightness = 0.40` and `saturation=8`\n    + \"pseudocolor='\"  # dynamically controlled colors through `pseudocolor` filter\n    + \"if(between(val,0,85),lerp(45,159,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(159,177,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(177,70,(val-170)/(255-170))))):\"  # mode 0 (cleaner) [default]\n    + \"if(between(val,0,85),lerp(205,132,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(132,59,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(59,100,(val-170)/(255-170))))):\"  # mode 1\n    + \"if(between(val,0,85),lerp(110,59,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(59,127,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(127,202,(val-170)/(255-170))))):\"  # mode 2 (noisiest)\n    + \"i={mode}',\".format(\n        mode=0  # define mode value (b/w `0` and `2`) to control colors\n    )\n    + \"format=yuv422p10le\",  # change output format to `yuv422p10le`\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect","title":"Transcoding video art with Ghosting effect","text":"

This video art uses FFmpeg\u2019s lagfun filter to create a video echo/ghost/trailing effect.

Ghosting effect

This Video Art idea credit goes to ffmpeg-artschool - An AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Ghosting effect using FFmpeg's lagfun filter on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-filter_complex\": \"format=yuv444p[formatted];\"  # change video input format to yuv444p\n    + \"[formatted]split[a][b];\"  # split input into 2 identical outputs\n    + \"[a]lagfun=decay=.99:planes=1[a];\"  # apply lagfun filter on first output\n    + \"[b]lagfun=decay=.98:planes=2[b];\"  # apply lagfun filter on 2nd output\n    + \"[a][b]blend=all_mode=screen:c0_opacity=.5:c1_opacity=.5,\"  # apply screen blend mode both outputs\n    + \"format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect","title":"Transcoding video art with Pixelation effect","text":"

This video art uses FFmpeg\u2019s overlay, smartblur and stacks of dilation filters to intentionally Pixelate your video in artistically cool looking ways such that each pixel becomes visible to the naked eye.

Pixelation effect

This Video Art idea credit goes to oioiiooixiii blogspot.

In this example we will generate 8 seconds of Pixelation effect using FFmpeg\u2019s smartblur and stacks of dilation filters overlaid on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to yuv444p\n    + \"split [out1][out2];\"  # split input into 2 identical outputs\n    + \"[out1][out2] overlay,smartblur,\"  # apply overlay,smartblur filter on both outputs\n    + \"dilation,dilation,dilation,dilation,dilation,\"  # apply stacks of dilation filters on both outputs\n    + \"eq=contrast=1.4:brightness=-0.09 [pixels];\"  # change brightness and contrast\n    + \"[pixels]format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-mode\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/","title":"Hardware-Accelerated Video Transcoding","text":"What exactly is Transcoding?

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to process real-time video frames with immense flexibility. Both these APIs are capable of utilizing the potential of GPU backed fully-accelerated Hardware based video Decoding(FFdecoder API with hardware decoder) and Encoding (WriteGear API with hardware encoder), thus dramatically improving the transcoding performance. At same time, FFdecoder API Hardware-decoded frames are fully compatible with OpenCV's VideoWriter API for producing high-quality output video in real-time.

Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing

As we know, using the -hwaccel cuda -hwaccel_output_format cuda flags in FFmpeg pipeline will keep video frames in GPU memory, and this ensures that the memory transfers (system memory to video memory and vice versa) are eliminated, and that transcoding is performed with the highest possible performance on the available GPU hardware.

General Memory Flow with Hardware Acceleration

But unfortunately, for processing real-time frames in our python script with FFdecoder and WriteGear APIs, we're bound to sacrifice this performance gain by explicitly copying raw decoded frames between System and GPU memory (via the PCIe bus), thereby creating self-made latency in transfer time and increasing PCIe bandwidth occupancy due to overheads in communication over the bus. Moreover, given PCIe bandwidth limits, copying uncompressed image data would quickly saturate the PCIe bus.

Memory Flow with Hardware Acceleration and Real-time Processing

On the bright side, however, GPU enabled Hardware based encoding/decoding is inherently faster and more efficient (does not use many CPU resources when frames are in GPU) thus freeing up the CPU for other tasks, as compared to Software based encoding/decoding that is known to be completely CPU intensive. Plus scaling, de-interlacing, filtering, etc. tasks will be way faster and efficient than usual using these Hardware based decoders/encoders as opposed to Software ones.

As you can see the pros definitely outweigh the cons and you're getting to process video frames in the real-time with immense speed and flexibility, which is impossible to do otherwise.

We'll discuss its Hardware-Accelerated Video Transcoding capabilities using these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-accelerated-video-transcoding-with-opencvs-videowriter-api","title":"CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with OpenCV's VideoWriter API.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's NVENC Encoder can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\" # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. 
`output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since write() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated Video Transcoding with WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to manipulate the real-time video frames with immense flexibility.

Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

Consuming BGR framesConsuming NV12 frames

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting patched NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as NV12 frames.
  4. Encoding NV12 frames directly with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\"  # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n    \"-input_pixfmt\": \"nv12\", # input frames pixel format as `NV12`\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the NV12 frame here}\n\n    # writing NV12 frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API","text":"

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API creates a High-performance Lossless FFmpeg Transcoding Pipeline

Courtesy - tenor"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/","title":"Transcoding Live Complex Filtergraphs","text":"What are Complex filtergraphs?

Before heading straight into recipes we will talk about Complex filtergraphs:

Complex filtergraphs are those which cannot be described as simply a linear processing chain applied to one stream.

Complex filtergraphs are configured with the -filter_complex global option.

The -lavfi option is equivalent to -filter_complex.

A trivial example of a complex filtergraph is the overlay filter, which has two video inputs and one video output, containing one video overlaid on top of the other.

DeFFcode's FFdecoder API seamlessly supports processing multiple input streams including real-time frames through multiple filter chains combined into a filtergraph (via. -filter_complex FFmpeg parameter), and use their outputs as inputs for other filter chains.

We'll discuss the transcoding of live complex filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-with-live-custom-watermark-image-overlay","title":"Transcoding video with Live Custom watermark image overlay","text":"Big Buck Bunny with custom watermark

In this example we will apply a watermark image (say watermark.png with transparent background) overlay to the 10 seconds of video file (say foo.mp4) using FFmpeg's overlay filter with some additional filtering, and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

You can use FFdecoder's metadata property object that dumps Source Metadata as JSON to retrieve source framerate and frame-size.

To learn about exclusive -ffprefixes & -clones parameter. See Exclusive Parameters \u27b6

Remember to replace watermark.png watermark image file-path with yours before using this recipe.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json, cv2\n\n# define the Complex Video Filter with additional `watermark.png` image input\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n    \"-clones\": [\n        \"-i\",\n        \"watermark.png\",  # !!! [WARNING] define your `watermark.png` here.\n    ],\n    \"-filter_complex\": \"[1]format=rgba,\"  # change 2nd(image) input format to rgba\n    + \"colorchannelmixer=aa=0.7[logo];\"  # apply colorchannelmixer to image for controlling alpha [logo]\n    + \"[0][logo]overlay=W-w-{pixel}:H-h-{pixel}:format=auto,\".format(  # apply overlay to 1st(video) with [logo]\n        pixel=5  # at 5 pixels from the bottom right corner of the input video\n    )\n    + \"format=bgr24\",  # change output format to `bgr24`\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-from-sequence-of-images-with-additional-filtering","title":"Transcoding video from sequence of Images with additional filtering","text":"Mandelbrot pattern blend with Fish school video Available blend mode options

Other blend mode options for blend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

In this example we will blend 10 seconds of Mandelbrot test pattern (generated using lavfi input virtual device) that serves as the \"top\" layer with 10 seconds of Image Sequence that serves as the \"bottom\" layer, using blend filter (with heat blend mode), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4 (restricted to 12 seconds):

$ ffmpeg -t 12 -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('/path/to/img%03d.png', verbose=True, **ffparams).formulate()\n

FFdecoder API also accepts Glob pattern(*.png) as well as Single looping image as input to its source parameter. See this Basic Recipe \u27b6 for more information.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define mandelbrot pattern generator\n# and the Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\n        \"-t\", \"10\", # playback time of 10 seconds for mandelbrot pattern\n        \"-f\", \"lavfi\", # use input virtual device\n        \"-i\", \"mandelbrot=rate=25\", # create mandelbrot pattern at 25 fps\n        \"-t\", \"10\", # playback time of 10 seconds for video\n    ],  \n    \"-custom_resolution\": (1280, 720), # resize to 1280x720\n    \"-filter_complex\":\"[1:v]format=yuv444p[v1];\" # change 2nd(video) input format to yuv444p\n        + \"[0:v]format=gbrp10le[v0];\" # change 1st(mandelbrot pattern) input format to gbrp10le\n        + \"[v1][v0]scale2ref[v1][v0];\" # resize the 1st(mandelbrot pattern), based on a 2nd(video).\n        + \"[v0][v1]blend=all_mode='heat',\" # apply heat blend mode to output\n        + \"format=yuv422p10le[v]\", # change output format to `yuv422p10le`\n    \"-map\": \"[v]\", # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"/path/to/image-%03d.png\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# define your parameters\n# [WARNING] framerate must match original source framerate !!!\noutput_params = {\n    \"-input_framerate\": 25,  # Default\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/update-metadata/","title":"Updating Video Metadata","text":"

In addition of using metadata property object in FFdecoder API for probing metadata information (only as JSON string) for each multimedia stream available in the given video source, you can also easily update the video metadata on-the-fly by assigning desired data as python dictionary to the same overloaded metadata property object. This feature can be used either for adding new custom properties to metadata, or to override source metadata properties used by FFdecoder API to formulate its default Decoder Pipeline for real-time video-frames generation.

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

This feature is not yet fully explored, but in the near future you'll be able to use it to dynamically override any Video frames Decoder Pipeline property (such as frame-size, pixel-format, etc.) in real-time like a pro. Stay tuned for more updates

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/update-metadata/#added-new-properties-to-metadata-in-ffdecoder-api","title":"Added new properties to metadata in FFdecoder API","text":"

In FFdecoder API, you can easily define any number of new properties for its metadata (formatted as python dictionary) with desired data of any datatype(s)1 , without affecting its default Video frames Decoder pipeline.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, thereby add new properties (formatted as python dictionary) with desired data of different datatype(s) through overloaded metadata property object, and then finally print it as JSON string using the same metadata property object in FFdecoder API.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\n\n# initialize the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# format your data as dictionary (with data of any [printable] datatype)\ndata = dict(\n    mystring=\"abcd\",  # string data\n    myint=1234,  # integers data\n    mylist=[1, \"Rohan\", [\"inner_list\"]],  # list data\n    mytuple=(1, \"John\", (\"inner_tuple\")),  # tuple data\n    mydict={\"anotherstring\": \"hello\"},  # dictionary data\n    myjson=json.loads('{\"name\": \"John\", \"age\": 30, \"city\": \"New York\"}'),  # json data\n)\n\n# assign your dictionary data\ndecoder.metadata = data\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"D:\\\\foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 29.97,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 21.03,\n  \"approx_video_nframes\": 630,\n  \"source_video_bitrate\": \"4937k\",\n  \"source_audio_bitrate\": \"256k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\",\n  \"mystring\": \"abcd\",\n  \"myint\": 1234,\n  \"mylist\": [\n    1,\n    \"Rohan\",\n    [\n      \"inner_list\"\n    ]\n  ],\n  \"mytuple\": [\n    1,\n    \"John\",\n    \"inner_tuple\"\n  ],\n  \"mydict\": {\n    \"anotherstring\": \"hello\"\n  },\n  \"myjson\": {\n    \"name\": \"John\",\n    \"age\": 30,\n    \"city\": \"New York\"\n  }\n}\n

"},{"location":"recipes/advanced/update-metadata/#overriding-source-video-metadata-in-ffdecoder-api","title":"Overriding source video metadata in FFdecoder API","text":"

In FFdecoder API, you can also use its metadata to manually override the source properties (as frame-size, frame pixel-format, video-framerate, video-decoder etc.) that directly affects its default Video frames Decoder pipeline that decodes real-time video-frames.

The \"source\" property in metadata cannot be altered in any manner.

Source Video metadata values must be handled carefully

Source Video metadata information is used by FFdecoder API to formulate its default Video frames Decoder pipeline, and any improper or invalid inputted source property could crash the pipeline with RuntimeError.

Therefore to safeguard against it, FFdecoder API discards any Source Video metadata dictionary keys, if its value's datatype fails to match the exact valid datatype defined in following table:

Only either source_demuxer or source_extension property can be present in source metadata.

Not all Source Video metadata properties directly affects the pipeline (as mentioned in the table). But this might change in future versions.

Source Video Metadata Keys Valid Value Datatype Effect on Pipeline \"source_extension\" string None \"source_demuxer\" string Direct \"source_video_resolution\" list of integers e.g. [1280,720] Direct \"source_video_framerate\" float Direct \"source_video_pixfmt\" string Direct \"source_video_decoder\" string Direct \"source_duration_sec\" float None \"approx_video_nframes\" integer Direct \"source_video_bitrate\" string None \"source_audio_bitrate\" string None \"source_audio_samplerate\" string None \"source_has_video\" bool Direct \"source_has_audio\" bool None \"source_has_image_sequence\" bool Direct \"ffdecoder_operational_mode\" str None \"output_frames_pixfmt\" str Direct

Hence for instance, if \"source_video_resolution\" is assigned \"1280x720\" (i.e. string datatype value instead of list), then it will be discarded.

In this example we will probe all metadata information available within foo.mp4 video file, and override frame size (originally 1920x1080) and pixel-format (originally rgb24) to our desired values through overloaded metadata property object in FFdecoder API, and thereby preview them using OpenCV Library's cv2.imshow() method.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

Once the formulate() method is called, the metadata information present in FFdecoder API is finalized and thereby used to formulate its default pipeline for decoding real-time video-frames. Therefore make all changes to video properties beforehand.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# override source metadata values\n# !!! [WARNING] Make sure each value datatype matches the table !!!\ndecoder.metadata = {\n    \"output_frames_pixfmt\": \"gray\",  # gray frame-pixfmt\n    \"source_video_resolution\": [1280, 720],  # 1280x720 frame-size\n}\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# [NOTE] uncomment following line to debug values\n# print(decoder.metadata)\n\n# let's grab the 1280x720 sized gray frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with gray frame here}\n\n    # Show gray frames in output window\n    cv2.imshow(\"Output gray\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

  1. There is no concept of tuple datatype in the JSON format. Thereby, Python's json module auto-converts all tuple python values into JSON list because that's the closest thing in JSON format to a tuple.\u00a0\u21a9

"},{"location":"recipes/basic/","title":"Basic Recipes","text":"

The following recipes should be reasonably accessible to beginners of any skill level to get started with DeFFcode APIs:

Courtesy - tenor

Refer Installation doc first!

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode with required prerequisites on your machine.

Any proficiency with OpenCV-Python will be Helpful

If you've any proficiency with OpenCV-Python (Python API for OpenCV), you will find these recipes really easy.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

Frames are actually 3D Numpy arrays

In python, \"Frames\" are actually three-dimensional NumPy ndarray composed of 3 nested levels of arrays, one for each dimension.

"},{"location":"recipes/basic/#basic-decoding-recipes","title":"Basic Decoding Recipes","text":"
  • Decoding Video files
    • Accessing RGB frames from a video file
    • Capturing and Previewing BGR frames from a video file (OpenCV Support)
    • Playing with any other FFmpeg pixel formats
    • Capturing and Previewing frames from a Looping Video
  • Decoding Camera Devices using Indexes
    • Enumerating all Camera Devices with Indexes
    • Capturing and Previewing frames from a Camera using Indexes
  • Decoding Network Streams
    • Capturing and Previewing frames from a HTTPs Stream
    • Capturing and Previewing frames from a RTSP/RTP Stream
  • Decoding Image sequences
    • Capturing and Previewing frames from Sequence of images
    • Capturing and Previewing frames from Single looping image
"},{"location":"recipes/basic/#basic-transcoding-recipes","title":"Basic Transcoding Recipes","text":"
  • Transcoding Live frames
    • Transcoding video using OpenCV VideoWriter API
    • Transcoding lossless video using WriteGear API
  • Transcoding Live Simple Filtergraphs
    • Transcoding Trimmed and Reversed video
    • Transcoding Cropped video
    • Transcoding Rotated video (with rotate filter)
    • Transcoding Rotated video (with transpose filter)
    • Transcoding Horizontally flipped and Scaled video
  • Saving Key-frames as Image (Image processing)
    • Extracting Key-frames as PNG image
    • Generating Thumbnail with a Fancy filter
"},{"location":"recipes/basic/#basic-metadata-recipes","title":"Basic Metadata Recipes","text":"
  • Extracting Video Metadata
    • Extracting video metadata using Sourcer API
    • Extracting video metadata using FFdecoder API
"},{"location":"recipes/basic/#whats-next","title":"What's next?","text":"

Done already! Let's checkout Advanced Recipes to level up your skills!

"},{"location":"recipes/basic/decode-camera-devices/","title":"Decoding Camera Devices using Indexes","text":"

With DeFFcode APIs, we are able to probe and enumerate all Camera Device names along with their respective \"device indexes\" or \"camera indexes\" no matter how many cameras are connected to your system. This makes Camera Devices decoding as simple as OpenCV, where one can effortlessly access a specific Camera Device just by specifying its matching index. These indexes are much easier to read, memorize, and type, and one doesn't have to remember long Device names or worry about their Demuxer.

We'll discuss the Decoding Camera Devices using Indexes briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes","title":"Enumerating all Camera Devices with Indexes","text":"

In Sourcer API, you can easily use its enumerate_devices property object to enumerate all probed Camera Devices (connected to your system) as dictionary object with device indexes as keys and device names as their respective values.

Requirement for Enumerating all Camera Devices in Sourcer API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will enumerate all probed Camera Devices connected on a Windows machine using enumerate_devices property object in Sourcer API, both as dictionary object and JSON string.

# import the necessary packages\nfrom deffcode import Sourcer\nimport json\n\n# initialize and formulate the decoder\nsourcer = Sourcer(\"0\").probe_stream()\n\n# enumerate probed devices as Dictionary object(`dict`)\nprint(sourcer.enumerate_devices)\n\n# enumerate probed devices as JSON string(`json.dump`)\nprint(json.dumps(sourcer.enumerate_devices,indent=2))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine: As Dictionary objectAs JSON string
{0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
{\n  \"0\": \"Integrated Camera\",\n  \"1\": \"USB2.0 Camera\",\n  \"2\": \"DroidCam Source\"\n}\n

"},{"location":"recipes/basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes","title":"Capturing and Previewing frames from a Camera using Indexes","text":"

After knowing the index of Camera Device with Sourcer API, One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value either as integer or string of integer type to its source parameter.

Requirement for Index based Camera Device Capturing in FFdecoder API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will decode BGR24 video frames from Integrated Camera at index 0 on a Windows Machine, and preview them using OpenCV Library's cv2.imshow() method.

Important Facts related to Camera Device Indexing
  • Camera Device indexes are 0-indexed. So the first device is at 0, second is at 1, and so on. So if there are n devices, the last device is at n-1.
  • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
  • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
    • For example, If there are three devices:
      {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
    • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

      Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

Out of Index Camera Device index values will raise ValueError in FFdecoder API

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/","title":"Decoding Image sequences","text":"

DeFFcode's FFdecoder API supports a wide range of media streams as input to its source parameter, which also includes Image Sequences such as Sequential(img%03d.png) and Glob pattern(*.png) as well as Single looping image.

We'll discuss both briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes require additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-sequence-of-images","title":"Capturing and Previewing frames from Sequence of images","text":"

In this example we will capture video frames from a given Image Sequence using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

OpenCV expects BGR format frames in its cv2.imshow() method.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4:

$ ffmpeg -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

SequentialGlob pattern How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img%03d.png\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

The glob pattern is not available on Windows FFmpeg builds.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-pattern_type glob` for accepting glob pattern\nffparams = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img*.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-single-looping-image","title":"Capturing and Previewing frames from Single looping image","text":"

In this example we will capture video frames from a Single Looping image using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-loop 1` for infinite looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"img.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/","title":"Decoding Network Streams","text":"

Similar to decoding Video files, DeFFcode's FFdecoder API directly supports Network Streams with specific protocols (such as RTSP/RTP, HTTP(s), MPEG-TS, etc.) as input to its source parameter.

We'll discuss Network Streams support briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes require additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-https-stream","title":"Capturing and Previewing frames from a HTTPs Stream","text":"

In this example we will decode live BGR24 video frames from a HTTPs protocol Stream in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-rtsprtp-stream","title":"Capturing and Previewing frames from a RTSP/RTP Stream","text":"

In this example we will decode live BGR24 video frames from RTSP/RTP protocol Streams in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

This example assumes you already have an RTSP Server running at the specified RTSP address with syntax rtsp://[RTSP_ADDRESS]:[RTSP_PORT]/[RTSP_PATH] and video data already being published to it.

For creating your own RTSP Server locally and publishing video data to it, you can refer to this WriteGear API's bonus example \u27b6

Make sure to replace the RTSP address rtsp://localhost:8554/mystream with yours in the following code before running

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\"-rtsp_transport\": \"tcp\"}\n\n# initialize and formulate the decoder with RTSP protocol source for BGR24 output\n# [WARNING] Change your RSTP address `rtsp://localhost:8554/mystream` with yours!\ndecoder = FFdecoder(\"rtsp://localhost:8554/mystream\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/","title":"Decoding Video files","text":"

DeFFcode's FFdecoder API readily supports multimedia Video files path as input to its source parameter. And with its frame_format parameter, you can easily decode video frames in any pixel format(s) that are readily supported by all well known Computer Vision libraries (such as OpenCV).

We'll discuss its video files support and pixel format capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes require additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-video-files/#accessing-rgb-frames-from-a-video-file","title":"Accessing RGB frames from a video file","text":"

The default function of FFdecoder API is to decode 24-bit RGB video frames from the given source.

FFdecoder API's generateFrame() function can be used in multiple ways to access RGB frames from a given source, such as a Generator (Recommended Approach), calling with Statement, and as an Iterator.

In this example we will decode the default RGB24 video frames from a given Video file (say foo.mp4) using above mentioned accessing methods:

As a Generator (Recommended)Calling with StatementAs a Iterator

This is a recommended approach for faster and error-proof access of decoded frames. We'll use it throughout the recipes.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# grab RGB24(default) frame from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so don't need to explicitly call them. See PEP343 -- The 'with' statement' for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder\nwith FFdecoder(\"foo.mp4\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # lets print its shape\n        print(frame.shape)  # for e.g. (1080, 1920, 3)\n

This Iterator Approach bears a close resemblance to OpenCV-Python (Python API for OpenCV) coding syntax, thereby easier to learn and remember.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# loop over frames\nwhile True:\n\n    # grab RGB24(default) frames from decoder\n    frame = next(decoder.generateFrame(), None)\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file","title":"Capturing and Previewing BGR frames from a video file","text":"

In this example we will decode OpenCV supported live BGR24 video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

By default, OpenCV expects BGR format frames in its cv2.imshow() method. We'll demonstrate this using two accessing methods.

As a Generator (Recommended)Calling with Statement
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so don't need to explicitly call them. See PEP343 -- The 'with' statement' for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\nwith FFdecoder(\"foo.mp4\", frame_format=\"bgr24\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # Show output window\n        cv2.imshow(\"Output\", frame)\n\n        # check for 'q' key if pressed\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord(\"q\"):\n            break\n\n# close output window\ncv2.destroyAllWindows()\n

"},{"location":"recipes/basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats","title":"Playing with any other FFmpeg pixel formats","text":"

Similar to BGR, you can input any pixel format (supported by installed FFmpeg) by way of frame_format parameter of FFdecoder API for the desired video frame format.

In this example we will decode live Grayscale and YUV video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Decode GrayscaleDecode YUV frames
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"input_foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# grab the GRAYSCALE frames from the decoder\nfor gray in decoder.generateFrame():\n\n    # check if frame is None\n    if gray is None:\n        break\n\n    # {do something with the gray frame here}\n\n    # Show output window\n    cv2.imshow(\"Gray Output\", gray)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try decoding YUV420p pixel-format frames in following python code:

You can also use other YUV pixel formats such as yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) etc. instead for higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# grab the YUV420p frames from the decoder\nfor yuv in decoder.generateFrame():\n\n    # check if frame is None\n    if yuv is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the bgr frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", bgr)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-frames-from-a-looping-video","title":"Capturing and Previewing frames from a Looping Video","text":"

In this example we will decode live BGR24 video frames from looping video using different means in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Using -stream_loop optionUsing loop filter

The recommend way to loop video is to use -stream_loop option via. -ffprefixes list attribute of ffparam dictionary parameter in FFdecoder API. Possible values are integer values: >0 value of loop, 0 means no loop, -1 means infinite loop.

Using -stream_loop 3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-stream_loop 3` for looping 4 times\nffparams = {\"-ffprefixes\":[\"-stream_loop\", \"3\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Another way to loop video is to use loop complex filter via. -filter_complex FFmpeg flag as attribute of ffparam dictionary parameter in FFdecoder API.

This filter places all frames into memory(RAM), so applying trim filter first is strongly recommended. Otherwise you might probably run Out of Memory.

Using loop filter for looping video

The filter accepts the following options:

  • loop: Sets the number of loops for integer values >0. Setting this value to -1 will result in infinite loops. Default is 0(no loops).
  • size: Sets maximal size in number of frames. Default is 0.
  • start: Sets first frame of loop. Default is 0.

Using loop=3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define loop 4 times, each loop is 15 frames, each loop skips the first 25 frames\nffparams = {\n    \"-filter_complex\": \"loop=loop=3:size=15:start=25\" # Or use: `loop=3:15:25`\n}  \n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/extract-video-metadata/","title":"Extracting Video Metadata","text":"

DeFFcode's Sourcer API acts as Source Probing Utility for easily probing metadata information for each multimedia stream available in the given video source, and return it as in Human-readable (as JSON string) or Machine-readable (as Dictionary object) type with its retrieve_metadata() class method. Apart from this, you can also use metadata property object in FFdecoder API to extract this metadata information (only as JSON string).

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-sourcer-api","title":"Extracting video metadata using Sourcer API","text":"

This is the recommended way for extracting video metadata.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it in both Human-readable (as JSON string) and Machine-readable (as Dictionary object) types using retrieve_metadata() class method in Sourcer API:

The Sourcer API's retrieve_metadata() class method provides pretty_json boolean parameter to return metadata as JSON string (if True) and as Dictionary (if False).

As JSON stringAs Dictionary object
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false\n}\n
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `dict`\nprint(sourcer.retrieve_metadata())\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{'ffmpeg_binary_path': 'C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe', 'source': 'foo.mp4', 'source_extension': '.mp4', 'source_video_resolution': [1280, 720], 'source_video_framerate': 25.0, 'source_video_pixfmt': 'yuv420p', 'source_video_decoder': 'h264', 'source_duration_sec': 5.31, 'approx_video_nframes': 133, 'source_video_bitrate': '1205k', 'source_audio_bitrate': '384k', 'source_audio_samplerate': '48000 Hz', 'source_has_video': True, 'source_has_audio': True, 'source_has_image_sequence': False}\n

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-ffdecoder-api","title":"Extracting video metadata using FFdecoder API","text":"

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it as JSON string using metadata property object in FFdecoder API.

You can also update video's metadata by using the same overloaded metadata property object in FFdecoder API. More information can be found in this Advanced Recipe \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\"\n}\n

"},{"location":"recipes/basic/save-keyframe-image/","title":"Saving Key-frames as Image","text":"

DeFFcode's FFdecoder API provide effortless and precise Frame Seeking with -ss FFmpeg parameter that enable us to save any frame from a specific part of our input source.

We'll discuss it briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for saving video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • Pillow: Pillow is an Imaging Library required for saving frames as images. You can easily install it directly via pip:

    pip install Pillow     \n
  • Matplotlib: Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations, also required for saving frame as Image. You can easily install it directly via pip:

    pip install matplotlib   \n
  • Imageio: Imageio is a Library for reading and writing a wide range of image, video, scientific, and volumetric data formats, also required for saving frame as Image. You can easily install it directly via pip:

    pip install imageio      \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/save-keyframe-image/#extracting-key-frames-as-png-image","title":"Extracting Key-frames as PNG image","text":"

In this example we will seek to 00:00:01.45(or 1450msec) in time and decode one single frame in FFdecoder API, and thereby save it as a PNG image using a few prominent Image processing python libraries by providing a valid filename (e.g. foo_image.png).

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter:

  • Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS) format, such as in 01:23:45.678.
  • Fractional: such as in 02:30.05. This is interpreted as 2 minutes, 30 seconds, and 5 hundredths of a second, which would be the same as using 150.05 in seconds.
Using PillowUsing OpenCVUsing MatplotlibUsing Imageio

In Pillow, the fromarray() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as PNG\n    im.save(\"foo_image.png\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In OpenCV, the imwrite() function can export BGR frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder for BGR24 outputwith suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    cv2.imwrite('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Matplotlib, the imsave() function can save an RGB frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport matplotlib.pyplot as plt\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    plt.imsave('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Imageio, the imwrite() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport imageio\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our output\n    imageio.imwrite('foo_image.jpeg', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/save-keyframe-image/#generating-thumbnail-with-a-fancy-filter","title":"Generating Thumbnail with a Fancy filter","text":"fancy_thumbnail.jpg (Courtesy - BigBuckBunny)

In this example we first apply FFmpeg\u2019s tblend filter with a hardmix blend mode (cool stuff) and then seek to 00:00:25.917(or 25.917sec) in time to retrieve our single frame thumbnail, and thereby save it as JPEG image with valid filename (e.g. fancy_thumbnail.jpg) using Pillow library.

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter: - [x] Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS), such as in 01:23:45.678 - [x] Fractional: such as in 02:30.05, this is interpreted as 2 minutes, 30 seconds, and 5 hundredths of a second, which would be the same as using 150.05 in seconds.

Available blend mode options

Other blend mode options for tblend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to\nffparams = {\n    \"-vf\": \"tblend=all_mode='hardmix'\",  # trim and reverse\n    \"-ss\": \"00:00:25.917\",  # seek to 00:00:25.917(or 25s 917msec)\n    \"-frames:v\": 1,  # get one single frame\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"BigBuckBunny.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as JPEG\n    im.save(\"fancy_thumbnail.jpg\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/","title":"Transcoding Live Simple Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API handles a single chain of filtergraphs (through -vf FFmpeg parameter) applied to real-time frames quite effortlessly.

We'll discuss the transcoding of live simple filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

OpenCV's VideoWriter() class lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-trimmed-and-reversed-video","title":"Transcoding Trimmed and Reversed video","text":"Big Buck Bunny Reversed

In this example we will take the first 5 seconds of a video clip (using trim filter) and reverse it (by applying reverse filter), and encode them using OpenCV Library's VideoWriter() method in real-time.

The reverse filter requires memory to buffer the entire clip, so applying the trim filter first is strongly recommended. Otherwise you will probably run out of memory.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

By default, OpenCV expects BGR format frames in its write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# trim 5 sec from end and reverse\nffparams = {\n    \"-vf\": \"trim=end=5,reverse\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-cropped-video","title":"Transcoding Cropped video","text":"Big Buck Bunny Cropped

In this example we will crop real-time video frames by an area with size \u2154 of the input video (say foo.mp4) by applying crop filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using crop filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# cropped the central input area with size 2/3 of the input video\nffparams = {\n    \"-vf\": \"crop=2/3*in_w:2/3*in_h\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-rotate-filter","title":"Transcoding Rotated video (with rotate filter)","text":"

FFmpeg features Rotate Filter that is used to rotate videos by an arbitrary angle (expressed in radians).

Big Buck Bunny Rotated (with rotate filter)

In this example we will rotate real-time video frames at an arbitrary angle by applying rotate filter in FFdecoder API and also using green color to fill the output area not covered by the rotated image, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 0.35 rad and fill green\nffparams = {\n    \"-vf\": \"rotate=angle=-20*PI/180:fillcolor=green\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-transpose-filter","title":"Transcoding Rotated video (with transpose filter)","text":"

FFmpeg also features Transpose Filter that is used to rotate videos by 90 degrees clockwise and counter-clockwise direction as well as flip them vertically and horizontally.

Big Buck Bunny Rotated (with transpose filter)

In this example we will rotate real-time video frames by 90 degrees counterclockwise and preserve portrait geometry by applying transpose filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 90 degrees counter-clockwise and preserve portrait layout\nffparams = {\n    \"-vf\": \"transpose=dir=2:passthrough=portrait\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-horizontally-flipped-and-scaled-video","title":"Transcoding Horizontally flipped and Scaled video","text":"Big Buck Bunny Horizontally flipped and Scaled

In this example we will horizontally flip and scale real-time video frames to half its original size by applying hflip and scale filter one-by-one in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using scale filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# horizontally flip and scale to half its original size\nffparams = {\n    \"-vf\": \"hflip,scale=w=iw/2:h=ih/2\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/","title":"Transcoding Live frames","text":"What exactly is Transcoding?

Before heading directly into recipes we have to talk about Transcoding:

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

While decoding media into video frames is purely managed by DeFFcode's FFdecoder API, you can easily encode those video frames back into multimedia files using any well-known video processing library such as OpenCV and VidGear.

We'll discuss transcoding using both these libraries briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-video-using-opencv-videowriter-api","title":"Transcoding video using OpenCV VideoWriter API","text":"

OpenCV's VideoWriter() class can be used directly with DeFFcode's FFdecoder API to encode video frames into a multimedia video file but it lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

BGR framesRGB framesGRAYSCALE framesYUV frames

By default, OpenCV expects BGR format frames in its cv2.write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n     # let's also show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

Since OpenCV expects BGR format frames in its cv2.write() method, therefore we need to convert RGB frames into BGR before encoding as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for RGB24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the RGB24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # converting RGB24 to BGR24 frame\n    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n    # writing BGR24 frame to writer\n    writer.write(frame_bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

OpenCV also directly consumes GRAYSCALE frames in its cv2.write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try encoding YUV420p pixel-format frames with OpenCV's write() method in following python code:

You can also use other YUV pixel-formats such as yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) etc. instead for higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the yuv420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-lossless-video-using-writegear-api","title":"Transcoding lossless video using WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allow us to manipulate the real-time video frames with immense flexibility.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them into lossless video file with controlled framerate using WriteGear API in real-time.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

BGR framesRGB framesGRAYSCALE framesYUV frames

WriteGear API by default expects BGR format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for BGR24 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In WriteGear API, you can use rgb_mode parameter in write() class method to write RGB format frames instead of default BGR as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing RGB24 frame to writer\n    writer.write(frame, rgb_mode=True)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes GRAYSCALE format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` parameter\n# for controlled output framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_gray.mp4`\nwriter = WriteGear(output_filename=\"output_foo_gray.mp4\", **output_params)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes YUV (or basically any other supported pixel format) frames in its write() class method with its -input_pixfmt attribute in compression mode. For its non-compression mode, see above example.

You can also use yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) instead for higher dynamic ranges.

In WriteGear API, the support for -input_pixfmt attribute in output_params dictionary parameter was added in v0.3.0.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for YUV420 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"yuv420p\").formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as \n# `-input_framerate` parameter for controlled framerate\n# and add input pixfmt as yuv420p also\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-input_pixfmt\": \"yuv420p\"\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_yuv.mp4`\nwriter = WriteGear(output_filename=\"output_foo_yuv.mp4\", logging=True, **output_params)\n\n# grab the YUV420 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing YUV420 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"reference/ffhelper/","title":"deffcode.ffhelper","text":"

Following methods are exclusively designed to handle FFmpeg related tasks. These tasks include validation of installed FFmpeg binaries, downloading of FFmpeg binaries(on Windows), and parsing of FFmpeg metadata into useful information using various pattern matching methods.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.download_ffmpeg_binaries--download_ffmpeg_binaries","title":"download_ffmpeg_binaries","text":"

Generates FFmpeg Static Binaries for windows(if not available)

Parameters:

Name Type Description Default path string

path for downloading custom FFmpeg executables

required os_windows boolean

is running on Windows OS?

False os_bit string

32-bit or 64-bit OS?

''

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def download_ffmpeg_binaries(path, os_windows=False, os_bit=\"\"):\n    \"\"\"\n    ## download_ffmpeg_binaries\n\n    Generates FFmpeg Static Binaries for windows(if not available)\n\n    Parameters:\n        path (string): path for downloading custom FFmpeg executables\n        os_windows (boolean): is running on Windows OS?\n        os_bit (string): 32-bit or 64-bit OS?\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if os_windows and os_bit:\n        # initialize with available FFmpeg Static Binaries GitHub Server\n        file_url = \"https://github.com/abhiTronix/FFmpeg-Builds/releases/latest/download/ffmpeg-static-{}-gpl.zip\".format(\n            os_bit\n        )\n\n        file_name = os.path.join(\n            os.path.abspath(path), \"ffmpeg-static-{}-gpl.zip\".format(os_bit)\n        )\n        file_path = os.path.join(\n            os.path.abspath(path),\n            \"ffmpeg-static-{}-gpl/bin/ffmpeg.exe\".format(os_bit),\n        )\n        base_path, _ = os.path.split(file_name)  # extract file base path\n        # check if file already exists\n        if os.path.isfile(file_path):\n            final_path += file_path  # skip download if does\n        else:\n            # import libs\n            import zipfile\n\n            # check if given path has write access\n            assert os.access(path, os.W_OK), (\n                \"[Helper:ERROR] :: Permission Denied, Cannot write binaries to directory = \"\n                + path\n            )\n            # remove leftovers if exists\n            os.path.isfile(file_name) and delete_file_safe(file_name)\n            # download and write file to the given path\n            with open(file_name, \"wb\") as f:\n                logger.debug(\n                    \"No Custom FFmpeg path provided. Auto-Installing FFmpeg static binaries from GitHub Mirror now. 
Please wait...\"\n                )\n                # create session\n                with requests.Session() as http:\n                    # setup retry strategy\n                    retries = Retry(\n                        total=3,\n                        backoff_factor=1,\n                        status_forcelist=[429, 500, 502, 503, 504],\n                    )\n                    # Mount it for https usage\n                    adapter = TimeoutHTTPAdapter(timeout=2.0, max_retries=retries)\n                    http.mount(\"https://\", adapter)\n                    response = http.get(file_url, stream=True)\n                    response.raise_for_status()\n                    total_length = (\n                        response.headers.get(\"content-length\")\n                        if \"content-length\" in response.headers\n                        else len(response.content)\n                    )\n                    assert not (\n                        total_length is None\n                    ), \"[Helper:ERROR] :: Failed to retrieve files, check your Internet connectivity!\"\n                    bar = tqdm(total=int(total_length), unit=\"B\", unit_scale=True)\n                    for data in response.iter_content(chunk_size=4096):\n                        f.write(data)\n                        len(data) > 0 and bar.update(len(data))\n                    bar.close()\n            logger.debug(\"Extracting executables.\")\n            with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n                zip_fname, _ = os.path.split(zip_ref.infolist()[0].filename)\n                zip_ref.extractall(base_path)\n            # perform cleaning\n            delete_file_safe(file_name)\n            logger.debug(\"FFmpeg binaries for Windows configured successfully!\")\n            final_path += file_path\n    # return final path\n    return final_path\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_ffmpeg--validate_ffmpeg","title":"validate_ffmpeg","text":"

Validate FFmpeg Binaries. Returns True if validity test passes successfully.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_ffmpeg(path, verbose=False):\n    \"\"\"\n    ## validate_ffmpeg\n\n    Validate FFmpeg Binaries. Returns `True` if validity test passes successfully.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    try:\n        # get the FFmpeg version\n        version = check_sp_output([path, \"-version\"])\n        firstline = version.split(b\"\\n\")[0]\n        version = firstline.split(b\" \")[2].strip()\n        if verbose:  # log if test are passed\n            logger.debug(\"FFmpeg validity Test Passed!\")\n            logger.debug(\n                \"Found valid FFmpeg Version: `{}` installed on this system\".format(\n                    version\n                )\n            )\n    except Exception as e:\n        # log if test are failed\n        if verbose:\n            logger.exception(str(e))\n            logger.warning(\"FFmpeg validity Test Failed!\")\n        return False\n    return True\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_pixfmts--get_supported_pixfmts","title":"get_supported_pixfmts","text":"

Find and returns all FFmpeg's supported pixel formats.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).

Source code in deffcode/ffhelper.py
def get_supported_pixfmts(path):\n    \"\"\"\n    ## get_supported_pixfmts\n\n    Find and returns all FFmpeg's supported pixel formats.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).\n    \"\"\"\n    pxfmts = check_sp_output([path, \"-hide_banner\", \"-pix_fmts\"])\n    splitted = pxfmts.split(b\"\\n\")\n    srtindex = [i for i, s in enumerate(splitted) if b\"-----\" in s]\n    # extract video encoders\n    supported_pxfmts = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[srtindex[0] + 1 :]\n        if x.decode(\"utf-8\").strip()\n    ]\n    # compile regex\n    finder = re.compile(r\"([A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*)(\\s+[0-4])(\\s+[0-9]+)\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_pxfmts))\n    # return output findings\n    return [\n        ([s for s in o[0].split(\" \")][-1], o[1].strip(), o[2].strip())\n        for o in outputs\n        if len(o) == 3\n    ]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_vdecoders--get_supported_vdecoders","title":"get_supported_vdecoders","text":"

Find and returns all FFmpeg's supported video decoders.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported decoders.

Source code in deffcode/ffhelper.py
def get_supported_vdecoders(path):\n    \"\"\"\n    ## get_supported_vdecoders\n\n    Find and returns all FFmpeg's supported video decoders.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported decoders.\n    \"\"\"\n    decoders = check_sp_output([path, \"-hide_banner\", \"-decoders\"])\n    splitted = decoders.split(b\"\\n\")\n    # extract video encoders\n    supported_vdecoders = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[2 : len(splitted) - 1]\n        if x.decode(\"utf-8\").strip().startswith(\"V\")\n    ]\n    # compile regex\n    finder = re.compile(r\"[A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_vdecoders))\n    # return output findings\n    return [[s for s in o.split(\" \")][-1] for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_demuxers--get_supported_demuxers","title":"get_supported_demuxers","text":"

Find and returns all FFmpeg's supported demuxers.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported demuxers.

Source code in deffcode/ffhelper.py
def get_supported_demuxers(path):\n    \"\"\"\n    ## get_supported_demuxers\n\n    Find and returns all FFmpeg's supported demuxers.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported demuxers.\n    \"\"\"\n    demuxers = check_sp_output([path, \"-hide_banner\", \"-demuxers\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in demuxers.split(b\"\\n\")]\n    split_index = [idx for idx, s in enumerate(splitted) if \"--\" in s][0]\n    supported_demuxers = splitted[split_index + 1 : len(splitted) - 1]\n    # compile regex\n    finder = re.compile(r\"\\s\\s[a-z0-9_,-]+\\s+\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_demuxers))\n    # return output findings\n    return [o.strip() if not (\",\" in o) else o.split(\",\")[-1].strip() for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_imgseqdir--validate_imgseqdir","title":"validate_imgseqdir","text":"

Validates Image Sequence by counting number of Image files.

Parameters:

Name Type Description Default source string

video source to be validated

required extension string

extension of image sequence.

'jpg'

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_imgseqdir(source, extension=\"jpg\", verbose=False):\n    \"\"\"\n    ## validate_imgseqdir\n\n    Validates Image Sequence by counting number of Image files.\n\n    Parameters:\n        source (string): video source to be validated\n        extension (string): extension of image sequence.\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    # check if path exists\n    dirpath = Path(source).parent\n    try:\n        if not (dirpath.exists() and dirpath.is_dir()):\n            verbose and logger.warning(\n                \"Specified path `{}` doesn't exists or valid.\".format(dirpath)\n            )\n            return False\n        else:\n            return (\n                True if len(list(dirpath.glob(\"*.{}\".format(extension)))) > 2 else False\n            )\n    except:\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_image_seq--is_valid_image_seq","title":"is_valid_image_seq","text":"

Checks Image sequence validity by testing its extension against FFmpeg's supported pipe formats and number of Image files.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required source string

video source to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_image_seq(path, source=None, verbose=False):\n    \"\"\"\n    ## is_valid_image_seq\n\n    Checks Image sequence validity by testing its extension against\n    FFmpeg's supported pipe formats and number of Image files.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        source (string): video source to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if source is None or not (source):\n        logger.error(\"Source is empty!\")\n        return False\n    # extract all FFmpeg supported protocols\n    formats = check_sp_output([path, \"-hide_banner\", \"-formats\"])\n    extract_formats = re.findall(r\"\\w+_pipe\", formats.decode(\"utf-8\").strip())\n    supported_image_formats = [\n        x.split(\"_\")[0] for x in extract_formats if x.endswith(\"_pipe\")\n    ]\n    filename, extension = os.path.splitext(source)\n    # Test and return result whether scheme is supported\n    if extension and source.endswith(tuple(supported_image_formats)):\n        if validate_imgseqdir(source, extension=extension[1:], verbose=verbose):\n            verbose and logger.debug(\n                \"A valid Image Sequence source of format `{}` found.\".format(extension)\n            )\n            return True\n        else:\n            ValueError(\n                \"Given Image Sequence source of format `{}` contains insignificant(invalid) sample size, Check the `source` parameter value again!\".format(\n                    source.split(\".\")[1]\n                )\n            )\n    else:\n        verbose and logger.warning(\"Source isn't a valid Image Sequence\")\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_url--is_valid_url","title":"is_valid_url","text":"

Checks URL validity by testing its scheme against FFmpeg's supported protocols.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required url string

URL to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_url(path, url=None, verbose=False):\n    \"\"\"\n    ## is_valid_url\n\n    Checks URL validity by testing its scheme against\n    FFmpeg's supported protocols.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        url (string): URL to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if url is None or not (url):\n        logger.warning(\"URL is empty!\")\n        return False\n    # extract URL scheme\n    extracted_scheme_url = url.split(\"://\", 1)[0]\n    # extract all FFmpeg supported protocols\n    protocols = check_sp_output([path, \"-hide_banner\", \"-protocols\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in protocols.split(b\"\\n\")]\n    supported_protocols = splitted[splitted.index(\"Output:\") + 1 : len(splitted) - 1]\n    # RTSP is a demuxer somehow\n    # support both RTSP and RTSPS(over SSL)\n    supported_protocols += (\n        [\"rtsp\", \"rtsps\"] if \"rtsp\" in get_supported_demuxers(path) else []\n    )\n    # Test and return result whether scheme is supported\n    if extracted_scheme_url and extracted_scheme_url in supported_protocols:\n        verbose and logger.debug(\n            \"URL scheme `{}` is supported by FFmpeg.\".format(extracted_scheme_url)\n        )\n        return True\n    else:\n        verbose and logger.warning(\n            \"URL scheme `{}` isn't supported by FFmpeg!\".format(extracted_scheme_url)\n        )\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.check_sp_output--check_sp_output","title":"check_sp_output","text":"

Returns FFmpeg stdout output from subprocess module.

Parameters:

Name Type Description Default args based on input

Non Keyword Arguments

() kwargs based on input

Keyword Arguments

{}

Returns: A string value.

Source code in deffcode/ffhelper.py
def check_sp_output(*args, **kwargs):\n    \"\"\"\n    ## check_sp_output\n\n    Returns FFmpeg `stdout` output from subprocess module.\n\n    Parameters:\n        args (based on input): Non Keyword Arguments\n        kwargs (based on input): Keyword Arguments\n\n    **Returns:** A string value.\n    \"\"\"\n    # workaround for python bug: https://bugs.python.org/issue37380\n    if platform.system() == \"Windows\":\n        # see comment https://bugs.python.org/msg370334\n        sp._cleanup = lambda: None\n    # handle additional params\n    retrieve_stderr = kwargs.pop(\"force_retrieve_stderr\", False)\n    # execute command in subprocess\n    process = sp.Popen(\n        stdout=sp.PIPE,\n        stderr=sp.DEVNULL if not (retrieve_stderr) else sp.PIPE,\n        *args,\n        **kwargs,\n    )\n    # communicate and poll process\n    output, stderr = process.communicate()\n    retcode = process.poll()\n    # handle return code\n    if retcode and not (retrieve_stderr):\n        logger.error(\"[Pipline-Error] :: {}\".format(output.decode(\"utf-8\")))\n        cmd = kwargs.get(\"args\")\n        if cmd is None:\n            cmd = args[0]\n        error = sp.CalledProcessError(retcode, cmd)\n        error.output = output\n        raise error\n    # raise error if no output\n    bool(output) or bool(stderr) or logger.error(\n        \"[Pipline-Error] :: Pipline failed to exact any data from command: {}!\".format(\n            args[0] if args else []\n        )\n    )\n    # return output otherwise\n    return stderr if retrieve_stderr and stderr else output\n
"},{"location":"reference/utils/","title":"deffcode.utils","text":"

Following are the helper methods required by the DeFFcode APIs.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/utils/#deffcode.utils.logger_handler--logger_handler","title":"logger_handler","text":"

Returns the logger handler

Returns: A logger handler

Source code in deffcode/utils.py
def logger_handler():\n    \"\"\"\n    ## logger_handler\n\n    Returns the logger handler\n\n    **Returns:** A logger handler\n    \"\"\"\n    # logging formatter\n    formatter = ColoredFormatter(\n        \"{green}{asctime}{reset} :: {bold_purple}{name:^13}{reset} :: {log_color}{levelname:^8}{reset} :: {bold_white}{message}\",\n        datefmt=\"%H:%M:%S\",\n        reset=True,\n        log_colors={\n            \"INFO\": \"bold_cyan\",\n            \"DEBUG\": \"bold_yellow\",\n            \"WARNING\": \"bold_red,fg_thin_yellow\",\n            \"ERROR\": \"bold_red\",\n            \"CRITICAL\": \"bold_red,bg_white\",\n        },\n        style=\"{\",\n    )\n    # check if FFdecoder_LOGFILE defined\n    file_mode = os.environ.get(\"DEFFCODE_LOGFILE\", False)\n    # define handler\n    handler = logging.StreamHandler()\n    if file_mode and isinstance(file_mode, str):\n        file_path = os.path.abspath(file_mode)\n        if (os.name == \"nt\" or os.access in os.supports_effective_ids) and os.access(\n            os.path.dirname(file_path), os.W_OK\n        ):\n            file_path = (\n                os.path.join(file_path, \"deffcode.log\")\n                if os.path.isdir(file_path)\n                else file_path\n            )\n            handler = logging.FileHandler(file_path, mode=\"a\")\n            formatter = logging.Formatter(\n                \"{asctime} :: {name} :: {levelname} :: {message}\",\n                datefmt=\"%H:%M:%S\",\n                style=\"{\",\n            )\n\n    handler.setFormatter(formatter)\n    return handler\n
"},{"location":"reference/utils/#deffcode.utils.dict2Args--dict2args","title":"dict2Args","text":"

Converts dictionary attributes to list(args)

Parameters:

Name Type Description Default param_dict dict

Parameters dictionary

required

Returns: Arguments list

Source code in deffcode/utils.py
def dict2Args(param_dict):\n    \"\"\"\n    ## dict2Args\n\n    Converts dictionary attributes to list(args)\n\n    Parameters:\n        param_dict (dict): Parameters dictionary\n\n    **Returns:** Arguments list\n    \"\"\"\n    args = []\n    for key in param_dict.keys():\n        if key in [\"-clones\"] or key.startswith(\"-core\"):\n            if isinstance(param_dict[key], list):\n                args.extend(param_dict[key])\n            else:\n                logger.warning(\n                    \"{} with invalid datatype:`{}`, Skipped!\".format(\n                        \"Core parameter\" if key.startswith(\"-core\") else \"Clone\",\n                        param_dict[key],\n                    )\n                )\n        else:\n            args.append(key)\n            args.append(str(param_dict[key]))\n    return args\n
"},{"location":"reference/utils/#deffcode.utils.delete_file_safe--delete_ext_safe","title":"delete_ext_safe","text":"

Safely deletes files at given path.

Parameters:

Name Type Description Default file_path string

path to the file

required Source code in deffcode/utils.py
def delete_file_safe(file_path):\n    \"\"\"\n    ## delete_ext_safe\n\n    Safely deletes files at given path.\n\n    Parameters:\n        file_path (string): path to the file\n    \"\"\"\n    try:\n        dfile = Path(file_path)\n        if sys.version_info >= (3, 8, 0):\n            dfile.unlink(missing_ok=True)\n        else:\n            dfile.exists() and dfile.unlink()\n    except Exception as e:\n        logger.exception(str(e))\n
"},{"location":"reference/ffdecoder/","title":"FFdecoder API","text":"

FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1

FFdecoder API implements a standalone highly-extensible wrapper around FFmpeg multimedia framework that provides complete control over the underlying pipeline including access to almost any FFmpeg specification thinkable such as framerate, resolution, hardware decoder(s), complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.

FFdecoder API compiles its FFmpeg pipeline by processing input Video Source metadata and User-defined options, and runs it inside a subprocess pipe concurrently with the main thread, while extracting output dataframes(1D arrays) into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into 24-bit RGB (default) ndarray 3D frames that are readily available through its generateFrame() method.

FFdecoder API employs Sourcer API at its backend for gathering, processing, and validating metadata of all multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. This metadata information is also available as a JSON string with its metadata property object and can be updated as desired.

FFdecoder API supports a wide-ranging media stream as input source such as USB/Virtual/IP Camera Feed, Multimedia video file, Screen Capture, Image Sequence, Network protocols (such as HTTP(s), RTP/RTSP, etc.), so on and so forth.

Furthermore, FFdecoder API maintains the standard OpenCV-Python (Python API for OpenCV) coding syntax, thereby making it even easier to integrate this API in any Computer Vision application.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

FFdecoder API parameters are explained here \u27b6

Source code in deffcode/ffdecoder.py
class FFdecoder:\n    \"\"\"\n    > FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames\n    with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1\n\n    FFdecoder API implements a **standalone highly-extensible wrapper around [FFmpeg](https://ffmpeg.org/)** multimedia framework that provides complete\n    control over the underline pipeline including **access to almost any FFmpeg specification thinkable** such as framerate, resolution, hardware decoder(s),\n    complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.\n\n    FFdecoder API **compiles its FFmpeg pipeline** by processing input Video Source metadata and User-defined options, and **runs it inside a\n    [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe** concurrently with the main thread, while extracting output dataframes(1D arrays)\n    into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into ==[24-bit RGB](https://en.wikipedia.org/wiki/List_of_monochrome_and_RGB_color_formats#24-bit_RGB) _(default)_\n    [`ndarray`](https://numpy.org/doc/stable/reference/arrays.ndarray.html#the-n-dimensional-array-ndarray) 3D frames== that are readily available\n    through its [`generateFrame()`](#deffcode.ffdecoder.FFdecoder.generateFrame) method.\n\n    FFdecoder API **employs [Sourcer API](../../reference/sourcer) at its backend** for gathering, processing, and validating metadata of all\n    multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. 
This metadata information is also\n    available as a JSON string with its [`metadata`](#deffcode.ffdecoder.FFdecoder.metadata) property object and can be updated as desired.\n\n    FFdecoder API **supports a wide-ranging media stream** as input source such as USB/Virtual/IP Camera Feed, Multimedia video file,\n    Screen Capture, Image Sequence, Network protocols _(such as HTTP(s), RTP/RSTP, etc.)_, so on and so forth.\n\n    Furthermore, FFdecoder API maintains the **standard [OpenCV-Python](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html) _(Python API for OpenCV)_ coding syntax**, thereby making it even easier to\n    integrate this API in any Computer Vision application.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! info \"FFdecoder API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        frame_format=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **ffparams\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n\n        # enable verbose if specified\n        self.__verbose_logs = (\n            verbose if (verbose and isinstance(verbose, bool)) else False\n    
    )\n\n        # define whether initializing\n        self.__initializing = True\n\n        # define frame pixel-format for decoded frames\n        self.__frame_format = (\n            frame_format.lower().strip() if isinstance(frame_format, str) else None\n        )\n\n        # handles user-defined parameters\n        self.__extra_params = {}\n\n        # handle process to be frames written\n        self.__process = None\n\n        # handle exclusive metadata\n        self.__ff_pixfmt_metadata = None  # metadata\n        self.__raw_frame_num = None  # raw-frame number\n        self.__raw_frame_pixfmt = None  # raw-frame pixformat\n        self.__raw_frame_dtype = None  # raw-frame dtype\n        self.__raw_frame_depth = None  # raw-frame depth\n        self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n        # define supported mode of operation\n        self.__supported_opmodes = {\n            \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n            \"vo\": \"Video-Only\",\n            \"imgseq\": \"Image-Sequence\",\n            # \"ao\":\"Audio-Only\", # reserved for future\n        }\n        # operation mode variable\n        self.__opmode = None\n\n        # handle termination\n        self.__terminate_stream = False\n\n        # cleans and reformat user-defined parameters\n        self.__extra_params = {\n            str(k).strip(): str(v).strip()\n            if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n            else v\n            for k, v in ffparams.items()\n        }\n\n        # handle custom Sourcer API params\n        sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n        # reset improper values\n        sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n        # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        self.__ffmpeg_prefixes = 
self.__extra_params.pop(\"-ffprefixes\", [])\n        # check if not valid type\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n        else:\n            # also pass valid ffmpeg pre-headers to Sourcer API\n            sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n        # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n        # assets on Windows(if specified)\n        sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n            \"-ffmpeg_download_path\", \"\"\n        )\n\n        # handle video and audio stream indexes in case of multiple ones.\n        default_stream_indexes = self.__extra_params.pop(\n            \"-default_stream_indexes\", (0, 0)\n        )\n        # reset improper values\n        default_stream_indexes = (\n            (0, 0)\n            if not isinstance(default_stream_indexes, (list, tuple))\n            else default_stream_indexes\n        )\n\n        # pass FFmpeg filter to Sourcer API params for processing\n        if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n            key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n            sourcer_params[key] = self.__extra_params[key]\n\n        # define dict to store user-defined parameters\n        self.__user_metadata = {}\n        # extract and assign source metadata as dict\n        (self.__sourcer_metadata, self.__missing_prop) = (\n            Sourcer(\n                source=source,\n                source_demuxer=source_demuxer,\n                verbose=verbose,\n                custom_ffmpeg=custom_ffmpeg if 
isinstance(custom_ffmpeg, str) else \"\",\n                **sourcer_params\n            )\n            .probe_stream(default_stream_indexes=default_stream_indexes)\n            .retrieve_metadata(force_retrieve_missing=True)\n        )\n\n        # handle valid FFmpeg assets location\n        self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n        # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n        # patch for compatibility with OpenCV APIs.\n        self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n        if not (isinstance(self.__cv_patch, bool)):\n            self.__cv_patch = False\n            self.__verbose_logs and logger.critical(\n                \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n            )\n\n        # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n        self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n        if not (isinstance(self.__passthrough_mode, bool)):\n            self.__passthrough_mode = False\n\n        # handle mode of operation\n        if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n            # image-sequence mode\n            self.__opmode = \"imgseq\"\n        elif (\n            self.__sourcer_metadata[\n                \"source_has_video\"\n            ]  # audio is only for pass-through, not really for audio decoding yet.\n            and self.__sourcer_metadata[\"source_has_audio\"]\n            and self.__passthrough_mode  # [TODO]\n        ):\n            self.__opmode = \"av\"\n        # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n        #    self.__opmode = \"ao\"\n        elif self.__sourcer_metadata[\"source_has_video\"]:\n            # video-only mode\n            self.__opmode = \"vo\"\n        else:\n            # raise if unknown mode\n            raise ValueError(\n                \"Unable to find any 
usable video stream in the given source!\"\n            )\n        # store as metadata\n        self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n            self.__opmode\n        ]\n\n        # handle user-defined output framerate\n        __framerate = self.__extra_params.pop(\"-framerate\", None)\n        if (\n            isinstance(__framerate, str)\n            and __framerate\n            == \"null\"  # special mode to discard `-framerate/-r` parameter\n        ):\n            self.__inputframerate = __framerate\n        elif isinstance(__framerate, (float, int)):\n            self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n        else:\n            # warn if wrong type\n            not (__framerate is None) and logger.warning(\n                \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                    type(__framerate).__name__\n                )\n            )\n            # reset to default\n            self.__inputframerate = 0.0\n\n        # handle user defined decoded frame resolution\n        self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n        if (\n            isinstance(self.__custom_resolution, str)\n            and self.__custom_resolution\n            == \"null\"  # special mode to discard `-size/-s` parameter\n        ) or (\n            isinstance(self.__custom_resolution, (list, tuple))\n            and len(self.__custom_resolution)\n            == 2  # valid resolution(must be a tuple or list)\n        ):\n            # log it\n            self.__verbose_logs and not isinstance(\n                self.__custom_resolution, str\n            ) and logger.debug(\n                \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n            )\n        else:\n            # log it\n            not (self.__custom_resolution is None) and logger.warning(\n                \"Discarding invalid 
`-custom_resolution` value: `{}`!\".format(\n                    self.__custom_resolution\n                )\n            )\n            # reset improper values\n            self.__custom_resolution = None\n\n    def formulate(self):\n\n        \"\"\"\n        This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n        **Returns:** A reference to the FFdecoder class object.\n        \"\"\"\n        # assign values to class variables on first run\n        if self.__initializing:\n            # prepare parameter dict\n            input_params = OrderedDict()\n            output_params = OrderedDict()\n\n            # dynamically pre-assign a default video-decoder (if not assigned by user).\n            supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n            default_vdecodec = (\n                self.__sourcer_metadata[\"source_video_decoder\"]\n                if self.__sourcer_metadata[\"source_video_decoder\"]\n                in supported_vdecodecs\n                else \"unknown\"\n            )\n            if \"-c:v\" in self.__extra_params:\n                self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-c:v\", default_vdecodec\n                )\n            # handle image sequence separately\n            if self.__opmode == \"imgseq\":\n                # -vcodec is discarded by default\n                # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n                self.__extra_params.pop(\"-vcodec\", None)\n            elif (\n                \"-vcodec\" in self.__extra_params\n                and self.__extra_params[\"-vcodec\"] is None\n            ):\n                # special case when -vcodec is not needed intentionally\n                self.__extra_params.pop(\"-vcodec\", None)\n            else:\n                # assign video decoder selected here.\n                if not \"-vcodec\" in 
self.__extra_params:\n                    input_params[\"-vcodec\"] = default_vdecodec\n                else:\n                    input_params[\"-vcodec\"] = self.__extra_params.pop(\n                        \"-vcodec\", default_vdecodec\n                    )\n                if (\n                    default_vdecodec != \"unknown\"\n                    and not input_params[\"-vcodec\"] in supported_vdecodecs\n                ):\n                    # reset to default if not supported\n                    logger.warning(\n                        \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                            input_params[\"-vcodec\"], default_vdecodec\n                        )\n                    )\n                    input_params[\"-vcodec\"] = default_vdecodec\n                # raise error if not valid decoder found\n                if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                    raise RuntimeError(\n                        \"Provided FFmpeg does not support any known usable video-decoders.\"\n                        \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                    )\n\n            # handle user-defined number of frames.\n            if \"-vframes\" in self.__extra_params:\n                self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                    \"-vframes\", None\n                )\n            if \"-frames:v\" in self.__extra_params:\n                value = self.__extra_params.pop(\"-frames:v\", None)\n                if not (value is None) and value > 0:\n                    output_params[\"-frames:v\"] = value\n\n            # dynamically calculate default raw-frames pixel format(if not assigned by user).\n            # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n            if \"-pix_fmt\" in self.__extra_params:\n                
logger.warning(\n                    \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n                )\n                self.__extra_params.pop(\"-pix_fmt\", None)\n            # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n            self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n            supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n            # calculate default pixel-format\n            # Check special case  - `frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n            self.__frame_format == \"null\" and logger.critical(\n                \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n            )\n            # choose between rgb24(if available) or source pixel-format\n            # otherwise, only source pixel-format for special case\n            default_pixfmt = (\n                \"rgb24\"\n                if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n                else self.__sourcer_metadata[\"source_video_pixfmt\"]\n            )\n            # assign output raw-frames pixel format\n            rawframe_pixfmt = None\n            if (\n                not (self.__frame_format is None)\n                and self.__frame_format in supported_pixfmts\n            ):\n                # check if valid and supported `frame_format` parameter assigned\n                rawframe_pixfmt = self.__frame_format.strip()\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                        rawframe_pixfmt\n                    )\n                )\n            elif (\n                \"output_frames_pixfmt\"\n                in self.__sourcer_metadata  # means `format` filter is defined\n                and self.__sourcer_metadata[\"output_frames_pixfmt\"] 
in supported_pixfmts\n            ):\n                # assign if valid and supported\n                rawframe_pixfmt = self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ].strip()\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n                )\n            else:\n                # reset to default if not supported\n                rawframe_pixfmt = default_pixfmt\n                # log it accordingly\n                if self.__frame_format is None:\n                    logger.info(\n                        \"Using default `{}` pixel-format for this pipeline.\".format(\n                            default_pixfmt\n                        )\n                    )\n                else:\n                    logger.warning(\n                        \"{} Switching to default `{}` pixel-format!\".format(\n                            \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                                self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                                if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                                else self.__frame_format\n                            )\n                            if self.__frame_format != \"null\"\n                            else \"No usable pixel-format defined.\",\n                            default_pixfmt,\n                        )\n                    )\n\n            # dynamically calculate raw-frame datatype based on pixel-format selected\n            (self.__raw_frame_depth, rawframesbpp) = [\n                (int(x[1]), int(x[2]))\n                for x in self.__ff_pixfmt_metadata\n                if x[0] == rawframe_pixfmt\n            ][0]\n            raw_bit_per_component = (\n                rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n            )\n     
       if 4 <= raw_bit_per_component <= 8:\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n            elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n                (\"le\", \"be\")\n            ):\n                if rawframe_pixfmt.endswith(\"le\"):\n                    self.__raw_frame_dtype = np.dtype(\"<u2\")\n                else:\n                    self.__raw_frame_dtype = np.dtype(\">u2\")\n            else:\n                # reset to both pixel-format and datatype to default if not supported\n                not (self.__frame_format is None) and logger.warning(\n                    \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. Switching to default `rgb24` pixel-format!\".format(\n                        rawframe_pixfmt\n                    )\n                )\n                rawframe_pixfmt = \"rgb24\"\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n\n            # Check if not special case\n            if self.__frame_format != \"null\":\n                # assign to FFmpeg pipeline otherwise\n                output_params[\"-pix_fmt\"] = rawframe_pixfmt\n            # assign to global parameter further usage\n            self.__raw_frame_pixfmt = rawframe_pixfmt\n            # also override as metadata(if available)\n            if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n                self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ] = self.__raw_frame_pixfmt\n\n            # handle raw-frame resolution\n            # notify FFmpeg `-s` parameter cannot be assigned directly\n            if \"-s\" in self.__extra_params:\n                logger.warning(\n                    \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n                )\n                self.__extra_params.pop(\"-s\", None)\n            # assign output rawframe resolution\n            if not (self.__custom_resolution is None) and not isinstance(\n                self.__custom_resolution, str\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                self.__raw_frame_resolution = self.__custom_resolution\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                        self.__raw_frame_resolution\n                    )\n                )\n            elif (\n                \"output_frames_resolution\"\n                in self.__sourcer_metadata  # means `scale` filter is defined\n                and self.__sourcer_metadata[\"output_frames_resolution\"]\n                and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on output.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"output_frames_resolution\"\n                ]\n            elif (\n                self.__sourcer_metadata[\"source_video_resolution\"]\n                and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on source.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"source_video_resolution\"\n                ]\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n            # special mode to discard `-size/-s` FFmpeg parameter completely\n            if isinstance(self.__custom_resolution, str):\n                logger.critical(\n                    \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # add to pipeline\n                dimensions = \"{}x{}\".format(\n                    self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n                )\n                output_params[\"-s\"] = str(dimensions)\n            # log if filters or default source is used\n            self.__verbose_logs and (\n                self.__custom_resolution is None\n                or isinstance(self.__custom_resolution, str)\n            ) and logger.info(\n                \"{} for this pipeline for defining output resolution.\".format(\n                    \"FFmpeg filter values will be used\"\n                    if \"output_frames_resolution\" in self.__sourcer_metadata\n                    else \"Default source resolution will be used\"\n                )\n            )\n\n            # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n            if (\n                not isinstance(self.__inputframerate, str)\n                and self.__inputframerate > 0.0\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                output_params[\"-framerate\"] = str(self.__inputframerate)\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                        str(self.__inputframerate)\n                    )\n                )\n            elif (\n                
\"output_framerate\"\n                in self.__sourcer_metadata  # means `fps` filter is defined\n                and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n            ):\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on output\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"output_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n                )\n            elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on source\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"source_video_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"Default source framerate will be used for this pipeline for defining output framerate.\"\n                )\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n\n            # add rest to output parameters\n            output_params.update(self.__extra_params)\n\n            # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n            # TODO Added support for `-re -stream_loop` and `-loop`\n            if \"-frames:v\" in input_params:\n                self.__raw_frame_num = input_params[\"-frames:v\"]\n            elif (\n                not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n                and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n            ):\n                self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n            else:\n                self.__raw_frame_num = None\n                # log that number of frames are unknown\n                self.__verbose_logs and logger.info(\n                    \"Number of frames in given source are unknown. 
Live/Network/Looping stream detected!\"\n                )\n\n            # log Mode of Operation\n            self.__verbose_logs and logger.critical(\n                \"Activating {} Mode of Operation.\".format(\n                    self.__supported_opmodes[self.__opmode]\n                )\n            )\n\n            # compose the Pipeline using formulated FFmpeg parameters\n            self.__launch_FFdecoderline(input_params, output_params)\n\n            # inform the initialization is completed\n            self.__initializing = False\n        else:\n            # warn if pipeline is recreated\n            logger.error(\"This pipeline is already created and running!\")\n        return self\n\n    def __fetchNextfromPipeline(self):\n        \"\"\"\n        This Internal method to fetch next dataframes(1D arrays) from `subprocess` pipe's standard output(`stdout`) into a Numpy buffer.\n        \"\"\"\n        assert not (\n            self.__process is None\n        ), \"Pipeline is not running! 
You must call `formulate()` method first.\"\n\n        # formulated raw frame size and apply YUV pixel formats patch(if applicable)\n        raw_frame_size = (\n            (self.__raw_frame_resolution[0] * (self.__raw_frame_resolution[1] * 3 // 2))\n            if self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch\n            else (\n                self.__raw_frame_depth\n                * self.__raw_frame_resolution[0]\n                * self.__raw_frame_resolution[1]\n            )\n        )\n        # next dataframe as numpy ndarray\n        nparray = None\n        try:\n            # read bytes frames from buffer\n            nparray = np.frombuffer(\n                self.__process.stdout.read(\n                    raw_frame_size * self.__raw_frame_dtype.itemsize\n                ),\n                dtype=self.__raw_frame_dtype,\n            )\n        except Exception as e:\n            raise RuntimeError(\"Frame buffering failed with error: {}\".format(str(e)))\n        return (\n            nparray\n            if not (nparray is None) and len(nparray) == raw_frame_size\n            else None\n        )\n\n    def __fetchNextFrame(self):\n        \"\"\"\n        This Internal method grabs and decodes next 3D `ndarray` video-frame from the buffer.\n        \"\"\"\n        # Read next and reconstruct as numpy array\n        frame = self.__fetchNextfromPipeline()\n        # check if empty\n        if frame is None:\n            return frame\n        elif self.__raw_frame_pixfmt.startswith(\"gray\"):\n            # reconstruct exclusive `gray` frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )[:, :, 0]\n        elif self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch:\n            # reconstruct exclusive YUV formats 
frames for OpenCV APIs\n            frame = frame.reshape(\n                self.__raw_frame_resolution[1] * 3 // 2,\n                self.__raw_frame_resolution[0],\n            )\n        else:\n            # reconstruct default frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )\n        # return frame\n        return frame\n\n    def generateFrame(self):\n        \"\"\"\n        This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n        _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n        \"\"\"\n        if self.__raw_frame_num is None or not self.__raw_frame_num:\n            while not self.__terminate_stream:  # infinite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n        else:\n            for _ in range(self.__raw_frame_num):  # finite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n\n    def __enter__(self):\n        \"\"\"\n        Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n        **Returns:** Output of `formulate()` method.\n        \"\"\"\n        return self.formulate()\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"\n        Handles exit with the `with` statement. 
See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n        \"\"\"\n        self.terminate()\n\n    @property\n    def metadata(self):\n        \"\"\"\n        A property object that dumps metadata information as JSON string.\n\n        **Returns:** Metadata as JSON string.\n        \"\"\"\n        # import dependency\n        import json\n\n        # return complete metadata information as JSON string\n        return json.dumps(\n            {\n                **self.__sourcer_metadata,  # source video\n                **self.__missing_prop,  # missing properties\n                **self.__user_metadata,  # user-defined\n            },\n            indent=2,\n        )\n\n    @metadata.setter\n    def metadata(self, value):\n        \"\"\"\n        A property object that updates metadata information with user-defined dictionary.\n\n        Parameters:\n            value (dict): User-defined dictionary.\n        \"\"\"\n        # check if value dict type\n        if value and isinstance(value, dict):\n            # log it\n            self.__verbose_logs and logger.info(\"Updating Metadata...\")\n            # extract any source and output internal metadata keys\n            default_keys = set(value).intersection(\n                {**self.__sourcer_metadata, **self.__missing_prop}\n            )\n            # counterpart source properties for each output properties\n            counterpart_prop = {\n                \"output_frames_resolution\": \"source_video_resolution\",\n                \"output_frames_pixfmt\": \"source_video_pixfmt\",\n                \"output_framerate\": \"source_video_framerate\",\n            }\n            # iterate over source metadata keys and sanitize it\n            for key in default_keys or []:\n                if key == \"source\":\n                    # metadata properties that cannot be altered\n                    logger.warning(\n                        \"`{}` metadata property value cannot be altered. 
Discarding!\".format(\n                            key\n                        )\n                    )\n                elif key in self.__missing_prop:\n                    # missing metadata properties are unavailable and read-only\n                    # notify user about alternative counterpart property (if available)\n                    logger.warning(\n                        \"`{}` metadata property is read-only\".format(key)\n                        + (\n                            \". Try updating `{}` property instead!\".format(\n                                counterpart_prop[key]\n                            )\n                            if key in counterpart_prop.keys()\n                            else \" and cannot be updated!\"\n                        )\n                    )\n                elif isinstance(value[key], type(self.__sourcer_metadata[key])):\n                    # check if correct datatype as original\n                    self.__verbose_logs and logger.info(\n                        \"Updating `{}`{} metadata property to `{}`.\".format(\n                            key,\n                            \" and its counterpart\"\n                            if key in counterpart_prop.values()\n                            else \"\",\n                            value[key],\n                        )\n                    )\n                    # update source metadata if valid\n                    self.__sourcer_metadata[key] = value[key]\n                    # also update missing counterpart property (if available)\n                    counter_key = next(\n                        (k for k, v in counterpart_prop.items() if v == key), \"\"\n                    )\n                    if counter_key:\n                        self.__missing_prop[counter_key] = value[key]\n                else:\n                    # otherwise discard and log it\n                    logger.warning(\n                        \"Manually assigned `{}` metadata 
property value is of invalid type. Discarding!\"\n                    ).format(key)\n                # delete invalid key\n                del value[key]\n            # There is no concept of a tuple in the JSON format.\n            # Python's `json` module converts Python tuples to JSON lists\n            # because that's the closest thing in JSON to a tuple.\n            any(isinstance(value[x], tuple) for x in value) and logger.warning(\n                \"All TUPLE metadata properties will be converted to LIST datatype. Read docs for more details.\"\n            )\n            # update user-defined metadata\n            self.__user_metadata.update(value)\n        else:\n            # otherwise raise error\n            raise ValueError(\"Invalid datatype metadata assigned. Aborting!\")\n\n    def __launch_FFdecoderline(self, input_params, output_params):\n\n        \"\"\"\n        This Internal method executes FFmpeg pipeline arguments inside a `subprocess` pipe in a new process.\n\n        Parameters:\n            input_params (dict): Input FFmpeg parameters\n            output_params (dict): Output FFmpeg parameters\n        \"\"\"\n        # convert input parameters to list\n        input_parameters = dict2Args(input_params)\n\n        # convert output parameters to list\n        output_parameters = dict2Args(output_params)\n\n        # format command\n        cmd = (\n            [self.__ffmpeg]\n            + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n            + self.__ffmpeg_prefixes\n            + input_parameters\n            + (\n                [\"-f\", self.__sourcer_metadata[\"source_demuxer\"]]\n                if (\"source_demuxer\" in self.__sourcer_metadata.keys())\n                else []\n            )\n            + [\"-i\", self.__sourcer_metadata[\"source\"]]\n            + output_parameters\n            + [\"-f\", \"rawvideo\", \"-\"]\n        )\n        # compose the FFmpeg process\n        if self.__verbose_logs:\n         
   logger.debug(\"Executing FFmpeg command: `{}`\".format(\" \".join(cmd)))\n            # In debugging mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=None\n            )\n        else:\n            # In silent mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=sp.DEVNULL\n            )\n\n    def terminate(self):\n        \"\"\"\n        Safely terminates all processes.\n        \"\"\"\n\n        # signal we are closing\n        self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n        self.__terminate_stream = True\n        # check if no process was initiated at first place\n        if self.__process is None or not (self.__process.poll() is None):\n            logger.info(\"Pipeline already terminated.\")\n            return\n        # Attempt to close pipeline.\n        # close `stdin` output\n        self.__process.stdin and self.__process.stdin.close()\n        # close `stdout` output\n        self.__process.stdout and self.__process.stdout.close()\n        # terminate/kill process if still processing\n        if self.__process.poll() is None:\n            # demuxers prefer kill\n            self.__process.kill()\n        # wait if not exiting\n        self.__process.wait()\n        self.__process = None\n        logger.info(\"Pipeline terminated successfully.\")\n

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata","title":"metadata property writable","text":"

A property object that dumps metadata information as JSON string.

Returns: Metadata as JSON string.

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__enter__","title":"__enter__(self) special","text":"

Handles entry with the with statement. See PEP343 -- The 'with' statement'.

Returns: Output of formulate() method.

Source code in deffcode/ffdecoder.py
def __enter__(self):\n    \"\"\"\n    Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n    **Returns:** Output of `formulate()` method.\n    \"\"\"\n    return self.formulate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__exit__","title":"__exit__(self, exc_type, exc_val, exc_tb) special","text":"

Handles exit with the with statement. See PEP343 -- The 'with' statement'.

Source code in deffcode/ffdecoder.py
def __exit__(self, exc_type, exc_val, exc_tb):\n    \"\"\"\n    Handles exit with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n    \"\"\"\n    self.terminate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__init__","title":"__init__(self, source, source_demuxer=None, frame_format=None, custom_ffmpeg='', verbose=False, **ffparams) special","text":"

This constructor method initializes the object state and attributes of the FFdecoder Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None frame_format str

sets pixel format(-pix_fmt) of the decoded frames.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False ffparams dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/ffdecoder.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    frame_format=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **ffparams\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n\n    # enable verbose if specified\n    self.__verbose_logs = (\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # define whether initializing\n    self.__initializing = True\n\n    # define frame pixel-format for decoded frames\n    self.__frame_format = (\n        frame_format.lower().strip() if isinstance(frame_format, str) else None\n    )\n\n    # handles user-defined parameters\n    self.__extra_params = {}\n\n    # handle process to be frames written\n    self.__process = None\n\n    # handle exclusive metadata\n    self.__ff_pixfmt_metadata = None  # metadata\n    self.__raw_frame_num = None  # raw-frame number\n    self.__raw_frame_pixfmt = None  # raw-frame pixformat\n    self.__raw_frame_dtype = None  # raw-frame dtype\n    self.__raw_frame_depth = None  # raw-frame depth\n    self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n    # define supported mode of operation\n    self.__supported_opmodes = {\n        \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n        \"vo\": \"Video-Only\",\n        \"imgseq\": \"Image-Sequence\",\n        # 
\"ao\":\"Audio-Only\", # reserved for future\n    }\n    # operation mode variable\n    self.__opmode = None\n\n    # handle termination\n    self.__terminate_stream = False\n\n    # cleans and reformat user-defined parameters\n    self.__extra_params = {\n        str(k).strip(): str(v).strip()\n        if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n        else v\n        for k, v in ffparams.items()\n    }\n\n    # handle custom Sourcer API params\n    sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n    # reset improper values\n    sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n    # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__extra_params.pop(\"-ffprefixes\", [])\n    # check if not valid type\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n    else:\n        # also pass valid ffmpeg pre-headers to Sourcer API\n        sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n    # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n    # assets on Windows(if specified)\n    sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n        \"-ffmpeg_download_path\", \"\"\n    )\n\n    # handle video and audio stream indexes in case of multiple ones.\n    default_stream_indexes = self.__extra_params.pop(\n        \"-default_stream_indexes\", (0, 0)\n    )\n    # reset improper values\n    default_stream_indexes = (\n        (0, 0)\n        if not isinstance(default_stream_indexes, (list, tuple))\n        else default_stream_indexes\n    )\n\n    # pass 
FFmpeg filter to Sourcer API params for processing\n    if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n        key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n        sourcer_params[key] = self.__extra_params[key]\n\n    # define dict to store user-defined parameters\n    self.__user_metadata = {}\n    # extract and assign source metadata as dict\n    (self.__sourcer_metadata, self.__missing_prop) = (\n        Sourcer(\n            source=source,\n            source_demuxer=source_demuxer,\n            verbose=verbose,\n            custom_ffmpeg=custom_ffmpeg if isinstance(custom_ffmpeg, str) else \"\",\n            **sourcer_params\n        )\n        .probe_stream(default_stream_indexes=default_stream_indexes)\n        .retrieve_metadata(force_retrieve_missing=True)\n    )\n\n    # handle valid FFmpeg assets location\n    self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n    # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n    # patch for compatibility with OpenCV APIs.\n    self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n    if not (isinstance(self.__cv_patch, bool)):\n        self.__cv_patch = False\n        self.__verbose_logs and logger.critical(\n            \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n        )\n\n    # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n    self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n    if not (isinstance(self.__passthrough_mode, bool)):\n        self.__passthrough_mode = False\n\n    # handle mode of operation\n    if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n        # image-sequence mode\n        self.__opmode = \"imgseq\"\n    elif (\n        self.__sourcer_metadata[\n            \"source_has_video\"\n        ]  # audio is only for pass-through, not really for audio decoding yet.\n        and 
self.__sourcer_metadata[\"source_has_audio\"]\n        and self.__passthrough_mode  # [TODO]\n    ):\n        self.__opmode = \"av\"\n    # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n    #    self.__opmode = \"ao\"\n    elif self.__sourcer_metadata[\"source_has_video\"]:\n        # video-only mode\n        self.__opmode = \"vo\"\n    else:\n        # raise if unknown mode\n        raise ValueError(\n            \"Unable to find any usable video stream in the given source!\"\n        )\n    # store as metadata\n    self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n        self.__opmode\n    ]\n\n    # handle user-defined output framerate\n    __framerate = self.__extra_params.pop(\"-framerate\", None)\n    if (\n        isinstance(__framerate, str)\n        and __framerate\n        == \"null\"  # special mode to discard `-framerate/-r` parameter\n    ):\n        self.__inputframerate = __framerate\n    elif isinstance(__framerate, (float, int)):\n        self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n    else:\n        # warn if wrong type\n        not (__framerate is None) and logger.warning(\n            \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                type(__framerate).__name__\n            )\n        )\n        # reset to default\n        self.__inputframerate = 0.0\n\n    # handle user defined decoded frame resolution\n    self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n    if (\n        isinstance(self.__custom_resolution, str)\n        and self.__custom_resolution\n        == \"null\"  # special mode to discard `-size/-s` parameter\n    ) or (\n        isinstance(self.__custom_resolution, (list, tuple))\n        and len(self.__custom_resolution)\n        == 2  # valid resolution(must be a tuple or list)\n    ):\n        # log it\n        self.__verbose_logs and not isinstance(\n            
self.__custom_resolution, str\n        ) and logger.debug(\n            \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n        )\n    else:\n        # log it\n        not (self.__custom_resolution is None) and logger.warning(\n            \"Discarding invalid `-custom_resolution` value: `{}`!\".format(\n                self.__custom_resolution\n            )\n        )\n        # reset improper values\n        self.__custom_resolution = None\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.formulate","title":"formulate(self)","text":"

This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg subprocess pipe.

Returns: A reference to the FFdecoder class object.

Source code in deffcode/ffdecoder.py
def formulate(self):\n\n    \"\"\"\n    This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n    **Returns:** A reference to the FFdecoder class object.\n    \"\"\"\n    # assign values to class variables on first run\n    if self.__initializing:\n        # prepare parameter dict\n        input_params = OrderedDict()\n        output_params = OrderedDict()\n\n        # dynamically pre-assign a default video-decoder (if not assigned by user).\n        supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n        default_vdecodec = (\n            self.__sourcer_metadata[\"source_video_decoder\"]\n            if self.__sourcer_metadata[\"source_video_decoder\"]\n            in supported_vdecodecs\n            else \"unknown\"\n        )\n        if \"-c:v\" in self.__extra_params:\n            self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                \"-c:v\", default_vdecodec\n            )\n        # handle image sequence separately\n        if self.__opmode == \"imgseq\":\n            # -vcodec is discarded by default\n            # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n            self.__extra_params.pop(\"-vcodec\", None)\n        elif (\n            \"-vcodec\" in self.__extra_params\n            and self.__extra_params[\"-vcodec\"] is None\n        ):\n            # special case when -vcodec is not needed intentionally\n            self.__extra_params.pop(\"-vcodec\", None)\n        else:\n            # assign video decoder selected here.\n            if not \"-vcodec\" in self.__extra_params:\n                input_params[\"-vcodec\"] = default_vdecodec\n            else:\n                input_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-vcodec\", default_vdecodec\n                )\n            if (\n                default_vdecodec != \"unknown\"\n                and not input_params[\"-vcodec\"] in 
supported_vdecodecs\n            ):\n                # reset to default if not supported\n                logger.warning(\n                    \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                        input_params[\"-vcodec\"], default_vdecodec\n                    )\n                )\n                input_params[\"-vcodec\"] = default_vdecodec\n            # raise error if not valid decoder found\n            if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                raise RuntimeError(\n                    \"Provided FFmpeg does not support any known usable video-decoders.\"\n                    \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                )\n\n        # handle user-defined number of frames.\n        if \"-vframes\" in self.__extra_params:\n            self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                \"-vframes\", None\n            )\n        if \"-frames:v\" in self.__extra_params:\n            value = self.__extra_params.pop(\"-frames:v\", None)\n            if not (value is None) and value > 0:\n                output_params[\"-frames:v\"] = value\n\n        # dynamically calculate default raw-frames pixel format(if not assigned by user).\n        # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n        if \"-pix_fmt\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n            )\n            self.__extra_params.pop(\"-pix_fmt\", None)\n        # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n        self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n        supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n        # calculate default pixel-format\n        # Check special case  - 
`frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n        self.__frame_format == \"null\" and logger.critical(\n            \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n        )\n        # choose between rgb24(if available) or source pixel-format\n        # otherwise, only source pixel-format for special case\n        default_pixfmt = (\n            \"rgb24\"\n            if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n            else self.__sourcer_metadata[\"source_video_pixfmt\"]\n        )\n        # assign output raw-frames pixel format\n        rawframe_pixfmt = None\n        if (\n            not (self.__frame_format is None)\n            and self.__frame_format in supported_pixfmts\n        ):\n            # check if valid and supported `frame_format` parameter assigned\n            rawframe_pixfmt = self.__frame_format.strip()\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                    rawframe_pixfmt\n                )\n            )\n        elif (\n            \"output_frames_pixfmt\"\n            in self.__sourcer_metadata  # means `format` filter is defined\n            and self.__sourcer_metadata[\"output_frames_pixfmt\"] in supported_pixfmts\n        ):\n            # assign if valid and supported\n            rawframe_pixfmt = self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ].strip()\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n            )\n        else:\n            # reset to default if not supported\n            rawframe_pixfmt = default_pixfmt\n            # log it accordingly\n            if self.__frame_format is None:\n                logger.info(\n                    \"Using default `{}` pixel-format for this 
pipeline.\".format(\n                        default_pixfmt\n                    )\n                )\n            else:\n                logger.warning(\n                    \"{} Switching to default `{}` pixel-format!\".format(\n                        \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                            self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                            if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                            else self.__frame_format\n                        )\n                        if self.__frame_format != \"null\"\n                        else \"No usable pixel-format defined.\",\n                        default_pixfmt,\n                    )\n                )\n\n        # dynamically calculate raw-frame datatype based on pixel-format selected\n        (self.__raw_frame_depth, rawframesbpp) = [\n            (int(x[1]), int(x[2]))\n            for x in self.__ff_pixfmt_metadata\n            if x[0] == rawframe_pixfmt\n        ][0]\n        raw_bit_per_component = (\n            rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n        )\n        if 4 <= raw_bit_per_component <= 8:\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n        elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n            (\"le\", \"be\")\n        ):\n            if rawframe_pixfmt.endswith(\"le\"):\n                self.__raw_frame_dtype = np.dtype(\"<u2\")\n            else:\n                self.__raw_frame_dtype = np.dtype(\">u2\")\n        else:\n            # reset to both pixel-format and datatype to default if not supported\n            not (self.__frame_format is None) and logger.warning(\n                \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. 
Switching to default `rgb24` pixel-format!\".format(\n                    rawframe_pixfmt\n                )\n            )\n            rawframe_pixfmt = \"rgb24\"\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n\n        # Check if not special case\n        if self.__frame_format != \"null\":\n            # assign to FFmpeg pipeline otherwise\n            output_params[\"-pix_fmt\"] = rawframe_pixfmt\n        # assign to global parameter further usage\n        self.__raw_frame_pixfmt = rawframe_pixfmt\n        # also override as metadata(if available)\n        if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n            self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ] = self.__raw_frame_pixfmt\n\n        # handle raw-frame resolution\n        # notify FFmpeg `-s` parameter cannot be assigned directly\n        if \"-s\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n            )\n            self.__extra_params.pop(\"-s\", None)\n        # assign output rawframe resolution\n        if not (self.__custom_resolution is None) and not isinstance(\n            self.__custom_resolution, str\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            self.__raw_frame_resolution = self.__custom_resolution\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                    self.__raw_frame_resolution\n                )\n            )\n        elif (\n            \"output_frames_resolution\"\n            in self.__sourcer_metadata  # means `scale` filter is defined\n            and self.__sourcer_metadata[\"output_frames_resolution\"]\n            and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on output.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"output_frames_resolution\"\n            ]\n        elif (\n            self.__sourcer_metadata[\"source_video_resolution\"]\n            and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on source.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"source_video_resolution\"\n            ]\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n        # special mode to discard `-size/-s` FFmpeg parameter completely\n        if isinstance(self.__custom_resolution, str):\n            logger.critical(\n                \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n            )\n        else:\n            # add to pipeline\n            dimensions = \"{}x{}\".format(\n                self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n            )\n            output_params[\"-s\"] = str(dimensions)\n        # log if filters or default source is used\n        self.__verbose_logs and (\n            self.__custom_resolution is None\n            or isinstance(self.__custom_resolution, str)\n        ) and logger.info(\n            \"{} for this pipeline for defining output resolution.\".format(\n                \"FFmpeg filter values will be used\"\n                if \"output_frames_resolution\" in self.__sourcer_metadata\n                else \"Default source resolution will be used\"\n            )\n        )\n\n        # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n        if (\n            not isinstance(self.__inputframerate, str)\n            and self.__inputframerate > 0.0\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            output_params[\"-framerate\"] = str(self.__inputframerate)\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                    str(self.__inputframerate)\n                )\n            )\n        elif (\n            \"output_framerate\"\n            in self.__sourcer_metadata  # means `fps` filter is defined\n            and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n        
):\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on output\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"output_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n            )\n        elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on source\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"source_video_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"Default source framerate will be used for this pipeline for defining output framerate.\"\n            )\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n\n        # add rest to output parameters\n        output_params.update(self.__extra_params)\n\n        # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n        # TODO Added support for `-re -stream_loop` and `-loop`\n        if \"-frames:v\" in input_params:\n            self.__raw_frame_num = input_params[\"-frames:v\"]\n        elif (\n            not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n            and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n        ):\n            self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n        else:\n            self.__raw_frame_num = None\n            # log that number of frames are unknown\n            self.__verbose_logs and logger.info(\n                \"Number of frames in given source are unknown. Live/Network/Looping stream detected!\"\n            )\n\n        # log Mode of Operation\n        self.__verbose_logs and logger.critical(\n            \"Activating {} Mode of Operation.\".format(\n                self.__supported_opmodes[self.__opmode]\n            )\n        )\n\n        # compose the Pipeline using formulated FFmpeg parameters\n        self.__launch_FFdecoderline(input_params, output_params)\n\n        # inform the initialization is completed\n        self.__initializing = False\n    else:\n        # warn if pipeline is recreated\n        logger.error(\"This pipeline is already created and running!\")\n    return self\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.generateFrame","title":"generateFrame(self)","text":"

This method returns a Generator function (also an Iterator using next()) of video frames, grabbed continuously from the buffer.

Source code in deffcode/ffdecoder.py
def generateFrame(self):\n    \"\"\"\n    This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n    _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n    \"\"\"\n    if self.__raw_frame_num is None or not self.__raw_frame_num:\n        while not self.__terminate_stream:  # infinite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n    else:\n        for _ in range(self.__raw_frame_num):  # finite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.terminate","title":"terminate(self)","text":"

Safely terminates all processes.

Source code in deffcode/ffdecoder.py
def terminate(self):\n    \"\"\"\n    Safely terminates all processes.\n    \"\"\"\n\n    # signal we are closing\n    self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n    self.__terminate_stream = True\n    # check if no process was initiated at first place\n    if self.__process is None or not (self.__process.poll() is None):\n        logger.info(\"Pipeline already terminated.\")\n        return\n    # Attempt to close pipeline.\n    # close `stdin` output\n    self.__process.stdin and self.__process.stdin.close()\n    # close `stdout` output\n    self.__process.stdout and self.__process.stdout.close()\n    # terminate/kill process if still processing\n    if self.__process.poll() is None:\n        # demuxers prefer kill\n        self.__process.kill()\n    # wait if not exiting\n    self.__process.wait()\n    self.__process = None\n    logger.info(\"Pipeline terminated successfully.\")\n
"},{"location":"reference/ffdecoder/params/","title":"FFdecoder API Parameters","text":""},{"location":"reference/ffdecoder/params/#source","title":"source","text":"

This parameter defines the input source (-i) for decoding real-time frames.

FFdecoder API will throw Assertion if source provided is invalid or missing.

FFdecoder API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder('/home/foo.mp4').formulate()\n

    Related usage recipes can be found here \u27b6

  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
    # initialize and formulate the decoder\ndecoder = FFdecoder('img%03d.png').formulate()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img*.png', verbose=True, **sourcer_params).formulate()\n
    # define `-loop 1` for looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img.jpg', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Network Address: Valid (http(s), rtp, rtsp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nffparams = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value (use Sourcer API's enumerate_devices to list them) either as integer or string of integer type to its source parameter. For example, for capturing \"0\" index device on Windows, we can do as follows in FFdecoder API:

    Requirement for Index based Camera Device Capturing in FFdecoder API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".
    Important Facts related to Camera Device Indexing
    • Camera Device indexes are 0-indexed. So the first device is at 0, the second is at 1, and so on. So if there are n devices, the last device is at n-1.
    • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
    • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
      • For example, If there are three devices:
        {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
      • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

        Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

    Out of Index Camera Device index values will raise ValueError in FFdecoder API

    # initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipes can be found here \u27b6

  • Video Capture Device Name/Path: Valid video capture device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for capturing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in FFdecoder API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

      # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

    • Identify Video Devices: Linux systems tend to automatically create a file device node/path when the device (e.g. a USB webcam) is plugged into the system, and it has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device nodes/paths on your Linux machine, you can use the v4l2-ctl command.

      You can use sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

      # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

      You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

      # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

      # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    If these steps don't work for you then reach us out on Gitter \u27b6 Community channel

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipe can be found here \u27b6

  • Screen Capturing/Recording: Valid screen capture device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. You can also specify additional specifications (such as limiting capture area to a region, setting capturing coordinates, whether to capture mouse pointer and clicks etc.). For example, for capturing \"0:\" indexed device with avfoundation source demuxer on MacOS along with mouse pointer and clicks, we can do as follows in FFdecoder API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

    For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

    For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for stream capturing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for stream capturing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

      # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

      # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    If these steps don't work for you then reach us out on Gitter \u27b6 Community channel

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Related usage recipe can be found here \u27b6

  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and decoding Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in FFdecoder API:

    # initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).formulate()\n

    Related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph, and so on.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use ffmpeg -demuxers terminal command to lists all FFmpeg supported demuxers.

Specifying source_demuxer for Index based Camera Device Capturing in FFdecoder API

For enabling Index based Camera Device Capturing in FFdecoder API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\").formulate()\n
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", source_demuxer=\"auto\", frame_format=\"bgr24\").formulate()\n

Related usage recipes can be found here \u27b6

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize and formulate the decoder with `dshow` demuxer\ndecoder = FFdecoder(\"foo.mp4\", source_demuxer=\"dshow\").formulate()\n

"},{"location":"reference/ffdecoder/params/#frame_format","title":"frame_format","text":"

This parameter selects the pixel format for output video frames (such as gray for grayscale output).

Any invalid or unsupported value to frame_format parameter will be discarded!

Any improper frame_format parameter value (i.e. either null(special-case), undefined, or invalid type) , then -pix_fmt FFmpeg parameter value in Decoding pipeline uses output_frames_pixfmt metadata property extracted from Output Stream. Thereby, in case no valid output_frames_pixfmt metadata property is found, then API finally defaults to Default pixel-format1 (calculated variably).

Use frame_format=\"null\" to manually discard -pix_fmt FFmpeg parameter entirely from Decoding pipeline.

This feature allows users to manually skip -pix_fmt FFmpeg parameter in Decoding pipeline, essentially for using only format ffmpeg filter values instead, or even better let FFmpeg itself choose the best available output frame pixel-format for the given source.

Data-Type: String

Default Value: Its default value is Default pixel-format1 (calculated variably).

Usage:

# initialize and formulate the decoder for grayscale frames\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\").formulate()\n

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Various Pixel formats related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then FFdecoder API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path (via. -custom_sourcer_params) exclusive parameter in FFdecoder API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in FFdecoder API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n\n# initialize and formulate the decoder\nFFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nFFdecoder(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").formulate()\n

"},{"location":"reference/ffdecoder/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize and formulate decoder with verbose logs\nFFdecoder(\"foo.mp4\", verbose=True).formulate()\n

"},{"location":"reference/ffdecoder/params/#ffparams","title":"ffparams","text":"

This dictionary parameter accepts all supported parameters formatted as its attributes:

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/ffdecoder/params/#supported-parameters","title":"Supported Parameters","text":""},{"location":"reference/ffdecoder/params/#a-ffmpeg-parameters","title":"A. FFmpeg Parameters","text":"

Almost any FFmpeg parameter (supported by installed FFmpeg) can be passed as dictionary attributes in ffparams parameter.

Let's assume we want to seek to 00:00:01.45 (or 1450msec) in time and decode one single frame from given source (say foo.mp4) in FFdecoder API, then we can assign required FFmpeg parameters as dictionary attributes as follows:

Kindly read FFmpeg Docs carefully before passing any additional values to ffparams parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate decoder with suitable source and FFmpeg params\ndecoder = FFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

"},{"location":"reference/ffdecoder/params/#b-exclusive-parameters","title":"B. Exclusive Parameters","text":"

In addition to FFmpeg parameters, FFdecoder API also supports few Exclusive Parameters to allow users to flexibly change its internal pipeline, properties, and handle some special FFmpeg parameters (such as repeated map) that cannot be assigned via. python dictionary.

These parameters are discussed below:

  • -vcodec (str) : This attribute works similar to -vcodec FFmpeg parameter for specifying supported decoders that are compiled with FFmpeg in use. If not specified, its value is derived from source video metadata. Its usage is as follows:

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

    Use {\"-vcodec\":None} in ffparams to discard -vcodec FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -vcodec FFmpeg parameter in Decoding pipeline, for letting FFmpeg itself choose the best available video decoder for the given source.

    # define suitable parameter\nffparams = {\"-vcodec\": \"h264\"} # set decoder to `h264`\n

  • -framerate (float/int) : This attribute works similar to -framerate FFmpeg parameter for generating video-frames at specified framerate. If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -framerate attribute will be discarded!

    The output_frames_framerate metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -framerate parameter value (i.e. either null(special-case), undefined, or invalid type) , then -framerate/-r FFmpeg parameter value in Decoding pipeline uses output_frames_framerate metadata property extracted from Output Stream. Thereby, in case no valid output_frames_framerate metadata property is found, then API finally defaults to source_video_framerate metadata property extracted from Input Source Stream.

    In case neither output_frames_framerate nor source_video_framerate valid metadata properties are found, then RuntimeError is raised.

    Use {\"-framerate\":\"null\"} in ffparams to discard -framerate/-r FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -framerate/-r FFmpeg parameter in Decoding pipeline, essentially for using only fps filter values, or even better, let FFmpeg itself choose the best available output framerate for the given source.

    # define suitable parameter\nffparams = {\"-framerate\": 60.0} # set input video source framerate to 60fps\n

  • -custom_resolution (tuple/list) : This attribute sets the custom resolution/size of the output frames. Its value can either be a tuple ((width,height)) or a list ([width, height]). If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -custom_resolution attribute will be discarded!

    The output_frames_resolution metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -custom_resolution parameter value (i.e. either null(special-case), undefined, or invalid type) , then -s/-size FFmpeg parameter value in Decoding pipeline uses output_frames_resolution metadata property extracted from Output Stream. Thereby, in case if no valid output_frames_resolution metadata property is found, then API finally defaults to source_video_resolution metadata property extracted from Input Source Stream.

    In case neither output_frames_resolution nor source_video_resolution valid metadata properties are found, then RuntimeError is raised.

    Use {\"-custom_resolution\":\"null\"} in ffparams to discard -size/-s FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -size/-s FFmpeg parameter in Decoding pipeline, essentially for using only scale filter values, or even better, let FFmpeg itself choose the best available output frames resolution for the given source.

    # define suitable parameter\nffparams = {\"-output_dimensions\": (1280,720)} # to produce a 1280x720 resolution/scale output video\n

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before the input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Difference from -clones parameter

    The -clones and -ffprefixes parameters, even though they fundamentally work the same, are meant to serve at different positions in the FFmpeg command. Normally, FFdecoder API pipeline looks something like following with these parameters in place:

    ffmpeg {{-ffprefixes FFmpeg params}} -vcodec h264 -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 {{-clones FFmpeg params}} -f rawvideo -\n

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -clones (list): This attribute sets the special FFmpeg parameters that are repeated more than once or occur in a specific order (that cannot be altered) in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-clones\": ['-map', '0:v:0', '-map', '1:a?']} \n\n# NOTE: Will be format as `ffmpeg -vcodec -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 -map 0:v:0 -map 1:a -f rawvideo -`\n

  • -custom_sourcer_params (dict) : This attribute assigns all Exclusive Parameter meant for Sourcer API's sourcer_params dictionary parameter directly through FFdecoder API. Its usage is as follows:

    # define suitable parameter meant for `sourcer_params`\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n

  • -default_stream_indexes (list/tuple) : This attribute assigns values directly to the default_stream_indexes parameter in Sourcer API's probe_stream() method for selecting a specific video and audio stream index in case of multiple ones. Value can be of format: (int,int) or [int,int] as follows:

    # define suitable parameter meant for `probe_stream()` method\nffparams = {\"-default_stream_indexes\": (0,1)} # (\"0th video stream\", \"1st audio stream\")\n

  • -enforce_cv_patch (bool) : This attribute can be enabled(True) for patching YUV pixel-formats (such as YUV420p, yuv444p, NV12, NV21 etc.) frames to ensure seamless compatibility with OpenCV APIs such as imshow(), write() etc. It can be used as follows:

    As of now, YUV pixel-formats starting with YUV and NV are only supported.

    # define suitable parameter\nffparams = {\"-enforce_cv_patch\": True} # enables OpenCV patch for YUV frames\n

    YUV pixel-formats usage recipe can be found here \u27b6

  • -passthrough_audio (bool/list) : (Yet to be supported)

  1. Default pixel-format is calculated variably in FFdecoder API:

    • If frame_format != \"null\":
      • If frame_format parameter is valid and supported: Default pixel-format is frame_format parameter value.
      • If frame_format parameter is NOT valid or supported:
        • If output_frame_pixfmt metadata is available: Default pixel-format is output_frame_pixfmt metadata value.
        • If output_frame_pixfmt metadata is NOT available: Default pixel-format is rgb24 if supported otherwise source_video_pixfmt metadata value.
    • If frame_format == \"null\": Default pixel-format is source_video_pixfmt metadata value

    \u21a9\u21a9

"},{"location":"reference/sourcer/","title":"Sourcer API","text":"

Sourcer API acts as Source Probing Utility that unlike other FFmpeg Wrappers which mostly uses ffprobe module, attempts to open the given Input Source directly with FFmpeg inside a subprocess pipe, and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each media stream contained in it.

Sourcer API primarily acts as a backend for FFdecoder API for gathering, processing, and validating all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in formulating its default FFmpeg pipeline parameters for real-time video-frames generation.

Sourcer API is designed as a standalone Metadata Extraction API for easily parsing information from multimedia streams available in the given Input Source and returns it in either Human-readable (JSON string) or Machine-readable (Dictionary object) type with its retrieve_metadata() method.

All metadata attributes available with Sourcer API(On Windows) are discussed here \u27b6.

Furthermore, Sourcer's sourcer_params dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

Sourcer API parameters are explained here \u27b6

Source code in deffcode/sourcer.py
class Sourcer:\n    \"\"\"\n    > Sourcer API acts as **Source Probing Utility** that unlike other FFmpeg Wrappers which mostly uses [`ffprobe`](https://ffmpeg.org/ffprobe.html) module,\n    attempts to open the given Input Source directly with [**FFmpeg**](https://ffmpeg.org/) inside a [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe,\n    and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each\n    media stream contained in it.\n\n    Sourcer API primarily acts as a **backend for [FFdecoder API](../../reference/ffdecoder)** for gathering, processing, and validating\n    all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in\n    formulating its default FFmpeg pipeline parameters for real-time video-frames generation.\n\n    Sourcer API is design as a standalone **Metadata Extraction API** for easily parsing information from multimedia streams available in the\n    given Input Source and returns it in either Human-readable _(JSON string)_ or Machine-readable _(Dictionary object)_ type with its\n    [`retrieve_metadata()`](#deffcode.sourcer.Sourcer.retrieve_metadata) method.\n\n    !!! info \"All metadata attributes available with Sourcer API(On :fontawesome-brands-windows: Windows) are discussed [here \u27b6](../../recipes/basic/#display-source-video-metadata).\"\n\n    Furthermore, Sourcer's [`sourcer_params`](params/#sourcer_params) dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! 
info \"Sourcer API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **sourcer_params,\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the Sourcer Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n        # checks if machine in-use is running windows os or not\n        self.__machine_OS = platform.system()\n\n        # define internal parameters\n        self.__verbose_logs = (  # enable verbose if specified\n            verbose if (verbose and isinstance(verbose, bool)) else False\n        )\n\n        # handle metadata received\n        self.__ffsp_output = None\n\n        # sanitize sourcer_params\n        self.__sourcer_params = {\n            str(k).strip(): str(v).strip()\n            if not isinstance(v, (dict, list, int, float, tuple))\n            else v\n            for k, v in sourcer_params.items()\n        }\n\n        # handle whether to force validate source\n        self.__forcevalidatesource = self.__sourcer_params.pop(\n            \"-force_validate_source\", False\n        )\n        if not isinstance(self.__forcevalidatesource, bool):\n            # reset improper values\n            self.__forcevalidatesource = False\n\n        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        self.__ffmpeg_prefixes = 
self.__sourcer_params.pop(\"-ffprefixes\", [])\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n\n        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n        __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n        if not isinstance(__ffmpeg_download_path, str):\n            # reset improper values\n            __ffmpeg_download_path = \"\"\n\n        # validate the FFmpeg assets and return location (also downloads static assets on windows)\n        self.__ffmpeg = get_valid_ffmpeg_path(\n            str(custom_ffmpeg),\n            True if self.__machine_OS == \"Windows\" else False,\n            ffmpeg_download_path=__ffmpeg_download_path,\n            verbose=self.__verbose_logs,\n        )\n\n        # check if valid FFmpeg path returned\n        if self.__ffmpeg:\n            self.__verbose_logs and logger.debug(\n                \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n            )\n        else:\n            # else raise error\n            raise RuntimeError(\n                \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. 
Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n            )\n\n        # sanitize externally accessible parameters and assign them\n        # handles source demuxer\n        if source is None:\n            # first check if source value is empty\n            # raise error if true\n            raise ValueError(\"Input `source` parameter is empty!\")\n        elif isinstance(source_demuxer, str):\n            # assign if valid demuxer value\n            self.__source_demuxer = source_demuxer.strip().lower()\n            # assign if valid demuxer value\n            assert self.__source_demuxer != \"auto\" or validate_device_index(\n                source\n            ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!\".format(\n                source\n            )\n        else:\n            # otherwise find valid default source demuxer value\n            # enforce \"auto\" if valid index device\n            self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n            # log if not valid index device and invalid type\n            self.__verbose_logs and not self.__source_demuxer in [\n                \"auto\",\n                None,\n            ] and logger.warning(\n                \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                    type(source_demuxer).__name__\n                )\n            )\n            # log if not valid index device and invalid type\n            self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n                \"Given source `{}` is a valid device index. 
Enforcing 'auto' demuxer.\".format(\n                    source\n                )\n            )\n\n        # handles source stream\n        self.__source = source\n\n        # creates shallow copy for further usage #TODO\n        self.__source_org = copy.copy(self.__source)\n        self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n        # handles all extracted devices names/paths list\n        # when source_demuxer = \"auto\"\n        self.__extracted_devices_list = []\n\n        # various source stream params\n        self.__default_video_resolution = \"\"  # handles stream resolution\n        self.__default_video_orientation = \"\"  # handles stream's video orientation\n        self.__default_video_framerate = \"\"  # handles stream framerate\n        self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n        self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n        self.__default_video_decoder = \"\"  # handles stream's video decoder\n        self.__default_source_duration = \"\"  # handles stream's video duration\n        self.__approx_video_nframes = \"\"  # handles approx stream frame number\n        self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n        self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n        # handle various stream flags\n        self.__contains_video = False  # contains video\n        self.__contains_audio = False  # contains audio\n        self.__contains_images = False  # contains image-sequence\n\n        # handles output parameters through filters\n        self.__metadata_output = None  # handles output stream metadata\n        self.__output_frames_resolution = \"\"  # handles output stream resolution\n        self.__output_framerate = \"\"  # handles output stream framerate\n        self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n        self.__output_orientation = \"\"  # handles output frame 
orientation\n\n        # check whether metadata probed or not?\n        self.__metadata_probed = False\n\n    def probe_stream(self, default_stream_indexes=(0, 0)):\n        \"\"\"\n        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n        Parameters:\n            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n        **Returns:** Reference to the instance object.\n        \"\"\"\n        assert (\n            isinstance(default_stream_indexes, (list, tuple))\n            and len(default_stream_indexes) == 2\n            and all(isinstance(x, int) for x in default_stream_indexes)\n        ), \"Invalid default_stream_indexes value!\"\n        # validate source and extract metadata\n        self.__ffsp_output = self.__validate_source(\n            self.__source,\n            source_demuxer=self.__source_demuxer,\n            forced_validate=(\n                self.__forcevalidatesource if self.__source_demuxer is None else True\n            ),\n        )\n        # parse resolution and framerate\n        video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0]\n        )\n        if video_rfparams:\n            self.__default_video_resolution = video_rfparams[\"resolution\"]\n            self.__default_video_framerate = video_rfparams[\"framerate\"]\n            self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n        # parse output parameters through filters (if available)\n        if not (self.__metadata_output is None):\n            # parse output resolution and framerate\n            out_video_rfparams = self.__extract_resolution_framerate(\n                default_stream=default_stream_indexes[0], 
extract_output=True\n            )\n            if out_video_rfparams:\n                self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n                self.__output_framerate = out_video_rfparams[\"framerate\"]\n                self.__output_orientation = out_video_rfparams[\"orientation\"]\n            # parse output pixel-format\n            self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n                default_stream=default_stream_indexes[0], extract_output=True\n            )\n\n        # parse pixel-format\n        self.__default_video_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0]\n        )\n\n        # parse video decoder\n        self.__default_video_decoder = self.__extract_video_decoder(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse rest of metadata\n        if not self.__contains_images:\n            # parse video bitrate\n            self.__default_video_bitrate = self.__extract_video_bitrate(\n                default_stream=default_stream_indexes[0]\n            )\n            # parse audio bitrate and samplerate\n            audio_params = self.__extract_audio_bitrate_nd_samplerate(\n                default_stream=default_stream_indexes[1]\n            )\n            if audio_params:\n                self.__default_audio_bitrate = audio_params[\"bitrate\"]\n                self.__default_audio_samplerate = audio_params[\"samplerate\"]\n            # parse video duration\n            self.__default_source_duration = self.__extract_duration()\n            # calculate all flags\n            if (\n                self.__default_video_bitrate\n                or (self.__default_video_framerate and self.__default_video_resolution)\n            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n                self.__contains_video = True\n                self.__contains_audio = True\n            elif 
self.__default_video_bitrate or (\n                self.__default_video_framerate and self.__default_video_resolution\n            ):\n                self.__contains_video = True\n            elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n                self.__contains_audio = True\n            else:\n                raise ValueError(\n                    \"Invalid source with no decodable audio or video stream provided. Aborting!\"\n                )\n        # calculate approximate number of video frame\n        if self.__default_video_framerate and self.__default_source_duration:\n            self.__approx_video_nframes = np.rint(\n                self.__default_video_framerate * self.__default_source_duration\n            ).astype(int, casting=\"unsafe\")\n\n        # signal metadata has been probed\n        self.__metadata_probed = True\n\n        # return reference to the instance object.\n        return self\n\n    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n        \"\"\"\n        This method returns Parsed/Probed Metadata of the given source.\n\n        Parameters:\n            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! 
Check if you called `probe_stream()` method.\"\n        # log it\n        self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n        # create metadata dictionary from information populated in private class variables\n        metadata = {\n            \"ffmpeg_binary_path\": self.__ffmpeg,\n            \"source\": self.__source,\n        }\n        metadata_missing = {}\n        # Only either `source_demuxer` or `source_extension` attribute can be\n        # present in metadata.\n        if self.__source_demuxer is None:\n            metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n        else:\n            metadata.update({\"source_demuxer\": self.__source_demuxer})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n        # add source video metadata properties\n        metadata.update(\n            {\n                \"source_video_resolution\": self.__default_video_resolution,\n                \"source_video_pixfmt\": self.__default_video_pixfmt,\n                \"source_video_framerate\": self.__default_video_framerate,\n                \"source_video_orientation\": self.__default_video_orientation,\n                \"source_video_decoder\": self.__default_video_decoder,\n                \"source_duration_sec\": self.__default_source_duration,\n                \"approx_video_nframes\": (\n                    int(self.__approx_video_nframes)\n                    if self.__approx_video_nframes\n                    and not any(\n                        \"loop\" in x for x in self.__ffmpeg_prefixes\n                    )  # check if any loops in prefix\n                    and not any(\n                        \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                    )  # check if any loops in filters\n       
             else None\n                ),\n                \"source_video_bitrate\": self.__default_video_bitrate,\n                \"source_audio_bitrate\": self.__default_audio_bitrate,\n                \"source_audio_samplerate\": self.__default_audio_samplerate,\n                \"source_has_video\": self.__contains_video,\n                \"source_has_audio\": self.__contains_audio,\n                \"source_has_image_sequence\": self.__contains_images,\n            }\n        )\n        # add output metadata properties (if available)\n        if not (self.__metadata_output is None):\n            metadata.update(\n                {\n                    \"output_frames_resolution\": self.__output_frames_resolution,\n                    \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                    \"output_framerate\": self.__output_framerate,\n                    \"output_orientation\": self.__output_orientation,\n                }\n            )\n        else:\n            # since output stream metadata properties are only available when additional\n            # FFmpeg parameters(such as filters) are defined manually, thereby missing\n            # output stream properties are handled by assigning them counterpart source\n            # stream metadata property values\n            force_retrieve_missing and metadata_missing.update(\n                {\n                    \"output_frames_resolution\": self.__default_video_resolution,\n                    \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                    \"output_framerate\": self.__default_video_framerate,\n                    \"output_orientation\": self.__default_video_orientation,\n                }\n            )\n        # log it\n        self.__verbose_logs and logger.debug(\n            \"Metadata Extraction completed successfully!\"\n        )\n        # parse as JSON string(`json.dumps`), if defined\n        metadata = json.dumps(metadata, indent=2) if 
pretty_json else metadata\n        metadata_missing = (\n            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n        )\n        # return `metadata` or `(metadata, metadata_missing)`\n        return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n\n    @property\n    def enumerate_devices(self):\n        \"\"\"\n        A property object that enumerate all probed Camera Devices connected to your system names\n        along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.\n\n        **Returns:** Probed Camera Devices as python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n\n        # log if specified\n        self.__verbose_logs and logger.debug(\"Enumerating all probed Camera Devices.\")\n\n        # return probed Camera Devices as python dictionary.\n        return {\n            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)\n        }\n\n    def __validate_source(self, source, source_demuxer=None, forced_validate=False):\n        \"\"\"\n        This Internal method validates source and extracts its metadata.\n\n        Parameters:\n            source_demuxer(str): specifies the demuxer(`-f`) for the input source.\n            forced_validate (bool): whether to skip validation tests or not?\n\n        **Returns:** `True` if passed tests else `False`.\n        \"\"\"\n        # validate source demuxer(if defined)\n        if not (source_demuxer is None):\n            # check if \"auto\" demuxer is specified\n            if source_demuxer == \"auto\":\n                # integerise source to get index\n                index = int(source)\n                # extract devices list and actual demuxer value\n                (\n                    
self.__extracted_devices_list,\n                    source_demuxer,\n                ) = extract_device_n_demuxer(\n                    self.__ffmpeg,\n                    machine_OS=self.__machine_OS,\n                    verbose=self.__verbose_logs,\n                )\n                # valid indexes range\n                valid_indexes = [\n                    x\n                    for x in range(\n                        -len(self.__extracted_devices_list),\n                        len(self.__extracted_devices_list),\n                    )\n                ]\n                # check index is within valid range\n                if self.__extracted_devices_list and index in valid_indexes:\n                    # overwrite actual source device name/path/index\n                    if self.__machine_OS == \"Windows\":\n                        # Windows OS requires \"video=\" suffix\n                        self.__source = source = \"video={}\".format(\n                            self.__extracted_devices_list[index]\n                        )\n                    elif self.__machine_OS == \"Darwin\":\n                        # Darwin OS requires only device indexes\n                        self.__source = source = (\n                            str(index)\n                            if index >= 0\n                            else str(len(self.__extracted_devices_list) + index)\n                        )\n                    else:\n                        # Linux OS require /dev/video format\n                        self.__source = source = next(\n                            iter(self.__extracted_devices_list[index].keys())\n                        )\n                    # overwrite source_demuxer global variable\n                    self.__source_demuxer = source_demuxer\n                    self.__verbose_logs and logger.debug(\n                        \"Successfully configured device `{}` at index `{}` with demuxer `{}`.\".format(\n                            
self.__extracted_devices_list[index]\n                            if self.__machine_OS != \"Linux\"\n                            else next(\n                                iter(self.__extracted_devices_list[index].values())\n                            )[0],\n                            index\n                            if index >= 0\n                            else len(self.__extracted_devices_list) + index,\n                            self.__source_demuxer,\n                        )\n                    )\n                else:\n                    # raise error otherwise\n                    raise ValueError(\n                        \"Given source `{}` is not a valid device index. Possible values index values can be: {}\".format(\n                            source,\n                            \",\".join(f\"{x}\" for x in valid_indexes),\n                        )\n                    )\n            # otherwise validate against supported demuxers\n            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):\n                # raise if fails\n                raise ValueError(\n                    \"Installed FFmpeg failed to recognize `{}` demuxer. 
Check `source_demuxer` parameter value again!\".format(\n                        source_demuxer\n                    )\n                )\n            else:\n                pass\n\n        # assert if valid source\n        assert source and isinstance(\n            source, str\n        ), \"Input `source` parameter is of invalid type!\"\n\n        # Differentiate input\n        if forced_validate:\n            source_demuxer is None and logger.critical(\n                \"Forcefully passing validation test for given source!\"\n            )\n            self.__source = source\n        elif os.path.isfile(source):\n            self.__source = os.path.abspath(source)\n        elif is_valid_image_seq(\n            self.__ffmpeg, source=source, verbose=self.__verbose_logs\n        ):\n            self.__source = source\n            self.__contains_images = True\n        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):\n            self.__source = source\n        else:\n            logger.error(\"`source` value is unusable or unsupported!\")\n            # discard the value otherwise\n            raise ValueError(\"Input source is invalid. 
Aborting!\")\n        # format command\n        if self.__sourcer_params:\n            # handle additional params separately\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + [\"-t\", \"0.0001\"]\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n                + dict2Args(self.__sourcer_params)\n                + [\"-f\", \"null\", \"-\"]\n            )\n        else:\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n            )\n        # extract metadata, decode, and filter\n        metadata = (\n            check_sp_output(\n                meta_cmd,\n                force_retrieve_stderr=True,\n            )\n            .decode(\"utf-8\")\n            .strip()\n        )\n        # separate input and output metadata (if available)\n        if \"Output #\" in metadata:\n            (metadata, self.__metadata_output) = metadata.split(\"Output #\")\n        # return metadata based on params\n        return metadata\n\n    def __extract_video_bitrate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream bitrate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video bitrate as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        video_bitrate_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if video_bitrate_text:\n            
selected_stream = video_bitrate_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(video_bitrate_text)\n                else 0\n            ]\n            filtered_bitrate = re.findall(\n                r\",\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            if len(filtered_bitrate):\n                default_video_bitrate = filtered_bitrate[0].split(\" \")[1:3]\n                final_bitrate = \"{}{}\".format(\n                    int(default_video_bitrate[0].strip()),\n                    \"k\" if (default_video_bitrate[1].strip().startswith(\"k\")) else \"M\",\n                )\n                return final_bitrate\n        return \"\"\n\n    def __extract_video_decoder(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream decoder from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video decoder as string value.\n        \"\"\"\n        assert isinstance(default_stream, int), \"Invalid input!\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            ]\n            filtered_pixfmt = re.findall(\n                r\"Video:\\s[a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):\n        \"\"\"\n        This Internal method parses default video-stream pixel-format from 
metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video pixel-format as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            ]\n            filtered_pixfmt = re.findall(\n                r\",\\s[a-z][a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default audio-stream bitrate and sample-rate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n\n        **Returns:** Default Audio-stream bitrate and sample-rate as string value.\n        \"\"\"\n        identifiers = [\"Audio:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            
]\n            # filter data\n            filtered_audio_bitrate = re.findall(\n                r\"fltp,\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            filtered_audio_samplerate = re.findall(\n                r\",\\s[0-9]+\\sHz\", selected_stream.strip()\n            )\n            # get audio bitrate metadata\n            if filtered_audio_bitrate:\n                filtered = filtered_audio_bitrate[0].split(\" \")[1:3]\n                result[\"bitrate\"] = \"{}{}\".format(\n                    int(filtered[0].strip()),\n                    \"k\" if (filtered[1].strip().startswith(\"k\")) else \"M\",\n                )\n            else:\n                result[\"bitrate\"] = \"\"\n            # get audio samplerate metadata\n            result[\"samplerate\"] = (\n                filtered_audio_samplerate[0].split(\", \")[1]\n                if filtered_audio_samplerate\n                else \"\"\n            )\n        return result if result and (len(result) == 2) else {}\n\n    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):\n        \"\"\"\n        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?\n\n        **Returns:** Default Video resolution and framerate as dictionary value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        # use output metadata if available\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in 
self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        # extract video orientation metadata if available\n        identifiers_orientation = [\"displaymatrix:\", \"rotation\"]\n        meta_text_orientation = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n        )\n        # use metadata if available\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n                default_stream\n                if default_stream > 0 and default_stream < len(meta_text)\n                else 0\n            ]\n\n            # filter data\n            filtered_resolution = re.findall(\n                r\"([1-9]\\d+)x([1-9]\\d+)\", selected_stream.strip()\n            )\n            filtered_framerate = re.findall(\n                r\"\\d+(?:\\.\\d+)?\\sfps\", selected_stream.strip()\n            )\n            filtered_tbr = re.findall(r\"\\d+(?:\\.\\d+)?\\stbr\", selected_stream.strip())\n\n            # extract framerate metadata\n            if filtered_framerate:\n                # calculate actual framerate\n                result[\"framerate\"] = float(\n                    re.findall(r\"[\\d\\.\\d]+\", filtered_framerate[0])[0]\n                )\n            elif filtered_tbr:\n                # guess from TBR(if fps unavailable)\n                result[\"framerate\"] = float(\n                    re.findall(r\"[\\d\\.\\d]+\", filtered_tbr[0])[0]\n                )\n\n            # extract resolution metadata\n            if filtered_resolution:\n                result[\"resolution\"] = 
[int(x) for x in filtered_resolution[0]]\n\n            # extract video orientation metadata\n            if meta_text_orientation:\n                selected_stream = meta_text_orientation[\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                ]\n                filtered_orientation = re.findall(\n                    r\"[-]?\\d+\\.\\d+\", selected_stream.strip()\n                )\n                result[\"orientation\"] = float(filtered_orientation[0])\n            else:\n                result[\"orientation\"] = 0.0\n\n        return result if result and (len(result) == 3) else {}\n\n    def __extract_duration(self, inseconds=True):\n        \"\"\"\n        This Internal method parses stream duration from metadata.\n\n        Parameters:\n            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?\n\n        **Returns:** Default Stream duration as string value.\n        \"\"\"\n        identifiers = [\"Duration:\"]\n        stripped_data = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if stripped_data:\n            t_duration = re.findall(\n                r\"(?:[01]\\d|2[0123]):(?:[012345]\\d):(?:[012345]\\d+(?:\\.\\d+)?)\",\n                stripped_data[0],\n            )\n            if t_duration:\n                return (\n                    sum(\n                        float(x) * 60**i\n                        for i, x in enumerate(reversed(t_duration[0].split(\":\")))\n                    )\n                    if inseconds\n                    else t_duration\n                )\n        return 0\n

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.enumerate_devices","title":"enumerate_devices property readonly","text":"

A property object that enumerate all probed Camera Devices connected to your system names along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.

Returns: Probed Camera Devices as python dictionary.

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.__init__","title":"__init__(self, source, source_demuxer=None, custom_ffmpeg='', verbose=False, **sourcer_params) special","text":"

This constructor method initializes the object state and attributes of the Sourcer Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False sourcer_params dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/sourcer.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **sourcer_params,\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the Sourcer Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n    # checks if machine in-use is running windows os or not\n    self.__machine_OS = platform.system()\n\n    # define internal parameters\n    self.__verbose_logs = (  # enable verbose if specified\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # handle metadata received\n    self.__ffsp_output = None\n\n    # sanitize sourcer_params\n    self.__sourcer_params = {\n        str(k).strip(): str(v).strip()\n        if not isinstance(v, (dict, list, int, float, tuple))\n        else v\n        for k, v in sourcer_params.items()\n    }\n\n    # handle whether to force validate source\n    self.__forcevalidatesource = self.__sourcer_params.pop(\n        \"-force_validate_source\", False\n    )\n    if not isinstance(self.__forcevalidatesource, bool):\n        # reset improper values\n        self.__forcevalidatesource = False\n\n    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__sourcer_params.pop(\"-ffprefixes\", [])\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                
type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n\n    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n    __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n    if not isinstance(__ffmpeg_download_path, str):\n        # reset improper values\n        __ffmpeg_download_path = \"\"\n\n    # validate the FFmpeg assets and return location (also downloads static assets on windows)\n    self.__ffmpeg = get_valid_ffmpeg_path(\n        str(custom_ffmpeg),\n        True if self.__machine_OS == \"Windows\" else False,\n        ffmpeg_download_path=__ffmpeg_download_path,\n        verbose=self.__verbose_logs,\n    )\n\n    # check if valid FFmpeg path returned\n    if self.__ffmpeg:\n        self.__verbose_logs and logger.debug(\n            \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n        )\n    else:\n        # else raise error\n        raise RuntimeError(\n            \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n        )\n\n    # sanitize externally accessible parameters and assign them\n    # handles source demuxer\n    if source is None:\n        # first check if source value is empty\n        # raise error if true\n        raise ValueError(\"Input `source` parameter is empty!\")\n    elif isinstance(source_demuxer, str):\n        # assign if valid demuxer value\n        self.__source_demuxer = source_demuxer.strip().lower()\n        # assign if valid demuxer value\n        assert self.__source_demuxer != \"auto\" or validate_device_index(\n            source\n        ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. 
Aborting!\".format(\n            source\n        )\n    else:\n        # otherwise find valid default source demuxer value\n        # enforce \"auto\" if valid index device\n        self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n        # log if not valid index device and invalid type\n        self.__verbose_logs and not self.__source_demuxer in [\n            \"auto\",\n            None,\n        ] and logger.warning(\n            \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                type(source_demuxer).__name__\n            )\n        )\n        # log if not valid index device and invalid type\n        self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n            \"Given source `{}` is a valid device index. Enforcing 'auto' demuxer.\".format(\n                source\n            )\n        )\n\n    # handles source stream\n    self.__source = source\n\n    # creates shallow copy for further usage #TODO\n    self.__source_org = copy.copy(self.__source)\n    self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n    # handles all extracted devices names/paths list\n    # when source_demuxer = \"auto\"\n    self.__extracted_devices_list = []\n\n    # various source stream params\n    self.__default_video_resolution = \"\"  # handles stream resolution\n    self.__default_video_orientation = \"\"  # handles stream's video orientation\n    self.__default_video_framerate = \"\"  # handles stream framerate\n    self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n    self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n    self.__default_video_decoder = \"\"  # handles stream's video decoder\n    self.__default_source_duration = \"\"  # handles stream's video duration\n    self.__approx_video_nframes = \"\"  # handles approx stream frame number\n    self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n    
self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n    # handle various stream flags\n    self.__contains_video = False  # contains video\n    self.__contains_audio = False  # contains audio\n    self.__contains_images = False  # contains image-sequence\n\n    # handles output parameters through filters\n    self.__metadata_output = None  # handles output stream metadata\n    self.__output_frames_resolution = \"\"  # handles output stream resolution\n    self.__output_framerate = \"\"  # handles output stream framerate\n    self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n    self.__output_orientation = \"\"  # handles output frame orientation\n\n    # check whether metadata probed or not?\n    self.__metadata_probed = False\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.probe_stream","title":"probe_stream(self, default_stream_indexes=(0, 0))","text":"

This method Parses/Probes FFmpeg subprocess pipe's Standard Output for given input source and Populates the information in private class variables.

Parameters:

Name Type Description Default default_stream_indexes list, tuple

selects specific video and audio stream index in case of multiple ones. Value can be of format: (int,int). For example (0,1) is (\"0th video stream\", \"1st audio stream\").

(0, 0)

Returns: Reference to the instance object.

Source code in deffcode/sourcer.py
def probe_stream(self, default_stream_indexes=(0, 0)):\n    \"\"\"\n    This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n    Parameters:\n        default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n    **Returns:** Reference to the instance object.\n    \"\"\"\n    assert (\n        isinstance(default_stream_indexes, (list, tuple))\n        and len(default_stream_indexes) == 2\n        and all(isinstance(x, int) for x in default_stream_indexes)\n    ), \"Invalid default_stream_indexes value!\"\n    # validate source and extract metadata\n    self.__ffsp_output = self.__validate_source(\n        self.__source,\n        source_demuxer=self.__source_demuxer,\n        forced_validate=(\n            self.__forcevalidatesource if self.__source_demuxer is None else True\n        ),\n    )\n    # parse resolution and framerate\n    video_rfparams = self.__extract_resolution_framerate(\n        default_stream=default_stream_indexes[0]\n    )\n    if video_rfparams:\n        self.__default_video_resolution = video_rfparams[\"resolution\"]\n        self.__default_video_framerate = video_rfparams[\"framerate\"]\n        self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n    # parse output parameters through filters (if available)\n    if not (self.__metadata_output is None):\n        # parse output resolution and framerate\n        out_video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n        if out_video_rfparams:\n            self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n            self.__output_framerate = out_video_rfparams[\"framerate\"]\n            
self.__output_orientation = out_video_rfparams[\"orientation\"]\n        # parse output pixel-format\n        self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n\n    # parse pixel-format\n    self.__default_video_pixfmt = self.__extract_video_pixfmt(\n        default_stream=default_stream_indexes[0]\n    )\n\n    # parse video decoder\n    self.__default_video_decoder = self.__extract_video_decoder(\n        default_stream=default_stream_indexes[0]\n    )\n    # parse rest of metadata\n    if not self.__contains_images:\n        # parse video bitrate\n        self.__default_video_bitrate = self.__extract_video_bitrate(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse audio bitrate and samplerate\n        audio_params = self.__extract_audio_bitrate_nd_samplerate(\n            default_stream=default_stream_indexes[1]\n        )\n        if audio_params:\n            self.__default_audio_bitrate = audio_params[\"bitrate\"]\n            self.__default_audio_samplerate = audio_params[\"samplerate\"]\n        # parse video duration\n        self.__default_source_duration = self.__extract_duration()\n        # calculate all flags\n        if (\n            self.__default_video_bitrate\n            or (self.__default_video_framerate and self.__default_video_resolution)\n        ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n            self.__contains_video = True\n            self.__contains_audio = True\n        elif self.__default_video_bitrate or (\n            self.__default_video_framerate and self.__default_video_resolution\n        ):\n            self.__contains_video = True\n        elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n            self.__contains_audio = True\n        else:\n            raise ValueError(\n                \"Invalid source with no decodable audio or video stream 
provided. Aborting!\"\n            )\n    # calculate approximate number of video frame\n    if self.__default_video_framerate and self.__default_source_duration:\n        self.__approx_video_nframes = np.rint(\n            self.__default_video_framerate * self.__default_source_duration\n        ).astype(int, casting=\"unsafe\")\n\n    # signal metadata has been probed\n    self.__metadata_probed = True\n\n    # return reference to the instance object.\n    return self\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.retrieve_metadata","title":"retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False)","text":"

This method returns Parsed/Probed Metadata of the given source.

Parameters:

Name Type Description Default pretty_json bool

whether to return metadata as JSON string(if True) or Dictionary(if False) type?

False force_retrieve_output bool

whether to also return metadata missing in current Pipeline. This method returns (metadata, metadata_missing) tuple if force_retrieve_output=True instead of metadata.

required

Returns: metadata or (metadata, metadata_missing), formatted as JSON string or python dictionary.

Source code in deffcode/sourcer.py
def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n    \"\"\"\n    This method returns Parsed/Probed Metadata of the given source.\n\n    Parameters:\n        pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n        force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n    **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n    \"\"\"\n    # check if metadata has been probed or not\n    assert (\n        self.__metadata_probed\n    ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n    # log it\n    self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n    # create metadata dictionary from information populated in private class variables\n    metadata = {\n        \"ffmpeg_binary_path\": self.__ffmpeg,\n        \"source\": self.__source,\n    }\n    metadata_missing = {}\n    # Only either `source_demuxer` or `source_extension` attribute can be\n    # present in metadata.\n    if self.__source_demuxer is None:\n        metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n    else:\n        metadata.update({\"source_demuxer\": self.__source_demuxer})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n    # add source video metadata properties\n    metadata.update(\n        {\n            \"source_video_resolution\": self.__default_video_resolution,\n            \"source_video_pixfmt\": self.__default_video_pixfmt,\n            \"source_video_framerate\": self.__default_video_framerate,\n            \"source_video_orientation\": 
self.__default_video_orientation,\n            \"source_video_decoder\": self.__default_video_decoder,\n            \"source_duration_sec\": self.__default_source_duration,\n            \"approx_video_nframes\": (\n                int(self.__approx_video_nframes)\n                if self.__approx_video_nframes\n                and not any(\n                    \"loop\" in x for x in self.__ffmpeg_prefixes\n                )  # check if any loops in prefix\n                and not any(\n                    \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                )  # check if any loops in filters\n                else None\n            ),\n            \"source_video_bitrate\": self.__default_video_bitrate,\n            \"source_audio_bitrate\": self.__default_audio_bitrate,\n            \"source_audio_samplerate\": self.__default_audio_samplerate,\n            \"source_has_video\": self.__contains_video,\n            \"source_has_audio\": self.__contains_audio,\n            \"source_has_image_sequence\": self.__contains_images,\n        }\n    )\n    # add output metadata properties (if available)\n    if not (self.__metadata_output is None):\n        metadata.update(\n            {\n                \"output_frames_resolution\": self.__output_frames_resolution,\n                \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                \"output_framerate\": self.__output_framerate,\n                \"output_orientation\": self.__output_orientation,\n            }\n        )\n    else:\n        # since output stream metadata properties are only available when additional\n        # FFmpeg parameters(such as filters) are defined manually, thereby missing\n        # output stream properties are handled by assigning them counterpart source\n        # stream metadata property values\n        force_retrieve_missing and metadata_missing.update(\n            {\n                \"output_frames_resolution\": self.__default_video_resolution,\n       
         \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                \"output_framerate\": self.__default_video_framerate,\n                \"output_orientation\": self.__default_video_orientation,\n            }\n        )\n    # log it\n    self.__verbose_logs and logger.debug(\n        \"Metadata Extraction completed successfully!\"\n    )\n    # parse as JSON string(`json.dumps`), if defined\n    metadata = json.dumps(metadata, indent=2) if pretty_json else metadata\n    metadata_missing = (\n        json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n    )\n    # return `metadata` or `(metadata, metadata_missing)`\n    return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n
"},{"location":"reference/sourcer/params/","title":"Sourcer API Parameters","text":""},{"location":"reference/sourcer/params/#source","title":"source","text":"

This parameter defines the input source (-i) for probing.

Sourcer API will throw AssertionError if source provided is invalid or missing.

Sourcer API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize the sourcer and probe it\nsourcer = Sourcer('/home/foo.mp4').probe_stream()\n
  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nsourcer_params = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize the sourcer with define parameters\nsourcer = Sourcer('img%03d.png', verbose=True, **sourcer_params).probe_stream()\n
    # initialize the sourcer and probe it\nsourcer = Sourcer('img%03d.png', verbose=True).probe_stream()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img*.png', verbose=True, **sourcer_params).probe_stream()\n
    # define `-loop 1` for looping\nsourcer_params = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img.jpg', verbose=True, **sourcer_params).probe_stream()\n
  • Network Address: Valid (http(s), rtp, rstp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nsourcer_params = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **sourcer_params).probe_stream()\n
  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. For example, for using \"0\" index device as source on Windows, we can do as follows in Sourcer API:

    Requirement for using Camera Device as source in Sourcer API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

    # initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", verbose=True).probe_stream()\n
  • Video Capture Devices: Valid video probe device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in Sourcer API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in Sourcer API as follows:

      # initialize the sourcer with \"USB2.0 Camera\" source and probe it\nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nsourcer_params = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize the sourcer with \"Camera\" source and probe it\nsourcer = Sourcer(\"Camera\", source_demuxer=\"dshow\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all video capture devices such as from an USB webcam. You can refer following steps to identify and specify your probe video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is a index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l-ctl command.

      You can use sudo apt install v4l-utils APT command to install v4l-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in Sourcer API as follows:

      # initialize the sourcer with \"/dev/video0\" source and probe it\nsourcer = Sourcer(\"/dev/video0\", source_demuxer=\"v4l2\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in Sourcer API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize the sourcer with `1` index source and probe it\nsourcer = Sourcer(\"1\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to probe from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize the sourcer with \"Integrated iSight-camera\" source \nsourcer = Sourcer(\"Integrated\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps doesn't work for you then reach us out on Gitter \u27b6 Community channel

    # initialize the sourcer with \"USB2.0 Camera\" source \nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
  • Screen Capturing/Recording: Valid screen probe device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"0:\" indexed device with avfoundation source demuxer on MacOS, we can do as follows in Sourcer API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for probing:

    For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    # define framerate\nsourcer_params = {\"-framerate\": \"30\"}\n\n# initialize the sourcer with \"desktop\" source and probe it\nsourcer = Sourcer(\"desktop\", source_demuxer=\"gdigrab\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the x11grab to probe an X11 display. You can refer following steps to specify source for probing:

    # initialize the sourcer with \":0.0\" desktop source and probe it\nsourcer = Sourcer(\":0.0\", source_demuxer=\"x11grab\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index in Sourcer API:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    You can enumerate all the available input devices including screens ready to be probed using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n

    Then, you can specify and initialize your located screens in Sourcer API using its index shown:

    # initialize the sourcer with `0:` index desktop screen and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

    # initialize the sourcer with \"0:\" source and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n
  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and probing Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in Sourcer API:

    # initialize the sourcer with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate and probe it\nsourcer = Sourcer(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).probe_stream()\n

"},{"location":"reference/sourcer/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, as well as lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use ffmpeg -demuxers terminal command to list all FFmpeg supported demuxers.

Specifying source_demuxer for using Camera Device Index as source in Sourcer API

For using Camera Device Index as source in Sourcer API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\").probe_stream()\n
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", source_demuxer=\"auto\").probe_stream()\n

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize the sourcer with `dshow` demuxer and probe it\nsourcer = Sourcer(\"foo.mp4\", source_demuxer=\"dshow\").probe_stream()\n

"},{"location":"reference/sourcer/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then Sourcer API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path exclusive parameter in Sourcer API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in Sourcer API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nsourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}\n\n# initialize the sourcer\nSourcer(\"foo.mp4\", verbose=True, **sourcer_params).probe_stream()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nSourcer(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").probe_stream()\n

"},{"location":"reference/sourcer/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize the sourcer with verbose logs\nSourcer(\"foo.mp4\", verbose=True).probe_stream()\n

"},{"location":"reference/sourcer/params/#sourcer_params","title":"sourcer_params","text":"

This dictionary parameter accepts all Exclusive Parameters formatted as its attributes:

Additional FFmpeg parameters

In addition to Exclusive Parameters, Sourcer API supports almost any FFmpeg parameter (supported by installed FFmpeg), and thereby can be passed as dictionary attributes in sourcer_params parameter.

Kindly read FFmpeg Docs carefully before passing any additional values to sourcer_params parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/sourcer/params/#exclusive-parameters","title":"Exclusive Parameters","text":"

Sourcer API supports few Exclusive Parameters to allow users to flexibly change its probing properties and handle some special FFmpeg parameters.

These parameters are discussed below:

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occurs at the very beginning (such as -re) before input (-i) source. The FFmpeg parameters defined with this attribute can repeated more than once and maintains its original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in Sourcer's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nsourcer_params = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -ffmpeg_download_path (string): sets the custom directory for downloading FFmpeg Static Binaries in Compression Mode, during the Auto-Installation on Windows Machines Only. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows:

    sourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"} # will be saved to \"C:/User/foo/foo1\"\n

  • -force_validate_source (bool): forcefully passes validation test for given source which is required for some special cases with unusual input. It can be used as follows:

    sourcer_params = {\"-force_validate_source\": True} # will pass validation test forcefully\n

"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

A cross-platform High-performance Video Frames Decoder that flexibly executes FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in just a few lines of python code

Highly Adaptive - DeFFcode APIs implement a standalone highly-extensible wrapper around the FFmpeg multimedia framework. These APIs support a wide range of media streams as input sources such as live USB/Virtual/IP camera feeds, regular multimedia files, screen recordings, image sequences, network protocols (such as HTTP(s), RTP/RTSP, etc.), so on and so forth.

Highly Flexible - DeFFcode APIs gain an edge over other Wrappers by providing complete control over the underlying pipeline including access to almost any FFmpeg specification thinkable such as specifying framerate, resolution, hardware decoder(s), filtergraph(s), and pixel-format(s) that are readily supported by all well known Computer Vision libraries.

Highly Convenient - FFmpeg has a steep learning curve especially for users unfamiliar with a command line interface. DeFFcode helps users by providing similar to OpenCV, Index based Camera Device Capturing and the same OpenCV-Python (Python API for OpenCV) coding syntax for its APIs, thereby making it even easier to learn, create, and develop FFmpeg based apps in Python.

"},{"location":"#key-features-of-deffcode","title":"Key features of DeFFcode","text":"

Here are some key features that stand out:

  • High-performance, low-overhead video frames decoding with robust error-handling.
  • Flexible API with access to almost any FFmpeg specification thinkable.
  • Supports a wide-range of media streams/devices/protocols as input source.
  • Curated list of well-documented recipes ranging from Basic to Advanced skill levels.
  • Hands down the easiest Index based Camera Device Capturing, similar to OpenCV.
  • Memory efficient Live Simple & Complex Filtergraphs. (Yes, You read it correctly \"Live\"!)
  • Lightning fast dedicated GPU-Accelerated Video Decoding & Transcoding.
  • Enables precise FFmpeg Frame Seeking with pinpoint accuracy.
  • Effortless Metadata Extraction from all streams available in the source.
  • Maintains the standard easy to learn OpenCV-Python coding syntax.
  • Out-of-the-box support for all prominent Computer Vision libraries.
  • Cross-platform, runs on Python 3.7+, and easy to install.
Still missing a key feature in DeFFcode?

Please review DeFFcode's Roadmap. If you still can't find the desired feature there, then you can request one simply by Commenting or Upvoting an existing comment on that issue.

"},{"location":"#getting-started","title":"Getting Started","text":"

In case you run into any problems, consult our Help section.

"},{"location":"#installation-notes","title":"Installation Notes","text":"

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode on your machine.

"},{"location":"#recipes-aka-examples","title":"Recipes a.k.a Examples","text":"

Once you have DeFFcode installed, checkout our Well-Documented Recipes for usage examples:

How to Begin?

If you\u2019re just starting, check out the Beginner Basic Recipes and as your confidence grows, move up to Advanced Recipes .

  • Basic Recipes : Recipes for beginners of any skill level to get started.
  • Advanced Recipes : Recipes to take your skills to the next level.
"},{"location":"#api-in-a-nutshell","title":"API in a nutshell","text":"

As a user, you just have to remember only two DeFFcode APIs, namely:

See API Reference for more in-depth information.

"},{"location":"#a-ffdecoder-api","title":"A. FFdecoder API","text":"

The primary function of FFdecoder API is to decode 24-bit RGB video frames from the given source:

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# formulate the decoder with suitable source\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").formulate()\n\n# grab RGB24(default) 3D frames from decoder\nfor frame in decoder.generateFrame():\n\n    # lets print its shape\n    print(frame.shape) # (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n
"},{"location":"#b-sourcer-api","title":"B. Sourcer API","text":"

The primary function of Sourcer API is to gather information from all multimedia streams available in the given source:

# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
The resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 60.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 10.0,\n  \"approx_video_nframes\": 600,\n  \"source_video_bitrate\": \"832k\",\n  \"source_audio_bitrate\": \"\",\n  \"source_audio_samplerate\": \"\",\n  \"source_has_video\": true,\n  \"source_has_audio\": false,\n  \"source_has_image_sequence\": false\n}\n

"},{"location":"#contribution-guidelines","title":"Contribution Guidelines","text":"

Contributions are welcome, and greatly appreciated!

Please read our Contribution Guidelines for more details.

"},{"location":"#community-channel","title":"Community Channel","text":"

If you've come up with some new idea, or are looking for the fastest way to troubleshoot your problems, please check out our Gitter community channel \u27b6

"},{"location":"#become-a-stargazer","title":"Become a Stargazer","text":"

You can be a Stargazer by starring us on Github, it helps us a lot and you're making it easier for others to find & trust this library. Thanks!

"},{"location":"#donations","title":"Donations","text":"

DeFFcode is free and open source and will always remain so.

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

"},{"location":"#citation","title":"Citation","text":"

Here is a Bibtex entry you can use to cite this project in a publication:

@software{deffcode,\n  author       = {Abhishek Singh Thakur},\n  title        = {abhiTronix/deffcode: v0.2.4},\n  month        = oct,\n  year         = 2022,\n  publisher    = {Zenodo},\n  version      = {v0.2.4},\n  doi          = {10.5281/zenodo.7155399},\n  url          = {https://doi.org/10.5281/zenodo.7155399}\n}\n

"},{"location":"changelog/","title":"Release Notes","text":""},{"location":"changelog/#v025-2023-01-11","title":"v0.2.5 (2023-01-11)","text":"New Features
  • FFdecoder:
    • Added OpenCV compatibility patch for YUV pixel-formats.
      • Implemented new patch for handling YUV pixel-formats(such as YUV420p, yuv444p, NV12, NV21 etc.) for exclusive compatibility with OpenCV APIs.
        • Note: Only YUV pixel-formats starting with YUV and NV are currently supported.
      • Added new -enforce_cv_patch boolean attribute for enabling OpenCV compatibility patch.
  • Sourcer:
    • Added Looping Video support.
      • Now raw-frame numbers revert to null(None) whenever any looping is defined through filter(such as -filter_complex \"loop=loop=3:size=75:start=25\") or prefix(\"-ffprefixes\":[\"-stream_loop\", \"3\"]).
  • Docs:
    • Added YUV frames example code for Capturing and Previewing BGR frames from a video file recipe.
    • Added YUV frames example code for `Transcoding video using OpenCV VideoWriter API recipe.
    • Added YUV frames example code for `Transcoding lossless video using WriteGear API recipe.
    • Added new CUVID-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Hardware-based Video Decoding and Previewing recipe.
    • Added new CUDA-accelerated Video Transcoding with OpenCV`s VideoWriter API recipe.
    • Added new CUDA-NVENC-accelerated Video Transcoding with WriteGear API recipe both for consuming BGR and NV12 frames.
    • Added new CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API recipe which is still WIP(\ud83d\udcacconfirmed with a GIF from tenor).
    • Added new Capturing and Previewing frames from a Looping Video recipe using -stream_loop option and loop filter.
    • Added docs for -enforce_cv_patch boolean attribute in ffparam dictionary parameter.
    • Added new python dependency block for recipes.
    • Reflected new OpenCV compatibility patch for YUV pixel-formats in code.
    • Added new content.code.copy and content.code.link features.
Updates/Improvements
  • FFhelper:
    • Replaced deprecated Retry API from requests.packages with requests.adapters.
  • Maintenance:
    • Replaced raw.github.com links with GitLab and GH links.
    • Removed unused code.
    • Updated log message.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
    • Updated test_frame_format test to include -enforce_cv_patch boolean attribute.
    • Updated test_source to test looping video support.
  • Setup:
    • Removed unused imports and patches.
    • Bumped version to 0.2.5.
  • Docs:
    • Updated Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing passage.
    • Updated and corrected docs hyperlinks in index.md and ReadMe.md
    • Update Zenodo Badge and BibTex entry.
    • Updated Readme.md banner image URLs.
    • Updated md-typeset text font size to .75rem.
    • Updated text and admonitions.
    • Updated recipe assumptions.
    • Updated Readme.md GIF URLs.
    • Updated abstract text in recipes.
    • Updated changelog.md.
    • Updated recipe code.
    • Removed old recipes.
Bug-fixes
  • FFdecoder API:
    • Fixed Zero division bug while calculating raw_bit_per_component.
  • FFhelper:
    • Fixed response.headers returning content-length as Nonetype since it may not necessarily have the Content-Length header set.
      • Reason: The response from gitlab.com contains a Transfer-Encoding field as 'Transfer-Encoding': 'chunked', which means data is sent in a series of chunks, so the Content-Length header is emitted. More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding#Directives
  • Docs:
    • Fixed https://github.com/badges/shields/issues/8671 badge issue in README.md
    • Removed deprecated text.
    • Fixed several typos in docs.
  • CI:
    • Added fix for codecov upload bug (https://github.com/codecov/codecov-action/issues/598).
      • Updated codecov-action workflow to `v3.
      • Added new CODECOV_TOKEN GitHub secret.
Pull Requests
  • PR #37
"},{"location":"changelog/#v024-2022-10-07","title":"v0.2.4 (2022-10-07)","text":"New Features
  • FFdecoder API:
    • Implemented new comprehensive support for both discarding key default FFmpeg parameters from Decoding pipeline simply by assigning them null string values, and concurrently using values extracted from Output Stream metadata properties (available only when FFmpeg filters are defined) for formulating pipelines.
      • Added null string value support to -framerate and -custom_resolution attributes, as well as frame_format parameter for easily discarding them.
      • Re-Implemented calculation of rawframe pixel-format.
        • Reconfigured default rawframe pixel-format, Now rawframe pixel-format will always default to source_video_pixfmt with frame_format=\"null\".
        • Now with frame_format parameter value either \"null\" or invalid or undefined, rawframe pixel-format value is taken from output_frames_pixfmt metadata property extracted from Output Stream (available only when filters are defined). If valid output_video_resolution metadata property is found then it defaults to default pixel-format(calculated variably).
        • With frame_format=\"null\", -pix_fmt FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of rawframe resolution value.
        • Now with -custom_resolution dictionary attribute value either \"null\" or invalid or undefined, rawframe resolution value is first taken from output_video_resolution metadata property extracted from Output Stream (available only when filters are defined), next from source_video_resolution metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_resolution valid metadata properties are found then RuntimeError is raised.
        • With -custom_resolution dictionary attribute value \"null\", -s/-size FFmpeg parameter will not be added to Decoding pipeline.
      • Re-Implemented calculation of output framerate value.
        • Now with -framerate dictionary attribute either null or invalid or undefined, output framerate value is first taken from output_video_framerate metadata property extracted from Output Stream (available only when filters are defined), next from source_video_framerate metadata property(extracted from Input Source Stream). If neither output_video_resolution nor source_video_framerate valid metadata properties are found then RuntimeError is raised.
        • With -framerate dictionary attribute value \"null\", -r/-framerate FFmpeg parameter will not be added to Decoding pipeline.
    • Implemented passing of simple -vf filters, complex -filter_complex filters, and pre-headers(via -ffprefixes) directly to Sourcer API's sourcer_params parameter for probing Output Stream metadata and filter values.
  • Sourcer API:
    • Implemented new comprehensive approach to handle source_demuxer parameter w.r.t different source parameter values.
      • The source_demuxer parameter now accepts \"auto\" as its value for enabling Index based Camera Device Capture feature in Sourcer API.
      • Sourcer API auto-enforces source_demuxer=\"auto\" by default, whenever a valid device index (uses validate_device_index method for validation) is provided as its source parameter value.
        • \u26a0\ufe0f Sourcer API will throw Assertion error if source_demuxer=\"auto\" is provided explicitly without a valid device index at its source parameter.
      • Source API now accepts all +ve and -ve device indexes (e.g. -1,0,1,2 etc.) to its source parameter, both as in integer and string of integer types as source in Index based Camera Device Capture feature.
        • Sourcer API imports and utilizes extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system.
          • \u26a0\ufe0f Sourcer API will throw RuntimeError on failure to identify any device.
        • Sourcer API auto verifies that the specified source device index is in range of the devices discovered.
          • \u26a0\ufe0f Sourcer API will raise ValueError if value goes out of valid range.
        • Sourcer API also automatically handle -ve indexes if specified within the valid range.
        • Implemented patch to auto-add video= suffix to selected device name before using it as video source on Windows OSes.
        • Added patch for handling dictionary of devices paths(with devices names as values) and log messages on Linux Oses.
        • Added copy import for shallow copying various class parameters.
      • Implemented new Support for additional FFmpeg parameters and Output metadata.
        • Added three new metadata properties: output_video_resolution, output_video_framerate, output_frames_pixfmt for handling extracted Output Stream values, whenever additional FFmpeg parameters(such as FFmpeg filters) are defined.
        • Added support for auto-handling additional FFmpeg parameters defined by sourcer_params dictionary parameters.
        • Implement new separate pipeline for parsing Output Stream metadata by decoding video source using null muxer for few microseconds whenever additional FFmpeg parameters(such as -vf filters) are defined by the user.
        • Included new metadata_output internal parameter for holding Output Stream metadata splitted from original Sourcer Metadata extracted from new pipeline.
        • Included new output_video_resolution, output_video_framerate, output_frames_pixfmt internal parameters for metadata properties, whenever Output Stream Metadata available.
        • Added new extract_output boolean parameter to extract_video_pixfmt and extract_resolution_framerate internal methods for extracting output pixel-format, framerate and resolution using Output Stream metadata instead of Sourcer Metadata, whenever available.
      • Added tuple datatype to sourcer_params exception.
      • Added dict2Args import.
    • Added enumerate_devices property object to enumerate all probed Camera Devices connected to a system names along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.
    • Added new force_retrieve_missing parameter to retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata, when force_retrieve_missing=True.
    • Added various output stream metadata properties that are only available when additional FFmpeg parameters(such as filters) are defined manually, by assigning them counterpart source stream metadata property values
  • FFhelper:
    • Implemented new extract_device_n_demuxer() method for discovering and extracting all Video-Capture device(s) name/path/index present on system and supported by valid OS specific FFmpeg demuxer.
      • Added support for three OS specific FFmpeg demuxers: namely dshow for Windows, v4l2 for Linux, and avfoundation for Darwin/Mac OSes.
      • Implemented separate code for parsing outputs of python subprocess module outputs provided with different commands for discovering all Video-Capture devices present on system.
        • Processed dshow (on Windows) and avfoundation (on Darwin) demuxers in FFmpeg commands with -list_devices true parameters using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names/indexes.
        • Used v4l2-ctl submodule command on Linux machines for listing all Video-Capture devices using subprocess module and applied various brute-force pattern matching on its output for discovering and extracting all devices names and true system /dev/video paths.
          • Added patch for a single device with multiple /dev/video paths (each for metadata, video, controls), where it iterates on each path to find the exact path that contains valid video stream.
          • Added elaborated checks for catching all possible system errors that can occur while running v4l2-ctl submodule command.
          • The method will return discovered devices as list of dictionaries with device paths(/dev/video) as keys and respective device name as the values, instead of default list of device names.
          • Added patch for handling Linux specific log messages.
      • Added various logging messages to notify users about all discover devices names/paths w.r.t indexes.
      • \u26a0\ufe0f The extract_device_n_demuxer method will raise RuntimeError if it fails to identify any device.
      • Added various checks to assert invalid input parameters and unsupported OSes.
      • Added machine_OS parameter to specify OS running on the system, must be value of platform.system() module. If invalid the method will raise ValueError.
  • Utilities:
    • Added new validate_device_index() method to verify whether a given device index is valid or not.
      • Only Integers or String of integers are valid indexes.
      • Returns a boolean value, confirming whether valid(If true), or not(If False).
    • Added checks to support all +ve and -ve integers, both as integer and string types.
  • Docs:
    • Added new validate_device_index() method and its parameters description.
    • Added new extract_device_n_demuxer() method and its parameters description.
    • Added Decoding Camera Devices using Indexes support docs.
      • Added decode-camera-devices.md doc for Decoding Camera Devices using Indexes.
        • Added Enumerating all Camera Devices with Indexes example doc with code.
        • Added Capturing and Previewing frames from a Camera using Indexes example doc with code.
      • Added Camera Device Index support docs to FFdecoder and Sourcer API params.
  • CI:
    • Added check exception for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Added new test_discard_n_filter_params unittest for test recently added supported for both discarded parameters and filter values.
Updates/Improvements
  • FFdecoder API:
    • Extended range of supported output frame pixel-formats.
      • Added new pixel-formats to supported group by extending raw bits-per-component range.
    • Simplified raw frame dtype calculation based on selected pixel-format.
      • output_frames_pixfmt metadata property(if available) will be overridden to rgb24.
    • Replaced continue with break in generateFrame() method.
    • Improved handling of frame_format parameter.
  • Sourcer API:
    • Simplified JSON formatting and returning values logic.
    • Updated logging messages text and position.
    • Removed redundant variable definitions.
    • Changed related internal variable names w.r.t metadata property names.
    • Replaced os_windows internal parameter with machine_OS, and changed its input from os.name to more flexible platform.system().
    • Removed source_extension internal parameter and assigned values directly.
  • FFhelper:
    • Implemented more robust pattern matching for Linux machines.
    • Updated logs in check_sp_output() method for improving error output message.
    • Implemented \"Cannot open device\" v4l2-ctl command Error logs.
  • Maintenance:
    • Bumped version to 0.2.4.
    • Updated code comments.
  • CI:
    • Updated FFdecoder API's test_camera_capture unittest to test new Index based Camera Device Capturing on different platforms.
      • Added various parametrize source and source_demuxer parameter data to attain maximum coverage.
      • Added result field to fail and xfail unittest according to parametrize data provided on different platforms.
      • Removed pytest.mark.skipif to support all platforms.
    • Added and updated various parametrize test data to attain maximum coverage.
    • Limited range of extracted frames, for finishing tests faster.
    • Updated unittests to reflect recent name changes.
    • Disabled capturing of stdout/stderr with -s flag in pytest.
  • Setup:
    • Updated description metadata.
  • Bash Script:
    • Created undeleteable undelete.txt file for testing on Linux envs.
    • Updated undelete.txt file path.
    • Made FFmpeg output less verbose.
  • Docs:
    • Updated FFdecoder API params docs w.r.t recent changes and support for both discarded parameters and filter values.
      • Added new admonitions to explain handling of \"null\" and (special-case), undefined, or invalid type values in various parameters/attributes.
      • Added new footer reference explaining the handling of Default pixel-format for frame_format parameter.
      • Added missing docs for -default_stream_indexes ffparams attribute.
    • Added docs for recently added additional FFmpeg parameter in Sourcer API's sourcer_params parameter.
      • Removed unsupported -custom_resolution sourcer_params attributes from sourcer_params parameter docs.
      • Removed redundant -vcodec and -framerate attributes from sourcer_params parameter docs.
    • Updated both basic and advanced project Index hyperlinks.
    • Moved decoding-live-feed-devices.md doc from basic to advanced directory.
    • Updated page navigation in mkdocs.yml.
    • Update announcement bar to feature Index based Camera Device Capture support.
    • Updated Project description and Key features of DeFFcode.
    • Updated README.md with latest information.
    • Updated source and source_demuxer param doc.
    • Updated Hardware-Acceleration docs.
      • Updated Hardware-Accelerated Video Decoding and Transcoding docs to inform users about DeFFcode generated YUV frames not yet supported by OpenCV and its APIs.
    • Updated recipes docs to reflect recent changes in APIs.
    • Updated parameter docs to reflect recent name changes.
    • Updated parameters/attributes introductory descriptions.
    • Updated various parametrize data to attain maximum coverage.
    • Updated Zenodo badge and the BibTeX entry.
    • Updated method description texts and logging messages.
    • Update title headings, icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Breaking Updates/Changes
  • API:
    • Implemented new Index based Camera Device Capture feature (Similar to OpenCV), where the user just has to assign device index as integer (-n to n-1) in source parameter of DeFFcode APIs to directly access the given input device in few seconds.
  • FFdecoder API
    • Unsupported dtype pixel-format always defaults to rgb24.
  • Sourcer API:
    • Renamed output_video_resolution metadata property to output_frames_resolution.
    • Renamed output_video_framerate metadata property to output_framerate.
Bug-fixes
  • FFdecoder API:
    • Removed redundant dummy value for output_frames_pixfmt metadata property.
    • Fixed critical KeyError bug arises due to missing output metadata properties.
      • Enforced force_retrieve_missing parameter in Sourcer API's retrieve_metadata() method for returning metadata missing in current Pipeline as (metadata, metadata_missing) tuple value instead of just metadata.
      • Added new missing_prop internal class variable for handling metadata properties missing, received from Sourcer API.
      • Moved ffdecoder_operational_mode to missing metadata properties that cannot be updated but are read only.
      • Added missing metadata properties to metadata class property object for easy printing along with other metadata information.
      • Implemented missing metadata properties updation via. overridden metadata class property object.
        • Added counterpart_prop dict to handle all counterpart source properties for each missing output properties.
        • Implemented missing output properties auto-updation w.r.t counterpart source property.
        • Added separate case for handling only missing metadata properties and notifying user about counterpart source properties.
    • Fixed source metadata properties update bug causing non-existential missing metadata properties to be added to source metadata properties dictionary along with source metadata property.
      • Replaced update() calling on value dict directly with explicitly assigning values to source metadata properties dictionary.
      • Simplified missing_prop validation.
      • Removed unwanted continue in middle of loop.
    • Remove unusable exclusive yuv frames patch.
    • Fixed KeyError bug arises due to wrong variable placement.
    • Fixed approx_video_nframes metadata property check.
    • Fixed av_interleaved_write_frame(): broken pipe warning bug by switching process.terminate() with process.kill().
    • Fixed AttributeError bug caused due to typo in logger.
  • FFhelper:
    • Fixed check_sp_output() method returning Standard Error (stderr) even when Nonetype.
    • Fixed logger requiring utf-8 decoding.
    • Fixed missing force_retrieve_stderr argument to check_sp_output in extract_device_n_demuxer method on Linux platforms.
    • Fixed logger message bug.
  • Utils:
    • Fixed logger name typo.
  • Maintenance:
    • Fixed hyperlinks to new GitHub's form schemas.
    • Fixed typos in logs messages.
    • Removed redundant code.
    • Updated code comments.
  • Setup:
    • Rearranged long_description patches to address unused patch bug.
  • Bash Script:
    • Fixed chattr: No such file or directory bug.
  • CI:
    • Fixed missing lavfi demuxer for mandelbrot virtual source in Sourcer API's test_probe_stream_n_retrieve_metadata unittest.
    • Fixed missing ffparams parameter bug in test_discard_n_filter_params() unittest.
    • Fixed test_camera_capture test.
    • Removed redundant similar ValueError checks.
    • Fixed typo in pytest arguments.
    • Fixed missing arguments.
  • Docs:
    • Fixed invalid hyperlinks in ReadMe.md
    • Fixed bad formatting and context.
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #29
  • PR #32
"},{"location":"changelog/#v023-2022-08-11","title":"v0.2.3 (2022-08-11)","text":"New Features
  • Docs:
    • Added Zenodo Bibtex entry and badge in docs for easy citation.
    • Added new <div> tag bounding-box style to the Static FFmpeg binary download links in FFmpeg Installation Doc for better accessibility.
  • Maintenance:
    • Switched to new Issue GitHub's form schema using YAML:
      • Added new bug_report.yaml Issue GitHub's form schema for Bug Reports.
      • Added new idea.yaml Issue GitHub's form schema for new Ideas.
      • Added new question.yaml Issue GitHub's form schema for Questions.
      • Deleted old depreciated markdown(.md) files.
      • Polished forms.
Updates/Improvements
  • Maintenance:
    • Added new patterns to .gitignore to ignore vim files.
  • CI:
    • Updated test_FFdecoder_params unittest to include with statement access method.
  • Setup:
    • Added new patches for using README.md text as long_description metadata.
      • Implemented new patch to remove GitHub README UI specific text.
    • Simplified multiple str.replace to chained str.replace of better readability.
    • Bumped version to 0.2.3.
  • Docs:
    • Updated recipes to include with statement access method.
      • Updated existing recipes to include with statement access method in FFdecoder APIs.
      • Included new example code of accessing RGB frames using with statement access method.
      • Updated Recipe title to \"Accessing RGB frames from a video file\" across docs.
    • Included warning admonition for advising users to always use trim with reverse filter.
    • Updated docs text font to Libre Franklin.
    • Updated method description texts and logging messages.
    • Update icons and admonition messages.
    • Updated code comments.
    • Updated changelog.md.
Bug-fixes
  • FFdecoder API:
    • Fixed Context Manager methods.
      • Fixed __enter__ method returning class instance instead of formulating pipeline.
      • Fixed __exit__ method calling wrong non-existent method.
  • Setup:
    • Fixed missing comma(,) in keywords metadata.
    • Fixed bug in patch string.
  • Docs:
    • Fixed typos in code comments.
    • Fixed several typos in docs.
Pull Requests
  • PR #26
"},{"location":"changelog/#v022-2022-08-09","title":"v0.2.2 (2022-08-09)","text":"New Features
  • Sourcer API:
    • Added support for -ffprefixes attribute through Sourcer API's sourcer_param dictionary parameter (similar to FFdecoder API).
  • FFdecoder API:
    • Added new output_frames_pixfmt metadata property to preview and handle output frames pixel-format.
  • Docs:
    • Added separate \"Basic\" and \"Advanced\" Recipes markdowns files with self-explanatory text, related usage code, asset (such as images, diagrams, GIFs, etc.), and UI upgrades for bringing standard quality to visual design.
    • Added separate index.md for Basic and Advanced Recipes with introductory text and curated hyperlinks for quick references to various recipes (separated with sub-categories \"Decoding\", \"Transcoding\", and \"Extracting Video Metadata\").
    • Added related admonitions to specify python dependencies as well as other requirements and relevant information required for each of these recipes.
    • Added new Basic Decoding Recipes:
      • Added Decoding Video files with various pixel formats recipes.
      • Added Decoding Live Feed Devices recipes with source_demuxer FFdecoder API parameter.
      • Added Decoding Image sequences recipes supporting Sequential, Glob pattern , Single (looping) image.
      • Added Decoding Network Streams recipes.
    • Added new Basic Transcoding Recipes:
      • Added Transcoding Live frames recipes with OpenCV and WriteGear.
      • Added Transcoding Live Simple Filtergraphs recipes with OpenCV.
      • Added Saving Key-frames as Image recipes with different image processing libraries.
    • Added new Basic Extracting Video Metadata Recipes:
      • Added Extracting Video Metadata recipes with FFdecoder and Sourcer APIs.
    • Added new Advanced Decoding Recipes:
      • Added Hardware-Accelerated Video Decoding recipe using NVIDIA's H.264 CUVID Video-decoder(h264_cuvid).
      • Added Decoding Live Virtual Sources recipes with many test patterns using lavfi input virtual device.
    • Added new Advanced Decoding Recipes:
      • Added lossless Hardware-Accelerated Video Transcoding recipe with WriteGear API.
      • Added Transcoding Live Complex Filtergraphs recipes with WriteGear API.
      • Added Transcoding Video Art with Filtergraphs recipes with WriteGear API for creating real-time artistic generative video art using simple and complex filtergraphs.
    • Added new Advanced Updating Video Metadata Recipes:
      • Added Updating Video Metadata recipes with user-defined as well as source metadata in FFdecoder API.
    • Added new dark and light theme logo support.
    • Added new recipes GIF assets to gifs folder.
    • Added new dark logo deffcode-dark.png asset to images folder.
    • Added new ffdecoder.png and sourcer.png Image assets to images folder.
    • Added new navigation.tabs feature.
    • Added Material Announcement-Bar notifying recent changes.
Updates/Improvements
  • Sourcer API:
    • Implemented new validation checks to ensure given source has usable video stream available by checking availability of either video bitrate or both frame-size and framerate properties in the source metadata.
    • Improved extract_resolution_framerate method for making framerate extraction more robust by falling back to extracting TBR value when no framerate value available in the source metadata.
  • FFdecoder API:
    • Updated metadata property object to validate and override source metadata properties directly by overloading same property object before formulating Frames Decoder Pipeline:
      • Implemented validation checks to verify each validate manually assigned source metadata property against specific datatype before overriding.
      • Updated logging to notify invalid datatype values when assigned through metadata property object.
      • Added support for overriding source_video_resolution source metadata property to control frame-size directly through metadata.
      • Added support for overriding output_frames_pixfmt metadata attribute to be used as default pixel-format, when frame_format parameter value is None-type.
      • Improved handling of source metadata keys in metadata property object.
    • Updated metadata property object to handle and assign User-defined metadata directly by overloading the same property object:
      • Added new internal user_metadata class variable to handle all User-defined metadata information separately.
      • FFdecoder API's metadata property object now returns User-defined metadata information merged with Source Video metadata.
      • Added tuple value warning log to notify users json module converts Python tuples to JSON lists.
    • Improved logic to test validity of -custom_resolution attribute value through ffparams dictionary parameter.
    • Improved handling of FFmpeg pipeline framerate with both user-defined and metadata defined values.
    • Added tuple to exception in datatype check for ffparams dictionary parameter.
    • Added datatype validation check for frame_format parameter.
    • Improved handling of -framerate parameter.
  • Maintenance:
    • Reformatted all Core class and methods text descriptions:
      • Rewritten introductory each API class description.
      • Moved reference block from index.md to class description.
      • Fixed missing class and methods parameter description.
      • Fixed typos and context in texts.
      • Reformatted code comments.
    • Simplified for loop with if condition checking in metadata property object.
    • Updated logging comments.
  • Setup:
    • Updated project description in metadata.
    • Bumped version to 0.2.2.
  • Docs:
    • Updated Introduction doc:
      • Added new text sections such as \"Getting Started\", \"Installation Notes\", \"Recipes a.k.a Examples\" and \"API in a nutshell\".
      • Rewritten Introduction(index.md) with recent Information, redefined context, UI changes, updated recipe codes, curated hyperlinks to various recipes(separated with categories), and relatable GIFs.
      • Updated spacing in index.md using spacer class within <div> tag and &nbsp;.
      • Reformatted and centered DeFFcode Introductory description.
      • Reformatted FFmpeg Installation doc and Issue & PR guidelines.
      • Updated static FFmpeg binaries download URLs in FFmpeg Installation doc.
      • Refashioned text contexts, icons, and recipes codes.
      • Updated Key Features section with reflecting new features.
    • Updated README.md:
      • Updated README.md w.r.t recent changes in Introduction(index.md) doc.
      • Simplified and Reformatted text sections similar to Introduction doc.
      • Imported new \"Contributions\" and \"Donations\" sections from VidGear docs.
      • Added collapsible text and output section using <summary> and <detail> tags.
      • Added experimental note GitHub blockquote to simulate admonition in README.md.
      • Removed tag-line from README.md and related image asset.
      • Simplified and Grouped README URL hyperlinks.
      • Removed Roadmap section.
    • Updated Recipes docs:
      • Revamped DeFFcode Introduction index.md with new Information, Context and UI changes, Updated example codes and hyperlinks.
      • Updated Announcement Bar to fix announcement_link variable and text.
      • Updated footer note to notify users regarding tuple value warning in FFdecoder API.
      • Rewritten recipes w.r.t breaking changes in APIs.
    • Updated Reference docs:
      • Completely revamped API's parameter reference docs.
      • Added new Functional Block Diagrams to FFdecoder and Sourcer API References.
      • Rewritten and Reformatted FFdecoder and Sourcer API's parameter reference docs with new information w.r.t recent changes.
      • Implemented new admonitions explaining new changes, related warnings/errors, usage examples etc.
      • Removed redundant advanced.md and basic.md docs.
      • Added new abstracts to FFhelper and Utils docs.
    • Updated docs site navigation and titles:
      • Reformatted index.md and installation/index.md.
      • Renamed help/index.md to help/help.md.
      • Moved basic and advanced recipes from example to recipes folder.
      • Imported \"Donations\" sections from VidGear docs to help.md.
      • Added updated page-title and navigation hyperlinks in mkdocs.yml to new markdown files incorporated recently.
      • Updated internal navigation hyperlinks in docs and removed old redundant file links.
    • Updated docs UI:
      • Added custom spacer class in CSS for custom vertical spacing.
      • Imported new \"New\", \"Advance\", \"Alert\", \"Danger\" and \"Bug\" admonitions custom CSS UI patches from vidgear.
      • Updated all admonitions icons with new custom icon SVG+XML URLs.
      • Reformatted custom.css and added missing comments.
      • Updated docs fonts:
        • Updated text font to Heebo.
        • Updated code font to JetBrains Mono.
      • Updated primary and accent colors:
        • Updated primary light color to light green.
        • Updated primary dark color to amber.
        • Updated accent light color to green.
        • Updated accent dark color to lime.
      • Replaced admonitions with appropriate ones.
      • Changed Color palette toggle icons.
      • Updated icons in title headings.
    • Updated admonitions messages.
    • Updated changelog.md.
  • CI:
    • Pinned jinja2 version to <3.1.0, since jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799).
    • Updated unittests w.r.t recent changes in APIs:
      • Updated test_frame_format unittest to include manually assign output pixel-format via metadata property object.
      • Updated test_metadata unittest to include new checks parameter to decide whether to perform Assertion test on assigned metadata properties in FFdecoder API.
      • Added new parametrize attributes in test_metadata and test_seek_n_save unittests to cover every use-cases.
      • Replaced IOError with ValueError in Sourcer API unittests.
    • Updated test_metadata unittest to verify tuple value warning.
    • Updated unittests to increase code coverage significantly.
Breaking Updates/Changes
  • Sourcer API:
    • Sourcer API's retrieve_metadata() method now returns parsed metadata either as JSON string or dictionary type.
      • Added new pretty_json boolean parameter to retrieve_metadata(), that is when True, returns metadata formatted as JSON string instead of default python dictionary.
    • Changed IOError to ValueError in Sourcer API, raised when source with no decodable audio or video stream is provided.
  • FFdecoder API:
    • Rename extraparams dictionary parameter to ffparams in FFdecoder API.
    • The source metadata value cannot be altered through metadata property object in FFdecoder API.
    • Removed -ffpostfixes attribute support from ffparams dictionary parameter in FFdecoder API, since totally redundant in favor of similar -ffprefixes and -clones attributes.
Bug-fixes
  • FFdecoder API:
    • Fixed metadata property object unable to process user-defined keys when any source metadata keys are defined.
    • Fixed TypeError bug with string type -framerate parameter values.
  • Sourcer API:
    • Fixed Sourcer API throws IOError for videos containing streams without both source bitrate and framerate defined (such as from lavfi input virtual device).
    • Fixed AttributeError bug due to typo in variable name.
  • CI:
    • Fixed support for newer mkdocstring version in DeFFcode Docs Deployer workflow.
      • Added new mkdocstrings-python-legacy dependency.
      • Replaced rendering variable with options.
      • Removed pinned mkdocstrings==0.17.0 version.
      • Removed redundant variables.
    • Updated test_metadata unittest to fix AssertionError Bug.
  • Docs:
    • Fixed some admonitions icons not showing bug using !important rule in CSS.
    • Fixed 404.html static page not showing up.
    • Fixed invalid internal navigation hyperlinks and asset paths.
    • Removed quote/cite/summary admonition custom UI patches.
    • Removed redundant information texts.
    • Fixed typos in code comments.
    • Fixed typos in example code.
Pull Requests
  • PR #23
"},{"location":"changelog/#v021-2022-07-14","title":"v0.2.1 (2022-07-14)","text":"New Features
  • Sourcer API:
    • Implemented support for extracting metadata from live input devices/sources.
    • Added new source_demuxer and forced_validate parameters to validate_source internal method.
    • Implemented logic to validate source_demuxer value against FFmpeg supported demuxers.
    • Rearranged metadata dict.
    • Updated Code comments.
  • FFdecoder API:
    • Implemented functionality to supported live devices by allowing device path and respective demuxer into pipeline.
    • Included -f FFmpeg parameter into pipeline to specify source device demuxer.
    • Added special case for discarding -framerate value with Nonetype.
  • CI:
    • Added new unittest test_camera_capture() to test support for live Virtual Camera devices.
    • Added new v4l2loopback-dkms, v4l2loopback-utils and kernel related APT dependencies.
  • Bash Script:
    • Added new FFmpeg command to extract image datasets from given video on Linux envs.
    • Created live Virtual Camera devices through v4l2loopback library on Github Actions Linux envs.
      • Added v4l2loopback modprobe command to setup Virtual Camera named VCamera dynamically at /dev/video2.
      • Added v4l2-ctl --list-devices command for debugging.
      • Implemented FFmpeg command through nohup(no hangup) to feed video loop input to Virtual Camera in the background.
Updates/Improvements
  • Sourcer API:
    • Only either source_demuxer or source_extension attribute can be present in metadata.
    • Enforced forced_validate for live input devices/sources in validate_source internal method.
  • FFdecoder API:
    • Rearranged FFmpeg parameters in pipeline.
    • Removed redundant code.
    • Updated Code comments.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
  • CI:
    • Restricted test_camera_capture() unittest to Linux envs only.
    • Removed return_generated_frames_path() method support for Linux envs.
    • Pinned jinja2 3.1.0 or above breaking mkdocs.
      • jinja2>=3.1.0 breaks mkdocs (mkdocs/mkdocs#2799), therefore pinned jinja2 version to <3.1.0.
  • Bash Script:
    • Updated to latest FFmpeg Static Binaries links.
      • Updated download links to abhiTronix/ffmpeg-static-builds hosting latest available versions.
      • Updated date/version tag to 12-07-2022.
      • Removed depreciated binaries download links and code.
  • Setup:
    • Bumped version to 0.2.1.
  • Docs:
    • Updated changelog.md.
Breaking Updates/Changes
  • Implement support for live input devices/sources.
    • source parameter now accepts device name or path.
    • Added source_demuxer parameter to specify demuxer for live input devices/sources.
    • Implemented Automated inserting of -f FFmpeg parameter whenever source_demuxer is specified by the user.
Bug-fixes
  • Sourcer API:
    • Fixed Nonetype value bug in source_demuxer assertion logic.
    • Fixed typos in parameter names.
    • Added missing import.
  • FFhelper API:
    • Logged error message on metadata extraction failure.
    • Fixed bug with get_supported_demuxers not detecting name patterns with commas.
    • Removed redundant logging.
  • CI:
    • Fixed critical permission bug causing v4l2loopback to fail on Github Actions Linux envs.
      • Elevated privileges to root by adding sudo to all commands(including bash scripts and python commands).
      • Updated vidgear dependency to pip install from its git testing branch with recent bug fixes.
      • Replaced relative paths with absolute paths in unit tests.
    • Fixed WriteGear API unable to write frames due to permission errors.
    • Fixed test_source_playback() test failing on Darwin envs with OLD FFmpeg binaries.
      • Removed custom_ffmpeg value for Darwin envs.
    • Fixed various naming typos.
    • Fixed missing APT dependencies.
Pull Requests
  • PR #17
"},{"location":"changelog/#v020-2022-03-21","title":"v0.2.0 (2022-03-21)","text":"New Features
  • Sourcer API:
    • Added a new source_audio_samplerate metadata parameter:
      • Re-implemented __extract_audio_bitrate internal function from scratch as __extract_audio_bitrate_nd_samplerate.
        • Implemented new algorithm to extract both audio bitrate and samplerate from given source.
        • Updated regex patterns according to changes.
      • Updated __contains_video and __contains_audio logic to support new changes.
    • Added metadata extraction support:
      • Added retrieve_metadata class method to Sourcer API for extracting source metadata as python dictionary.
        • Populated private source member values in dictionary with distinct keys.
    • Added new -force_validate_source attribute to Sourcer API's sourcer_params dict parameter for special cases.
    • Implemented check whether probe_stream() called or not in Sourcer API.
  • FFdecoder API:
    • Added metadata extraction and updation support:
      • Added metadata property object function to FFdecoder API for retrieving source metadata form Sourcer API as dict and return it as JSON dump for pretty printing.
        • Added Operational Mode as read-only property in metadata.
      • Added metadata property object with setter() method for updating source metadata with user-defined dictionary.
        • Implemented way to manually alter metadata keys and values for custom results.
  • Docs:
    • Added new comprehensive documentation with Mkdocs:
      • Added new image assets:
        • Added new Deffcode banner image, logo and tagline
        • Added new icon ICO file with each layer of the favicon holds a different size of the image.
        • Added new png images for best compatibility with different web browsers.
      • Added new docs files:
        • Added new index.md with introduction to project.
        • Added new changelog.md.
        • Added license.md
        • Added new index.md with instructions for contributing in DeFFcode.
          • Added issue.md with Issue Contribution Guidelines.
          • Added PR.md with PR Contribution Guidelines.
        • Added new custom.js to add gitter sidecard support.
        • Added new custom.css that brings standard and quality visual design experience to DeFFcode docs.
          • Added new admonitions new and alert.
        • Added separate LICENSE(under CC creative commons) and REAME.md for assets.
        • Added new main.html extending base.html for defining custom site metadata.
        • Added deFFcode banner image to metadata.
        • Added twitter card and metadata.
        • Added version warning for displaying a warning when the user visits any other version.
        • Added footer sponsorship block.
        • Added gitter card official JS script dist.
        • Added new custom 404.html to handle HTTP status code 404 Not Found.
          • Implemented custom theming with new CSS style.
          • Added custom 404 image asset.
        • Added new index.md with DeFFcode Installation notes.
          • Added info about Supported Systems, Supported Python legacies, Prerequisites, Installation instructions.
          • Added Pip and Source Installation instructions.
        • Added new ffmpeg_install.md with machine-specific instructions for FFmpeg installation.
        • Added new index.md with different ways to help DeFFcode, other users, and the author.
          • Added info about Starring and Watching DeFFcode on GitHub, Helping with open issues etc.
          • Added Twitter intent used for tweeting #deffcode hashtags easily.
          • Added Kofi Donation link button.
          • Added author contact links and left align avatar image.
        • Added new get_help.md to get help with DeFFcode.
          • Added DeFFcode gitter community link.
          • Added other helpful links.
      • Added new assets folders.
      • Added Basic Recipes with basic.md
      • Added Advanced Recipes with advanced.md
      • Added all API References.
        • Added mkdocstrings automatic documentation from sources.
        • Added new index.md for FFdecoder API with its description and explaining its API.
        • Added new index.md for Sourcer API with its description and explaining its API.
        • Added ffhelper methods API references.
        • Added utils methods API references.
      • Added all API Parameters.
        • Added new params.md for FFdecoder API explaining all its parameters.
        • Added new params.md for Sourcer API explaining all its parameters.
        • Added Mkdocs support with mkdocs.yml
      • Implemented new mkdocs.yml with relevant parameters.
        • Added extended material theme with overridden parts.
        • Added site metadata with site_name, site_url, site_author, site_description, repo_name, repo_url, edit_uri, copyright etc.
        • Added navigation under sections for easily accessing each document.
        • Implemented Page tree for DeFFcode docs.
        • Added features like navigation.tracking, navigation.indexes, navigation.top, search.suggest, search.highlight, search.share, content.code.annotate.
        • Added separate palette [default]light(with primary:green accent: dark green) and [slate]dark(with primary:teal accent: light green) mode.
        • Added Color palette toggle switch with icon material/home-lightning-bolt.
        • Added support for all pymarkdown-extensions.
        • Added google fonts for text: Quicksand and code: Fira Code.
        • Added custom logo and icon for DeFFcode.
        • Added support for plugins like search, git-revision-date-localized, minify.
        • Added support for mkdocstrings plugin for auto-built API references.
          • Added python handler for parsing python source-code to mkdocstrings.
          • Improved source-code docs for compatibility with mkdocstrings.
        • Added support for extensions like admonition, attr_list, codehilite, def_list, footnotes, meta, and toc.
        • Added social icons and links.
        • Added custom extra_css and extra_javascript.
        • Added support for en (English) language.
      • Added new badges to README.md for displaying current status of CI jobs and coverage.
      • Added Roadmap to README.md
  • CI:
    • Automated CI support for different environments:
      • Implemented auto-handling of dependencies installation, unit testing, and coverage report uploading.
      • Added GitHub Action workflow for Linux envs:
        • Added and configured CIlinux.yml to enable GitHub Action workflow for Linux-based Testing Envs.
        • Added 3.7+ python-versions to build matrix.
        • Added code coverage through codecov/codecov-action@v2 workflow for measuring unit-tests effectiveness.
          • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
      • Added Appveyor workflow for Windows envs:
        • Add and configured appveyor.yml to enable Appveyor workflow for Windows-based Testing Envs.
        • Added 3.7+ 64-bit python-versions to build matrix.
        • Enabled fast_finish to exit immediately on error.
      • Added Azure-Pipelines workflow for MacOS envs:
        • Added and configured azure-pipelines.yml to enable Azure-Pipelines workflow for MacOS-based Testing Envs.
        • Added code coverage through codecov workflow for measuring unit-tests effectiveness.
          • Added online auto validation of codecov bash script using SHA256SUM and sig files as recommended.
        • Implemented behavior to abort coverage upload on timeout(error code 124) in pytests.
        • Added 3.7+ python-versions to build matrix.
      • Added automated flake8 testing to discover any anomalies in code.
      • Added master branches for triggering CI.
    • Implement new automated Docs Building and Deployment on gh-pages through GitHub Actions workflow:
      • Added new workflow yaml docs_deployer.yml for automated docs deployment.
      • Added different jobs with ubuntu-latest environment to build matrix.
      • Added actions/checkout@v2 for repo checkout and actions/setup-python@v2 for python environment.
      • Pinned python version to 3.8 for python environment in docs building.
      • Added GIT_TOKEN, GIT_NAME, GIT_EMAIL environment variables through secrets.
      • Added Mkdocs Material theme related python dependencies and environments.
      • Added push on master and dev branch release with published as triggers.
      • Pinned mkdocstrings==0.17.0.
    • Added new Automated Docs Versioning:
      • Implemented Docs versioning through mike.
      • Separate new workflow steps to handle different versions.
      • Added step to auto-create RELEASE_NAME environment variable from DeFFcode version file.
      • Update docs deploy workflow to support latest, release and dev builds.
      • Added automatic release version extraction from GitHub events.
    • Added Skip Duplicate Actions Workflow to DeFFcode Docs Deployer:
      • Added Skip Duplicate Actions(fkirc/skip-duplicate-actions@master) Workflow to DeFFcode Docs Deployer to prevent redundant duplicate workflow-runs.
  • Maintenance:
    • New DeFFcode project issue and PR templates:
      • Added PR template:
        • Added a pull request template(PULL_REQUEST_TEMPLATE.md) for project contributors to automatically see the template's contents in the pull request body.
        • Added Brief Description, Requirements / Checklist, Related Issue, Context, Types of changes blocks.
      • Added Proposal, Bug-Report and Question templates:
        • Created an ISSUE_TEMPLATE subdirectory to contain multiple issue templates.
        • Added manually-created Proposal(proposal.md) and Question(question.md) issue templates for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Any Other Information like blocks.
        • Added a manually-created Bug Report(bug_report.md) issue template to ISSUE_TEMPLATE subdirectory for project contributors to automatically see the template's contents in the issue body.
          • Added Brief Description, Acknowledgment, Context, Current Environment, Expected Behavior, Actual Behavior, Possible Fix, Steps to reproduce, Miscellaneous like blocks.
        • Added YAML frontmatter to each issue template to pre-fill the issue title, automatically add labels and assignees, and give the template a name and description.
        • Added a config.yml file to the .github/ISSUE_TEMPLATE folder to customize the issue template chooser that people see when creating a new issue.
        • Set blank_issues_enabled parameter to false to encourage contributors to use issue templates.
        • Added contact_links parameter with gitter community link to receive regular issues outside of GitHub.
      • Added new FUNDING.yml with ko-fi donation link.
      • Added .gitattributes for DeFFcode, that set the default behavior, in case people don't have core.autocrlf set.
      • Imported Codecov config(codecov.yml) from vidgear to modify coverage parameters.
  • Tests:
    • Added DeFFcode unit tests with pytest:
      • Added essential.py for defining all essential functions necessary for DeFFcode unit tests.
      • Added return_static_ffmpeg, remove_file_safe, return_testvideo_path, return_generated_frames_path, actual_frame_count_n_frame_size essential functions.
      • Added is_windows global variable.
      • Added related imports and logging.
      • Added __init__.py.
      • Moved all files to test folder.
      • Added DeFFcode's utils unit tests with pytest.
        • Added new test_loggerhandler and test_dict2Args tests.
      • Added DeFFcode's ffhelper unit tests with pytest.
        • Added new test_ffmpeg_binaries_download, test_validate_ffmpeg, test_get_valid_ffmpeg_path, test_check_sp_output, test_is_valid_url, test_is_valid_image_seq, and test_validate_imgseqdir parametrize tests.
      • Added DeFFcode's Sourcer API unit tests with pytest.
        • Added new test_source and test_probe_stream_n_retrieve_metadata parametrize tests.
      • Added DeFFcode's FFdecoder API unit tests with pytest.
        • Added new test_source_playback, test_frame_format, test_metadata, test_seek_n_save, and test_FFdecoder_params parametrize unit tests.
      • Added related imports and logging.
      • Added unit test for delete_file_safe utils function.
  • Bash:
    • \ud83d\udd27 Imported prepare_dataset.sh from vidgear for downloading pytest datasets to temp dir.
Updates/Improvements
  • FFdecoder API:
    • Removed redundant forcing -r FFmpeg parameter for image sequences as source.
    • Removed redundant checks on -vf FFmpeg parameter.
    • FFmpeg parameter -s will be discarded in favor of -custom_resolution attribute.
    • Replaced -constant_framerate with FFmpeg -framerate attribute.
    • Replaced -custom_source_params with correct -custom_sourcer_params attribute.
    • Renamed operational_mode metadata parameter to ffdecoder_operational_mode.
  • Sourcer API:
    • Converted all Sourcer APIs public available variables into private ones for stability.
    • All of Sourcer's publicly accessed variables were used as metadata values in FFdecoder, and were therefore replaced with their dictionary counterparts.
    • Moved FFmpeg path validation and handling to Sourcer from FFdecoder API.
    • Moved -ffmpeg_download_path dictionary attribute to Sourcer API's sourcer_params parameter.
    • Moved dependencies and related functions.
  • CI:
    • Excluded dev branch from triggering workflow on any environment.
      • Updated yaml files to exclude beta dev branch from triggering workflow on any environment.
      • Restricted codecov to use only master branch.
    • Re-implemented fkirc/skip-duplicate-actions@master to skip individual deploy steps instead of skipping entire jobs
  • Docs:
    • Updated PR.md
      • Added instructions to download prepare_dataset.sh using curl.
      • Updated dependencies for pytest.
    • Updated advanced.md
      • Updated generating Video from Image sequence to save video using OpenCV writer instead of WriteGear API.
      • Added frame_format=\"bgr24\" and additional instructions regarding OpenCV writer.
      • Updated example codes with new changes.
      • Rearranged examples placement.
    • Updates to custom.css
      • Added donation sponsor link in page footer with heart animation.
      • Added bouncing heart animation through pure CSS.
      • Added Bold property to currently highlighted link in Navigation Bar.
      • Updated Navigation Bar title font size.
      • Updated version list text to uppercase and bold.
      • Updated icon for task list unchecked.
      • Added more top-padding to docs heading.
      • Updated Block quote symbol and theming.
      • Updated Custom Button theming to match docs.
      • Added new custom classes to create shadow effect in dark mode for better visibility.
      • Updated dark mode theme \"slate\" hue to 285.
    • Updated admonitions colors.
    • Updated gitter sidecard UI colors and properties.
    • Reflected recent changes in Sourcer and FFdecoder API's metadata.
    • Updated sample code formatting from sh to json.
    • Added missing docs for delete_file_safe utils function.
    • Updated Download Test Datasets instructions.
    • Updated contribution guidelines and installation docs with related changes.
    • Updated License Notice.
    • Updated code comments.
    • Updated logging messages.
    • Updated Deffcode Logo and Tagline to be dark-mode friendly.
    • Adjusted asset alignment.
    • Updated example code.
    • Updated Installation instructions, Requirements and Roadmap.
    • Corrected links to documents.
    • Updated project description.
    • Updated LICENSE.
    • Updated indentation and code comments
    • Re-aligned text and images in README.md
    • Adjusted image classes and width.
  • Maintenance:
    • Updated LICENSE notice to add vidgear notice.
    • Bumped version to 0.2.0
    • Added useful comments for convenience.
Breaking Updates/Changes
  • Sourcer API will now raise an Assertion error if probe_stream() is not called before calling retrieve_metadata().
  • Only -framerate values greater than 0.0 are now valid.
  • Renamed decode_stream to probe_stream in Sourcer API.
  • Any of video bitrate or video framerate are sufficient to validate if source contains valid video stream(s).
  • Any of audio bitrate or audio samplerate are sufficient to validate if source contains valid audio stream(s).
Bug-fixes
  • APIs:
    • Added missing delete_file_safe function in utils.
      • Imported delete_file_safe from vidgear to safely delete files at given path.
    • Fixed forward slash bugs in regex patterns.
    • Fixed IndexError when no bitrate was discovered in given source.
    • Fixed FFmpeg subprocess pipeline not terminating gracefully in FFdecoder API.
    • Fixed __version__ not defined in DeFFcode's __init__.py that throws AttributeError: module 'deffcode' has no attribute '__version__' on query.
      • Added necessary import in __init__.py.
  • Docs:
    • Fixed missing \"-vcodec\": \"h264_cuvid\" value in example code.
    • Fixed typos in filenames in utils.py
    • Fixed internal missing or invalid hyperlinks.
    • Fixed improper docs context and typos.
    • Fixed \"year\" in license notice.
    • Fixed content spacing.
    • Fixed Gitter Community Link in Mkdocs.
    • Fixed typos in README.md.
    • Fixed typos in license notices.
    • Fixed typos in code comments.
    • Fixed typos in example code.
  • CI:
    • Fixed missing FFmpeg dependency bug in GitHub Actions.
    • Fixed typo in Docs Deployer yaml.
    • Fixed if condition being skipped when it needs skipping
  • Maintenance:
    • Added missing imports.
    • Fixed redundant conditional logics.
    • Removed or Replaced redundant conditions and definitions.
    • Fixed minor typos in templates.
Pull Requests
  • PR #5
  • PR #6
  • PR #8
  • PR #9
  • PR #11
  • PR #12
  • PR #13
  • PR #14
"},{"location":"changelog/#v010-2022-03-07","title":"v0.1.0 (2022-03-07)","text":"New Features
  • Open-Sourced DeFFcode under the Apache 2.0 License.
  • Added new Classes(APIs):
    • FFdecoder: Performant Real-time Video frames Generator for generating blazingly fast video frames(RGB ndarray by default).
    • Sourcer: Extracts source video metadata (bitrate, resolution, framerate, nframes etc.) using its subprocess FFmpeg output.
  • Added new Helper functions:
    • ffhelper: Backend FFmpeg Wrapper that handles all subprocess transactions and gather data.
    • utils: Handles all additional Utilizes required for functioning of DeFFcode.
  • First PyPi Release:
    • Released DeFFcode to Python Package Index (PyPI)
    • Added setup.py and related metadata.
    • Added version.py
  • Docs:
    • Added abstract and related information in README.md
    • Added installation instructions.
    • Added preliminary usage examples.
  • Maintenance:
    • Added LICENSE.
    • Added .gitignore
Updates/Improvements
  • Maintenance:
    • Bumped version to 0.1.0
    • Updated LICENSE notice to add vidgear code usage notice.
Breaking Updates/Changes
  • Fixed support for Python-3.7 and above legacies only.
Bug-fixes
  • Docs:
    • Fixed hyperlinks in README.
    • Fixed indentation and spacing.
    • Fixed typos and updated context.
    • Removed dead code.
"},{"location":"help/","title":"Helping Us","text":"

Liked DeFFcode? Would you like to help DeFFcode, other users, and the author?

There are many simple ways to help us:

"},{"location":"help/#star-deffcode-on-github","title":"Star DeFFcode on GitHub","text":"

You can star DeFFcode on GitHub:

It helps us a lot by making it easier for others to find & trust this library. Thanks!

"},{"location":"help/#help-others-with-issues-on-github","title":"Help others with issues on GitHub","text":"

You can see through any opened or pinned existing issues on our GitHub repository, and try helping others, wherever possible:

"},{"location":"help/#watch-the-github-repository","title":"Watch the GitHub repository","text":"

You can watch \ud83d\udc40 DeFFcode Activities on GitHub:

When you watch a repository, you will be notified of all conversations for that repository, including when someone creates a new issue, or pushes a new pull request.

You can try helping solving those issues, or give valuable feedback/review on new Pull Requests.

"},{"location":"help/#tweet-about-deffcode","title":"Tweet about DeFFcode","text":"

Tweet about DeFFcode and Spread the word \ud83d\udde3:

Tweet #deffcode

Let others know how you are using DeFFcode and why you like it!

"},{"location":"help/#helping-author","title":"Helping Author","text":"

Donations help keep DeFFcode's development alive and motivate me (as author).

It is something I am doing with my own free time. But so much more needs to be done, and I need your help to do this. For just the price of a cup of coffee, you can make a difference

Thanks a million!

"},{"location":"help/#connect-with-author","title":"Connect with Author","text":"

You can connect with me, the author \ud83d\udc4b:

  • Follow author on GitHub:
  • Follow author on Twitter: Follow @abhi_una12
  • Get in touch with author on Linkedin:

"},{"location":"license/","title":"License","text":"

This library is released under the Apache 2.0 License.

"},{"location":"license/#copyright-notice","title":"Copyright Notice","text":"
Copyright (c) 2021 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n  http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n
"},{"location":"contribution/","title":"Overview","text":""},{"location":"contribution/#contribution-overview","title":"Contribution Overview","text":"

Contributions are always welcomed

We'd love your contribution to DeFFcode in order to fix bugs or to implement new features!

"},{"location":"contribution/#submission-guidelines","title":"Submission Guidelines","text":"
  • Submitting an Issue Guidelines \u27b6
  • Submitting Pull Request(PR) Guidelines \u27b6
"},{"location":"contribution/#submission-contexts","title":"Submission Contexts","text":""},{"location":"contribution/#got-a-question-or-problem","title":"Got a question or problem?","text":"

For quick questions, please refrain from opening an issue, instead you can reach us on Gitter community channel.

"},{"location":"contribution/#found-a-typo","title":"Found a typo?","text":"

There's no need to contribute for some typos. Just reach us on Gitter \u27b6 community channel, We will correct them in (less than) no time.

"},{"location":"contribution/#found-a-bug","title":"Found a bug?","text":"

If you encountered a bug, you can help us by submitting an issue in our GitHub repository. Even better, you can submit a Pull Request(PR) with a fix, but make sure to read the guidelines \u27b6.

"},{"location":"contribution/#request-for-a-featureimprovement","title":"Request for a feature/improvement?","text":"Subscribe to Github Repository

You can subscribe our GitHub Repository to receive notifications through email for new pull requests, commits and issues that are created in DeFFcode. Learn more about it here \u27b6

You can request our GitHub Repository for a new feature/improvement based on the type of request:

Please submit an issue with a proposal template for your request to explain how it benefits everyone in the community.

  • Major Feature Requests: If you require a major feature for DeFFcode, then first open an issue and outline your proposal so that it can be discussed. This will also allow us to better coordinate our efforts, prevent duplication of work, and help you to craft the change so that it is successfully accepted into the project. The proposed feature, if accepted, may take time based on its complexity and availability/time-schedule of our maintainers, but once it's completed, you will be notified right away. Please be patient!

  • Minor Feature Requests: Small features and bugs resolved on priority. You just have to submit an issue to our GitHub Repository.

"},{"location":"contribution/PR/","title":"Submitting Pull Request(PR) Guidelines:","text":"

The following guidelines tell you how to submit a valid PR for DeFFcode:

Working on your first Pull Request for DeFFcode?

  • You can learn about \"How to contribute to an Open Source Project on GitHub\" from this doc \u27b6
  • If you're stuck at something, please join our Gitter community channel. We will help you get started!

"},{"location":"contribution/PR/#clone-branch-for-pr","title":"Clone branch for PR","text":"

You can clone your Forked remote git to local and create your PR working branch as a sub-branch of latest master branch as follows:

Make sure the master branch of your Forked repository is up-to-date with DeFFcode, before starting working on a Pull Request.

# clone your forked repository(change with your username) and get inside\ngit clone https://github.com/{YOUR USERNAME}/DeFFcode.git && cd DeFFcode\n\n# pull any recent updates\ngit pull\n\n# Now create your new branch with suitable name(such as \"subbranch_of_master\")\ngit checkout -b subbranch_of_master\n

Now after working with this newly created branch for your Pull Request, you can commit and push or merge it locally or remotely as usual.

"},{"location":"contribution/PR/#pr-submission-checklist","title":"PR Submission Checklist","text":"

There are some important checks you need to perform while submitting your Pull Request(s) for DeFFcode library:

  • Submit a Related Issue:

  • The first thing you do is submit an issue with a proposal template for your work first and then work on your Pull Request.

  • Submit a Draft Pull Request:

  • Submit the draft pull request from the first day of your development.

  • Add a brief but descriptive title for your PR.
  • Explain what the PR adds, fixes, or improves.
  • In case of bug fixes, add a new unit test case that would fail against your bug fix.
  • Provide output or screenshots, if you can.
  • Make sure your pull request passed all the CI checks (triggers automatically on pushing commits against master branch). If it's somehow failing, then ask the maintainer for a review.
  • Click \"ready for review\" when finished.

  • Test, Format & lint code locally:

  • Make sure to test, format, and lint the modified code locally before every commit. The details are discussed below \u27b6

  • Make sensible commit messages:

  • If your pull request fixes a separate issue number, remember to include \"resolves #issue_number\" in the commit message. Learn more about it here \u27b6.

  • Keep the commit message as concise as possible at every submit. You can make a supplement to the previous commit with git commit --amend command.

  • Perform Integrity Checks:

    Any duplicate pull request will be Rejected!

  • Search GitHub if there's a similar open or closed PR that relates to your submission.

  • Check if your proposed code matches the overall direction of the DeFFcode APIs and improves it.
  • Retain copyright for your contributions, but also agree to license them for usage by the project and author(s) under the Apache 2.0 license \u27b6.

  • Link your Issues:

    For more information on Linking a pull request to an issue, See this doc\u27b6

  • Finally, when you're confident enough, make your pull request public.

  • You can link an issue to a pull request manually or using a supported keyword in the pull request description. It helps collaborators see that someone is working on the issue. For more information, see this doc\u27b6

"},{"location":"contribution/PR/#testing-formatting-linting","title":"Testing, Formatting & Linting","text":"

All Pull Request(s) must be tested, formatted & linted against our library standards as discussed below:

"},{"location":"contribution/PR/#requirements","title":"Requirements","text":"

Testing DeFFcode requires additional test dependencies and dataset, which can be handled manually as follows:

  • Install additional python libraries:

    You can easily install these dependencies via pip:

    # Install opencv(only if not installed previously)\n$ pip install opencv-python\n\n# install rest of dependencies\n$ pip install --upgrade flake8 black pytest vidgear[core]\n
  • Download Tests Dataset:

    To perform tests, you also need to download additional dataset (to your temp dir) by running prepare_dataset.sh bash script as follows:

    On Linux/MacOSOn Windows
    $ chmod +x scripts/bash/prepare_dataset.sh\n$ ./scripts/bash/prepare_dataset.sh\n
    $ sh scripts/bash/prepare_dataset.sh\n
"},{"location":"contribution/PR/#running-tests","title":"Running Tests","text":"

All tests can be run with pytest(in DeFFcode's root folder) as follows:

$ pytest -sv  #-sv for verbose output.\n
"},{"location":"contribution/PR/#formatting-linting","title":"Formatting & Linting","text":"

For formatting and linting, following libraries are used:

  • Flake8: You must run flake8 linting for checking the code base against the coding style (PEP8), programming errors and other cyclomatic complexity:

    $ flake8 {source_file_or_directory} --count --select=E9,F63,F7,F82 --show-source --statistics\n
  • Black: DeFFcode follows black formatting to make code review faster by producing the smallest diffs possible. You must run it with sensible defaults as follows:

    $ black {source_file_or_directory}\n

"},{"location":"contribution/PR/#frequently-asked-questions","title":"Frequently Asked Questions","text":"

Q1. Why are my changes taking so long to be Reviewed and/or Merged?

Submission Aftermaths

  • After your PR is merged, you can safely delete your branch and pull the changes from the main (upstream) repository.
  • The changes will remain in dev branch until next DeFFcode version is released, then it will be merged into master branch.
  • After a successful Merge, your newer contributions will be given priority over others.

Pull requests will be reviewed by the maintainers and the rationale behind the maintainer\u2019s decision to accept or deny the changes will be posted in the pull request. Please wait for our code review and approval, possibly enhancing your change on request.

Q2. Would you accept a huge Pull Request with Lots of Changes?

First, make sure that the changes are somewhat related. Otherwise, please create separate pull requests. Anyway, before submitting a huge change, it's probably a good idea to open an issue in the DeFFcode Github repository to ask the maintainers if they agree with your proposed changes. Otherwise, they could refuse your proposal after you put all that hard work into making the changes. We definitely don't want you to waste your time!

"},{"location":"contribution/issue/","title":"Submitting an Issue Guidelines","text":"

If you've found a new bug or you've come up with some new feature which can improve the quality of the DeFFcode, then related issues are welcomed! But, Before you do, please read the following guidelines:

First Issue on GitHub?

You can easily learn about it from creating an issue wiki.

Info

Please note that your issue will be fixed much faster if you spend about half an hour preparing it, including the exact reproduction steps and a demo. If you're in a hurry or don't feel confident, it's fine to report issues with less details, but this makes it less likely they'll get fixed soon.

"},{"location":"contribution/issue/#search-the-docs-and-previous-issues","title":"Search the Docs and Previous Issues","text":"
  • Remember to first search GitHub for an open or closed issue that relates to your submission or has already been reported. You may find related information and the discussion might inform you of workarounds that may help to resolve the issue.
  • For quick questions, please refrain from opening an issue, as you can reach us on Gitter community channel.
  • Also, go comprehensively through our dedicated FAQ & Troubleshooting section.
"},{"location":"contribution/issue/#gather-required-information","title":"Gather Required Information","text":"
  • All DeFFcode APIs provides a verbose boolean flag in parameters, to log debugged output to terminal. Kindly turn this parameter True in the respective API for getting debug output, and paste it with your Issue.
  • In order to reproduce bugs we will systematically ask you to provide a minimal reproduction code for your report.
  • Check and paste, exact DeFFcode version by running command python -c \"import deffcode; print(deffcode.__version__)\".
"},{"location":"contribution/issue/#follow-the-issue-template","title":"Follow the Issue Template","text":"
  • Please format your issue by choosing the appropriate template.
  • Any improper/insufficient reports will be marked Invalid \u26d4, and if we don't hear back from you we may close the issue.
"},{"location":"contribution/issue/#raise-the-issue","title":"Raise the Issue","text":"
  • Add a brief but descriptive title for your issue.
  • Keep the issue phrasing in context of the problem.
  • Attach source-code/screenshots if you have one.
  • Finally, raise it by choosing the appropriate Issue Template: Bug report \ud83d\udc1e, Idea \ud83d\udca1, Question \u2754.
"},{"location":"help/get_help/","title":"Getting Help","text":"Courtesy - tenor

Would you like to get help with DeFFcode?

There are several ways to get help with DeFFcode:

"},{"location":"help/get_help/#join-our-gitter-community-channel","title":"Join our Gitter Community channel","text":"

Have you come up with some new idea \ud83d\udca1 or looking for the fastest way to troubleshoot your problems

Join and chat on our Gitter Community channel:

There you can ask quick questions, swiftly troubleshoot your problems, help others, share ideas & information, etc.

"},{"location":"help/get_help/#this-is-what-you-do-when","title":"This is what you do when...","text":"
  • Got a question or problem?
  • Found a typo?
  • Found a bug?
  • Missing a feature/improvement?
"},{"location":"help/get_help/#reporting-an-issues","title":"Reporting an issues","text":"

Want to report a bug? Suggest a new feature?

Before you do, please read our guidelines \u27b6

"},{"location":"help/get_help/#preparing-a-pull-request","title":"Preparing a Pull Request","text":"

Interested in contributing to DeFFcode?

Before you do, please read our guidelines \u27b6

"},{"location":"installation/","title":"Overview","text":""},{"location":"installation/#installation-notes","title":"Installation Notes","text":""},{"location":"installation/#supported-systems","title":"Supported Systems","text":"

DeFFcode is well-tested and supported on the following systems(but not limited to), with python 3.7+ and pip installed:

Upgrade your pip

It is strongly advised to upgrade to the latest pip before installing deffcode to avoid any undesired installation error(s).

There are two mechanisms to upgrade pip:

pipensurepip

You can use existing pip to upgrade itself:

Install pip if not present
  • Download the script, from https://bootstrap.pypa.io/get-pip.py.
  • Open a terminal/command prompt, cd to the folder containing the get-pip.py file and run:
Linux/MacOSWindows
python get-pip.py\n
py get-pip.py\n

More details about this script can be found in pypa/get-pip\u2019s README.

Linux/MacOSWindows
python -m pip install pip --upgrade\n
py -m pip install pip --upgrade\n

Python also comes with an ensurepip module1, which can easily upgrade/install pip in any Python environment.

Linux/MacOSWindows
python -m ensurepip --upgrade\n
py -m ensurepip --upgrade\n
  • Any Linux distro released in 2016 or later
  • Windows 7 or later
  • MacOS 10.12.6 (Sierra) or later

"},{"location":"installation/#supported-python-legacies","title":"Supported Python legacies","text":"

Python 3.7+ are only supported legacies for installing DeFFcode v0.1.0 and above.

"},{"location":"installation/#prerequisites","title":"Prerequisites","text":"

DeFFcode APIs requires FFmpeg binaries to be installed for all of its core functionality.

"},{"location":"installation/#ffmpeg","title":"FFmpeg","text":"

When installing DeFFcode, FFmpeg is the only prerequisites you need to configure/install manually. You could easily do it by referring FFmpeg Installation doc.

"},{"location":"installation/#installation","title":"Installation","text":""},{"location":"installation/#a-installation-using-pip-recommended","title":"A. Installation using pip (Recommended)","text":"

Best option for easily getting stable DeFFcode installed.

Installation is as simple as:

Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest stable release\npython -m pip install -U deffcode\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest stable release\npython -m pip install --upgrade --user deffcode\n

Or, If you're using py as alias for installed python, then:

# Install latest stable release\npy -m pip install --upgrade --user deffcode\n
# Install latest stable release\npip install -U deffcode\n

And you can also download its wheel (.whl) package from our repository's releases section, thereby can be installed as follows:

# Install latest release\npip install deffcode-0.2.0-py3-none-any.whl\n

"},{"location":"installation/#b-installation-from-source","title":"B. Installation from Source","text":"

Best option for trying latest patches(maybe experimental), forking for Pull Requests, or automatically installing all prerequisites(with a few exceptions).

Installation using dev branch

If you're looking for latest work-in-progress enhancements or bug-fixes, then you want to checkout our beta dev branch with the following commands:

The beta dev branch at times can be very unstable or even unusable, User discretion is advised!

# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# checkout the dev beta branch\ngit checkout dev\n\n# Install it\npip install -U .\n
Windows Installation

If you are using Windows, some of the commands given below, may not work out-of-the-box.

A quick solution may be to preface every Python command with python -m like this:

# Install latest beta branch\npython -m pip install -U .\n

And, If you don't have the privileges to the directory you're installing package. Then use --user flag, that makes pip install packages in your home directory instead:

# Install latest beta branch\npython -m pip install --upgrade --user .\n

Or, If you're using py as alias for installed python, then:

# Install latest beta branch\npy -m pip install --upgrade --user .\n
# clone the repository and get inside\ngit clone https://github.com/abhiTronix/deffcode.git && cd deffcode\n\n# Install it\npip install -U .\n

  1. The ensurepip module is missing/disabled on Ubuntu. Use pip method only.\u00a0\u21a9

"},{"location":"installation/ffmpeg_install/","title":"FFmpeg Installation Doc","text":"

DeFFcode APIs requires FFmpeg binaries to be installed for all of its core functionality.

You can follow the machine-specific instructions below for its configuration/installation:

DeFFcode APIs will throw RuntimeError, if they fail to detect valid FFmpeg executables on your system.

Enable verbose (verbose=True) for debugging FFmpeg validation process.

"},{"location":"installation/ffmpeg_install/#linux-ffmpeg-installation","title":"Linux FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on a Linux OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection","title":"A. Auto-Detection","text":"

This is a recommended approach on Linux Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the Linux OS systems.

You can easily install official FFmpeg according to your Linux Distro by following this post \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Linux Static Binaries (based on your machine architecture) from the link below:

    Linux Static Binaries: http://johnvansickle.com/ffmpeg/

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#windows-ffmpeg-installation","title":"Windows FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Installation and Manual Configuration methods on Windows OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-installation","title":"A. Auto-Installation","text":"

This is a recommended approach on Windows Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-generate the required FFmpeg Static Binaries from our dedicated Github Server into the temporary directory(e.g. C:\\Temp) of your machine on the Windows OS systems.

Active Internet connection is required while downloading required FFmpeg Static Binaries from our dedicated Github Server onto your Windows machine.

Important Information regarding Auto-Installation
  • The files downloaded to a temporary directory (e.g. C:\\TEMP) may get erased if your machine shuts down/restarts in some cases.

  • You can also provide a custom save path for auto-downloading FFmpeg Static Binaries through exclusive -ffmpeg_download_path attribute in Sourcer API.

    How to use -ffmpeg_download_path attribute in FFdecoder API?

    -ffmpeg_download_path is also available in FFdecoder API through the -custom_sourcer_params attribute of its ffparams dictionary parameter.

  • If binaries were found at the specified path, DeFFcode APIs automatically skips the Auto-Installation step.

  • If the required FFmpeg static binary fails to download, extract, or validate during Auto-Installation, then DeFFcode APIs will exit with RuntimeError!

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_1","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest Windows Static Binaries (based on your machine arch(x86/x64)) from the link below:

    Windows Static Binaries: https://ffmpeg.org/download.html#build-windows

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'C:/foo/Downloads/ffmpeg/bin') or path of ffmpeg.exe executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"installation/ffmpeg_install/#macos-ffmpeg-installation","title":"MacOS FFmpeg Installation","text":"

DeFFcode APIs supports Auto-Detection and Manual Configuration methods on MacOS OS machines:

"},{"location":"installation/ffmpeg_install/#a-auto-detection_1","title":"A. Auto-Detection","text":"

This is a recommended approach on MacOS Machines

If DeFFcode APIs do not receive any input from the user on custom_ffmpeg parameter, then they try to auto-detect the required FFmpeg installed binaries through a validation test that employs subprocess python module on the MacOS systems.

You can easily install FFmpeg on your MacOS machine by following this tutorial \u27b6

"},{"location":"installation/ffmpeg_install/#b-manual-configuration_2","title":"B. Manual Configuration","text":"
  • Download: You can also manually download the latest MacOS Static Binaries (only x64 Binaries) from the link below:

    MacOS Static Binaries: https://ffmpeg.org/download.html#build-mac

  • Assignment: Then, you can easily assign the custom path to the folder containing FFmpeg executables(for e.g 'ffmpeg/bin') or path of ffmpeg executable itself to the custom_ffmpeg parameter in the DeFFcode APIs.

    If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

"},{"location":"recipes/advanced/","title":"Advanced Recipes","text":"

The following challenging recipes will take your skills to the next level and will give access to new DeFFcode techniques, tricky examples, and advanced FFmpeg parameters:

Courtesy - tenor

Refer Basic Recipes first!

If you're just getting started, check out the Beginner's Basic Recipes first before trying these advanced recipes.

Any proficiency with OpenCV-Python will be Helpful

Any proficiency with OpenCV-Python (Python API for OpenCV) will surely help you with these recipes.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

"},{"location":"recipes/advanced/#advanced-decoding-recipes","title":"Advanced Decoding Recipes","text":"
  • Decoding Live Virtual Sources
    • Generate and Decode frames from Sierpinski pattern
    • Generate and Decode frames from Test Source pattern
    • Generate and Decode frames from Gradients with custom Text effect
    • Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms
    • Generate and Decode frames from Game of Life Visualization
  • Decoding Live Feed Devices
    • Capturing and Previewing frames from a Webcam using Custom Demuxer
    • Capturing and Previewing frames from your Desktop (Screen Recording)
  • Hardware-Accelerated Video Decoding
    • CUVID-accelerated Hardware-based Video Decoding and Previewing
    • CUDA-accelerated Hardware-based Video Decoding and Previewing
"},{"location":"recipes/advanced/#advanced-transcoding-recipes","title":"Advanced Transcoding Recipes","text":"
  • Transcoding Live Complex Filtergraphs
    • Transcoding video with Live Custom watermark image overlay
    • Transcoding video from sequence of Images with additional filtering
  • Transcoding Video Art with Filtergraphs
    • Transcoding video art with YUV Bitplane Visualization
    • Transcoding video art with Jetcolor effect
    • Transcoding video art with Ghosting effect
    • Transcoding video art with Pixelation effect
  • Hardware-Accelerated Video Transcoding
    • CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API
    • CUDA-NVENC-accelerated Video Transcoding with WriteGear API
    • CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API
"},{"location":"recipes/advanced/#advanced-metadata-recipes","title":"Advanced Metadata Recipes","text":"
  • Updating Video Metadata
    • Added new attributes to metadata in FFdecoder API
    • Overriding source video metadata in FFdecoder API
"},{"location":"recipes/advanced/decode-hw-acceleration/","title":"Hardware-Accelerated Video Decoding","text":"

FFmpeg offer access to dedicated GPU hardware with varying support on different platforms for performing a range of video-related tasks to be completed faster or using less of other resources (particularly CPU).

By default, DeFFcode's FFdecoder API uses the Input Source's video-decoder (extracted using Sourcer API) itself for decoding its input. However, you could easily change the video-decoder to your desired specific supported Video-Decoder using FFmpeg options by way of its ffparams dictionary parameter. This feature provides easy access to GPU Accelerated Hardware Decoder in FFdecoder API that will generate faster video frames while using little to no CPU power, as opposed to CPU intensive Software Decoders.

We'll discuss its Hardware-Accelerated Video Decoding capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST requires valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuvid-accelerated-hardware-based-video-decoding-and-previewing","title":"CUVID-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

  • Using h264_cuvid decoder: Remember to check if your FFmpeg compiled with H.264 CUVID decoder support by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    Verifying H.264 CUVID decoder support in FFmpeg
    $ ffmpeg  -hide_banner -decoders | grep cuvid\n\nV..... av1_cuvid            Nvidia CUVID AV1 decoder (codec av1)\nV..... h264_cuvid           Nvidia CUVID H264 decoder (codec h264)\nV..... hevc_cuvid           Nvidia CUVID HEVC decoder (codec hevc)\nV..... mjpeg_cuvid          Nvidia CUVID MJPEG decoder (codec mjpeg)\nV..... mpeg1_cuvid          Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)\nV..... mpeg2_cuvid          Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)\nV..... mpeg4_cuvid          Nvidia CUVID MPEG4 decoder (codec mpeg4)\nV..... vc1_cuvid            Nvidia CUVID VC1 decoder (codec vc1)\nV..... vp8_cuvid            Nvidia CUVID VP8 decoder (codec vp8)\nV..... vp9_cuvid            Nvidia CUVID VP9 decoder (codec vp9)\n

    You can also use any of above decoder in the similar way, if supported.

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's H.264 CUVID Video decoder in FFdecoder API to achieve GPU-accelerated hardware video decoding of YUV420p frames from a given Video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparams dictionary parameter.

More information on Nvidia's CUVID can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": \"h264_cuvid\",  # use H.264 CUVID Video-decoder\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(YUV420p) frames\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"yuv420p\",  # use YUV420p frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the YUV420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-hw-acceleration/#cuda-accelerated-hardware-based-video-decoding-and-previewing","title":"CUDA-accelerated Hardware-based Video Decoding and Previewing","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters), thereby achieving GPU-accelerated decoding of NV12 pixel-format frames from a given video file (say foo.mp4), and preview them using OpenCV Library's cv2.imshow() method.

NV12(for 4:2:0 input) and NV21(for 4:4:4 input) are the only supported pixel formats. You cannot change pixel format to any other since NV-accelerated video codec supports only them.

NV12 is a biplanar format with a full sized Y plane followed by a single chroma plane with weaved U and V values. NV21 is the same but with weaved V and U values. The 12 in NV12 refers to 12 bits per pixel. NV12 has a half width and half height chroma channel, and therefore is a 420 subsampling. NV16 is 16 bits per pixel, with half width and full height. aka 422. NV24 is 24 bits per pixel with full sized chroma channel. aka 444. Most NV12 functions allow the destination Y pointer to be NULL.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparams dictionary parameter.

More information on Nvidia's GPU Accelerated Decoding can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-enforce_cv_patch\": True # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"fps=60.0,\"  # framerate 60.0fps in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose=True, # enable verbose output\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# grab the NV12 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/","title":"Decoding Live Feed Devices","text":"

DeFFcode's FFdecoder API provide effortless support for any Live Feed Devices using two parameters: source parameter which accepts device name or its path, and source_demuxer parameter to specify demuxer for the given input device.

We'll discuss the Live Feed Devices support using both these parameters briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST requires valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-a-webcam-using-custom-demuxer","title":"Capturing and Previewing frames from a Webcam using Custom Demuxer","text":"Example Assumptions

FFmpeg provide set of specific Demuxers on different platforms to read the multimedia streams from a particular type of Video Capture source/device. Please note that following recipe explicitly assumes:

  • You're running Linux Machine with USB webcam connected to it at node/path /dev/video0.
  • You already have appropriate Linux video drivers and related softwares installed on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libv4l2 flag to support video4linux2, v4l2 demuxer. BTW, you can list all supported demuxers using the ffmpeg --list-demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode BGR24 video frames from a USB webcam device connected at path /dev/video0 on a Linux Machine with video4linux2 (or simply v4l2) demuxer, and preview them using OpenCV Library's cv2.imshow() method.

Identifying and Specifying Video Capture Device Name/Path/Index and suitable Demuxer on different OS platforms Windows Linux MacOS

Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

  • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

    c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
  • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

    # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

  • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. a USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

    You can use sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

    $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
  • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

    # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

    You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

    # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

    Using device's indexUsing device's name
    # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

    # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

    # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-feed-devices/#capturing-and-previewing-frames-from-your-desktop","title":"Capturing and Previewing frames from your Desktop","text":"Example Assumptions

Similar to Webcam capturing, FFmpeg provide set of specific Demuxers on different platforms for capturing your desktop (Screen recording). Please note that following recipe explicitly assumes:

  • You're running Linux Machine with libxcb module installed properly on your machine.
  • Your machine uses FFmpeg binaries built with --enable-libxcb flag to support x11grab demuxer. BTW, you can list all supported demuxers using the ffmpeg --list-demuxers terminal command.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example we will decode live BGR video frames from your complete screen as well as a region in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OS platforms Windows Linux MacOS

Windows OS users can use the gdigrab demuxer to grab video from the Windows screen. You can refer to the following steps to specify the source for capturing different regions of your display:

For Windows OS users, dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

Linux OS users can use the x11grab demuxer to capture an X11 display. You can refer to the following steps to specify the source for capturing different regions of your display:

For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

  • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

    # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
  • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

    x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

    # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

MacOS users can use AVFoundation to list input devices; it is the currently recommended framework by Apple for stream grabbing on Mac OSX 10.7 (Lion) and later, as well as on iOS. You can refer to the following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

QTKit is also available for stream grabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

  • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
  • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

    # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
  • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

If these steps don't work for you, then reach out to us on the Gitter \u27b6 Community channel

Capturing entire desktopCapturing a region

For capturing all your displays as one big contiguous display in FFdecoder API:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

To limit capturing to a region, and show the area being grabbed:

x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/","title":"Decoding Live Virtual Sources","text":"

Instead of using prerecorded video files as streams, DeFFcode's FFdecoder API — with the help of the powerful lavfi (Libavfilter input virtual device) source, which reads data from the open output pads of a libavfilter filtergraph — is also capable of creating virtual video frames out of thin air in real time, which you might want to use as input for testing, compositing, and merging with other streams to obtain the desired output on-the-fly.

We'll discuss the recipes for generating Live Fake Sources briefly below:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs strictly require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise a RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional Python dependencies, which can be easily installed as shown below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-sierpinski-pattern","title":"Generate and Decode frames from Sierpinski pattern","text":"

The sierpinski graph generates a Sierpinski carpet/triangle fractal, and randomly pans around by a single pixel each frame.

Sierpinski carpet fractal

In this example we will generate and decode 8 seconds of a Sierpinski carpet fractal pattern of 1280x720 frame size and 30 framerate using sierpinski graph source with lavfi input virtual device in FFdecoder API, and preview decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# playback time of 8 seconds\nffparams = {\"-ffprefixes\": [\"-t\", \"8\"]}\n\n# initialize and formulate the decoder with \"sierpinski\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"sierpinski=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        cv2.imwrite('foo_image.gif', frame)\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-test-source-pattern","title":"Generate and Decode frames from Test Source pattern","text":"

The testsrc graph generates a test video pattern showing a color pattern, a scrolling gradient, and a timestamp. This is useful for testing purposes.

Test Source pattern

In this example we will generate and decode 10 seconds of a Test Source pattern (1280x720 frame size & 30 framerate) using testsrc graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n}\n\n# initialize and formulate the decoder with \"testsrc\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"testsrc=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-gradients-with-custom-text-effect","title":"Generate and Decode frames from Gradients with custom Text effect","text":"

The gradients graph (as the name suggests) generates several random gradients.

Gradients pattern with real-time text output

In this example we will generate and decode 15 seconds of Gradients using gradients graph source with lavfi input virtual device and also draw real-time text output (format HH::MM::SS) scrolling upward direction on it using drawtext filter in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

This example assumes you're running Windows machine. If not, then change fontfile parameter path in drawtext video filtergraph definition accordingly.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"15\"],  # playback time of 15 seconds\n    \"-vf\": \"drawtext=\"  # draw text\n    + \"text='%{localtime\\:%X}':\"  # real time text (HH::MM::SS)\n    + \"fontfile='c\\:\\/windows\\/fonts\\/arial.ttf':\"  # fontfile path (Only Windows)\n    + \"x=(w-text_w)/2:y=h-40*t:\"  # scroll upward effect\n    + \"fontsize=50:\"  # font size 50\n    + \"fontcolor=white\",  # font color white\n}\n\n\n# initialize and formulate the decoder with \n# \"gradients\" source for BGR24 output\ndecoder = FFdecoder(\n    \"gradients=n=3\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-mandelbrot-test-pattern-with-vectorscope-waveforms","title":"Generate and Decode frames from Mandelbrot test pattern with vectorscope & waveforms","text":"

The mandelbrot graph generates a Mandelbrot set fractal that progressively zooms towards a specific point.

Mandelbrot pattern with a Vectorscope & two Waveforms

In this example we will generate and decode 20 seconds of a Mandelbrot test pattern (1280x720 frame size & 30 framerate) using mandelbrot graph source with lavfi input virtual device with a vectorscope (plots 2 color component values) & two waveforms (plots YUV color component intensity) stacked to it in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"20\"],  # playback time of 20 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"split=4[a][b][c][d],\" # split input into 4 identical outputs.\n    + \"[a]waveform[aa],\"  # apply waveform on first output\n    + \"[b][aa]vstack[V],\"  # vertical stack 2nd output with waveform [V]\n    + \"[c]waveform=m=0[cc],\"  # apply waveform on 3rd output\n    + \"[d]vectorscope=color4[dd],\"  # apply vectorscope on 4th output\n    + \"[cc][dd]vstack[V2],\"  # vertical stack waveform and vectorscope [V2]\n    + \"[V][V2]hstack\",  # horizontal stack [V] and [V2] vertical stacks\n}\n\n# initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/decode-live-virtual-sources/#generate-and-decode-frames-from-game-of-life-visualization","title":"Generate and Decode frames from Game of Life Visualization","text":"

The life graph generates a life pattern based on a generalization of John Conway\u2019s life game. The sourced input represents a life grid; each pixel represents a cell which can be in one of two possible states, alive or dead. Every cell interacts with its eight neighbours, which are the cells that are horizontally, vertically, or diagonally adjacent. At each interaction the grid evolves according to the adopted rule, which specifies the number of neighbouring alive cells that will make a cell stay alive or be born.

Game of Life Visualization

In this example we will generate and decode 25 seconds of Game of Life Visualization using life graph source with lavfi input virtual device in FFdecoder API, all while previewing decoded frames using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define parameters\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"25\"],  # playback time of 25 seconds\n}\n\n# initialize and formulate the decoder with \"life\" source for BGR24 output\ndecoder = FFdecoder(\n    \"life=\"  # life graph\n    + \"s=640x480:\"  # grid size (in pixels)\n    + \"mold=10:\"  # cell mold speed\n    + \"r=36:\"  # framerate\n    + \"ratio=0.5:\"  # random fill ratio for the initial random grid\n    + \"death_color=#39FF14:\"  # color of dead cells\n    + \"life_color=#1d1160\" # color of living (or new born) cells\n    + \",scale=640:480:\" # frame size\n    + \"flags=16\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n    **ffparams\n).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/","title":"Transcoding Video Art with Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API unlocks the power of ffmpeg backend for creating real-time artistic generative video art using simple and complex filtergraphs, and decoding them into live video frames.

We'll discuss the Transcoding Video Art with Filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs strictly require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise a RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

The following recipes require additional Python dependencies, which can be easily installed as shown below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-yuv-bitplane-visualization","title":"Transcoding video art with YUV Bitplane Visualization","text":"

Based on the QCTools bitplane visualization, this video art has numerical values ranging between -1(no change) and 10(noisiest) for the Y (luminance), U and V (chroma or color difference) planes, yielding cool and different results for different values.

YUV Bitplane Visualization

Credit for this Video Art idea goes to ffmpeg-artschool - an AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Bitplane Visualization by binding the bit position of the Y, U, and V planes of a video file (say foo.mp4) by using FFmpeg's lutyuv filter and assigning them random values (between -1(no change) and 10(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\" # change input format to yuv444p\n    + \"lutyuv=\"  # use  lutyuv filter for binding bit position of the Y, U, and V planes\n    + \"y=if(eq({y}\\,-1)\\,512\\,if(eq({y}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{y}))*pow(2\\,{y}))):\".format(\n        y=3 # define `Y` (luminance) plane value (b/w -1 and 10)\n    )\n    + \"u=if(eq({u}\\,-1)\\,512\\,if(eq({u}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{u}))*pow(2\\,{u}))):\".format(\n        u=1 # define `U` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"v=if(eq({v}\\,-1)\\,512\\,if(eq({v}\\,0)\\,val\\,bitand(val\\,pow(2\\,10-{v}))*pow(2\\,{v}))),\".format(\n        v=3 # define `V` (chroma or color difference) plane value (b/w -1 and 10)\n    )\n    + \"format=yuv422p10le\", # change output format to yuv422p10le\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-jetcolor-effect","title":"Transcoding video art with Jetcolor effect","text":"

This video art uses FFmpeg's pseudocolor filter to create a Jetcolor effect which is high contrast, high brightness, and high saturation colormap that ranges from blue to red, and passes through the colors cyan, yellow, and orange. The jet colormap is associated with an astrophysical fluid jet simulation from the National Center for Supercomputer Applications.

Jetcolor effect

Credit for this Video Art idea goes to ffmpeg-artschool - an AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Jetcolor effect by changing frame colors of a video file (say foo.mp4) using FFmpeg's pseudocolor filter in different modes (values between 0 (cleaner) [default] and 2(noisiest)), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to `yuv444p`\n    + \"eq=brightness=0.40:saturation=8,\"  # default `brightness = 0.40` and `saturation=8`\n    + \"pseudocolor='\"  # dynamically controlled colors through `pseudocolor` filter\n    + \"if(between(val,0,85),lerp(45,159,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(159,177,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(177,70,(val-170)/(255-170))))):\"  # mode 0 (cleaner) [default]\n    + \"if(between(val,0,85),lerp(205,132,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(132,59,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(59,100,(val-170)/(255-170))))):\"  # mode 1\n    + \"if(between(val,0,85),lerp(110,59,(val-0)/(85-0)),\"\n    + \"if(between(val,85,170),lerp(59,127,(val-85)/(170-85)),\"\n    + \"if(between(val,170,255),lerp(127,202,(val-170)/(255-170))))):\"  # mode 2 (noisiest)\n    + \"i={mode}',\".format(\n        mode=0  # define mode value (b/w `0` and `2`) to control colors\n    )\n    + \"format=yuv422p10le\",  # change output format to `yuv422p10le`\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-ghosting-effect","title":"Transcoding video art with Ghosting effect","text":"

This video art uses FFmpeg\u2019s lagfun filter to create a video echo/ghost/trailing effect.

Ghosting effect

Credit for this Video Art idea goes to ffmpeg-artschool - an AMIA workshop featuring scripts, exercises, and activities to make art using FFmpeg.

In this example we will generate 8 seconds of Ghosting effect using FFmpeg's lagfun filter on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-filter_complex\": \"format=yuv444p[formatted];\"  # change video input format to yuv444p\n    + \"[formatted]split[a][b];\"  # split input into 2 identical outputs\n    + \"[a]lagfun=decay=.99:planes=1[a];\"  # apply lagfun filter on first output\n    + \"[b]lagfun=decay=.98:planes=2[b];\"  # apply lagfun filter on 2nd output\n    + \"[a][b]blend=all_mode=screen:c0_opacity=.5:c1_opacity=.5,\"  # apply screen blend mode both outputs\n    + \"format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-art-filtergraphs/#transcoding-video-art-with-pixelation-effect","title":"Transcoding video art with Pixelation effect","text":"

This video art uses FFmpeg\u2019s overlay, smartblur and stacks of dilation filters to intentionally Pixelate your video in artistically cool looking ways such that each pixel become visible to the naked eye.

Pixelation effect

This Video Art idea credits goes to oioiiooixiii blogspot.

In this example we will generate 8 seconds of Pixelation effect using FFmpeg\u2019s smartblur and stacks of dilation filters overlayed on a video file (say foo.mp4), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"8\"],  # playback time of 8 seconds\n    \"-vf\": \"format=yuv444p,\"  # change input format to yuv444p\n    + \"split [out1][out2];\"  # split input into 2 identical outputs\n    + \"[out1][out2] overlay,smartblur,\"  # apply overlay,smartblur filter on both outputs\n    + \"dilation,dilation,dilation,dilation,dilation,\"  # apply stacks of dilation filters on both outputs\n    + \"eq=contrast=1.4:brightness=-0.09 [pixels];\"  # change brightness and contrast\n    + \"[pixels]format=yuv422p10le[out]\",  # change output format to yuv422p10le\n    \"-map\": \"[out]\",  # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/","title":"Hardware-Accelerated Video Transcoding","text":"What exactly is Transcoding?

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by end user.

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to process real-time video frames with immense flexibility. Both these APIs are capable of utilizing the potential of GPU backed fully-accelerated Hardware based video Decoding(FFdecoder API with hardware decoder) and Encoding (WriteGear API with hardware encoder), thus dramatically improving the transcoding performance. At same time, FFdecoder API Hardware-decoded frames are fully compatible with OpenCV's VideoWriter API for producing high-quality output video in real-time.

Limitation: Bottleneck in Hardware-Accelerated Video Transcoding performance with Real-time Frame processing

As we know, using the \u2013hwaccel cuda -hwaccel_output_format cuda flags in FFmpeg pipeline will keep video frames in GPU memory, and this ensures that the memory transfers (system memory to video memory and vice versa) are eliminated, and that transcoding is performed with the highest possible performance on the available GPU hardware.

General Memory Flow with Hardware Acceleration

But unfortunately, for processing real-time frames in our python script with FFdecoder and WriteGear APIs, we're bound to sacrifice this performance gain by explicitly copying raw decoded frames between System and GPU memory (via the PCIe bus), thereby creating self-made latency in transfer time and increasing PCIe bandwidth occupancy due to overheads in communication over the bus. Moreover, given PCIe bandwidth limits, copying uncompressed image data would quickly saturate the PCIe bus.

Memory Flow with Hardware Acceleration and Real-time Processing

On the bright side, however, GPU enabled Hardware based encoding/decoding is inherently faster and more efficient (do not use much CPU resources when frames in GPU) thus freeing up the CPU for other tasks, as compared to Software based encoding/decoding that is known to be completely CPU intensive. Plus scaling, de-interlacing, filtering, etc. tasks will be way faster and efficient than usual using these Hardware based decoders/encoders as oppose to Software ones.

As you can see the pros definitely outweigh the cons and you're getting to process video frames in the real-time with immense speed and flexibility, which is impossible to do otherwise.

We'll discuss its Hardware-Accelerated Video Transcoding capabilities using these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-accelerated-video-transcoding-with-opencvs-videowriter-api","title":"CUDA-accelerated Video Transcoding with OpenCV's VideoWriter API","text":"Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with OpenCV's VideoWriter API.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs such as imshow(). But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

More information on Nvidia's NVENC Encoder can be found here \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg choose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\" # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. 
`output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since write() method only accepts `BGR` frames\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely release writer\nwriter.release()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated Video Transcoding with WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data all while allowing us to manipulate the real-time video frames with immense flexibility.

Example Assumptions

Please note that following recipe explicitly assumes:

  • You're running Linux operating system with a supported NVIDIA GPU.
  • You're using FFmpeg 4.4 or newer, configured with at least --enable-nonfree --enable-cuda-nvcc --enable-libnpp --enable-cuvid --enable-nvenc configuration flags during compilation. For compilation follow these instructions \u27b6

    Verifying NVDEC/CUDA support in FFmpeg

    To use CUDA Video-decoder(cuda), remember to check if your FFmpeg compiled with it by executing following commands in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -pix_fmts | grep cuda\n..H.. cuda                   0              0      0\n\n$ ffmpeg  -hide_banner -filters | egrep \"cuda|npp\"\n... bilateral_cuda    V->V       GPU accelerated bilateral filter\n... chromakey_cuda    V->V       GPU accelerated chromakey filter\n... colorspace_cuda   V->V       CUDA accelerated video color converter\n... hwupload_cuda     V->V       Upload a system memory frame to a CUDA device.\n... overlay_cuda      VV->V      Overlay one video on top of another using CUDA\n... scale_cuda        V->V       GPU accelerated video resizer\n... scale_npp         V->V       NVIDIA Performance Primitives video scaling and format conversion\n... scale2ref_npp     VV->VV     NVIDIA Performance Primitives video scaling and format conversion to the given reference.\n... sharpen_npp       V->V       NVIDIA Performance Primitives video sharpening filter.\n... thumbnail_cuda    V->V       Select the most representative frame in a given sequence of consecutive frames.\n... transpose_npp     V->V       NVIDIA Performance Primitives video transpose\nT.. yadif_cuda        V->V       Deinterlace CUDA frames\n
    Verifying H.264 NVENC encoder support in FFmpeg

    To use NVENC Video-encoder(cuda), remember to check if your FFmpeg compiled with H.264 NVENC encoder support. You can easily do this by executing following one-liner command in your terminal, and observing if output contains something similar as follows:

    $ ffmpeg  -hide_banner -encoders | grep nvenc \n\nV....D av1_nvenc            NVIDIA NVENC av1 encoder (codec av1)\nV....D h264_nvenc           NVIDIA NVENC H.264 encoder (codec h264)\nV....D hevc_nvenc           NVIDIA NVENC hevc encoder (codec hevc)\n

    You can also use other NVENC encoder in the similar way, if supported.

  • You already have appropriate Nvidia video drivers and related softwares installed on your machine.

  • If the stream is not decodable in hardware (for example, it is an unsupported codec or profile) then it will still be decoded in software automatically, but hardware filters won't be applicable.

These assumptions MAY/MAY NOT suit your current setup. Kindly use suitable parameters based on your system platform and hardware settings only.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

Consuming BGR framesConsuming NV12 frames

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as patched NV12 frames.
  4. Converting patched NV12 frames into BGR pixel-format using OpenCV's cvtcolor method.
  5. Encoding BGR frames with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg choose\n    \"-enforce_cv_patch\": True, # enable OpenCV patch for YUV(NV12) frames\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\" # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format\n    frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV12)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In this example, we will be:

  1. Using Nvidia's CUDA Internal hwaccel Video decoder(cuda) in FFdecoder API to automatically detect best NV-accelerated video codec and keeping video frames in GPU memory (for applying hardware filters) for achieving GPU-accelerated decoding of a given video file (say foo.mp4).
  2. Scaling and Cropping decoded frames in GPU memory.
  3. Downloading decoded frames into system memory as NV12 frames.
  4. Encoding NV12 frames directly with WriteGear API using Nvidia's Hardware accelerated H.264 NVENC Video-encoder(h264_nvenc) into lossless video file in the GPU memory.
# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\nimport cv2\n\n# define suitable FFmpeg parameter\nffparams = {\n    \"-vcodec\": None,  # skip source decoder and let FFmpeg chose\n    \"-ffprefixes\": [\n        \"-vsync\",\n        \"0\",  # prevent duplicate frames\n        \"-hwaccel\",\n        \"cuda\",  # accelerator\n        \"-hwaccel_output_format\",\n        \"cuda\",  # output accelerator\n    ],\n    \"-custom_resolution\": \"null\",  # discard source `-custom_resolution`\n    \"-framerate\": \"null\",  # discard source `-framerate`\n    \"-vf\": \"scale_cuda=640:360,\"  # scale to 640x360 in GPU memory\n    + \"crop=80:60:200:100,\"  # crop a 80\u00d760 section from position (200, 100) in GPU memory\n    + \"hwdownload,\"  # download hardware frames to system memory\n    + \"format=nv12\",  # convert downloaded frames to NV12 pixel format\n}\n\n# initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder(\n    \"foo.mp4\",\n    frame_format=\"null\",  # discard source frame pixel format\n    verbose = False, # to avoid too much clutter\n    **ffparams # apply various params and custom filters\n).formulate()\n\n# retrieve framerate from JSON Metadata and pass it as\n# `-input_framerate` parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-vcodec\": \"h264_nvenc\", # H.264 NVENC Video-encoder\n    \"-input_pixfmt\": \"nv12\", # input frames pixel format as `NV12`\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. 
`output_foo.mp4`\nwriter = WriteGear(output=\"output_foo.mp4\", logging=True, **output_params)\n\n# grab the NV12 frames from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the NV12 frame here}\n\n    # writing NV12 frame to writer\n    writer.write(frame)\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-hw-acceleration/#cuda-nvenc-accelerated-end-to-end-lossless-video-transcoding-with-writegear-api","title":"CUDA-NVENC-accelerated End-to-end Lossless Video Transcoding with WriteGear API","text":"

DeFFcode's FFdecoder API in conjunction with VidGear's WriteGear API creates a High-performance Lossless FFmpeg Transcoding Pipeline

Courtesy - tenor"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/","title":"Transcoding Live Complex Filtergraphs","text":"What are Complex filtergraphs?

Before heading straight into recipes we will talk about Complex filtergraphs:

Complex filtergraphs are those which cannot be described as simply a linear processing chain applied to one stream.

Complex filtergraphs are configured with the -filter_complex global option.

The -lavfi option is equivalent to -filter_complex.

A trivial example of a complex filtergraph is the overlay filter, which has two video inputs and one video output, containing one video overlaid on top of the other.

DeFFcode's FFdecoder API seamlessly supports processing multiple input streams including real-time frames through multiple filter chains combined into a filtergraph (via. -filter_complex FFmpeg parameter), and use their outputs as inputs for other filter chains.

We'll discuss the transcoding of live complex filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-with-live-custom-watermark-image-overlay","title":"Transcoding video with Live Custom watermark image overlay","text":"Big Buck Bunny with custom watermark

In this example we will apply a watermark image (say watermark.png with transparent background) overlay to the 10 seconds of video file (say foo.mp4) using FFmpeg's overlay filter with some additional filtering, and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

You can use FFdecoder's metadata property object that dumps Source Metadata as JSON to retrieve source framerate and frame-size.

To learn about exclusive -ffprefixes & -clones parameter. See Exclusive Parameters \u27b6

Remember to replace watermark.png watermark image file-path with yours before using this recipe.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json, cv2\n\n# define the Complex Video Filter with additional `watermark.png` image input\nffparams = {\n    \"-ffprefixes\": [\"-t\", \"10\"],  # playback time of 10 seconds\n    \"-clones\": [\n        \"-i\",\n        \"watermark.png\",  # !!! [WARNING] define your `watermark.png` here.\n    ],\n    \"-filter_complex\": \"[1]format=rgba,\"  # change 2nd(image) input format to rgba\n    + \"colorchannelmixer=aa=0.7[logo];\"  # apply colorchannelmixer to image for controlling alpha [logo]\n    + \"[0][logo]overlay=W-w-{pixel}:H-h-{pixel}:format=auto,\".format(  # apply overlay to 1st(video) with [logo]\n        pixel=5  # at 5 pixels from the bottom right corner of the input video\n    )\n    + \"format=bgr24\",  # change output format to `bgr24`\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate`\n# parameter for controlled framerate and define other parameters\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/transcode-live-frames-complexgraphs/#transcoding-video-from-sequence-of-images-with-additional-filtering","title":"Transcoding video from sequence of Images with additional filtering","text":"Mandelbrot pattern blend with Fish school video Available blend mode options

Other blend mode options for blend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

In this example we will blend 10 seconds of Mandelbrot test pattern (generated using lavfi input virtual device) that serves as the \"top\" layer with 10 seconds of Image Sequence that serves as the \"bottom\" layer, using blend filter (with heat blend mode), and decode live BGR24 video frames in FFdecoder API. We'll also be encoding those decoded frames in real-time into lossless video file using WriteGear API with controlled framerate.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4 (restricted to 12 seconds):

$ ffmpeg -t 12 -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-000.png, image-001.png, image-002.png up to image-999.png).

If there are more than 1000 frames then the last image will be overwritten with the remaining frames leaving only the last frame.

The default images width and height is same as the video.

How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('/path/to/img%03d.png', verbose=True, **ffparams).formulate()\n

FFdecoder API also accepts Glob pattern(*.png) as well Single looping image as as input to its source parameter. See this Basic Recipe \u27b6 for more information.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport cv2, json\n\n# define mandelbrot pattern generator\n# and the Video Filter definition\nffparams = {\n    \"-ffprefixes\": [\n        \"-t\", \"10\", # playback time of 10 seconds for mandelbrot pattern\n        \"-f\", \"lavfi\", # use input virtual device\n        \"-i\", \"mandelbrot=rate=25\", # create mandelbrot pattern at 25 fps\n        \"-t\", \"10\", # playback time of 10 seconds for video\n    ],  \n    \"-custom_resolution\": (1280, 720), # resize to 1280x720\n    \"-filter_complex\":\"[1:v]format=yuv444p[v1];\" # change 2nd(video) input format to yuv444p\n        + \"[0:v]format=gbrp10le[v0];\" # change 1st(mandelbrot pattern) input format to gbrp10le\n        + \"[v1][v0]scale2ref[v1][v0];\" # resize the 1st(mandelbrot pattern), based on a 2nd(video).\n        + \"[v0][v1]blend=all_mode='heat',\" # apply heat blend mode to output\n        + \"format=yuv422p10le[v]\", # change output format to `yuv422p10le`\n    \"-map\": \"[v]\", # map the output\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"/path/to/image-%03d.png\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# define your parameters\n# [WARNING] framerate must match original source framerate !!!\noutput_params = {\n    \"-input_framerate\": 25,  # Default\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"recipes/advanced/update-metadata/","title":"Updating Video Metadata","text":"

In addition to using metadata property object in FFdecoder API for probing metadata information (only as JSON string) for each multimedia stream available in the given video source, you can also easily update the video metadata on-the-fly by assigning desired data as python dictionary to the same overloaded metadata property object. This feature can be used either for adding new custom properties to metadata, or to override source metadata properties used by FFdecoder API to formulate its default Decoder Pipeline for real-time video-frames generation.

We'll discuss updating video metadata using these APIs briefly in the following recipes:

This feature is not yet fully explored, but in the near future you'll be able to use it to dynamically override any Video frames Decoder Pipeline property (such as frame-size, pixel-format, etc.) in real-time like a pro. Stay tuned for more updates

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/advanced/update-metadata/#added-new-properties-to-metadata-in-ffdecoder-api","title":"Added new properties to metadata in FFdecoder API","text":"

In FFdecoder API, you can easily define any number of new properties for its metadata (formatted as python dictionary) with desired data of any datatype(s)1 , without affecting its default Video frames Decoder pipeline.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, thereby add new properties (formatted as python dictionary) with desired data of different datatype(s) through overloaded metadata property object, and then finally print it as JSON string using the same metadata property object in FFdecoder API.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json\n\n# initialize the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# format your data as dictionary (with data of any [printable] datatype)\ndata = dict(\n    mystring=\"abcd\",  # string data\n    myint=1234,  # integers data\n    mylist=[1, \"Rohan\", [\"inner_list\"]],  # list data\n    mytuple=(1, \"John\", (\"inner_tuple\")),  # tuple data\n    mydict={\"anotherstring\": \"hello\"},  # dictionary data\n    myjson=json.loads('{\"name\": \"John\", \"age\": 30, \"city\": \"New York\"}'),  # json data\n)\n\n# assign your dictionary data\ndecoder.metadata = data\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"D:\\\\foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1920,\n    1080\n  ],\n  \"source_video_framerate\": 29.97,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 21.03,\n  \"approx_video_nframes\": 630,\n  \"source_video_bitrate\": \"4937k\",\n  \"source_audio_bitrate\": \"256k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\",\n  \"mystring\": \"abcd\",\n  \"myint\": 1234,\n  \"mylist\": [\n    1,\n    \"Rohan\",\n    [\n      \"inner_list\"\n    ]\n  ],\n  \"mytuple\": [\n    1,\n    \"John\",\n    \"inner_tuple\"\n  ],\n  \"mydict\": {\n    \"anotherstring\": \"hello\"\n  },\n  \"myjson\": {\n    \"name\": \"John\",\n    \"age\": 30,\n    \"city\": \"New York\"\n  }\n}\n

"},{"location":"recipes/advanced/update-metadata/#overriding-source-video-metadata-in-ffdecoder-api","title":"Overriding source video metadata in FFdecoder API","text":"

In FFdecoder API, you can also use its metadata to manually override the source properties (such as frame-size, frame pixel-format, video-framerate, video-decoder, etc.) that directly affect its default Video frames Decoder pipeline that decodes real-time video-frames.

The \"source\" property in metadata cannot be altered in any manner.

Source Video metadata values must be handled carefully

Source Video metadata information is used by FFdecoder API to formulate its default Video frames Decoder pipeline, and any improper or invalid inputted source property could crash the pipeline with RuntimeError.

Therefore to safeguard against it, FFdecoder API discards any Source Video metadata dictionary keys, if its value's datatype fails to match the exact valid datatype defined in following table:

Only either source_demuxer or source_extension property can be present in source metadata.

Not all Source Video metadata properties directly affects the pipeline (as mentioned in the table). But this might change in future versions.

Source Video Metadata Keys Valid Value Datatype Effect on Pipeline \"source_extension\" string None \"source_demuxer\" string Direct \"source_video_resolution\" list of integers e.g. [1280,720] Direct \"source_video_framerate\" float Direct \"source_video_pixfmt\" string Direct \"source_video_decoder\" string Direct \"source_duration_sec\" float None \"approx_video_nframes\" integer Direct \"source_video_bitrate\" string None \"source_audio_bitrate\" string None \"source_audio_samplerate\" string None \"source_has_video\" bool Direct \"source_has_audio\" bool None \"source_has_image_sequence\" bool Direct \"ffdecoder_operational_mode\" str None \"output_frames_pixfmt\" str Direct

Hence for instance, if \"source_video_resolution\" is assigned \"1280x720\" (i.e. string datatype value instead of list), then it will be discarded.

In this example we will probe all metadata information available within foo.mp4 video file, and override frame size (originally 1920x1080) and pixel-format (originally rgb24) to our desired values through overloaded metadata property object in FFdecoder API, and thereby preview them using OpenCV Library's cv2.imshow() method.

The value assigned to metadata property object can be of dictionary datatype only. Any other type will immediately raise ValueError!

Once the formulate() method is called, the metadata information present in FFdecoder API is finalized and thereby used to formulate its default pipeline for decoding real-time video-frames. Therefore make all changes to video properties beforehand.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\", verbose=True)\n\n# override source metadata values\n# !!! [WARNING] Make sure each value datatype matches the table !!!\ndecoder.metadata = {\n    \"output_frames_pixfmt\": \"gray\",  # gray frame-pixfmt\n    \"source_video_resolution\": [1280, 720],  # 1280x720 frame-size\n}\n\n# finally formulate the decoder\ndecoder.formulate()\n\n# [NOTE] uncomment following line to debug values\n# print(decoder.metadata)\n\n# let's grab the 1280x720 sized gray frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with gray frame here}\n\n    # Show gray frames in output window\n    cv2.imshow(\"Output gray\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

  1. There is no concept of tuple datatype in the JSON format. Therefore, Python's json module auto-converts all tuple python values into JSON list because that's the closest thing in JSON format to a tuple.\u00a0\u21a9

"},{"location":"recipes/basic/","title":"Basic Recipes","text":"

The following recipes should be reasonably accessible to beginners of any skill level to get started with DeFFcode APIs:

Courtesy - tenor

Refer Installation doc first!

If this is your first time using DeFFcode, head straight to the Installation Notes to install DeFFcode with required prerequisites on your machine.

Any proficiency with OpenCV-Python will be Helpful

If you've any proficiency with OpenCV-Python (Python API for OpenCV), you will find these recipes really easy.

Wanna suggest any improvements or additional recipes?

Please feel free to suggest any improvements or additional recipes on our Gitter community channel \u27b6

Frames are actually 3D Numpy arrays

In python, \"Frames\" are actually three-dimensional NumPy ndarray composed of 3 nested levels of arrays, one for each dimension.

"},{"location":"recipes/basic/#basic-decoding-recipes","title":"Basic Decoding Recipes","text":"
  • Decoding Video files
    • Accessing RGB frames from a video file
    • Capturing and Previewing BGR frames from a video file (OpenCV Support)
    • Playing with any other FFmpeg pixel formats
    • Capturing and Previewing frames from a Looping Video
  • Decoding Camera Devices using Indexes
    • Enumerating all Camera Devices with Indexes
    • Capturing and Previewing frames from a Camera using Indexes
  • Decoding Network Streams
    • Capturing and Previewing frames from a HTTPs Stream
    • Capturing and Previewing frames from a RTSP/RTP Stream
  • Decoding Image sequences
    • Capturing and Previewing frames from Sequence of images
    • Capturing and Previewing frames from Single looping image
"},{"location":"recipes/basic/#basic-transcoding-recipes","title":"Basic Transcoding Recipes","text":"
  • Transcoding Live frames
    • Transcoding video using OpenCV VideoWriter API
    • Transcoding lossless video using WriteGear API
  • Transcoding Live Simple Filtergraphs
    • Transcoding Trimmed and Reversed video
    • Transcoding Cropped video
    • Transcoding Rotated video (with rotate filter)
    • Transcoding Rotated video (with transpose filter)
    • Transcoding Horizontally flipped and Scaled video
  • Saving Key-frames as Image (Image processing)
    • Extracting Key-frames as PNG image
    • Generating Thumbnail with a Fancy filter
"},{"location":"recipes/basic/#basic-metadata-recipes","title":"Basic Metadata Recipes","text":"
  • Extracting Video Metadata
    • Extracting video metadata using Sourcer API
    • Extracting video metadata using FFdecoder API
"},{"location":"recipes/basic/#whats-next","title":"What's next?","text":"

Done already! Let's checkout Advanced Recipes to level up your skills!

"},{"location":"recipes/basic/decode-camera-devices/","title":"Decoding Camera Devices using Indexes","text":"

With DeFFcode APIs, we are able to probe and enumerate all Camera Devices names along with their respective \"device indexes\" or \"camera indexes\" no matter how many cameras are connected to your system. This makes Camera Devices decoding as simple as OpenCV, where one can effortlessly access a specific Camera Device just by specifying its matching index. These indexes are much easier to read, memorize, and type, and one doesn't have to remember long Device names or worry about their Demuxer.

We'll discuss the Decoding Camera Devices using Indexes briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-camera-devices/#enumerating-all-camera-devices-with-indexes","title":"Enumerating all Camera Devices with Indexes","text":"

In Sourcer API, you can easily use its enumerate_devices property object to enumerate all probed Camera Devices (connected to your system) as dictionary object with device indexes as keys and device names as their respective values.

Requirement for Enumerating all Camera Devices in Sourcer API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will enumerate all probed Camera Devices connected on a Windows machine using enumerate_devices property object in Sourcer API, both as dictionary object and JSON string.

# import the necessary packages\nfrom deffcode import Sourcer\nimport json\n\n# initialize and formulate the decoder\nsourcer = Sourcer(\"0\").probe_stream()\n\n# enumerate probed devices as Dictionary object(`dict`)\nprint(sourcer.enumerate_devices)\n\n# enumerate probed devices as JSON string(`json.dump`)\nprint(json.dumps(sourcer.enumerate_devices,indent=2))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine: As Dictionary objectAs JSON string
{0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
{\n  \"0\": \"Integrated Camera\",\n  \"1\": \"USB2.0 Camera\",\n  \"2\": \"DroidCam Source\"\n}\n

"},{"location":"recipes/basic/decode-camera-devices/#capturing-and-previewing-frames-from-a-camera-using-indexes","title":"Capturing and Previewing frames from a Camera using Indexes","text":"

After knowing the index of Camera Device with Sourcer API, One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value either as integer or string of integer type to its source parameter.

Requirement for Index based Camera Device Capturing in FFdecoder API
  • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

    Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

    Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

    Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

  • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

  • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

In this example we will decode BGR24 video frames from Integrated Camera at index 0 on a Windows Machine, and preview them using OpenCV Library's cv2.imshow() method.

Important Facts related to Camera Device Indexing
  • Camera Device indexes are 0-indexed. So the first device is at 0, second is at 1, so on. So if there are n devices, the last device is at n-1.
  • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
  • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
    • For example, If there are three devices:
      {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
    • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

      Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

Out of Index Camera Device index values will raise ValueError in FFdecoder API

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/","title":"Decoding Image sequences","text":"

DeFFcode's FFdecoder API supports a wide range of media streams as input to its source parameter, which also includes Image Sequences such as Sequential(img%03d.png) and Glob pattern(*.png) as well as Single looping image.

We'll discuss both briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-sequence-of-images","title":"Capturing and Previewing frames from Sequence of images","text":"

In this example we will capture video frames from a given Image Sequence using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

OpenCV expects BGR format frames in its cv2.imshow() method.

Extracting Image Sequences from a video

You can use following FFmpeg command to extract sequences of images from a video file foo.mp4:

$ ffmpeg -i foo.mp4 /path/to/image-%03d.png\n

The default framerate is 25 fps, therefore this command will extract 25 images/sec from the video file, and save them as sequences of images (starting from image-001.png, image-002.png, image-003.png and so on).

If there are more than 999 frames, the %03d pattern simply expands to more digits (for e.g. image-1000.png), so no images are overwritten.

The default images width and height is same as the video.

SequentialGlob pattern How to start with specific number image?

You can use -start_number FFmpeg parameter if you want to start with specific number image:

# define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img%03d.png\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

The glob pattern is not available on Windows FFmpeg builds.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-pattern_type glob` for accepting glob pattern\nffparams = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"/path/to/pngs/img*.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-image-sequences/#capturing-and-previewing-frames-from-single-looping-image","title":"Capturing and Previewing frames from Single looping image","text":"

In this example we will capture video frames from a Single Looping image using FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method in real-time.

By default, OpenCV expects BGR format frames in its cv2.imshow() method.

To learn more about exclusive -ffprefixes parameter. See Exclusive Parameters \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-loop 1` for infinite looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"img.png\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/","title":"Decoding Network Streams","text":"

Similar to decoding Video files, DeFFcode's FFdecoder API directly supports Network Streams with specific protocols (such as RTSP/RTP, HTTP(s), MPEG-TS, etc.) as input to its source parameter.

We'll discuss Network Streams support briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-https-stream","title":"Capturing and Previewing frames from a HTTPs Stream","text":"

In this example we will decode live BGR24 video frames from a HTTPs protocol Stream in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"https://abhitronix.github.io/html/Big_Buck_Bunny_1080_10s_1MB.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-network-streams/#capturing-and-previewing-frames-from-a-rtsprtp-stream","title":"Capturing and Previewing frames from a RTSP/RTP Stream","text":"

In this example we will decode live BGR24 video frames from RTSP/RTP protocol Streams in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

This example assumes you already have an RTSP Server running at the specified RTSP address with syntax rtsp://[RTSP_ADDRESS]:[RTSP_PORT]/[RTSP_PATH] and video data already being published to it.

For creating your own RTSP Server locally and publishing video data to it, you can refer to this WriteGear API's bonus example \u27b6

Make sure to replace the RTSP address rtsp://localhost:8554/mystream with yours in the following code before running

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define suitable parameters\nffparams = {\"-rtsp_transport\": \"tcp\"}\n\n# initialize and formulate the decoder with RTSP protocol source for BGR24 output\n# [WARNING] Change your RSTP address `rtsp://localhost:8554/mystream` with yours!\ndecoder = FFdecoder(\"rtsp://localhost:8554/mystream\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/","title":"Decoding Video files","text":"

DeFFcode's FFdecoder API readily supports multimedia Video files path as input to its source parameter. And with its frame_format parameter, you can easily decode video frames in any pixel format(s) that are readily supported by all well known Computer Vision libraries (such as OpenCV).

We'll discuss its video files support and pixel format capabilities briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/decode-video-files/#accessing-rgb-frames-from-a-video-file","title":"Accessing RGB frames from a video file","text":"

The default function of FFdecoder API is to decode 24-bit RGB video frames from the given source.

FFdecoder API's generateFrame() function can be used in multiple methods to access RGB frames from a given source, such as as a Generator (Recommended Approach), calling with Statement, and as an Iterator.

In this example we will decode the default RGB24 video frames from a given Video file (say foo.mp4) using above mentioned accessing methods:

As a Generator (Recommended)Calling with StatementAs a Iterator

This is a recommended approach for faster and error-proof access of decoded frames. We'll use it throughout the recipes.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# grab RGB24(default) frame from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so don't need to explicitly call them. See PEP343 -- The 'with' statement' for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder\nwith FFdecoder(\"foo.mp4\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # lets print its shape\n        print(frame.shape)  # for e.g. (1080, 1920, 3)\n

This Iterator Approach bears a close resemblance to OpenCV-Python (Python API for OpenCV) coding syntax, thereby making it easier to learn and remember.

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# loop over frames\nwhile True:\n\n    # grab RGB24(default) frames from decoder\n    frame = next(decoder.generateFrame(), None)\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # lets print its shape\n    print(frame.shape) # for e.g. (1080, 1920, 3)\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-bgr-frames-from-a-video-file","title":"Capturing and Previewing BGR frames from a video file","text":"

In this example we will decode OpenCV supported live BGR24 video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

By default, OpenCV expects BGR format frames in its cv2.imshow() method. We'll do this using the following two accessing methods.

As a Generator (Recommended)Calling with Statement
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# grab the BGR24 frames from decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Calling with Statement approach can be used to make the code easier, cleaner, and much more readable. This approach also automatically handles management of formulate() and terminate() methods in FFdecoder API, so don't need to explicitly call them. See PEP343 -- The 'with' statement' for more information on this approach.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\nwith FFdecoder(\"foo.mp4\", frame_format=\"bgr24\") as decoder:\n\n    # grab the BGR24 frames from decoder\n    for frame in decoder.generateFrame():\n\n        # check if frame is None\n        if frame is None:\n            break\n\n        # {do something with the frame here}\n\n        # Show output window\n        cv2.imshow(\"Output\", frame)\n\n        # check for 'q' key if pressed\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord(\"q\"):\n            break\n\n# close output window\ncv2.destroyAllWindows()\n

"},{"location":"recipes/basic/decode-video-files/#playing-with-any-other-ffmpeg-pixel-formats","title":"Playing with any other FFmpeg pixel formats","text":"

Similar to BGR, you can input any pixel format (supported by installed FFmpeg) by way of frame_format parameter of FFdecoder API for the desired video frame format.

In this example we will decode live Grayscale and YUV video frames from a given Video file (say foo.mp4) in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Decode GrayscaleDecode YUV frames
# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"input_foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# grab the GRAYSCALE frames from the decoder\nfor gray in decoder.generateFrame():\n\n    # check if frame is None\n    if gray is None:\n        break\n\n    # {do something with the gray frame here}\n\n    # Show output window\n    cv2.imshow(\"Gray Output\", gray)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try decoding YUV420p pixel-format frames in following python code:

You can also use other YUV pixel formats such as yuv422p(4:2:2 subsampling) or yuv444p(4:4:4 subsampling) etc. instead for a higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# grab the YUV420p frames from the decoder\nfor yuv in decoder.generateFrame():\n\n    # check if frame is None\n    if yuv is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since imshow() method only accepts `BGR` frames\n    bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the bgr frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", bgr)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/decode-video-files/#capturing-and-previewing-frames-from-a-looping-video","title":"Capturing and Previewing frames from a Looping Video","text":"

In this example we will decode live BGR24 video frames from looping video using different means in FFdecoder API, and preview them using OpenCV Library's cv2.imshow() method.

Using -stream_loop optionUsing loop filter

The recommended way to loop video is to use the -stream_loop option via -ffprefixes list attribute of ffparam dictionary parameter in FFdecoder API. Possible values are integer values: >0 value of loop, 0 means no loop, -1 means infinite loop.

Using -stream_loop 3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define `-stream_loop 3` for looping 4 times\nffparams = {\"-ffprefixes\":[\"-stream_loop\", \"3\"]}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

Another way to loop video is to use the loop complex filter via -filter_complex FFmpeg flag as an attribute of ffparam dictionary parameter in FFdecoder API.

This filter places all frames into memory(RAM), so applying trim filter first is strongly recommended. Otherwise you might probably run Out of Memory.

Using loop filter for looping video

The filter accepts the following options:

  • loop: Sets the number of loops for integer values >0. Setting this value to -1 will result in infinite loops. Default is 0(no loops).
  • size: Sets maximal size in number of frames. Default is 0.
  • start: Sets first frame of loop. Default is 0.

Using loop=3 will loop video 4 times.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define loop 4 times, each loop is 15 frames, each loop skips the first 25 frames\nffparams = {\n    \"-filter_complex\": \"loop=loop=3:size=15:start=25\" # Or use: `loop=3:15:25`\n}  \n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\n    \"input.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # Show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/extract-video-metadata/","title":"Extracting Video Metadata","text":"

DeFFcode's Sourcer API acts as Source Probing Utility for easily probing metadata information for each multimedia stream available in the given video source, and return it as in Human-readable (as JSON string) or Machine-readable (as Dictionary object) type with its retrieve_metadata() class method. Apart from this, you can also use metadata property object in FFdecoder API to extract this metadata information (only as JSON string).

We'll discuss video metadata extraction using both these APIs briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-sourcer-api","title":"Extracting video metadata using Sourcer API","text":"

This is the recommended way for extracting video metadata.

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it in both Human-readable (as JSON string) and Machine-readable (as Dictionary object) types using retrieve_metadata() class method in Sourcer API:

The Sourcer API's retrieve_metadata() class method provides pretty_json boolean parameter to return metadata as JSON string (if True) and as Dictionary (if False).

As JSON stringAs Dictionary object
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `json.dump`\nprint(sourcer.retrieve_metadata(pretty_json=True))\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false\n}\n
# import the necessary packages\nfrom deffcode import Sourcer\n\n# initialize and formulate the decoder using suitable source\nsourcer = Sourcer(\"foo.mp4\").probe_stream()\n\n# print metadata as `dict`\nprint(sourcer.retrieve_metadata())\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{'ffmpeg_binary_path': 'C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe', 'source': 'foo.mp4', 'source_extension': '.mp4', 'source_video_resolution': [1280, 720], 'source_video_framerate': 25.0, 'source_video_pixfmt': 'yuv420p', 'source_video_decoder': 'h264', 'source_duration_sec': 5.31, 'approx_video_nframes': 133, 'source_video_bitrate': '1205k', 'source_audio_bitrate': '384k', 'source_audio_samplerate': '48000 Hz', 'source_has_video': True, 'source_has_audio': True, 'source_has_image_sequence': False}\n

"},{"location":"recipes/basic/extract-video-metadata/#extracting-video-metadata-using-ffdecoder-api","title":"Extracting video metadata using FFdecoder API","text":"

In this example we will probe all metadata information available within foo.mp4 video file on Windows machine, and print it as JSON string using metadata property object in FFdecoder API.

You can also update video's metadata by using the same overloaded metadata property object in FFdecoder API. More information can be found in this Advanced Recipe \u27b6

# import the necessary packages\nfrom deffcode import FFdecoder\n\n# initialize and formulate the decoder using suitable source\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# print metadata as `json.dump`\nprint(decoder.metadata)\n\n# terminate the decoder\ndecoder.terminate()\n
After running above python code, the resultant Terminal Output will look something as following on Windows machine:
{\n  \"ffmpeg_binary_path\": \"C:\\\\Users\\\\foo\\\\AppData\\\\Local\\\\Temp\\\\ffmpeg-static-win64-gpl/bin/ffmpeg.exe\",\n  \"source\": \"foo.mp4\",\n  \"source_extension\": \".mp4\",\n  \"source_video_resolution\": [\n    1280,\n    720\n  ],\n  \"source_video_framerate\": 25.0,\n  \"source_video_pixfmt\": \"yuv420p\",\n  \"source_video_decoder\": \"h264\",\n  \"source_duration_sec\": 5.31,\n  \"approx_video_nframes\": 133,\n  \"source_video_bitrate\": \"1205k\",\n  \"source_audio_bitrate\": \"384k\",\n  \"source_audio_samplerate\": \"48000 Hz\",\n  \"source_has_video\": true,\n  \"source_has_audio\": true,\n  \"source_has_image_sequence\": false,\n  \"ffdecoder_operational_mode\": \"Video-Only\",\n  \"output_frames_pixfmt\": \"rgb24\"\n}\n

"},{"location":"recipes/basic/save-keyframe-image/","title":"Saving Key-frames as Image","text":"

DeFFcode's FFdecoder API provides effortless and precise Frame Seeking with -ss FFmpeg parameter that enables us to save any frame from a specific part of our input source.

We'll discuss it briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for saving video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • Pillow: Pillow is a Imaging Library required for saving frame as Image. You can easily install it directly via pip:

    pip install Pillow     \n
  • Matplotlib: Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations, also required for saving frame as Image. You can easily install it directly via pip:

    pip install matplotlib   \n
  • Imageio: Imageio is a Library for reading and writing a wide range of image, video, scientific, and volumetric data formats, also required for saving frame as Image. You can easily install it directly via pip:

    pip install imageio      \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/save-keyframe-image/#extracting-key-frames-as-png-image","title":"Extracting Key-frames as PNG image","text":"

In this example we will seek to 00:00:01.45(or 1045msec) in time and decode one single frame in FFdecoder API, and thereby saving it as PNG image using few prominent Image processing python libraries by providing valid filename (e.g. foo_image.png).

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter:

  • Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS) format, such as in 01:23:45.678.
  • Fractional: such as in 02:30.05. This is interpreted as 2 minutes, 30 seconds, and a half second, which would be the same as using 150.5 in seconds.
Using PillowUsing OpenCVUsing MatplotlibUsing Imageio

In Pillow, the fromarray() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as PNG\n    im.save(\"foo_image.png\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In OpenCV, the imwrite() function can export BGR frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport cv2\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder for BGR24 outputwith suitable source\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", **ffparams).formulate()\n\n# grab the BGR24 frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    cv2.imwrite('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Matplotlib, the imsave() function can save an RGB frame as an image file:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport matplotlib.pyplot as plt\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our image as PNG\n    plt.imsave('foo_image.png', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

In Imageio, the imwrite() function can be used to create an image memory from an RGB frame:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport imageio\n\n# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec) \n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\":1}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"foo.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not(frame is None):\n    # Save our output\n    imageio.imwrite('foo_image.jpeg', frame)\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/save-keyframe-image/#generating-thumbnail-with-a-fancy-filter","title":"Generating Thumbnail with a Fancy filter","text":"fancy_thumbnail.jpg (Courtesy - BigBuckBunny)

In this example we first apply FFmpeg\u2019s tblend filter with an hardmix blend mode (cool stuff) and then seek to 00:00:25.917(or 25.917sec) in time to retrieve our single frame thumbnail, and thereby save it as JPEG image with valid filename (e.g. fancy_thumbnail.jpg) using Pillow library.

Time unit syntax in -ss FFmpeg parameter

You can use two different time unit formats with -ss FFmpeg parameter: - [x] Sexagesimal(in seconds): Uses (HOURS:MM:SS.MILLISECONDS), such as in 01:23:45.678 - [x] Fractional: such as in 02:30.05, this is interpreted as 2 minutes, 30 seconds, and a half second, which would be the same as using 150.5 in seconds.

Available blend mode options

Other blend mode options for tblend filter include: addition, addition128, grainmerge, and, average, burn, darken, difference, difference128, grainextract, divide, dodge, freeze, exclusion, extremity, glow, hardlight, hardmix, heat, lighten, linearlight, multiply, multiply128, negation, normal, or, overlay, phoenix, pinlight, reflect, screen, softlight, subtract, vividlight, xor

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom PIL import Image\n\n# define the FFmpeg parameter to\nffparams = {\n    \"-vf\": \"tblend=all_mode='hardmix'\",  # trim and reverse\n    \"-ss\": \"00:00:25.917\",  # seek to 00:00:25.917(or 25s 917msec)\n    \"-frames:v\": 1,  # get one single frame\n}\n\n# initialize and formulate the decoder with suitable source\ndecoder = FFdecoder(\"BigBuckBunny.mp4\", **ffparams).formulate()\n\n# grab the RGB24(default) frame from the decoder\nframe = next(decoder.generateFrame(), None)\n\n# check if frame is None\nif not (frame is None):\n    # Convert to Image\n    im = Image.fromarray(frame)\n    # Save Image as JPEG\n    im.save(\"fancy_thumbnail.jpg\")\nelse:\n    raise ValueError(\"Something is wrong!\")\n\n# terminate the decoder\ndecoder.terminate()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/","title":"Transcoding Live Simple Filtergraphs","text":"What are Simple filtergraphs?

Before heading straight into recipes we will talk about Simple filtergraphs:

Simple filtergraphs are those filters that have exactly one input and output, both of the same type.

They can be processed by simply inserting an additional step between decoding and encoding of video frames:

Simple filtergraphs are configured with the per-stream -filter option (with -vf for video).

DeFFcode's FFdecoder API applies a single chain of filtergraphs (through -vf FFmpeg parameter) to real-time frames quite effortlessly.

We'll discuss the transcoding of live simple filtergraphs in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of their core functionality, and any failure in detection will raise RuntimeError immediately. Follow the dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

OpenCV's VideoWriter() class lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-trimmed-and-reversed-video","title":"Transcoding Trimmed and Reversed video","text":"Big Buck Bunny Reversed

In this example we will take the first 5 seconds of a video clip (using trim filter) and reverse it (by applying reverse filter), and encode them using OpenCV Library's VideoWriter() method in real-time.

The reverse filter requires memory to buffer the entire clip, so applying trim filter first is strongly recommended. Otherwise you might probably run Out of Memory.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

By default, OpenCV expects BGR format frames in its write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# trim 5 sec from end and reverse\nffparams = {\n    \"-vf\": \"trim=end=5,reverse\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-cropped-video","title":"Transcoding Cropped video","text":"Big Buck Bunny Cropped

In this example we will crop real-time video frames by an area with size \u2154 of the input video (say foo.mp4) by applying crop filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using crop filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# cropped the central input area with size 2/3 of the input video\nffparams = {\n    \"-vf\": \"crop=2/3*in_w:2/3*in_h\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-rotate-filter","title":"Transcoding Rotated video (with rotate filter)","text":"

FFmpeg features Rotate Filter that is used to rotate videos by an arbitrary angle (expressed in radians).

Big Buck Bunny Rotated (with rotate filter)

In this example we will rotate real-time video frames at an arbitrary angle by applying rotate filter in FFdecoder API and also using green color to fill the output area not covered by the rotated image, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by -20 degrees (about -0.35 rad) and fill green\nffparams = {\n    \"-vf\": \"rotate=angle=-20*PI/180:fillcolor=green\" \n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-rotated-video-with-transpose-filter","title":"Transcoding Rotated video (with transpose filter)","text":"

FFmpeg also features Transpose Filter that is used to rotate videos by 90 degrees clockwise and counter-clockwise direction as well as flip them vertically and horizontally.

Big Buck Bunny Rotated (with transpose filter)

In this example we will rotate real-time video frames by 90 degrees counterclockwise and preserve portrait geometry by applying transpose filter in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# rotate by 90 degrees counter-clockwise and preserve portrait layout\nffparams = {\n    \"-vf\": \"transpose=dir=2:passthrough=portrait\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames-simplegraphs/#transcoding-horizontally-flipped-and-scaled-video","title":"Transcoding Horizontally flipped and Scaled video","text":"Big Buck Bunny Horizontally flipped and Scaled

In this example we will horizontally flip and scale real-time video frames to half its original size by applying hflip and scale filter one-by-one in FFdecoder API, all while encoding them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

More complex examples using scale filter can be found here \u27b6 and can be applied similarly.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# define the Video Filter definition\n# horizontally flip and scale to half its original size\nffparams = {\n    \"-vf\": \"hflip,scale=w=iw/2:h=ih/2\"\n}\n\n# initialize and formulate the decoder for BGR24 output with given params\ndecoder = FFdecoder(\n    \"foo.mp4\", frame_format=\"bgr24\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/","title":"Transcoding Live frames","text":"What exactly is Transcoding?

Before heading directly into recipes we have to talk about Transcoding:

Transcoding is the technique of transforming one media encoding format into another.

This is typically done for compatibility purposes, such as when a media source provides a format that the intended target is not able to process; an in-between adaptation step is required:

  • Decode media from its originally encoded state into raw, uncompressed information.
  • Encode the raw data back, using a different codec that is supported by the end user.

While decoding media into video frames is purely managed by DeFFcode's FFdecoder API, you can easily encode those video frames back into multimedia files using any well-known video processing library such as OpenCV and VidGear.

We'll discuss transcoding using both these libraries briefly in the following recipes:

DeFFcode APIs requires FFmpeg executable

DeFFcode APIs MUST require a valid FFmpeg executable for all of its core functionality, and any failure in detection will raise RuntimeError immediately. Follow dedicated FFmpeg Installation doc \u27b6 for its installation.

Additional Python Dependencies for following recipes

Following recipes requires additional python dependencies which can be installed easily as below:

  • OpenCV: OpenCV is required for previewing and encoding video frames. You can easily install it directly via pip:

    OpenCV installation from source

    You can also follow online tutorials for building & installing OpenCV on Windows, Linux, MacOS and Raspberry Pi machines manually from its source.

    Make sure not to install both pip and source version together. Otherwise installation will fail to work!

    Other OpenCV binaries

    OpenCV maintainers also provide additional binaries via pip that contains both main modules and contrib/extra modules opencv-contrib-python, and for server (headless) environments like opencv-python-headless and opencv-contrib-python-headless. You can also install any one of them in similar manner. More information can be found here.

    pip install opencv-python       \n
  • VidGear: VidGear is required for lossless encoding of video frames into file/stream. You can easily install it directly via pip:

    pip install vidgear[core]       \n

Always use FFdecoder API's terminate() method at the end to avoid undesired behavior.

Never name your python script deffcode.py

When trying out these recipes, never name your python script deffcode.py otherwise it will result in ModuleNotFound error.

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-video-using-opencv-videowriter-api","title":"Transcoding video using OpenCV VideoWriter API","text":"

OpenCV's VideoWriter() class can be used directly with DeFFcode's FFdecoder API to encode video frames into a multimedia video file, but it lacks the ability to control output quality, bitrate, compression, and other important features which are only available with VidGear's WriteGear API.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them using OpenCV Library's VideoWriter() method in real-time.

OpenCV's VideoWriter() class requires a valid Output filename (e.g. output_foo.avi), FourCC code, framerate, and resolution as input.

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve output framerate and resolution.

BGR framesRGB framesGRAYSCALE framesYUV frames

By default, OpenCV expects BGR format frames in its VideoWriter's write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for BGR24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n     # let's also show output window\n    cv2.imshow(\"Output\", frame)\n\n    # check for 'q' key if pressed\n    key = cv2.waitKey(1) & 0xFF\n    if key == ord(\"q\"):\n        break\n\n# close output window\ncv2.destroyAllWindows()\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

Since OpenCV expects BGR format frames in its VideoWriter's write() method, we need to convert RGB frames into BGR before encoding as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for RGB24 pixel format output\ndecoder = FFdecoder(\"foo.mp4\").formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo.avi`\nwriter = cv2.VideoWriter(\"output_foo.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the RGB24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # converting RGB24 to BGR24 frame\n    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n    # writing BGR24 frame to writer\n    writer.write(frame_bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

OpenCV also directly consumes GRAYSCALE frames in its VideoWriter's write() method.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

With FFdecoder API, frames extracted with YUV pixel formats (yuv420p, yuv444p, nv12, nv21 etc.) are generally incompatible with OpenCV APIs. But you can make them easily compatible by using exclusive -enforce_cv_patch boolean attribute of its ffparam dictionary parameter.

Let's try encoding YUV420p pixel-format frames with OpenCV's write() method in following python code:

You can also use other YUV pixel formats such as yuv422p (4:2:2 subsampling) or yuv444p (4:4:4 subsampling) instead for a higher dynamic range in a similar manner.

# import the necessary packages\nfrom deffcode import FFdecoder\nimport json, cv2\n\n# enable OpenCV patch for YUV frames\nffparams = {\"-enforce_cv_patch\": True}\n\n# initialize and formulate the decoder for YUV420p output\ndecoder = FFdecoder(\n    \"input_foo.mp4\", frame_format=\"yuv420p\", verbose=True, **ffparams\n).formulate()\n\n# retrieve JSON Metadata and convert it to dict\nmetadata_dict = json.loads(decoder.metadata)\n\n# prepare OpenCV parameters\nFOURCC = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\nFRAMERATE = metadata_dict[\"output_framerate\"]\nFRAMESIZE = tuple(metadata_dict[\"output_frames_resolution\"])\n\n# Define writer with parameters and suitable output filename for e.g. `output_foo_gray.avi`\nwriter = cv2.VideoWriter(\"output_foo_gray.avi\", FOURCC, FRAMERATE, FRAMESIZE)\n\n# grab the yuv420p frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # convert it to `BGR` pixel format,\n    # since VideoWriter's write() method expects `BGR` frames here\n    bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)\n\n    # {do something with the BGR frame here}\n\n    # writing BGR frame to writer\n    writer.write(bgr)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.release()\n

"},{"location":"recipes/basic/transcode-live-frames/#transcoding-lossless-video-using-writegear-api","title":"Transcoding lossless video using WriteGear API","text":"

WriteGear's Compression Mode support for FFdecoder API is currently in beta so you can expect much higher than usual CPU utilization!

Lossless transcoding with FFdecoder and WriteGear API

VidGear's WriteGear API implements a complete, flexible, and robust wrapper around FFmpeg in compression mode for encoding real-time video frames to a lossless compressed multimedia output file(s)/stream(s).

DeFFcode's FFdecoder API in conjunction with WriteGear API creates a high-level High-performance Lossless FFmpeg Transcoding (Decoding + Encoding) Pipeline that is able to exploit almost any FFmpeg parameter for achieving anything imaginable with multimedia video data, all while allowing us to manipulate the real-time video frames with immense flexibility.

In this example we will decode different pixel formats video frames from a given Video file (say foo.mp4) in FFdecoder API, and encode them into lossless video file with controlled framerate using WriteGear API in real-time.

Additional Parameters in WriteGear API

WriteGear API only requires a valid Output filename (e.g. output_foo.mp4) as input, but you can easily control any output specifications (such as bitrate, codec, framerate, resolution, subtitles, etc.) supported by FFmpeg (in use).

You can use FFdecoder's metadata property object that dumps source Video's metadata information (as JSON string) to retrieve source framerate.

BGR framesRGB framesGRAYSCALE framesYUV frames

WriteGear API by default expects BGR format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for BGR24 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"bgr24\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing BGR24 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

In WriteGear API, you can use rgb_mode parameter in write() class method to write RGB format frames instead of default BGR as follows:

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder\ndecoder = FFdecoder(\"foo.mp4\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` \n# parameter for controlled framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo.mp4`\nwriter = WriteGear(output_filename=\"output_foo.mp4\", **output_params)\n\n# grab the BGR24 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing RGB24 frame to writer\n    writer.write(frame, rgb_mode=True)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes GRAYSCALE format frames in its write() class method.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for GRAYSCALE output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\", verbose=True).formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as `-input_framerate` parameter\n# for controlled output framerate\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"source_video_framerate\"]\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_gray.mp4`\nwriter = WriteGear(output_filename=\"output_foo_gray.mp4\", **output_params)\n\n# grab the GRAYSCALE frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing GRAYSCALE frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

WriteGear API also directly consumes YUV (or basically any other supported pixel format) frames in its write() class method with its -input_pixfmt attribute in compression mode. For its non-compression mode, see above example.

You can also use yuv422p (4:2:2 subsampling) or yuv444p (4:4:4 subsampling) instead for higher dynamic ranges.

In WriteGear API, the support for -input_pixfmt attribute in output_params dictionary parameter was added in v0.3.0.

# import the necessary packages\nfrom deffcode import FFdecoder\nfrom vidgear.gears import WriteGear\nimport json\n\n# initialize and formulate the decoder for YUV420 output\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"yuv420p\").formulate()\n\n# retrieve framerate from source JSON Metadata and pass it as \n# `-input_framerate` parameter for controlled framerate\n# and add input pixfmt as yuv420p also\noutput_params = {\n    \"-input_framerate\": json.loads(decoder.metadata)[\"output_framerate\"],\n    \"-input_pixfmt\": \"yuv420p\"\n}\n\n# Define writer with default parameters and suitable\n# output filename for e.g. `output_foo_yuv.mp4`\nwriter = WriteGear(output_filename=\"output_foo_yuv.mp4\", logging=True, **output_params)\n\n# grab the YUV420 frame from the decoder\nfor frame in decoder.generateFrame():\n\n    # check if frame is None\n    if frame is None:\n        break\n\n    # {do something with the frame here}\n\n    # writing YUV420 frame to writer\n    writer.write(frame)\n\n# terminate the decoder\ndecoder.terminate()\n\n# safely close writer\nwriter.close()\n

"},{"location":"reference/ffhelper/","title":"deffcode.ffhelper","text":"

Following methods are exclusively designed to handle FFmpeg related tasks. These tasks include validation of installed FFmpeg binaries, downloading of FFmpeg binaries (on Windows), and parsing of FFmpeg metadata into useful information using various pattern matching methods.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_valid_ffmpeg_path--get_valid_ffmpeg_path","title":"get_valid_ffmpeg_path","text":"

Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.

Parameters:

Name Type Description Default custom_ffmpeg string

path to custom FFmpeg executables

'' is_windows boolean

is running on Windows OS?

False ffmpeg_download_path string

FFmpeg static binaries download location (Windows only)

'' verbose bool

enables verbose for its operations

False

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def get_valid_ffmpeg_path(\n    custom_ffmpeg=\"\", is_windows=False, ffmpeg_download_path=\"\", verbose=False\n):\n    \"\"\"\n    ## get_valid_ffmpeg_path\n\n    Validate the given FFmpeg path/binaries, and returns a valid FFmpeg executable path.\n\n    Parameters:\n        custom_ffmpeg (string): path to custom FFmpeg executables\n        is_windows (boolean): is running on Windows OS?\n        ffmpeg_download_path (string): FFmpeg static binaries download location _(Windows only)_\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if is_windows:\n        # checks if current os is windows\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            final_path += custom_ffmpeg\n        else:\n            # otherwise auto-download them\n            try:\n                if not (ffmpeg_download_path):\n                    # otherwise save to Temp Directory\n                    import tempfile\n\n                    ffmpeg_download_path = tempfile.gettempdir()\n\n                verbose and logger.debug(\n                    \"FFmpeg Windows Download Path: {}\".format(ffmpeg_download_path)\n                )\n\n                # download Binaries\n                os_bit = (\n                    (\"win64\" if platform.machine().endswith(\"64\") else \"win32\")\n                    if is_windows\n                    else \"\"\n                )\n                _path = download_ffmpeg_binaries(\n                    path=ffmpeg_download_path, os_windows=is_windows, os_bit=os_bit\n                )\n                # assign to local variable\n                final_path += _path\n\n            except Exception as e:\n                # log if any error occurred\n                logger.exception(str(e))\n                logger.error(\n                    \"Error in downloading FFmpeg binaries, Check your network and 
Try again!\"\n                )\n                return False\n\n        if os.path.isfile(final_path):\n            # check if valid FFmpeg file exist\n            pass\n        elif os.path.isfile(os.path.join(final_path, \"ffmpeg.exe\")):\n            # check if FFmpeg directory exists, if does, then check for valid file\n            final_path = os.path.join(final_path, \"ffmpeg.exe\")\n        else:\n            # else return False\n            verbose and logger.debug(\n                \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n            )\n            return False\n    else:\n        # otherwise perform test for Unix\n        if custom_ffmpeg:\n            # if custom FFmpeg path is given assign to local variable\n            if os.path.isfile(custom_ffmpeg):\n                # check if valid FFmpeg file exist\n                final_path += custom_ffmpeg\n            elif os.path.isfile(os.path.join(custom_ffmpeg, \"ffmpeg\")):\n                # check if FFmpeg directory exists, if does, then check for valid file\n                final_path = os.path.join(custom_ffmpeg, \"ffmpeg\")\n            else:\n                # else return False\n                verbose and logger.debug(\n                    \"No valid FFmpeg executables found at Custom FFmpeg path!\"\n                )\n                return False\n        else:\n            # otherwise assign ffmpeg binaries from system\n            final_path += \"ffmpeg\"\n\n    verbose and logger.debug(\"Final FFmpeg Path: {}\".format(final_path))\n\n    # Final Auto-Validation for FFmeg Binaries. returns final path if test is passed\n    return final_path if validate_ffmpeg(final_path, verbose=verbose) else False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.download_ffmpeg_binaries--download_ffmpeg_binaries","title":"download_ffmpeg_binaries","text":"

Generates FFmpeg Static Binaries for windows(if not available)

Parameters:

Name Type Description Default path string

path for downloading custom FFmpeg executables

required os_windows boolean

is running on Windows OS?

False os_bit string

32-bit or 64-bit OS?

''

Returns: A valid FFmpeg executable path string.

Source code in deffcode/ffhelper.py
def download_ffmpeg_binaries(path, os_windows=False, os_bit=\"\"):\n    \"\"\"\n    ## download_ffmpeg_binaries\n\n    Generates FFmpeg Static Binaries for windows(if not available)\n\n    Parameters:\n        path (string): path for downloading custom FFmpeg executables\n        os_windows (boolean): is running on Windows OS?\n        os_bit (string): 32-bit or 64-bit OS?\n\n    **Returns:** A valid FFmpeg executable path string.\n    \"\"\"\n    final_path = \"\"\n    if os_windows and os_bit:\n        # initialize with available FFmpeg Static Binaries GitHub Server\n        file_url = \"https://github.com/abhiTronix/FFmpeg-Builds/releases/latest/download/ffmpeg-static-{}-gpl.zip\".format(\n            os_bit\n        )\n\n        file_name = os.path.join(\n            os.path.abspath(path), \"ffmpeg-static-{}-gpl.zip\".format(os_bit)\n        )\n        file_path = os.path.join(\n            os.path.abspath(path),\n            \"ffmpeg-static-{}-gpl/bin/ffmpeg.exe\".format(os_bit),\n        )\n        base_path, _ = os.path.split(file_name)  # extract file base path\n        # check if file already exists\n        if os.path.isfile(file_path):\n            final_path += file_path  # skip download if does\n        else:\n            # import libs\n            import zipfile\n\n            # check if given path has write access\n            assert os.access(path, os.W_OK), (\n                \"[Helper:ERROR] :: Permission Denied, Cannot write binaries to directory = \"\n                + path\n            )\n            # remove leftovers if exists\n            os.path.isfile(file_name) and delete_file_safe(file_name)\n            # download and write file to the given path\n            with open(file_name, \"wb\") as f:\n                logger.debug(\n                    \"No Custom FFmpeg path provided. Auto-Installing FFmpeg static binaries from GitHub Mirror now. 
Please wait...\"\n                )\n                # create session\n                with requests.Session() as http:\n                    # setup retry strategy\n                    retries = Retry(\n                        total=3,\n                        backoff_factor=1,\n                        status_forcelist=[429, 500, 502, 503, 504],\n                    )\n                    # Mount it for https usage\n                    adapter = TimeoutHTTPAdapter(timeout=2.0, max_retries=retries)\n                    http.mount(\"https://\", adapter)\n                    response = http.get(file_url, stream=True)\n                    response.raise_for_status()\n                    total_length = (\n                        response.headers.get(\"content-length\")\n                        if \"content-length\" in response.headers\n                        else len(response.content)\n                    )\n                    assert not (\n                        total_length is None\n                    ), \"[Helper:ERROR] :: Failed to retrieve files, check your Internet connectivity!\"\n                    bar = tqdm(total=int(total_length), unit=\"B\", unit_scale=True)\n                    for data in response.iter_content(chunk_size=4096):\n                        f.write(data)\n                        len(data) > 0 and bar.update(len(data))\n                    bar.close()\n            logger.debug(\"Extracting executables.\")\n            with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n                zip_fname, _ = os.path.split(zip_ref.infolist()[0].filename)\n                zip_ref.extractall(base_path)\n            # perform cleaning\n            delete_file_safe(file_name)\n            logger.debug(\"FFmpeg binaries for Windows configured successfully!\")\n            final_path += file_path\n    # return final path\n    return final_path\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_ffmpeg--validate_ffmpeg","title":"validate_ffmpeg","text":"

Validate FFmpeg Binaries. Returns True if validity test passes successfully.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_ffmpeg(path, verbose=False):\n    \"\"\"\n    ## validate_ffmpeg\n\n    Validate FFmpeg Binaries. Returns `True` if validity test passes successfully.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    try:\n        # get the FFmpeg version\n        version = check_sp_output([path, \"-version\"])\n        firstline = version.split(b\"\\n\")[0]\n        version = firstline.split(b\" \")[2].strip()\n        if verbose:  # log if test are passed\n            logger.debug(\"FFmpeg validity Test Passed!\")\n            logger.debug(\n                \"Found valid FFmpeg Version: `{}` installed on this system\".format(\n                    version\n                )\n            )\n    except Exception as e:\n        # log if test are failed\n        if verbose:\n            logger.exception(str(e))\n            logger.warning(\"FFmpeg validity Test Failed!\")\n        return False\n    return True\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_pixfmts--get_supported_pixfmts","title":"get_supported_pixfmts","text":"

Find and returns all FFmpeg's supported pixel formats.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).

Source code in deffcode/ffhelper.py
def get_supported_pixfmts(path):\n    \"\"\"\n    ## get_supported_pixfmts\n\n    Find and returns all FFmpeg's supported pixel formats.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported pixel formats as (PIXEL FORMAT, NB_COMPONENTS, BITS_PER_PIXEL).\n    \"\"\"\n    pxfmts = check_sp_output([path, \"-hide_banner\", \"-pix_fmts\"])\n    splitted = pxfmts.split(b\"\\n\")\n    srtindex = [i for i, s in enumerate(splitted) if b\"-----\" in s]\n    # extract video encoders\n    supported_pxfmts = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[srtindex[0] + 1 :]\n        if x.decode(\"utf-8\").strip()\n    ]\n    # compile regex\n    finder = re.compile(r\"([A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*)(\\s+[0-4])(\\s+[0-9]+)\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_pxfmts))\n    # return output findings\n    return [\n        ([s for s in o[0].split(\" \")][-1], o[1].strip(), o[2].strip())\n        for o in outputs\n        if len(o) == 3\n    ]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_vdecoders--get_supported_vdecoders","title":"get_supported_vdecoders","text":"

Find and returns all FFmpeg's supported video decoders.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported decoders.

Source code in deffcode/ffhelper.py
def get_supported_vdecoders(path):\n    \"\"\"\n    ## get_supported_vdecoders\n\n    Find and returns all FFmpeg's supported video decoders.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported decoders.\n    \"\"\"\n    decoders = check_sp_output([path, \"-hide_banner\", \"-decoders\"])\n    splitted = decoders.split(b\"\\n\")\n    # extract video encoders\n    supported_vdecoders = [\n        x.decode(\"utf-8\").strip()\n        for x in splitted[2 : len(splitted) - 1]\n        if x.decode(\"utf-8\").strip().startswith(\"V\")\n    ]\n    # compile regex\n    finder = re.compile(r\"[A-Z]*[\\.]+[A-Z]*\\s[a-z0-9_-]*\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_vdecoders))\n    # return output findings\n    return [[s for s in o.split(\" \")][-1] for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.get_supported_demuxers--get_supported_demuxers","title":"get_supported_demuxers","text":"

Find and returns all FFmpeg's supported demuxers.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required

Returns: List of supported demuxers.

Source code in deffcode/ffhelper.py
def get_supported_demuxers(path):\n    \"\"\"\n    ## get_supported_demuxers\n\n    Find and returns all FFmpeg's supported demuxers.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n\n    **Returns:** List of supported demuxers.\n    \"\"\"\n    demuxers = check_sp_output([path, \"-hide_banner\", \"-demuxers\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in demuxers.split(b\"\\n\")]\n    split_index = [idx for idx, s in enumerate(splitted) if \"--\" in s][0]\n    supported_demuxers = splitted[split_index + 1 : len(splitted) - 1]\n    # compile regex\n    finder = re.compile(r\"\\s\\s[a-z0-9_,-]+\\s+\")\n    # find all outputs\n    outputs = finder.findall(\"\\n\".join(supported_demuxers))\n    # return output findings\n    return [o.strip() if not (\",\" in o) else o.split(\",\")[-1].strip() for o in outputs]\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.validate_imgseqdir--validate_imgseqdir","title":"validate_imgseqdir","text":"

Validates Image Sequence by counting number of Image files.

Parameters:

Name Type Description Default source string

video source to be validated

required extension string

extension of image sequence.

'jpg'

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def validate_imgseqdir(source, extension=\"jpg\", verbose=False):\n    \"\"\"\n    ## validate_imgseqdir\n\n    Validates Image Sequence by counting number of Image files.\n\n    Parameters:\n        source (string): video source to be validated\n        extension (string): extension of image sequence.\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    # check if path exists\n    dirpath = Path(source).parent\n    try:\n        if not (dirpath.exists() and dirpath.is_dir()):\n            verbose and logger.warning(\n                \"Specified path `{}` doesn't exists or valid.\".format(dirpath)\n            )\n            return False\n        else:\n            return (\n                True if len(list(dirpath.glob(\"*.{}\".format(extension)))) > 2 else False\n            )\n    except:\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_image_seq--is_valid_image_seq","title":"is_valid_image_seq","text":"

Checks Image sequence validity by testing its extension against FFmpeg's supported pipe formats and number of Image files.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required source string

video source to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_image_seq(path, source=None, verbose=False):\n    \"\"\"\n    ## is_valid_image_seq\n\n    Checks Image sequence validity by testing its extension against\n    FFmpeg's supported pipe formats and number of Image files.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        source (string): video source to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if source is None or not (source):\n        logger.error(\"Source is empty!\")\n        return False\n    # extract all FFmpeg supported protocols\n    formats = check_sp_output([path, \"-hide_banner\", \"-formats\"])\n    extract_formats = re.findall(r\"\\w+_pipe\", formats.decode(\"utf-8\").strip())\n    supported_image_formats = [\n        x.split(\"_\")[0] for x in extract_formats if x.endswith(\"_pipe\")\n    ]\n    filename, extension = os.path.splitext(source)\n    # Test and return result whether scheme is supported\n    if extension and source.endswith(tuple(supported_image_formats)):\n        if validate_imgseqdir(source, extension=extension[1:], verbose=verbose):\n            verbose and logger.debug(\n                \"A valid Image Sequence source of format `{}` found.\".format(extension)\n            )\n            return True\n        else:\n            ValueError(\n                \"Given Image Sequence source of format `{}` contains insignificant(invalid) sample size, Check the `source` parameter value again!\".format(\n                    source.split(\".\")[1]\n                )\n            )\n    else:\n        verbose and logger.warning(\"Source isn't a valid Image Sequence\")\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.is_valid_url--is_valid_url","title":"is_valid_url","text":"

Checks URL validity by testing its scheme against FFmpeg's supported protocols.

Parameters:

Name Type Description Default path string

absolute path of FFmpeg binaries

required url string

URL to be validated

None verbose bool

enables verbose for its operations

False

Returns: A boolean value, confirming whether tests passed, or not?.

Source code in deffcode/ffhelper.py
def is_valid_url(path, url=None, verbose=False):\n    \"\"\"\n    ## is_valid_url\n\n    Checks URL validity by testing its scheme against\n    FFmpeg's supported protocols.\n\n    Parameters:\n        path (string): absolute path of FFmpeg binaries\n        url (string): URL to be validated\n        verbose (bool): enables verbose for its operations\n\n    **Returns:** A boolean value, confirming whether tests passed, or not?.\n    \"\"\"\n    if url is None or not (url):\n        logger.warning(\"URL is empty!\")\n        return False\n    # extract URL scheme\n    extracted_scheme_url = url.split(\"://\", 1)[0]\n    # extract all FFmpeg supported protocols\n    protocols = check_sp_output([path, \"-hide_banner\", \"-protocols\"])\n    splitted = [x.decode(\"utf-8\").strip() for x in protocols.split(b\"\\n\")]\n    supported_protocols = splitted[splitted.index(\"Output:\") + 1 : len(splitted) - 1]\n    # RTSP is a demuxer somehow\n    # support both RTSP and RTSPS(over SSL)\n    supported_protocols += (\n        [\"rtsp\", \"rtsps\"] if \"rtsp\" in get_supported_demuxers(path) else []\n    )\n    # Test and return result whether scheme is supported\n    if extracted_scheme_url and extracted_scheme_url in supported_protocols:\n        verbose and logger.debug(\n            \"URL scheme `{}` is supported by FFmpeg.\".format(extracted_scheme_url)\n        )\n        return True\n    else:\n        verbose and logger.warning(\n            \"URL scheme `{}` isn't supported by FFmpeg!\".format(extracted_scheme_url)\n        )\n        return False\n
"},{"location":"reference/ffhelper/#deffcode.ffhelper.check_sp_output--check_sp_output","title":"check_sp_output","text":"

Returns FFmpeg stdout output from subprocess module.

Parameters:

Name Type Description Default args based on input

Non Keyword Arguments

() kwargs based on input

Keyword Arguments

{}

Returns: A string value.

Source code in deffcode/ffhelper.py
def check_sp_output(*args, **kwargs):\n    \"\"\"\n    ## check_sp_output\n\n    Returns FFmpeg `stdout` output from subprocess module.\n\n    Parameters:\n        args (based on input): Non Keyword Arguments\n        kwargs (based on input): Keyword Arguments\n\n    **Returns:** A string value.\n    \"\"\"\n    # workaround for python bug: https://bugs.python.org/issue37380\n    if platform.system() == \"Windows\":\n        # see comment https://bugs.python.org/msg370334\n        sp._cleanup = lambda: None\n    # handle additional params\n    retrieve_stderr = kwargs.pop(\"force_retrieve_stderr\", False)\n    # execute command in subprocess\n    process = sp.Popen(\n        stdout=sp.PIPE,\n        stderr=sp.DEVNULL if not (retrieve_stderr) else sp.PIPE,\n        *args,\n        **kwargs,\n    )\n    # communicate and poll process\n    output, stderr = process.communicate()\n    retcode = process.poll()\n    # handle return code\n    if retcode and not (retrieve_stderr):\n        logger.error(\"[Pipline-Error] :: {}\".format(output.decode(\"utf-8\")))\n        cmd = kwargs.get(\"args\")\n        if cmd is None:\n            cmd = args[0]\n        error = sp.CalledProcessError(retcode, cmd)\n        error.output = output\n        raise error\n    # raise error if no output\n    bool(output) or bool(stderr) or logger.error(\n        \"[Pipline-Error] :: Pipline failed to exact any data from command: {}!\".format(\n            args[0] if args else []\n        )\n    )\n    # return output otherwise\n    return stderr if retrieve_stderr and stderr else output\n
"},{"location":"reference/utils/","title":"deffcode.utils","text":"

Following are the helper methods required by the DeFFcode APIs.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

"},{"location":"reference/utils/#deffcode.utils.logger_handler--logger_handler","title":"logger_handler","text":"

Returns the logger handler

Returns: A logger handler

Source code in deffcode/utils.py
def logger_handler():\n    \"\"\"\n    ## logger_handler\n\n    Returns the logger handler\n\n    **Returns:** A logger handler\n    \"\"\"\n    # logging formatter\n    formatter = ColoredFormatter(\n        \"{green}{asctime}{reset} :: {bold_purple}{name:^13}{reset} :: {log_color}{levelname:^8}{reset} :: {bold_white}{message}\",\n        datefmt=\"%H:%M:%S\",\n        reset=True,\n        log_colors={\n            \"INFO\": \"bold_cyan\",\n            \"DEBUG\": \"bold_yellow\",\n            \"WARNING\": \"bold_red,fg_thin_yellow\",\n            \"ERROR\": \"bold_red\",\n            \"CRITICAL\": \"bold_red,bg_white\",\n        },\n        style=\"{\",\n    )\n    # check if FFdecoder_LOGFILE defined\n    file_mode = os.environ.get(\"DEFFCODE_LOGFILE\", False)\n    # define handler\n    handler = logging.StreamHandler()\n    if file_mode and isinstance(file_mode, str):\n        file_path = os.path.abspath(file_mode)\n        if (os.name == \"nt\" or os.access in os.supports_effective_ids) and os.access(\n            os.path.dirname(file_path), os.W_OK\n        ):\n            file_path = (\n                os.path.join(file_path, \"deffcode.log\")\n                if os.path.isdir(file_path)\n                else file_path\n            )\n            handler = logging.FileHandler(file_path, mode=\"a\")\n            formatter = logging.Formatter(\n                \"{asctime} :: {name} :: {levelname} :: {message}\",\n                datefmt=\"%H:%M:%S\",\n                style=\"{\",\n            )\n\n    handler.setFormatter(formatter)\n    return handler\n
"},{"location":"reference/utils/#deffcode.utils.dict2Args--dict2args","title":"dict2Args","text":"

Converts dictionary attributes to list(args)

Parameters:

Name Type Description Default param_dict dict

Parameters dictionary

required

Returns: Arguments list

Source code in deffcode/utils.py
def dict2Args(param_dict):\n    \"\"\"\n    ## dict2Args\n\n    Converts dictionary attributes to list(args)\n\n    Parameters:\n        param_dict (dict): Parameters dictionary\n\n    **Returns:** Arguments list\n    \"\"\"\n    args = []\n    for key in param_dict.keys():\n        if key in [\"-clones\"] or key.startswith(\"-core\"):\n            if isinstance(param_dict[key], list):\n                args.extend(param_dict[key])\n            else:\n                logger.warning(\n                    \"{} with invalid datatype:`{}`, Skipped!\".format(\n                        \"Core parameter\" if key.startswith(\"-core\") else \"Clone\",\n                        param_dict[key],\n                    )\n                )\n        else:\n            args.append(key)\n            args.append(str(param_dict[key]))\n    return args\n
"},{"location":"reference/utils/#deffcode.utils.delete_file_safe--delete_ext_safe","title":"delete_ext_safe","text":"

Safely deletes files at given path.

Parameters:

Name Type Description Default file_path string

path to the file

required Source code in deffcode/utils.py
def delete_file_safe(file_path):\n    \"\"\"\n    ## delete_ext_safe\n\n    Safely deletes files at given path.\n\n    Parameters:\n        file_path (string): path to the file\n    \"\"\"\n    try:\n        dfile = Path(file_path)\n        if sys.version_info >= (3, 8, 0):\n            dfile.unlink(missing_ok=True)\n        else:\n            dfile.exists() and dfile.unlink()\n    except Exception as e:\n        logger.exception(str(e))\n
"},{"location":"reference/ffdecoder/","title":"FFdecoder API","text":"

FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1

FFdecoder API implements a standalone highly-extensible wrapper around FFmpeg multimedia framework that provides complete control over the underline pipeline including access to almost any FFmpeg specification thinkable such as framerate, resolution, hardware decoder(s), complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.

FFdecoder API compiles its FFmpeg pipeline by processing input Video Source metadata and User-defined options, and runs it inside a subprocess pipe concurrently with the main thread, while extracting output dataframes(1D arrays) into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into 24-bit RGB (default) ndarray 3D frames that are readily available through its generateFrame() method.

FFdecoder API employs Sourcer API at its backend for gathering, processing, and validating metadata of all multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. This metadata information is also available as a JSON string with its metadata property object and can be updated as desired.

FFdecoder API supports a wide-ranging media stream as input source such as USB/Virtual/IP Camera Feed, Multimedia video file, Screen Capture, Image Sequence, Network protocols (such as HTTP(s), RTP/RSTP, etc.), so on and so forth.

Furthermore, FFdecoder API maintains the standard OpenCV-Python (Python API for OpenCV) coding syntax, thereby making it even easier to integrate this API in any Computer Vision application.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

FFdecoder API parameters are explained here \u27b6

Source code in deffcode/ffdecoder.py
class FFdecoder:\n    \"\"\"\n    > FFdecoder API compiles and executes the FFmpeg pipeline inside a subprocess pipe for generating real-time, low-overhead, lightning fast video frames\n    with robust error-handling in python \ud83c\udf9e\ufe0f\u26a1\n\n    FFdecoder API implements a **standalone highly-extensible wrapper around [FFmpeg](https://ffmpeg.org/)** multimedia framework that provides complete\n    control over the underline pipeline including **access to almost any FFmpeg specification thinkable** such as framerate, resolution, hardware decoder(s),\n    complex filter(s), and pixel format(s) that are readily supported by all well known Computer Vision libraries.\n\n    FFdecoder API **compiles its FFmpeg pipeline** by processing input Video Source metadata and User-defined options, and **runs it inside a\n    [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe** concurrently with the main thread, while extracting output dataframes(1D arrays)\n    into a Numpy buffer. These dataframes are consecutively grabbed from the buffer and decoded into ==[24-bit RGB](https://en.wikipedia.org/wiki/List_of_monochrome_and_RGB_color_formats#24-bit_RGB) _(default)_\n    [`ndarray`](https://numpy.org/doc/stable/reference/arrays.ndarray.html#the-n-dimensional-array-ndarray) 3D frames== that are readily available\n    through its [`generateFrame()`](#deffcode.ffdecoder.FFdecoder.generateFrame) method.\n\n    FFdecoder API **employs [Sourcer API](../../reference/sourcer) at its backend** for gathering, processing, and validating metadata of all\n    multimedia streams available in the given source for formulating/compiling its default FFmpeg pipeline. 
This metadata information is also\n    available as a JSON string with its [`metadata`](#deffcode.ffdecoder.FFdecoder.metadata) property object and can be updated as desired.\n\n    FFdecoder API **supports a wide-ranging media stream** as input source such as USB/Virtual/IP Camera Feed, Multimedia video file,\n    Screen Capture, Image Sequence, Network protocols _(such as HTTP(s), RTP/RSTP, etc.)_, so on and so forth.\n\n    Furthermore, FFdecoder API maintains the **standard [OpenCV-Python](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html) _(Python API for OpenCV)_ coding syntax**, thereby making it even easier to\n    integrate this API in any Computer Vision application.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! info \"FFdecoder API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        frame_format=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **ffparams\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n\n        # enable verbose if specified\n        self.__verbose_logs = (\n            verbose if (verbose and isinstance(verbose, bool)) else False\n    
    )\n\n        # define whether initializing\n        self.__initializing = True\n\n        # define frame pixel-format for decoded frames\n        self.__frame_format = (\n            frame_format.lower().strip() if isinstance(frame_format, str) else None\n        )\n\n        # handles user-defined parameters\n        self.__extra_params = {}\n\n        # handle process to be frames written\n        self.__process = None\n\n        # handle exclusive metadata\n        self.__ff_pixfmt_metadata = None  # metadata\n        self.__raw_frame_num = None  # raw-frame number\n        self.__raw_frame_pixfmt = None  # raw-frame pixformat\n        self.__raw_frame_dtype = None  # raw-frame dtype\n        self.__raw_frame_depth = None  # raw-frame depth\n        self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n        # define supported mode of operation\n        self.__supported_opmodes = {\n            \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n            \"vo\": \"Video-Only\",\n            \"imgseq\": \"Image-Sequence\",\n            # \"ao\":\"Audio-Only\", # reserved for future\n        }\n        # operation mode variable\n        self.__opmode = None\n\n        # handle termination\n        self.__terminate_stream = False\n\n        # cleans and reformat user-defined parameters\n        self.__extra_params = {\n            str(k).strip(): str(v).strip()\n            if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n            else v\n            for k, v in ffparams.items()\n        }\n\n        # handle custom Sourcer API params\n        sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n        # reset improper values\n        sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n        # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        self.__ffmpeg_prefixes = 
self.__extra_params.pop(\"-ffprefixes\", [])\n        # check if not valid type\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n        else:\n            # also pass valid ffmpeg pre-headers to Sourcer API\n            sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n        # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n        # assets on Windows(if specified)\n        sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n            \"-ffmpeg_download_path\", \"\"\n        )\n\n        # handle video and audio stream indexes in case of multiple ones.\n        default_stream_indexes = self.__extra_params.pop(\n            \"-default_stream_indexes\", (0, 0)\n        )\n        # reset improper values\n        default_stream_indexes = (\n            (0, 0)\n            if not isinstance(default_stream_indexes, (list, tuple))\n            else default_stream_indexes\n        )\n\n        # pass FFmpeg filter to Sourcer API params for processing\n        if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n            key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n            sourcer_params[key] = self.__extra_params[key]\n\n        # define dict to store user-defined parameters\n        self.__user_metadata = {}\n        # extract and assign source metadata as dict\n        (self.__sourcer_metadata, self.__missing_prop) = (\n            Sourcer(\n                source=source,\n                source_demuxer=source_demuxer,\n                verbose=verbose,\n                custom_ffmpeg=custom_ffmpeg if 
isinstance(custom_ffmpeg, str) else \"\",\n                **sourcer_params\n            )\n            .probe_stream(default_stream_indexes=default_stream_indexes)\n            .retrieve_metadata(force_retrieve_missing=True)\n        )\n\n        # handle valid FFmpeg assets location\n        self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n        # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n        # patch for compatibility with OpenCV APIs.\n        self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n        if not (isinstance(self.__cv_patch, bool)):\n            self.__cv_patch = False\n            self.__verbose_logs and logger.critical(\n                \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n            )\n\n        # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n        self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n        if not (isinstance(self.__passthrough_mode, bool)):\n            self.__passthrough_mode = False\n\n        # handle mode of operation\n        if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n            # image-sequence mode\n            self.__opmode = \"imgseq\"\n        elif (\n            self.__sourcer_metadata[\n                \"source_has_video\"\n            ]  # audio is only for pass-through, not really for audio decoding yet.\n            and self.__sourcer_metadata[\"source_has_audio\"]\n            and self.__passthrough_mode  # [TODO]\n        ):\n            self.__opmode = \"av\"\n        # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n        #    self.__opmode = \"ao\"\n        elif self.__sourcer_metadata[\"source_has_video\"]:\n            # video-only mode\n            self.__opmode = \"vo\"\n        else:\n            # raise if unknown mode\n            raise ValueError(\n                \"Unable to find any 
usable video stream in the given source!\"\n            )\n        # store as metadata\n        self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n            self.__opmode\n        ]\n\n        # handle user-defined output framerate\n        __framerate = self.__extra_params.pop(\"-framerate\", None)\n        if (\n            isinstance(__framerate, str)\n            and __framerate\n            == \"null\"  # special mode to discard `-framerate/-r` parameter\n        ):\n            self.__inputframerate = __framerate\n        elif isinstance(__framerate, (float, int)):\n            self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n        else:\n            # warn if wrong type\n            not (__framerate is None) and logger.warning(\n                \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                    type(__framerate).__name__\n                )\n            )\n            # reset to default\n            self.__inputframerate = 0.0\n\n        # handle user defined decoded frame resolution\n        self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n        if (\n            isinstance(self.__custom_resolution, str)\n            and self.__custom_resolution\n            == \"null\"  # special mode to discard `-size/-s` parameter\n        ) or (\n            isinstance(self.__custom_resolution, (list, tuple))\n            and len(self.__custom_resolution)\n            == 2  # valid resolution(must be a tuple or list)\n        ):\n            # log it\n            self.__verbose_logs and not isinstance(\n                self.__custom_resolution, str\n            ) and logger.debug(\n                \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n            )\n        else:\n            # log it\n            not (self.__custom_resolution is None) and logger.warning(\n                \"Discarding invalid 
`-custom_resolution` value: `{}`!\".format(\n                    self.__custom_resolution\n                )\n            )\n            # reset improper values\n            self.__custom_resolution = None\n\n    def formulate(self):\n\n        \"\"\"\n        This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n        **Returns:** A reference to the FFdecoder class object.\n        \"\"\"\n        # assign values to class variables on first run\n        if self.__initializing:\n            # prepare parameter dict\n            input_params = OrderedDict()\n            output_params = OrderedDict()\n\n            # dynamically pre-assign a default video-decoder (if not assigned by user).\n            supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n            default_vdecodec = (\n                self.__sourcer_metadata[\"source_video_decoder\"]\n                if self.__sourcer_metadata[\"source_video_decoder\"]\n                in supported_vdecodecs\n                else \"unknown\"\n            )\n            if \"-c:v\" in self.__extra_params:\n                self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-c:v\", default_vdecodec\n                )\n            # handle image sequence separately\n            if self.__opmode == \"imgseq\":\n                # -vcodec is discarded by default\n                # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n                self.__extra_params.pop(\"-vcodec\", None)\n            elif (\n                \"-vcodec\" in self.__extra_params\n                and self.__extra_params[\"-vcodec\"] is None\n            ):\n                # special case when -vcodec is not needed intentionally\n                self.__extra_params.pop(\"-vcodec\", None)\n            else:\n                # assign video decoder selected here.\n                if not \"-vcodec\" in 
self.__extra_params:\n                    input_params[\"-vcodec\"] = default_vdecodec\n                else:\n                    input_params[\"-vcodec\"] = self.__extra_params.pop(\n                        \"-vcodec\", default_vdecodec\n                    )\n                if (\n                    default_vdecodec != \"unknown\"\n                    and not input_params[\"-vcodec\"] in supported_vdecodecs\n                ):\n                    # reset to default if not supported\n                    logger.warning(\n                        \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                            input_params[\"-vcodec\"], default_vdecodec\n                        )\n                    )\n                    input_params[\"-vcodec\"] = default_vdecodec\n                # raise error if not valid decoder found\n                if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                    raise RuntimeError(\n                        \"Provided FFmpeg does not support any known usable video-decoders.\"\n                        \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                    )\n\n            # handle user-defined number of frames.\n            if \"-vframes\" in self.__extra_params:\n                self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                    \"-vframes\", None\n                )\n            if \"-frames:v\" in self.__extra_params:\n                value = self.__extra_params.pop(\"-frames:v\", None)\n                if not (value is None) and value > 0:\n                    output_params[\"-frames:v\"] = value\n\n            # dynamically calculate default raw-frames pixel format(if not assigned by user).\n            # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n            if \"-pix_fmt\" in self.__extra_params:\n                
logger.warning(\n                    \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n                )\n                self.__extra_params.pop(\"-pix_fmt\", None)\n            # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n            self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n            supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n            # calculate default pixel-format\n            # Check special case  - `frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n            self.__frame_format == \"null\" and logger.critical(\n                \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n            )\n            # choose between rgb24(if available) or source pixel-format\n            # otherwise, only source pixel-format for special case\n            default_pixfmt = (\n                \"rgb24\"\n                if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n                else self.__sourcer_metadata[\"source_video_pixfmt\"]\n            )\n            # assign output raw-frames pixel format\n            rawframe_pixfmt = None\n            if (\n                not (self.__frame_format is None)\n                and self.__frame_format in supported_pixfmts\n            ):\n                # check if valid and supported `frame_format` parameter assigned\n                rawframe_pixfmt = self.__frame_format.strip()\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                        rawframe_pixfmt\n                    )\n                )\n            elif (\n                \"output_frames_pixfmt\"\n                in self.__sourcer_metadata  # means `format` filter is defined\n                and self.__sourcer_metadata[\"output_frames_pixfmt\"] 
in supported_pixfmts\n            ):\n                # assign if valid and supported\n                rawframe_pixfmt = self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ].strip()\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n                )\n            else:\n                # reset to default if not supported\n                rawframe_pixfmt = default_pixfmt\n                # log it accordingly\n                if self.__frame_format is None:\n                    logger.info(\n                        \"Using default `{}` pixel-format for this pipeline.\".format(\n                            default_pixfmt\n                        )\n                    )\n                else:\n                    logger.warning(\n                        \"{} Switching to default `{}` pixel-format!\".format(\n                            \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                                self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                                if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                                else self.__frame_format\n                            )\n                            if self.__frame_format != \"null\"\n                            else \"No usable pixel-format defined.\",\n                            default_pixfmt,\n                        )\n                    )\n\n            # dynamically calculate raw-frame datatype based on pixel-format selected\n            (self.__raw_frame_depth, rawframesbpp) = [\n                (int(x[1]), int(x[2]))\n                for x in self.__ff_pixfmt_metadata\n                if x[0] == rawframe_pixfmt\n            ][0]\n            raw_bit_per_component = (\n                rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n            )\n     
       if 4 <= raw_bit_per_component <= 8:\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n            elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n                (\"le\", \"be\")\n            ):\n                if rawframe_pixfmt.endswith(\"le\"):\n                    self.__raw_frame_dtype = np.dtype(\"<u2\")\n                else:\n                    self.__raw_frame_dtype = np.dtype(\">u2\")\n            else:\n                # reset to both pixel-format and datatype to default if not supported\n                not (self.__frame_format is None) and logger.warning(\n                    \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. Switching to default `rgb24` pixel-format!\".format(\n                        rawframe_pixfmt\n                    )\n                )\n                rawframe_pixfmt = \"rgb24\"\n                self.__raw_frame_dtype = np.dtype(\"u1\")\n\n            # Check if not special case\n            if self.__frame_format != \"null\":\n                # assign to FFmpeg pipeline otherwise\n                output_params[\"-pix_fmt\"] = rawframe_pixfmt\n            # assign to global parameter further usage\n            self.__raw_frame_pixfmt = rawframe_pixfmt\n            # also override as metadata(if available)\n            if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n                self.__sourcer_metadata[\n                    \"output_frames_pixfmt\"\n                ] = self.__raw_frame_pixfmt\n\n            # handle raw-frame resolution\n            # notify FFmpeg `-s` parameter cannot be assigned directly\n            if \"-s\" in self.__extra_params:\n                logger.warning(\n                    \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n                )\n                self.__extra_params.pop(\"-s\", None)\n            # assign output rawframe resolution\n            if not (self.__custom_resolution is None) and not isinstance(\n                self.__custom_resolution, str\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                self.__raw_frame_resolution = self.__custom_resolution\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                        self.__raw_frame_resolution\n                    )\n                )\n            elif (\n                \"output_frames_resolution\"\n                in self.__sourcer_metadata  # means `scale` filter is defined\n                and self.__sourcer_metadata[\"output_frames_resolution\"]\n                and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on output.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"output_frames_resolution\"\n                ]\n            elif (\n                self.__sourcer_metadata[\"source_video_resolution\"]\n                and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n            ):\n                # calculate raw-frame resolution/dimensions based on source.\n                self.__raw_frame_resolution = self.__sourcer_metadata[\n                    \"source_video_resolution\"\n                ]\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n            # special mode to discard `-size/-s` FFmpeg parameter completely\n            if isinstance(self.__custom_resolution, str):\n                logger.critical(\n                    \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # add to pipeline\n                dimensions = \"{}x{}\".format(\n                    self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n                )\n                output_params[\"-s\"] = str(dimensions)\n            # log if filters or default source is used\n            self.__verbose_logs and (\n                self.__custom_resolution is None\n                or isinstance(self.__custom_resolution, str)\n            ) and logger.info(\n                \"{} for this pipeline for defining output resolution.\".format(\n                    \"FFmpeg filter values will be used\"\n                    if \"output_frames_resolution\" in self.__sourcer_metadata\n                    else \"Default source resolution will be used\"\n                )\n            )\n\n            # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n            if (\n                not isinstance(self.__inputframerate, str)\n                and self.__inputframerate > 0.0\n            ):\n                # assign if assigned by user and not \"null\"(str)\n                output_params[\"-framerate\"] = str(self.__inputframerate)\n                self.__verbose_logs and logger.info(\n                    \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                        str(self.__inputframerate)\n                    )\n                )\n            elif (\n                
\"output_framerate\"\n                in self.__sourcer_metadata  # means `fps` filter is defined\n                and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n            ):\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on output\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"output_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n                )\n            elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n                # special mode to discard `-framerate/-r` FFmpeg parameter completely\n                if self.__inputframerate == \"null\":\n                    logger.critical(\n                        \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                    )\n                else:\n                    # calculate raw-frame framerate based on source\n                    output_params[\"-framerate\"] = str(\n                        self.__sourcer_metadata[\"source_video_framerate\"]\n                    )\n                self.__verbose_logs and logger.info(\n                    \"Default source framerate will be used for this pipeline for defining output framerate.\"\n                )\n            else:\n                # otherwise raise error\n                raise RuntimeError(\n                    \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                        \"null\"\n                        if isinstance(self.__inputframerate, str)\n                        else \"undefined\"\n                    )\n                )\n\n            # add rest to output parameters\n            output_params.update(self.__extra_params)\n\n            # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n            # TODO Added support for `-re -stream_loop` and `-loop`\n            if \"-frames:v\" in input_params:\n                self.__raw_frame_num = input_params[\"-frames:v\"]\n            elif (\n                not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n                and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n            ):\n                self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n            else:\n                self.__raw_frame_num = None\n                # log that number of frames are unknown\n                self.__verbose_logs and logger.info(\n                    \"Number of frames in given source are unknown. 
Live/Network/Looping stream detected!\"\n                )\n\n            # log Mode of Operation\n            self.__verbose_logs and logger.critical(\n                \"Activating {} Mode of Operation.\".format(\n                    self.__supported_opmodes[self.__opmode]\n                )\n            )\n\n            # compose the Pipeline using formulated FFmpeg parameters\n            self.__launch_FFdecoderline(input_params, output_params)\n\n            # inform the initialization is completed\n            self.__initializing = False\n        else:\n            # warn if pipeline is recreated\n            logger.error(\"This pipeline is already created and running!\")\n        return self\n\n    def __fetchNextfromPipeline(self):\n        \"\"\"\n        This Internal method to fetch next dataframes(1D arrays) from `subprocess` pipe's standard output(`stdout`) into a Numpy buffer.\n        \"\"\"\n        assert not (\n            self.__process is None\n        ), \"Pipeline is not running! 
You must call `formulate()` method first.\"\n\n        # formulated raw frame size and apply YUV pixel formats patch(if applicable)\n        raw_frame_size = (\n            (self.__raw_frame_resolution[0] * (self.__raw_frame_resolution[1] * 3 // 2))\n            if self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch\n            else (\n                self.__raw_frame_depth\n                * self.__raw_frame_resolution[0]\n                * self.__raw_frame_resolution[1]\n            )\n        )\n        # next dataframe as numpy ndarray\n        nparray = None\n        try:\n            # read bytes frames from buffer\n            nparray = np.frombuffer(\n                self.__process.stdout.read(\n                    raw_frame_size * self.__raw_frame_dtype.itemsize\n                ),\n                dtype=self.__raw_frame_dtype,\n            )\n        except Exception as e:\n            raise RuntimeError(\"Frame buffering failed with error: {}\".format(str(e)))\n        return (\n            nparray\n            if not (nparray is None) and len(nparray) == raw_frame_size\n            else None\n        )\n\n    def __fetchNextFrame(self):\n        \"\"\"\n        This Internal method grabs and decodes next 3D `ndarray` video-frame from the buffer.\n        \"\"\"\n        # Read next and reconstruct as numpy array\n        frame = self.__fetchNextfromPipeline()\n        # check if empty\n        if frame is None:\n            return frame\n        elif self.__raw_frame_pixfmt.startswith(\"gray\"):\n            # reconstruct exclusive `gray` frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )[:, :, 0]\n        elif self.__raw_frame_pixfmt.startswith((\"yuv\", \"nv\")) and self.__cv_patch:\n            # reconstruct exclusive YUV formats 
frames for OpenCV APIs\n            frame = frame.reshape(\n                self.__raw_frame_resolution[1] * 3 // 2,\n                self.__raw_frame_resolution[0],\n            )\n        else:\n            # reconstruct default frames\n            frame = frame.reshape(\n                (\n                    self.__raw_frame_resolution[1],\n                    self.__raw_frame_resolution[0],\n                    self.__raw_frame_depth,\n                )\n            )\n        # return frame\n        return frame\n\n    def generateFrame(self):\n        \"\"\"\n        This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n        _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n        \"\"\"\n        if self.__raw_frame_num is None or not self.__raw_frame_num:\n            while not self.__terminate_stream:  # infinite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n        else:\n            for _ in range(self.__raw_frame_num):  # finite raw frames\n                frame = self.__fetchNextFrame()\n                if frame is None:\n                    self.__terminate_stream = True\n                    break\n                yield frame\n\n    def __enter__(self):\n        \"\"\"\n        Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n        **Returns:** Output of `formulate()` method.\n        \"\"\"\n        return self.formulate()\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"\n        Handles exit with the `with` statement. 
See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n        \"\"\"\n        self.terminate()\n\n    @property\n    def metadata(self):\n        \"\"\"\n        A property object that dumps metadata information as JSON string.\n\n        **Returns:** Metadata as JSON string.\n        \"\"\"\n        # import dependency\n        import json\n\n        # return complete metadata information as JSON string\n        return json.dumps(\n            {\n                **self.__sourcer_metadata,  # source video\n                **self.__missing_prop,  # missing properties\n                **self.__user_metadata,  # user-defined\n            },\n            indent=2,\n        )\n\n    @metadata.setter\n    def metadata(self, value):\n        \"\"\"\n        A property object that updates metadata information with user-defined dictionary.\n\n        Parameters:\n            value (dict): User-defined dictionary.\n        \"\"\"\n        # check if value dict type\n        if value and isinstance(value, dict):\n            # log it\n            self.__verbose_logs and logger.info(\"Updating Metadata...\")\n            # extract any source and output internal metadata keys\n            default_keys = set(value).intersection(\n                {**self.__sourcer_metadata, **self.__missing_prop}\n            )\n            # counterpart source properties for each output properties\n            counterpart_prop = {\n                \"output_frames_resolution\": \"source_video_resolution\",\n                \"output_frames_pixfmt\": \"source_video_pixfmt\",\n                \"output_framerate\": \"source_video_framerate\",\n            }\n            # iterate over source metadata keys and sanitize it\n            for key in default_keys or []:\n                if key == \"source\":\n                    # metadata properties that cannot be altered\n                    logger.warning(\n                        \"`{}` metadata property value cannot be altered. 
Discarding!\".format(\n                            key\n                        )\n                    )\n                elif key in self.__missing_prop:\n                    # missing metadata properties are unavailable and read-only\n                    # notify user about alternative counterpart property (if available)\n                    logger.warning(\n                        \"`{}` metadata property is read-only\".format(key)\n                        + (\n                            \". Try updating `{}` property instead!\".format(\n                                counterpart_prop[key]\n                            )\n                            if key in counterpart_prop.keys()\n                            else \" and cannot be updated!\"\n                        )\n                    )\n                elif isinstance(value[key], type(self.__sourcer_metadata[key])):\n                    # check if correct datatype as original\n                    self.__verbose_logs and logger.info(\n                        \"Updating `{}`{} metadata property to `{}`.\".format(\n                            key,\n                            \" and its counterpart\"\n                            if key in counterpart_prop.values()\n                            else \"\",\n                            value[key],\n                        )\n                    )\n                    # update source metadata if valid\n                    self.__sourcer_metadata[key] = value[key]\n                    # also update missing counterpart property (if available)\n                    counter_key = next(\n                        (k for k, v in counterpart_prop.items() if v == key), \"\"\n                    )\n                    if counter_key:\n                        self.__missing_prop[counter_key] = value[key]\n                else:\n                    # otherwise discard and log it\n                    logger.warning(\n                        \"Manually assigned `{}` metadata 
property value is of invalid type. Discarding!\"\n                    ).format(key)\n                # delete invalid key\n                del value[key]\n            # There is no concept of a tuple in the JSON format.\n            # Python's `json` module converts Python tuples to JSON lists\n            # because that's the closest thing in JSON to a tuple.\n            any(isinstance(value[x], tuple) for x in value) and logger.warning(\n                \"All TUPLE metadata properties will be converted to LIST datatype. Read docs for more details.\"\n            )\n            # update user-defined metadata\n            self.__user_metadata.update(value)\n        else:\n            # otherwise raise error\n            raise ValueError(\"Invalid datatype metadata assigned. Aborting!\")\n\n    def __launch_FFdecoderline(self, input_params, output_params):\n\n        \"\"\"\n        This Internal method executes FFmpeg pipeline arguments inside a `subprocess` pipe in a new process.\n\n        Parameters:\n            input_params (dict): Input FFmpeg parameters\n            output_params (dict): Output FFmpeg parameters\n        \"\"\"\n        # convert input parameters to list\n        input_parameters = dict2Args(input_params)\n\n        # convert output parameters to list\n        output_parameters = dict2Args(output_params)\n\n        # format command\n        cmd = (\n            [self.__ffmpeg]\n            + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n            + self.__ffmpeg_prefixes\n            + input_parameters\n            + (\n                [\"-f\", self.__sourcer_metadata[\"source_demuxer\"]]\n                if (\"source_demuxer\" in self.__sourcer_metadata.keys())\n                else []\n            )\n            + [\"-i\", self.__sourcer_metadata[\"source\"]]\n            + output_parameters\n            + [\"-f\", \"rawvideo\", \"-\"]\n        )\n        # compose the FFmpeg process\n        if self.__verbose_logs:\n         
   logger.debug(\"Executing FFmpeg command: `{}`\".format(\" \".join(cmd)))\n            # In debugging mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=None\n            )\n        else:\n            # In silent mode\n            self.__process = sp.Popen(\n                cmd, stdin=sp.DEVNULL, stdout=sp.PIPE, stderr=sp.DEVNULL\n            )\n\n    def terminate(self):\n        \"\"\"\n        Safely terminates all processes.\n        \"\"\"\n\n        # signal we are closing\n        self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n        self.__terminate_stream = True\n        # check if no process was initiated at first place\n        if self.__process is None or not (self.__process.poll() is None):\n            logger.info(\"Pipeline already terminated.\")\n            return\n        # Attempt to close pipeline.\n        # close `stdin` output\n        self.__process.stdin and self.__process.stdin.close()\n        # close `stdout` output\n        self.__process.stdout and self.__process.stdout.close()\n        # terminate/kill process if still processing\n        if self.__process.poll() is None:\n            # demuxers prefer kill\n            self.__process.kill()\n        # wait if not exiting\n        self.__process.wait()\n        self.__process = None\n        logger.info(\"Pipeline terminated successfully.\")\n

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.metadata","title":"metadata property writable","text":"

A property object that dumps metadata information as JSON string.

Returns: Metadata as JSON string.

"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__enter__","title":"__enter__(self) special","text":"

Handles entry with the with statement. See PEP343 -- The 'with' statement'.

Returns: Output of formulate() method.

Source code in deffcode/ffdecoder.py
def __enter__(self):\n    \"\"\"\n    Handles entry with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n\n    **Returns:** Output of `formulate()` method.\n    \"\"\"\n    return self.formulate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__exit__","title":"__exit__(self, exc_type, exc_val, exc_tb) special","text":"

Handles exit with the with statement. See PEP343 -- The 'with' statement'.

Source code in deffcode/ffdecoder.py
def __exit__(self, exc_type, exc_val, exc_tb):\n    \"\"\"\n    Handles exit with the `with` statement. See [PEP343 -- The 'with' statement'](https://peps.python.org/pep-0343/).\n    \"\"\"\n    self.terminate()\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.__init__","title":"__init__(self, source, source_demuxer=None, frame_format=None, custom_ffmpeg='', verbose=False, **ffparams) special","text":"

This constructor method initializes the object state and attributes of the FFdecoder Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None frame_format str

sets pixel format(-pix_fmt) of the decoded frames.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False ffparams dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/ffdecoder.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    frame_format=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **ffparams\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the FFdecoder Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        frame_format (str): sets pixel format(`-pix_fmt`) of the decoded frames.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        ffparams (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n\n    # enable verbose if specified\n    self.__verbose_logs = (\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # define whether initializing\n    self.__initializing = True\n\n    # define frame pixel-format for decoded frames\n    self.__frame_format = (\n        frame_format.lower().strip() if isinstance(frame_format, str) else None\n    )\n\n    # handles user-defined parameters\n    self.__extra_params = {}\n\n    # handle process to be frames written\n    self.__process = None\n\n    # handle exclusive metadata\n    self.__ff_pixfmt_metadata = None  # metadata\n    self.__raw_frame_num = None  # raw-frame number\n    self.__raw_frame_pixfmt = None  # raw-frame pixformat\n    self.__raw_frame_dtype = None  # raw-frame dtype\n    self.__raw_frame_depth = None  # raw-frame depth\n    self.__raw_frame_resolution = None  # raw-frame resolution/dimension\n\n    # define supported mode of operation\n    self.__supported_opmodes = {\n        \"av\": \"Audio-Video\",  # audio is only for pass-through, not really for audio decoding yet.\n        \"vo\": \"Video-Only\",\n        \"imgseq\": \"Image-Sequence\",\n        # 
\"ao\":\"Audio-Only\", # reserved for future\n    }\n    # operation mode variable\n    self.__opmode = None\n\n    # handle termination\n    self.__terminate_stream = False\n\n    # cleans and reformat user-defined parameters\n    self.__extra_params = {\n        str(k).strip(): str(v).strip()\n        if not (v is None) and not isinstance(v, (dict, list, int, float, tuple))\n        else v\n        for k, v in ffparams.items()\n    }\n\n    # handle custom Sourcer API params\n    sourcer_params = self.__extra_params.pop(\"-custom_sourcer_params\", {})\n    # reset improper values\n    sourcer_params = {} if not isinstance(sourcer_params, dict) else sourcer_params\n\n    # handle user ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__extra_params.pop(\"-ffprefixes\", [])\n    # check if not valid type\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type: `{}`!\".format(\n                type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n    else:\n        # also pass valid ffmpeg pre-headers to Sourcer API\n        sourcer_params[\"-ffprefixes\"] = self.__ffmpeg_prefixes\n\n    # pass parameter(if specified) to Sourcer API, specifying where to save the downloaded FFmpeg Static\n    # assets on Windows(if specified)\n    sourcer_params[\"-ffmpeg_download_path\"] = self.__extra_params.pop(\n        \"-ffmpeg_download_path\", \"\"\n    )\n\n    # handle video and audio stream indexes in case of multiple ones.\n    default_stream_indexes = self.__extra_params.pop(\n        \"-default_stream_indexes\", (0, 0)\n    )\n    # reset improper values\n    default_stream_indexes = (\n        (0, 0)\n        if not isinstance(default_stream_indexes, (list, tuple))\n        else default_stream_indexes\n    )\n\n    # pass 
FFmpeg filter to Sourcer API params for processing\n    if set([\"-vf\", \"-filter_complex\"]).intersection(self.__extra_params.keys()):\n        key = \"-vf\" if \"-vf\" in self.__extra_params else \"-filter_complex\"\n        sourcer_params[key] = self.__extra_params[key]\n\n    # define dict to store user-defined parameters\n    self.__user_metadata = {}\n    # extract and assign source metadata as dict\n    (self.__sourcer_metadata, self.__missing_prop) = (\n        Sourcer(\n            source=source,\n            source_demuxer=source_demuxer,\n            verbose=verbose,\n            custom_ffmpeg=custom_ffmpeg if isinstance(custom_ffmpeg, str) else \"\",\n            **sourcer_params\n        )\n        .probe_stream(default_stream_indexes=default_stream_indexes)\n        .retrieve_metadata(force_retrieve_missing=True)\n    )\n\n    # handle valid FFmpeg assets location\n    self.__ffmpeg = self.__sourcer_metadata[\"ffmpeg_binary_path\"]\n\n    # handle YUV pixel formats(such as `yuv420p`, `yuv444p`, `nv12`, `nv21` etc.)\n    # patch for compatibility with OpenCV APIs.\n    self.__cv_patch = self.__extra_params.pop(\"-enforce_cv_patch\", False)\n    if not (isinstance(self.__cv_patch, bool)):\n        self.__cv_patch = False\n        self.__verbose_logs and logger.critical(\n            \"Enforcing OpenCV compatibility patch for YUV/NV frames.\"\n        )\n\n    # handle pass-through audio mode works in conjunction with WriteGear [TODO]\n    self.__passthrough_mode = self.__extra_params.pop(\"-passthrough_audio\", False)\n    if not (isinstance(self.__passthrough_mode, bool)):\n        self.__passthrough_mode = False\n\n    # handle mode of operation\n    if self.__sourcer_metadata[\"source_has_image_sequence\"]:\n        # image-sequence mode\n        self.__opmode = \"imgseq\"\n    elif (\n        self.__sourcer_metadata[\n            \"source_has_video\"\n        ]  # audio is only for pass-through, not really for audio decoding yet.\n        and 
self.__sourcer_metadata[\"source_has_audio\"]\n        and self.__passthrough_mode  # [TODO]\n    ):\n        self.__opmode = \"av\"\n    # elif __defop_mode == \"ao\" and self.__sourcer_metadata.contains_audio: # [TODO]\n    #    self.__opmode = \"ao\"\n    elif self.__sourcer_metadata[\"source_has_video\"]:\n        # video-only mode\n        self.__opmode = \"vo\"\n    else:\n        # raise if unknown mode\n        raise ValueError(\n            \"Unable to find any usable video stream in the given source!\"\n        )\n    # store as metadata\n    self.__missing_prop[\"ffdecoder_operational_mode\"] = self.__supported_opmodes[\n        self.__opmode\n    ]\n\n    # handle user-defined output framerate\n    __framerate = self.__extra_params.pop(\"-framerate\", None)\n    if (\n        isinstance(__framerate, str)\n        and __framerate\n        == \"null\"  # special mode to discard `-framerate/-r` parameter\n    ):\n        self.__inputframerate = __framerate\n    elif isinstance(__framerate, (float, int)):\n        self.__inputframerate = float(__framerate) if __framerate > 0.0 else 0.0\n    else:\n        # warn if wrong type\n        not (__framerate is None) and logger.warning(\n            \"Discarding invalid `-framerate` value of wrong type `{}`!\".format(\n                type(__framerate).__name__\n            )\n        )\n        # reset to default\n        self.__inputframerate = 0.0\n\n    # handle user defined decoded frame resolution\n    self.__custom_resolution = self.__extra_params.pop(\"-custom_resolution\", None)\n    if (\n        isinstance(self.__custom_resolution, str)\n        and self.__custom_resolution\n        == \"null\"  # special mode to discard `-size/-s` parameter\n    ) or (\n        isinstance(self.__custom_resolution, (list, tuple))\n        and len(self.__custom_resolution)\n        == 2  # valid resolution(must be a tuple or list)\n    ):\n        # log it\n        self.__verbose_logs and not isinstance(\n            
self.__custom_resolution, str\n        ) and logger.debug(\n            \"Setting raw frames size: `{}`.\".format(self.__custom_resolution)\n        )\n    else:\n        # log it\n        not (self.__custom_resolution is None) and logger.warning(\n            \"Discarding invalid `-custom_resolution` value: `{}`!\".format(\n                self.__custom_resolution\n            )\n        )\n        # reset improper values\n        self.__custom_resolution = None\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.formulate","title":"formulate(self)","text":"

This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg subprocess pipe.

Returns: A reference to the FFdecoder class object.

Source code in deffcode/ffdecoder.py
def formulate(self):\n\n    \"\"\"\n    This method formulates all necessary FFmpeg pipeline arguments and executes it inside the FFmpeg `subprocess` pipe.\n\n    **Returns:** A reference to the FFdecoder class object.\n    \"\"\"\n    # assign values to class variables on first run\n    if self.__initializing:\n        # prepare parameter dict\n        input_params = OrderedDict()\n        output_params = OrderedDict()\n\n        # dynamically pre-assign a default video-decoder (if not assigned by user).\n        supported_vdecodecs = get_supported_vdecoders(self.__ffmpeg)\n        default_vdecodec = (\n            self.__sourcer_metadata[\"source_video_decoder\"]\n            if self.__sourcer_metadata[\"source_video_decoder\"]\n            in supported_vdecodecs\n            else \"unknown\"\n        )\n        if \"-c:v\" in self.__extra_params:\n            self.__extra_params[\"-vcodec\"] = self.__extra_params.pop(\n                \"-c:v\", default_vdecodec\n            )\n        # handle image sequence separately\n        if self.__opmode == \"imgseq\":\n            # -vcodec is discarded by default\n            # (This is correct or maybe -vcodec required in some unknown case) [TODO]\n            self.__extra_params.pop(\"-vcodec\", None)\n        elif (\n            \"-vcodec\" in self.__extra_params\n            and self.__extra_params[\"-vcodec\"] is None\n        ):\n            # special case when -vcodec is not needed intentionally\n            self.__extra_params.pop(\"-vcodec\", None)\n        else:\n            # assign video decoder selected here.\n            if not \"-vcodec\" in self.__extra_params:\n                input_params[\"-vcodec\"] = default_vdecodec\n            else:\n                input_params[\"-vcodec\"] = self.__extra_params.pop(\n                    \"-vcodec\", default_vdecodec\n                )\n            if (\n                default_vdecodec != \"unknown\"\n                and not input_params[\"-vcodec\"] in 
supported_vdecodecs\n            ):\n                # reset to default if not supported\n                logger.warning(\n                    \"Provided FFmpeg does not support `{}` video decoder. Switching to default supported `{}` decoder!\".format(\n                        input_params[\"-vcodec\"], default_vdecodec\n                    )\n                )\n                input_params[\"-vcodec\"] = default_vdecodec\n            # raise error if not valid decoder found\n            if not input_params[\"-vcodec\"] in supported_vdecodecs:\n                raise RuntimeError(\n                    \"Provided FFmpeg does not support any known usable video-decoders.\"\n                    \" Either define your own manually or switch to another FFmpeg binaries(if available).\"\n                )\n\n        # handle user-defined number of frames.\n        if \"-vframes\" in self.__extra_params:\n            self.__extra_params[\"-frames:v\"] = self.__extra_params.pop(\n                \"-vframes\", None\n            )\n        if \"-frames:v\" in self.__extra_params:\n            value = self.__extra_params.pop(\"-frames:v\", None)\n            if not (value is None) and value > 0:\n                output_params[\"-frames:v\"] = value\n\n        # dynamically calculate default raw-frames pixel format(if not assigned by user).\n        # notify FFmpeg `-pix_fmt` parameter cannot be assigned directly\n        if \"-pix_fmt\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-pix_fmt` value as it can only be assigned with `frame_format` parameter!\"\n            )\n            self.__extra_params.pop(\"-pix_fmt\", None)\n        # get supported FFmpeg pixfmt data with depth and bpp(bits-per-pixel)\n        self.__ff_pixfmt_metadata = get_supported_pixfmts(self.__ffmpeg)\n        supported_pixfmts = [fmts[0] for fmts in self.__ff_pixfmt_metadata]\n\n        # calculate default pixel-format\n        # Check special case  - 
`frame_format`(or `-pix_fmt`) parameter discarded from pipeline\n        self.__frame_format == \"null\" and logger.critical(\n            \"Manually discarding `frame_format`(or `-pix_fmt`) parameter from this pipeline.\"\n        )\n        # choose between rgb24(if available) or source pixel-format\n        # otherwise, only source pixel-format for special case\n        default_pixfmt = (\n            \"rgb24\"\n            if \"rgb24\" in supported_pixfmts and self.__frame_format != \"null\"\n            else self.__sourcer_metadata[\"source_video_pixfmt\"]\n        )\n        # assign output raw-frames pixel format\n        rawframe_pixfmt = None\n        if (\n            not (self.__frame_format is None)\n            and self.__frame_format in supported_pixfmts\n        ):\n            # check if valid and supported `frame_format` parameter assigned\n            rawframe_pixfmt = self.__frame_format.strip()\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame pixel-format will be used for this pipeline.\".format(\n                    rawframe_pixfmt\n                )\n            )\n        elif (\n            \"output_frames_pixfmt\"\n            in self.__sourcer_metadata  # means `format` filter is defined\n            and self.__sourcer_metadata[\"output_frames_pixfmt\"] in supported_pixfmts\n        ):\n            # assign if valid and supported\n            rawframe_pixfmt = self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ].strip()\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output pixel-format.\"\n            )\n        else:\n            # reset to default if not supported\n            rawframe_pixfmt = default_pixfmt\n            # log it accordingly\n            if self.__frame_format is None:\n                logger.info(\n                    \"Using default `{}` pixel-format for this 
pipeline.\".format(\n                        default_pixfmt\n                    )\n                )\n            else:\n                logger.warning(\n                    \"{} Switching to default `{}` pixel-format!\".format(\n                        \"Provided FFmpeg does not supports `{}` pixel-format.\".format(\n                            self.__sourcer_metadata[\"output_frames_pixfmt\"]\n                            if \"output_frames_pixfmt\" in self.__sourcer_metadata\n                            else self.__frame_format\n                        )\n                        if self.__frame_format != \"null\"\n                        else \"No usable pixel-format defined.\",\n                        default_pixfmt,\n                    )\n                )\n\n        # dynamically calculate raw-frame datatype based on pixel-format selected\n        (self.__raw_frame_depth, rawframesbpp) = [\n            (int(x[1]), int(x[2]))\n            for x in self.__ff_pixfmt_metadata\n            if x[0] == rawframe_pixfmt\n        ][0]\n        raw_bit_per_component = (\n            rawframesbpp // self.__raw_frame_depth if self.__raw_frame_depth else 0\n        )\n        if 4 <= raw_bit_per_component <= 8:\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n        elif 8 < raw_bit_per_component <= 16 and rawframe_pixfmt.endswith(\n            (\"le\", \"be\")\n        ):\n            if rawframe_pixfmt.endswith(\"le\"):\n                self.__raw_frame_dtype = np.dtype(\"<u2\")\n            else:\n                self.__raw_frame_dtype = np.dtype(\">u2\")\n        else:\n            # reset to both pixel-format and datatype to default if not supported\n            not (self.__frame_format is None) and logger.warning(\n                \"Selected pixel-format `{}` dtype is not supported by FFdecoder API. 
Switching to default `rgb24` pixel-format!\".format(\n                    rawframe_pixfmt\n                )\n            )\n            rawframe_pixfmt = \"rgb24\"\n            self.__raw_frame_dtype = np.dtype(\"u1\")\n\n        # Check if not special case\n        if self.__frame_format != \"null\":\n            # assign to FFmpeg pipeline otherwise\n            output_params[\"-pix_fmt\"] = rawframe_pixfmt\n        # assign to global parameter further usage\n        self.__raw_frame_pixfmt = rawframe_pixfmt\n        # also override as metadata(if available)\n        if \"output_frames_pixfmt\" in self.__sourcer_metadata:\n            self.__sourcer_metadata[\n                \"output_frames_pixfmt\"\n            ] = self.__raw_frame_pixfmt\n\n        # handle raw-frame resolution\n        # notify FFmpeg `-s` parameter cannot be assigned directly\n        if \"-s\" in self.__extra_params:\n            logger.warning(\n                \"Discarding user-defined `-s` FFmpeg parameter as it can only be assigned with `-custom_resolution` attribute! 
Read docs for more details.\"\n            )\n            self.__extra_params.pop(\"-s\", None)\n        # assign output rawframe resolution\n        if not (self.__custom_resolution is None) and not isinstance(\n            self.__custom_resolution, str\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            self.__raw_frame_resolution = self.__custom_resolution\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` frame resolution will be used for this pipeline.\".format(\n                    self.__raw_frame_resolution\n                )\n            )\n        elif (\n            \"output_frames_resolution\"\n            in self.__sourcer_metadata  # means `scale` filter is defined\n            and self.__sourcer_metadata[\"output_frames_resolution\"]\n            and len(self.__sourcer_metadata[\"output_frames_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on output.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"output_frames_resolution\"\n            ]\n        elif (\n            self.__sourcer_metadata[\"source_video_resolution\"]\n            and len(self.__sourcer_metadata[\"source_video_resolution\"]) == 2\n        ):\n            # calculate raw-frame resolution/dimensions based on source.\n            self.__raw_frame_resolution = self.__sourcer_metadata[\n                \"source_video_resolution\"\n            ]\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-custom_resolution` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n        # special mode to discard `-size/-s` FFmpeg parameter completely\n        if isinstance(self.__custom_resolution, str):\n            logger.critical(\n                \"Manually discarding `-size/-s` FFmpeg parameter from this pipeline.\"\n            )\n        else:\n            # add to pipeline\n            dimensions = \"{}x{}\".format(\n                self.__raw_frame_resolution[0], self.__raw_frame_resolution[1]\n            )\n            output_params[\"-s\"] = str(dimensions)\n        # log if filters or default source is used\n        self.__verbose_logs and (\n            self.__custom_resolution is None\n            or isinstance(self.__custom_resolution, str)\n        ) and logger.info(\n            \"{} for this pipeline for defining output resolution.\".format(\n                \"FFmpeg filter values will be used\"\n                if \"output_frames_resolution\" in self.__sourcer_metadata\n                else \"Default source resolution will be used\"\n            )\n        )\n\n        # dynamically calculate raw-frame framerate based on source (if not assigned by user).\n        if (\n            not isinstance(self.__inputframerate, str)\n            and self.__inputframerate > 0.0\n        ):\n            # assign if assigned by user and not \"null\"(str)\n            output_params[\"-framerate\"] = str(self.__inputframerate)\n            self.__verbose_logs and logger.info(\n                \"User-defined `{}` output framerate will be used for this pipeline.\".format(\n                    str(self.__inputframerate)\n                )\n            )\n        elif (\n            \"output_framerate\"\n            in self.__sourcer_metadata  # means `fps` filter is defined\n            and self.__sourcer_metadata[\"output_framerate\"] > 0.0\n        
):\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually discarding `-framerate/-r` FFmpeg parameter from this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on output\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"output_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"FFmpeg filter values will be used for this pipeline for defining output framerate.\"\n            )\n        elif self.__sourcer_metadata[\"source_video_framerate\"] > 0.0:\n            # special mode to discard `-framerate/-r` FFmpeg parameter completely\n            if self.__inputframerate == \"null\":\n                logger.critical(\n                    \"Manually disabling `-framerate/-r` FFmpeg parameter for this pipeline.\"\n                )\n            else:\n                # calculate raw-frame framerate based on source\n                output_params[\"-framerate\"] = str(\n                    self.__sourcer_metadata[\"source_video_framerate\"]\n                )\n            self.__verbose_logs and logger.info(\n                \"Default source framerate will be used for this pipeline for defining output framerate.\"\n            )\n        else:\n            # otherwise raise error\n            raise RuntimeError(\n                \"Both source and output metadata values found Invalid with {} `-framerate` attribute. 
Aborting!\".format(\n                    \"null\"\n                    if isinstance(self.__inputframerate, str)\n                    else \"undefined\"\n                )\n            )\n\n        # add rest to output parameters\n        output_params.update(self.__extra_params)\n\n        # dynamically calculate raw-frame numbers based on source (if not assigned by user).\n        # TODO Added support for `-re -stream_loop` and `-loop`\n        if \"-frames:v\" in input_params:\n            self.__raw_frame_num = input_params[\"-frames:v\"]\n        elif (\n            not (self.__sourcer_metadata[\"approx_video_nframes\"] is None)\n            and self.__sourcer_metadata[\"approx_video_nframes\"] > 0\n        ):\n            self.__raw_frame_num = self.__sourcer_metadata[\"approx_video_nframes\"]\n        else:\n            self.__raw_frame_num = None\n            # log that number of frames are unknown\n            self.__verbose_logs and logger.info(\n                \"Number of frames in given source are unknown. Live/Network/Looping stream detected!\"\n            )\n\n        # log Mode of Operation\n        self.__verbose_logs and logger.critical(\n            \"Activating {} Mode of Operation.\".format(\n                self.__supported_opmodes[self.__opmode]\n            )\n        )\n\n        # compose the Pipeline using formulated FFmpeg parameters\n        self.__launch_FFdecoderline(input_params, output_params)\n\n        # inform the initialization is completed\n        self.__initializing = False\n    else:\n        # warn if pipeline is recreated\n        logger.error(\"This pipeline is already created and running!\")\n    return self\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.generateFrame","title":"generateFrame(self)","text":"

This method returns a Generator function (also an Iterator using next()) of video frames, grabbed continuously from the buffer.

Source code in deffcode/ffdecoder.py
def generateFrame(self):\n    \"\"\"\n    This method returns a [Generator function](https://wiki.python.org/moin/Generators)\n    _(also an Iterator using `next()`)_ of video frames, grabbed continuously from the buffer.\n    \"\"\"\n    if self.__raw_frame_num is None or not self.__raw_frame_num:\n        while not self.__terminate_stream:  # infinite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n    else:\n        for _ in range(self.__raw_frame_num):  # finite raw frames\n            frame = self.__fetchNextFrame()\n            if frame is None:\n                self.__terminate_stream = True\n                break\n            yield frame\n
"},{"location":"reference/ffdecoder/#deffcode.ffdecoder.FFdecoder.terminate","title":"terminate(self)","text":"

Safely terminates all processes.

Source code in deffcode/ffdecoder.py
def terminate(self):\n    \"\"\"\n    Safely terminates all processes.\n    \"\"\"\n\n    # signal we are closing\n    self.__verbose_logs and logger.debug(\"Terminating FFdecoder Pipeline...\")\n    self.__terminate_stream = True\n    # check if no process was initiated at first place\n    if self.__process is None or not (self.__process.poll() is None):\n        logger.info(\"Pipeline already terminated.\")\n        return\n    # Attempt to close pipeline.\n    # close `stdin` output\n    self.__process.stdin and self.__process.stdin.close()\n    # close `stdout` output\n    self.__process.stdout and self.__process.stdout.close()\n    # terminate/kill process if still processing\n    if self.__process.poll() is None:\n        # demuxers prefer kill\n        self.__process.kill()\n    # wait if not exiting\n    self.__process.wait()\n    self.__process = None\n    logger.info(\"Pipeline terminated successfully.\")\n
"},{"location":"reference/ffdecoder/params/","title":"FFdecoder API Parameters","text":""},{"location":"reference/ffdecoder/params/#source","title":"source","text":"

This parameter defines the input source (-i) for decoding real-time frames.

FFdecoder API will throw Assertion if source provided is invalid or missing.

FFdecoder API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize and formulate the decoder with `foo.mp4` source\ndecoder = FFdecoder('/home/foo.mp4').formulate()\n

    Related usage recipes can be found here \u27b6

  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nffparams = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img%03d.png', verbose=True, **ffparams).formulate()\n
    # initialize and formulate the decoder\ndecoder = FFdecoder('img%03d.png').formulate()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img*.png', verbose=True, **sourcer_params).formulate()\n
    # define `-loop 1` for looping\nffparams = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('img.jpg', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Network Address: Valid (http(s), rtp, rstp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nffparams = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize and formulate the decoder with define parameters\ndecoder = FFdecoder('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **ffparams).formulate()\n

    Related usage recipes can be found here \u27b6

  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. One can easily Capture desired Camera Device in FFdecoder API by specifying its matching index value (use Sourcer API's enumerate_devices to list them) either as integer or string of integer type to its source parameter. For example, for capturing \"0\" index device on Windows, we can do as follows in FFdecoder API:

    Requirement for Index based Camera Device Capturing in FFdecoder API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieves Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be exactly the probed Camera Device index (use Sourcer API's enumerate_devices to list them).

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".
    Important Facts related to Camera Device Indexing
    • Camera Device indexes are 0-indexed. So the first device is at 0, the second is at 1, and so on. So if there are n devices, the last device is at n-1.
    • Camera Device indexes can be of either integer (e.g. 0,1, etc.) or string of integer (e.g. \"0\",\"1\", etc.) type.
    • Camera Device indexes can be negative (e.g. -1,-2, etc.), this means you can also start indexing from the end.
      • For example, If there are three devices:
        {0: 'Integrated Camera', 1: 'USB2.0 Camera', 2: 'DroidCam Source'}\n
      • Then, You can specify Positive Indexes and its Equivalent Negative Indexes as follows:

        Positive Indexes Equivalent Negative Indexes FFdecoder(\"0\").formulate() FFdecoder(\"-3\").formulate() FFdecoder(\"1\").formulate() FFdecoder(\"-2\").formulate() FFdecoder(\"2\").formulate() FFdecoder(\"-1\").formulate()

    Out of Index Camera Device index values will raise ValueError in FFdecoder API

    # initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipes can be found here \u27b6

  • Video Capture Device Name/Path: Valid video capture device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for capturing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in FFdecoder API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in FFdecoder API as follows:

      # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nffparams = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize and formulate the decoder with \"Camera\" source for BGR24 output\ndecoder = FFdecoder(\"Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all capture video devices such as from an USB webcam. You can refer following steps to identify and specify your capture video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

      You can use sudo apt install v4l-utils APT command to install the v4l2-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in FFdecoder API as follows:

      # initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Video Device's additional specifications: You can also specify additional specifications (such as pixel format(s), video format(s), framerate, and frame dimensions) supported by your Video Device as follows:

      You can use ffmpeg -f v4l2 -list_formats all -i /dev/video0 terminal command to list available specifications.

      # define video device specifications\nffparams = {\"-ffprefixes\":[\"-framerate\", \"25\", \"-video_size\", \"640x480\"]}\n\n# initialize and formulate the decoder with \"/dev/video0\" source for BGR24 output\ndecoder = FFdecoder(\"/dev/video0\", source_demuxer=\"v4l2\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: Then, You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in FFdecoder API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize and formulate the decoder with `1` index source for BGR24 output\ndecoder = FFdecoder(\"1\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to capture from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize and formulate the decoder with \"Integrated iSight-camera\" source for BGR24 output\ndecoder = FFdecoder(\"Integrated\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Specify Default Video device: You can also use the default device which is usually the first device in the listing by using \"default\" as source:

      # initialize and formulate the decoder with \"default\" source for BGR24 output\ndecoder = FFdecoder(\"default\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n

    If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

    # initialize and formulate the decoder with \"USB2.0 Camera\" source for BGR24 output\ndecoder = FFdecoder(\"USB2.0 Camera\", source_demuxer=\"dshow\", frame_format=\"bgr24\", verbose=True).formulate()\n

    Related usage recipe can be found here \u27b6

  • Screen Capturing/Recording: Valid screen capture device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. You can also specify additional specifications (such as limiting capture area to a region, setting capturing coordinates, whether to capture mouse pointer and clicks etc.). For example, for capturing \"0:\" indexed device with avfoundation source demuxer on MacOS along with mouse pointer and clicks, we can do as follows in FFdecoder API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for capturing different regions of your display:

    For Windows OS users dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the desktop screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-offset_x\", \"10\", \"-offset_y\", \"20\", # grab at position 10,20\n        \"-video_size\", \"640x480\", # frame size\n        \"-show_region\", \"1\", # show only region\n    ],\n}\n\n# initialize and formulate the decoder with \"desktop\" source for BGR24 output\ndecoder = FFdecoder(\"desktop\", source_demuxer=\"gdigrab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Linux OS users can use the x11grab to capture an X11 display. You can refer following steps to specify source for capturing different regions of your display:

    For X11 display, the source input has the syntax: \"display_number.screen_number[+x_offset,y_offset]\".

    • Capturing entire desktop: For capturing all your displays as one big contiguous display, you can specify source, suitable parameters and demuxers in FFdecoder API as follows:

      # define framerate\nffparams = {\"-framerate\": \"30\"}\n\n# initialize and formulate the decoder with \":0.0\" desktop source for BGR24 output\ndecoder = FFdecoder(\":0.0\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n
    • Capturing a region: If you want to limit capturing to a region, and show the area being grabbed, you can specify source and suitable parameters in FFdecoder API as follows:

      x_offset and y_offset specify the offsets of the grabbed area with respect to the top-left border of the X11 screen. They default to 0.

      # define suitable parameters\nffparams = {\n    \"-framerate\": \"30\", # input framerate\n    \"-ffprefixes\": [\n        \"-video_size\", \"1024x768\", # frame size\n    ],\n}\n\n# initialize and formulate the decoder with \":0.0\" desktop source(starting with the upper-left corner at x=10, y=20) \n# for BGR24 output\ndecoder = FFdecoder(\":0.0+10,20\", source_demuxer=\"x11grab\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for stream capturing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your capture video device's name or index on MacOS/OSX machines:

    QTKit is also available for stream capturing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: You can enumerate all the available input devices including screens ready to be captured using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Capturing entire desktop: Then, you can specify and initialize your located screens in FFdecoder API using its index shown:

      # initialize and formulate the decoder with `0:` index desktop screen for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True).formulate()\n
    • [OPTIONAL] Capturing mouse: You can also specify additional specifications to capture the mouse pointer and screen mouse clicks as follows:

      # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    If these steps don't work for you, then reach out to us on Gitter \u27b6 Community channel

    # define specifications\nffparams = {\"-ffprefixes\":[\"-capture_cursor\", \"1\", \"-capture_mouse_clicks\", \"0\"]}\n\n# initialize and formulate the decoder with \"0:\" source for BGR24 output\ndecoder = FFdecoder(\"0:\", source_demuxer=\"avfoundation\", frame_format=\"bgr24\", verbose=True, **ffparams).formulate()\n

    Related usage recipe can be found here \u27b6

  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and decoding Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in FFdecoder API:

    # initialize and formulate the decoder with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate for BGR24 output\ndecoder = FFdecoder(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).formulate()\n

    Related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, as well as lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use ffmpeg -demuxers terminal command to lists all FFmpeg supported demuxers.

Specifying source_demuxer for Index based Camera Device Capturing in FFdecoder API

For enabling Index based Camera Device Capturing in FFdecoder API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", frame_format=\"bgr24\").formulate()\n
# initialize and formulate the decoder with \"0\" index source for BGR24 output\ndecoder = FFdecoder(\"0\", source_demuxer=\"auto\", frame_format=\"bgr24\").formulate()\n

Related usage recipes can be found here \u27b6

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize and formulate the decoder with `dshow` demuxer\ndecoder = FFdecoder(\"foo.mp4\", source_demuxer=\"dshow\").formulate()\n

"},{"location":"reference/ffdecoder/params/#frame_format","title":"frame_format","text":"

This parameter selects the pixel format for output video frames (such as gray for grayscale output).

Any invalid or unsupported value to frame_format parameter will be discarded!

If the frame_format parameter receives any improper value (i.e. either null(special-case), undefined, or invalid type), then -pix_fmt FFmpeg parameter value in Decoding pipeline uses output_frames_pixfmt metadata property extracted from Output Stream. Thereby, in case no valid output_frames_pixfmt metadata property is found, the API finally defaults to Default pixel-format1 (calculated variably).

Use frame_format=\"null\" to manually discard -pix_fmt FFmpeg parameter entirely from Decoding pipeline.

This feature allows users to manually skip -pix_fmt FFmpeg parameter in Decoding pipeline, essentially for using only format ffmpeg filter values instead, or even better let FFmpeg itself choose the best available output frame pixel-format for the given source.

Data-Type: String

Default Value: Its default value is Default pixel-format1 (calculated variably).

Usage:

# initialize and formulate the decoder for grayscale frames\ndecoder = FFdecoder(\"foo.mp4\", frame_format=\"gray\").formulate()\n

Use ffmpeg -pix_fmts terminal command to lists all FFmpeg supported pixel formats.

Various Pixel formats related usage recipes can be found here \u27b6

"},{"location":"reference/ffdecoder/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then FFdecoder API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path (via. -custom_sourcer_params) exclusive parameter in FFdecoder API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in FFdecoder API:

# define suitable parameter to download at \"C:/User/foo/foo1\"\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n\n# initialize and formulate the decoder\nFFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nFFdecoder(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").formulate()\n

"},{"location":"reference/ffdecoder/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize and formulate decoder with verbose logs\nFFdecoder(\"foo.mp4\", verbose=True).formulate()\n

"},{"location":"reference/ffdecoder/params/#ffparams","title":"ffparams","text":"

This dictionary parameter accepts all supported parameters formatted as its attributes:

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/ffdecoder/params/#supported-parameters","title":"Supported Parameters","text":""},{"location":"reference/ffdecoder/params/#a-ffmpeg-parameters","title":"A. FFmpeg Parameters","text":"

Almost any FFmpeg parameter (supported by installed FFmpeg) can be passed as dictionary attributes in ffparams parameter.

Let's assume we want to seek to 00:00:01.45 (or 1450msec) in time and decode one single frame from given source (say foo.mp4) in FFdecoder API, then we can assign required FFmpeg parameters as dictionary attributes as follows:

Kindly read FFmpeg Docs carefully before passing any additional values to ffparams parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

# define the FFmpeg parameter to seek to 00:00:01.45(or 1s and 45msec)\n# in time and get one single frame\nffparams = {\"-ss\": \"00:00:01.45\", \"-frames:v\": 1}\n\n# initialize and formulate decoder with suitable source and FFmpeg params\ndecoder = FFdecoder(\"foo.mp4\", verbose=True, **ffparams).formulate()\n

"},{"location":"reference/ffdecoder/params/#b-exclusive-parameters","title":"B. Exclusive Parameters","text":"

In addition to FFmpeg parameters, FFdecoder API also supports few Exclusive Parameters to allow users to flexibly change its internal pipeline, properties, and handle some special FFmpeg parameters (such as repeated map) that cannot be assigned via. python dictionary.

These parameters are discussed below:

  • -vcodec (str) : This attribute works similar to -vcodec FFmpeg parameter for specifying supported decoders that are compiled with FFmpeg in use. If not specified, it's value is derived from source video metadata. Its usage is as follows:

    Use ffmpeg -decoders terminal command to lists all FFmpeg supported decoders.

    Use {\"-vcodec\":None} in ffparams to discard -vcodec FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -vcodec FFmpeg parameter in Decoding pipeline, for letting FFmpeg itself choose the best available video decoder for the given source.

    # define suitable parameter\nffparams = {\"-vcodec\": \"h264\"} # set decoder to `h264`\n

  • -framerate (float/int) : This attribute works similar to -framerate FFmpeg parameter for generating video-frames at specified framerate. If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -framerate attribute will be discarded!

    The output_frames_framerate metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -framerate parameter value (i.e. either null(special-case), undefined, or invalid type) , then -framerate/-r FFmpeg parameter value in Decoding pipeline uses output_frames_framerate metadata property extracted from Output Stream. Thereby, in case if no valid output_framerate metadata property is found, then API finally defaults to source_video_framerate metadata property extracted from Input Source Stream.

    In case neither output_framerate nor source_video_framerate valid metadata properties are found, then RuntimeError is raised.

    Use {\"-framerate\":\"null\"} in ffparams to discard -framerate/-r FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -framerate/-r FFmpeg parameter in Decoding pipeline, essentially for using only fps filter values, or even better, let FFmpeg itself choose the best available output framerate for the given source.

    # define suitable parameter\nffparams = {\"-framerate\": 60.0} # set input video source framerate to 60fps\n

  • -custom_resolution (tuple/list) : This attribute sets the custom resolution/size of the output frames. Its value can either be a tuple ((width,height)) or a list ([width, height]). If not specified, it is calculated from video metadata. Its usage is as follows:

    Any invalid or unsupported value to -custom_resolution attribute will be discarded!

    The output_frames_resolution metadata property is only available when FFmpeg filters via. -vf or -filter_complex are manually defined.

    Any improper -custom_resolution parameter value (i.e. either null(special-case), undefined, or invalid type) , then -s/-size FFmpeg parameter value in Decoding pipeline uses output_frames_resolution metadata property extracted from Output Stream. Thereby, in case if no valid output_frames_resolution metadata property is found, then API finally defaults to source_video_resolution metadata property extracted from Input Source Stream.

    In case neither output_frames_resolution nor source_video_resolution valid metadata properties are found, then RuntimeError is raised.

    Use {\"-custom_resolution\":\"null\"} in ffparams to discard -size/-s FFmpeg parameter entirely from Decoding pipeline.

    This feature allows users to manually skip -size/-s FFmpeg parameter in Decoding pipeline, essentially for using only fps filter values, or even better, let FFmpeg itself choose the best available output frames resolution for the given source.

    # define suitable parameter\nffparams = {\"-output_dimensions\": (1280,720)} # to produce a 1280x720 resolution/scale output video\n

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Difference from -clones parameter

    The -clones and -ffprefixes parameters, even though they fundamentally work the same, are meant to serve at different positions in the FFmpeg command. Normally, FFdecoder API pipeline looks something like the following with these parameters in place:

    ffmpeg {{-ffprefixes FFmpeg params}} -vcodec h264 -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 {{-clones FFmpeg params}} -f rawvideo -\n

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -clones (list): This attribute sets the special FFmpeg parameters that are repeated more than once or occur in a specific order (that cannot be altered) in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in FFdecoder's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nffparams = {\"-clones\": ['-map', '0:v:0', '-map', '1:a?']} \n\n# NOTE: Will be format as `ffmpeg -vcodec -i foo.mp4 -pix_fmt rgb24 -s 1280x720 -framerate 25.0 -map 0:v:0 -map 1:a -f rawvideo -`\n

  • -custom_sourcer_params (dict) : This attribute assigns all Exclusive Parameter meant for Sourcer API's sourcer_params dictionary parameter directly through FFdecoder API. Its usage is as follows:

    # define suitable parameter meant for `sourcer_params`\nffparams = {\"-custom_sourcer_params\": {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}}\n

  • -default_stream_indexes (list/tuple) : This attribute assigns a value directly to default_stream_indexes parameter in Sourcer API's probe_stream() method for selecting specific video and audio stream index in case of multiple ones. Value can be of format: (int,int) or [int,int] as follows:

    # define suitable parameter meant for `probe_stream()` method\nffparams = {\"-default_stream_indexes\": (0,1)} # (\"0th video stream\", \"1st audio stream\")\n

  • -enforce_cv_patch (bool) : This attribute can be enabled(True) for patching YUV pixel-formats (such as YUV420p, yuv444p, NV12, NV21 etc.) frames for seamless compatibility with OpenCV APIs such as imshow(), write() etc. It can be used as follows:

    As of now, YUV pixel-formats starting with YUV and NV are only supported.

    # define suitable parameter\nffparams = {\"-enforce_cv_patch\": True} # enables OpenCV patch for YUV frames\n

    YUV pixel-formats usage recipe can be found here \u27b6

  • -passthrough_audio (bool/list) : (Yet to be supported)

  1. Default pixel-format is calculated variably in FFdecoder API:

    • If frame_format != \"null\":
      • If frame_format parameter is valid and supported: Default pixel-format is frame_format parameter value.
      • If frame_format parameter is NOT valid or supported:
        • If output_frame_pixfmt metadata is available: Default pixel-format is output_frame_pixfmt metadata value.
        • If output_frame_pixfmt metadata is NOT available: Default pixel-format is rgb24 if supported otherwise source_video_pixfmt metadata value.
    • If frame_format == \"null\": Default pixel-format is source_video_pixfmt metadata value

    \u21a9\u21a9

"},{"location":"reference/sourcer/","title":"Sourcer API","text":"

Sourcer API acts as Source Probing Utility that, unlike other FFmpeg Wrappers which mostly use the ffprobe module, attempts to open the given Input Source directly with FFmpeg inside a subprocess pipe, and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each media stream contained in it.

Sourcer API primarily acts as a backend for FFdecoder API for gathering, processing, and validating all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in formulating its default FFmpeg pipeline parameters for real-time video-frames generation.

Sourcer API is designed as a standalone Metadata Extraction API for easily parsing information from multimedia streams available in the given Input Source and returns it in either Human-readable (JSON string) or Machine-readable (Dictionary object) type with its retrieve_metadata() method.

All metadata attributes available with Sourcer API(On Windows) are discussed here \u27b6.

Furthermore, Sourcer's sourcer_params dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.

For usage examples, kindly refer our Basic Recipes and Advanced Recipes

Sourcer API parameters are explained here \u27b6

Source code in deffcode/sourcer.py
class Sourcer:\n    \"\"\"\n    > Sourcer API acts as **Source Probing Utility** that unlike other FFmpeg Wrappers which mostly uses [`ffprobe`](https://ffmpeg.org/ffprobe.html) module,\n    attempts to open the given Input Source directly with [**FFmpeg**](https://ffmpeg.org/) inside a [`subprocess`](https://docs.python.org/3/library/subprocess.html) pipe,\n    and parses/probes the standard output(stdout) employing various pattern matching methods in order to recognize all the properties(metadata) of each\n    media stream contained in it.\n\n    Sourcer API primarily acts as a **backend for [FFdecoder API](../../reference/ffdecoder)** for gathering, processing, and validating\n    all multimedia streams metadata available in the given Input Source. Sourcer shares this information with FFdecoder API which helps in\n    formulating its default FFmpeg pipeline parameters for real-time video-frames generation.\n\n    Sourcer API is design as a standalone **Metadata Extraction API** for easily parsing information from multimedia streams available in the\n    given Input Source and returns it in either Human-readable _(JSON string)_ or Machine-readable _(Dictionary object)_ type with its\n    [`retrieve_metadata()`](#deffcode.sourcer.Sourcer.retrieve_metadata) method.\n\n    !!! info \"All metadata attributes available with Sourcer API(On :fontawesome-brands-windows: Windows) are discussed [here \u27b6](../../recipes/basic/#display-source-video-metadata).\"\n\n    Furthermore, Sourcer's [`sourcer_params`](params/#sourcer_params) dictionary parameter can be used to define almost any FFmpeg parameter as well as alter internal API settings.\n\n    !!! example \"For usage examples, kindly refer our **[Basic Recipes :cake:](../../recipes/basic)** and **[Advanced Recipes :croissant:](../../recipes/advanced)**\"\n\n    !!! 
info \"Sourcer API parameters are explained [here \u27b6](params/)\"\n    \"\"\"\n\n    def __init__(\n        self,\n        source,\n        source_demuxer=None,\n        custom_ffmpeg=\"\",\n        verbose=False,\n        **sourcer_params,\n    ):\n        \"\"\"\n        This constructor method initializes the object state and attributes of the Sourcer Class.\n\n        Parameters:\n            source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n            source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n            custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n            verbose (bool): enables/disables verbose.\n            sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n        \"\"\"\n        # checks if machine in-use is running windows os or not\n        self.__machine_OS = platform.system()\n\n        # define internal parameters\n        self.__verbose_logs = (  # enable verbose if specified\n            verbose if (verbose and isinstance(verbose, bool)) else False\n        )\n\n        # handle metadata received\n        self.__ffsp_output = None\n\n        # sanitize sourcer_params\n        self.__sourcer_params = {\n            str(k).strip(): (\n                str(v).strip()\n                if not isinstance(v, (dict, list, int, float, tuple))\n                else v\n            )\n            for k, v in sourcer_params.items()\n        }\n\n        # handle whether to force validate source\n        self.__forcevalidatesource = self.__sourcer_params.pop(\n            \"-force_validate_source\", False\n        )\n        if not isinstance(self.__forcevalidatesource, bool):\n            # reset improper values\n            self.__forcevalidatesource = False\n\n        # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n        
self.__ffmpeg_prefixes = self.__sourcer_params.pop(\"-ffprefixes\", [])\n        if not isinstance(self.__ffmpeg_prefixes, list):\n            # log it\n            logger.warning(\n                \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                    type(self.__ffmpeg_prefixes).__name__\n                )\n            )\n            # reset improper values\n            self.__ffmpeg_prefixes = []\n\n        # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n        __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n        if not isinstance(__ffmpeg_download_path, str):\n            # reset improper values\n            __ffmpeg_download_path = \"\"\n\n        # validate the FFmpeg assets and return location (also downloads static assets on windows)\n        self.__ffmpeg = get_valid_ffmpeg_path(\n            str(custom_ffmpeg),\n            True if self.__machine_OS == \"Windows\" else False,\n            ffmpeg_download_path=__ffmpeg_download_path,\n            verbose=self.__verbose_logs,\n        )\n\n        # check if valid FFmpeg path returned\n        if self.__ffmpeg:\n            self.__verbose_logs and logger.debug(\n                \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n            )\n        else:\n            # else raise error\n            raise RuntimeError(\n                \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. 
Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n            )\n\n        # sanitize externally accessible parameters and assign them\n        # handles source demuxer\n        if source is None:\n            # first check if source value is empty\n            # raise error if true\n            raise ValueError(\"Input `source` parameter is empty!\")\n        elif isinstance(source_demuxer, str):\n            # assign if valid demuxer value\n            self.__source_demuxer = source_demuxer.strip().lower()\n            # assign if valid demuxer value\n            assert self.__source_demuxer != \"auto\" or validate_device_index(\n                source\n            ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. Aborting!\".format(\n                source\n            )\n        else:\n            # otherwise find valid default source demuxer value\n            # enforce \"auto\" if valid index device\n            self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n            # log if not valid index device and invalid type\n            self.__verbose_logs and not self.__source_demuxer in [\n                \"auto\",\n                None,\n            ] and logger.warning(\n                \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                    type(source_demuxer).__name__\n                )\n            )\n            # log if not valid index device and invalid type\n            self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n                \"Given source `{}` is a valid device index. 
Enforcing 'auto' demuxer.\".format(\n                    source\n                )\n            )\n\n        # handles source stream\n        self.__source = source\n\n        # creates shallow copy for further usage #TODO\n        self.__source_org = copy.copy(self.__source)\n        self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n        # handles all extracted devices names/paths list\n        # when source_demuxer = \"auto\"\n        self.__extracted_devices_list = []\n\n        # various source stream params\n        self.__default_video_resolution = \"\"  # handles stream resolution\n        self.__default_video_orientation = \"\"  # handles stream's video orientation\n        self.__default_video_framerate = \"\"  # handles stream framerate\n        self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n        self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n        self.__default_video_decoder = \"\"  # handles stream's video decoder\n        self.__default_source_duration = \"\"  # handles stream's video duration\n        self.__approx_video_nframes = \"\"  # handles approx stream frame number\n        self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n        self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n        # handle various stream flags\n        self.__contains_video = False  # contains video\n        self.__contains_audio = False  # contains audio\n        self.__contains_images = False  # contains image-sequence\n\n        # handles output parameters through filters\n        self.__metadata_output = None  # handles output stream metadata\n        self.__output_frames_resolution = \"\"  # handles output stream resolution\n        self.__output_framerate = \"\"  # handles output stream framerate\n        self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n        self.__output_orientation = \"\"  # handles output frame 
orientation\n\n        # check whether metadata probed or not?\n        self.__metadata_probed = False\n\n    def probe_stream(self, default_stream_indexes=(0, 0)):\n        \"\"\"\n        This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n        Parameters:\n            default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n        **Returns:** Reference to the instance object.\n        \"\"\"\n        assert (\n            isinstance(default_stream_indexes, (list, tuple))\n            and len(default_stream_indexes) == 2\n            and all(isinstance(x, int) for x in default_stream_indexes)\n        ), \"Invalid default_stream_indexes value!\"\n        # validate source and extract metadata\n        self.__ffsp_output = self.__validate_source(\n            self.__source,\n            source_demuxer=self.__source_demuxer,\n            forced_validate=(\n                self.__forcevalidatesource if self.__source_demuxer is None else True\n            ),\n        )\n        # parse resolution and framerate\n        video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0]\n        )\n        if video_rfparams:\n            self.__default_video_resolution = video_rfparams[\"resolution\"]\n            self.__default_video_framerate = video_rfparams[\"framerate\"]\n            self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n        # parse output parameters through filters (if available)\n        if not (self.__metadata_output is None):\n            # parse output resolution and framerate\n            out_video_rfparams = self.__extract_resolution_framerate(\n                default_stream=default_stream_indexes[0], 
extract_output=True\n            )\n            if out_video_rfparams:\n                self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n                self.__output_framerate = out_video_rfparams[\"framerate\"]\n                self.__output_orientation = out_video_rfparams[\"orientation\"]\n            # parse output pixel-format\n            self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n                default_stream=default_stream_indexes[0], extract_output=True\n            )\n\n        # parse pixel-format\n        self.__default_video_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0]\n        )\n\n        # parse video decoder\n        self.__default_video_decoder = self.__extract_video_decoder(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse rest of metadata\n        if not self.__contains_images:\n            # parse video bitrate\n            self.__default_video_bitrate = self.__extract_video_bitrate(\n                default_stream=default_stream_indexes[0]\n            )\n            # parse audio bitrate and samplerate\n            audio_params = self.__extract_audio_bitrate_nd_samplerate(\n                default_stream=default_stream_indexes[1]\n            )\n            if audio_params:\n                self.__default_audio_bitrate = audio_params[\"bitrate\"]\n                self.__default_audio_samplerate = audio_params[\"samplerate\"]\n            # parse video duration\n            self.__default_source_duration = self.__extract_duration()\n            # calculate all flags\n            if (\n                self.__default_video_bitrate\n                or (self.__default_video_framerate and self.__default_video_resolution)\n            ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n                self.__contains_video = True\n                self.__contains_audio = True\n            elif 
self.__default_video_bitrate or (\n                self.__default_video_framerate and self.__default_video_resolution\n            ):\n                self.__contains_video = True\n            elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n                self.__contains_audio = True\n            else:\n                raise ValueError(\n                    \"Invalid source with no decodable audio or video stream provided. Aborting!\"\n                )\n        # calculate approximate number of video frame\n        if self.__default_video_framerate and self.__default_source_duration:\n            self.__approx_video_nframes = np.rint(\n                self.__default_video_framerate * self.__default_source_duration\n            ).astype(int, casting=\"unsafe\")\n\n        # signal metadata has been probed\n        self.__metadata_probed = True\n\n        # return reference to the instance object.\n        return self\n\n    def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n        \"\"\"\n        This method returns Parsed/Probed Metadata of the given source.\n\n        Parameters:\n            pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n            force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n        **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! 
Check if you called `probe_stream()` method.\"\n        # log it\n        self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n        # create metadata dictionary from information populated in private class variables\n        metadata = {\n            \"ffmpeg_binary_path\": self.__ffmpeg,\n            \"source\": self.__source,\n        }\n        metadata_missing = {}\n        # Only either `source_demuxer` or `source_extension` attribute can be\n        # present in metadata.\n        if self.__source_demuxer is None:\n            metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n        else:\n            metadata.update({\"source_demuxer\": self.__source_demuxer})\n            # update missing\n            force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n        # add source video metadata properties\n        metadata.update(\n            {\n                \"source_video_resolution\": self.__default_video_resolution,\n                \"source_video_pixfmt\": self.__default_video_pixfmt,\n                \"source_video_framerate\": self.__default_video_framerate,\n                \"source_video_orientation\": self.__default_video_orientation,\n                \"source_video_decoder\": self.__default_video_decoder,\n                \"source_duration_sec\": self.__default_source_duration,\n                \"approx_video_nframes\": (\n                    int(self.__approx_video_nframes)\n                    if self.__approx_video_nframes\n                    and not any(\n                        \"loop\" in x for x in self.__ffmpeg_prefixes\n                    )  # check if any loops in prefix\n                    and not any(\n                        \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                    )  # check if any loops in filters\n       
             else None\n                ),\n                \"source_video_bitrate\": self.__default_video_bitrate,\n                \"source_audio_bitrate\": self.__default_audio_bitrate,\n                \"source_audio_samplerate\": self.__default_audio_samplerate,\n                \"source_has_video\": self.__contains_video,\n                \"source_has_audio\": self.__contains_audio,\n                \"source_has_image_sequence\": self.__contains_images,\n            }\n        )\n        # add output metadata properties (if available)\n        if not (self.__metadata_output is None):\n            metadata.update(\n                {\n                    \"output_frames_resolution\": self.__output_frames_resolution,\n                    \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                    \"output_framerate\": self.__output_framerate,\n                    \"output_orientation\": self.__output_orientation,\n                }\n            )\n        else:\n            # since output stream metadata properties are only available when additional\n            # FFmpeg parameters(such as filters) are defined manually, thereby missing\n            # output stream properties are handled by assigning them counterpart source\n            # stream metadata property values\n            force_retrieve_missing and metadata_missing.update(\n                {\n                    \"output_frames_resolution\": self.__default_video_resolution,\n                    \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                    \"output_framerate\": self.__default_video_framerate,\n                    \"output_orientation\": self.__default_video_orientation,\n                }\n            )\n        # log it\n        self.__verbose_logs and logger.debug(\n            \"Metadata Extraction completed successfully!\"\n        )\n        # parse as JSON string(`json.dumps`), if defined\n        metadata = json.dumps(metadata, indent=2) if 
pretty_json else metadata\n        metadata_missing = (\n            json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n        )\n        # return `metadata` or `(metadata, metadata_missing)`\n        return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n\n    @property\n    def enumerate_devices(self):\n        \"\"\"\n        A property object that enumerate all probed Camera Devices connected to your system names\n        along with their respective \"device indexes\" or \"camera indexes\" as python dictionary.\n\n        **Returns:** Probed Camera Devices as python dictionary.\n        \"\"\"\n        # check if metadata has been probed or not\n        assert (\n            self.__metadata_probed\n        ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n\n        # log if specified\n        self.__verbose_logs and logger.debug(\"Enumerating all probed Camera Devices.\")\n\n        # return probed Camera Devices as python dictionary.\n        return {\n            dev_idx: dev for dev_idx, dev in enumerate(self.__extracted_devices_list)\n        }\n\n    def __validate_source(self, source, source_demuxer=None, forced_validate=False):\n        \"\"\"\n        This Internal method validates source and extracts its metadata.\n\n        Parameters:\n            source_demuxer(str): specifies the demuxer(`-f`) for the input source.\n            forced_validate (bool): whether to skip validation tests or not?\n\n        **Returns:** `True` if passed tests else `False`.\n        \"\"\"\n        logger.critical(\n            \"{} :: {} :: {}\".format(\n                source_demuxer,\n                source_demuxer in get_supported_demuxers(self.__ffmpeg),\n                get_supported_demuxers(self.__ffmpeg),\n            )\n        )\n        # validate source demuxer(if defined)\n        if not (source_demuxer is None):\n            # check if \"auto\" demuxer is 
specified\n            if source_demuxer == \"auto\":\n                # integerise source to get index\n                index = int(source)\n                # extract devices list and actual demuxer value\n                (\n                    self.__extracted_devices_list,\n                    source_demuxer,\n                ) = extract_device_n_demuxer(\n                    self.__ffmpeg,\n                    machine_OS=self.__machine_OS,\n                    verbose=self.__verbose_logs,\n                )\n                # valid indexes range\n                valid_indexes = [\n                    x\n                    for x in range(\n                        -len(self.__extracted_devices_list),\n                        len(self.__extracted_devices_list),\n                    )\n                ]\n                # check index is within valid range\n                if self.__extracted_devices_list and index in valid_indexes:\n                    # overwrite actual source device name/path/index\n                    if self.__machine_OS == \"Windows\":\n                        # Windows OS requires \"video=\" suffix\n                        self.__source = source = \"video={}\".format(\n                            self.__extracted_devices_list[index]\n                        )\n                    elif self.__machine_OS == \"Darwin\":\n                        # Darwin OS requires only device indexes\n                        self.__source = source = (\n                            str(index)\n                            if index >= 0\n                            else str(len(self.__extracted_devices_list) + index)\n                        )\n                    else:\n                        # Linux OS require /dev/video format\n                        self.__source = source = next(\n                            iter(self.__extracted_devices_list[index].keys())\n                        )\n                    # overwrite source_demuxer global variable\n           
         self.__source_demuxer = source_demuxer\n                    self.__verbose_logs and logger.debug(\n                        \"Successfully configured device `{}` at index `{}` with demuxer `{}`.\".format(\n                            (\n                                self.__extracted_devices_list[index]\n                                if self.__machine_OS != \"Linux\"\n                                else next(\n                                    iter(self.__extracted_devices_list[index].values())\n                                )[0]\n                            ),\n                            (\n                                index\n                                if index >= 0\n                                else len(self.__extracted_devices_list) + index\n                            ),\n                            self.__source_demuxer,\n                        )\n                    )\n                else:\n                    # raise error otherwise\n                    raise ValueError(\n                        \"Given source `{}` is not a valid device index. Possible values index values can be: {}\".format(\n                            source,\n                            \",\".join(f\"{x}\" for x in valid_indexes),\n                        )\n                    )\n            # otherwise validate against supported demuxers\n            elif not (source_demuxer in get_supported_demuxers(self.__ffmpeg)):\n                # raise if fails\n                raise ValueError(\n                    \"Installed FFmpeg failed to recognize `{}` demuxer. 
Check `source_demuxer` parameter value again!\".format(\n                        source_demuxer\n                    )\n                )\n            else:\n                pass\n\n        # assert if valid source\n        assert source and isinstance(\n            source, str\n        ), \"Input `source` parameter is of invalid type!\"\n\n        # Differentiate input\n        if forced_validate:\n            source_demuxer is None and logger.critical(\n                \"Forcefully passing validation test for given source!\"\n            )\n            self.__source = source\n        elif os.path.isfile(source):\n            self.__source = os.path.abspath(source)\n        elif is_valid_image_seq(\n            self.__ffmpeg, source=source, verbose=self.__verbose_logs\n        ):\n            self.__source = source\n            self.__contains_images = True\n        elif is_valid_url(self.__ffmpeg, url=source, verbose=self.__verbose_logs):\n            self.__source = source\n        else:\n            logger.error(\"`source` value is unusable or unsupported!\")\n            # discard the value otherwise\n            raise ValueError(\"Input source is invalid. 
Aborting!\")\n        # format command\n        if self.__sourcer_params:\n            # handle additional params separately\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + [\"-t\", \"0.0001\"]\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n                + dict2Args(self.__sourcer_params)\n                + [\"-f\", \"null\", \"-\"]\n            )\n        else:\n            meta_cmd = (\n                [self.__ffmpeg]\n                + ([\"-hide_banner\"] if not self.__verbose_logs else [])\n                + self.__ffmpeg_prefixes\n                + ([\"-f\", source_demuxer] if source_demuxer else [])\n                + [\"-i\", source]\n            )\n        # extract metadata, decode, and filter\n        metadata = (\n            check_sp_output(\n                meta_cmd,\n                force_retrieve_stderr=True,\n            )\n            .decode(\"utf-8\")\n            .strip()\n        )\n        # separate input and output metadata (if available)\n        if \"Output #\" in metadata:\n            (metadata, self.__metadata_output) = metadata.split(\"Output #\")\n        # return metadata based on params\n        return metadata\n\n    def __extract_video_bitrate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream bitrate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video bitrate as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        video_bitrate_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if video_bitrate_text:\n            
selected_stream = video_bitrate_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(video_bitrate_text)\n                    else 0\n                )\n            ]\n            filtered_bitrate = re.findall(\n                r\",\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            if len(filtered_bitrate):\n                default_video_bitrate = filtered_bitrate[0].split(\" \")[1:3]\n                final_bitrate = \"{}{}\".format(\n                    int(default_video_bitrate[0].strip()),\n                    \"k\" if (default_video_bitrate[1].strip().startswith(\"k\")) else \"M\",\n                )\n                return final_bitrate\n        return \"\"\n\n    def __extract_video_decoder(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default video-stream decoder from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video decoder as string value.\n        \"\"\"\n        assert isinstance(default_stream, int), \"Invalid input!\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if meta_text:\n            selected_stream = meta_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n            filtered_pixfmt = re.findall(\n                r\"Video:\\s[a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_video_pixfmt(self, default_stream=0, extract_output=False):\n  
      \"\"\"\n        This Internal method parses default video-stream pixel-format from metadata.\n\n        Parameters:\n            default_stream (int): selects specific video-stream in case of multiple ones.\n\n        **Returns:** Default Video pixel-format as string value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        if meta_text:\n            selected_stream = meta_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n            filtered_pixfmt = re.findall(\n                r\",\\s[a-z][a-z0-9_-]*\", selected_stream.strip()\n            )\n            if filtered_pixfmt:\n                return filtered_pixfmt[0].split(\" \")[-1]\n        return \"\"\n\n    def __extract_audio_bitrate_nd_samplerate(self, default_stream=0):\n        \"\"\"\n        This Internal method parses default audio-stream bitrate and sample-rate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n\n        **Returns:** Default Audio-stream bitrate and sample-rate as string value.\n        \"\"\"\n        identifiers = [\"Audio:\", \"Stream #\"]\n        meta_text = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n     
           (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n            # filter data\n            filtered_audio_bitrate = re.findall(\n                r\"fltp,\\s[0-9]+\\s\\w\\w[\\/]s\", selected_stream.strip()\n            )\n            filtered_audio_samplerate = re.findall(\n                r\",\\s[0-9]+\\sHz\", selected_stream.strip()\n            )\n            # get audio bitrate metadata\n            if filtered_audio_bitrate:\n                filtered = filtered_audio_bitrate[0].split(\" \")[1:3]\n                result[\"bitrate\"] = \"{}{}\".format(\n                    int(filtered[0].strip()),\n                    \"k\" if (filtered[1].strip().startswith(\"k\")) else \"M\",\n                )\n            else:\n                result[\"bitrate\"] = \"\"\n            # get audio samplerate metadata\n            result[\"samplerate\"] = (\n                filtered_audio_samplerate[0].split(\", \")[1]\n                if filtered_audio_samplerate\n                else \"\"\n            )\n        return result if result and (len(result) == 2) else {}\n\n    def __extract_resolution_framerate(self, default_stream=0, extract_output=False):\n        \"\"\"\n        This Internal method parses default video-stream resolution, orientation, and framerate from metadata.\n\n        Parameters:\n            default_stream (int): selects specific audio-stream in case of multiple ones.\n            extract_output (bool): Whether to extract from output(if true) or input(if false) stream?\n\n        **Returns:** Default Video resolution and framerate as dictionary value.\n        \"\"\"\n        identifiers = [\"Video:\", \"Stream #\"]\n        # use output metadata if available\n        meta_text = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in 
line for x in identifiers)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers)\n            ]\n        )\n        # extract video orientation metadata if available\n        identifiers_orientation = [\"displaymatrix:\", \"rotation\"]\n        meta_text_orientation = (\n            [\n                line.strip()\n                for line in self.__ffsp_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n            if not extract_output\n            else [\n                line.strip()\n                for line in self.__metadata_output.split(\"\\n\")\n                if all(x in line for x in identifiers_orientation)\n            ]\n        )\n        # use metadata if available\n        result = {}\n        if meta_text:\n            selected_stream = meta_text[\n                (\n                    default_stream\n                    if default_stream > 0 and default_stream < len(meta_text)\n                    else 0\n                )\n            ]\n\n            # filter data\n            filtered_resolution = re.findall(\n                r\"([1-9]\\d+)x([1-9]\\d+)\", selected_stream.strip()\n            )\n            filtered_framerate = re.findall(\n                r\"\\d+(?:\\.\\d+)?\\sfps\", selected_stream.strip()\n            )\n            filtered_tbr = re.findall(r\"\\d+(?:\\.\\d+)?\\stbr\", selected_stream.strip())\n\n            # extract framerate metadata\n            if filtered_framerate:\n                # calculate actual framerate\n                result[\"framerate\"] = float(\n                    re.findall(r\"[\\d\\.\\d]+\", filtered_framerate[0])[0]\n                )\n            elif filtered_tbr:\n                # guess from TBR(if fps unavailable)\n                result[\"framerate\"] = float(\n             
       re.findall(r\"[\\d\\.\\d]+\", filtered_tbr[0])[0]\n                )\n\n            # extract resolution metadata\n            if filtered_resolution:\n                result[\"resolution\"] = [int(x) for x in filtered_resolution[0]]\n\n            # extract video orientation metadata\n            if meta_text_orientation:\n                selected_stream = meta_text_orientation[\n                    (\n                        default_stream\n                        if default_stream > 0 and default_stream < len(meta_text)\n                        else 0\n                    )\n                ]\n                filtered_orientation = re.findall(\n                    r\"[-]?\\d+\\.\\d+\", selected_stream.strip()\n                )\n                result[\"orientation\"] = float(filtered_orientation[0])\n            else:\n                result[\"orientation\"] = 0.0\n\n        return result if result and (len(result) == 3) else {}\n\n    def __extract_duration(self, inseconds=True):\n        \"\"\"\n        This Internal method parses stream duration from metadata.\n\n        Parameters:\n            inseconds (bool): whether to parse time in second(s) or `HH::mm::ss`?\n\n        **Returns:** Default Stream duration as string value.\n        \"\"\"\n        identifiers = [\"Duration:\"]\n        stripped_data = [\n            line.strip()\n            for line in self.__ffsp_output.split(\"\\n\")\n            if all(x in line for x in identifiers)\n        ]\n        if stripped_data:\n            t_duration = re.findall(\n                r\"(?:[01]\\d|2[0123]):(?:[012345]\\d):(?:[012345]\\d+(?:\\.\\d+)?)\",\n                stripped_data[0],\n            )\n            if t_duration:\n                return (\n                    sum(\n                        float(x) * 60**i\n                        for i, x in enumerate(reversed(t_duration[0].split(\":\")))\n                    )\n                    if inseconds\n                    else t_duration\n   
             )\n        return 0\n

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.enumerate_devices","title":"enumerate_devices property readonly","text":"

A property object that enumerates all probed Camera Devices connected to your system, along with their respective \"device indexes\" or \"camera indexes\", as a python dictionary.

Returns: Probed Camera Devices as python dictionary.

"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.__init__","title":"__init__(self, source, source_demuxer=None, custom_ffmpeg='', verbose=False, **sourcer_params) special","text":"

This constructor method initializes the object state and attributes of the Sourcer Class.

Parameters:

Name Type Description Default source str

defines the input(-i) source filename/URL/device-name/device-path.

required source_demuxer str

specifies the demuxer(-f) for the input source.

None custom_ffmpeg str

assigns the location of custom path/directory for custom FFmpeg executable.

'' verbose bool

enables/disables verbose.

False sourcer_params dict

provides the flexibility to control supported internal and FFmpeg parameters.

{} Source code in deffcode/sourcer.py
def __init__(\n    self,\n    source,\n    source_demuxer=None,\n    custom_ffmpeg=\"\",\n    verbose=False,\n    **sourcer_params,\n):\n    \"\"\"\n    This constructor method initializes the object state and attributes of the Sourcer Class.\n\n    Parameters:\n        source (str): defines the input(`-i`) source filename/URL/device-name/device-path.\n        source_demuxer (str): specifies the demuxer(`-f`) for the input source.\n        custom_ffmpeg (str): assigns the location of custom path/directory for custom FFmpeg executable.\n        verbose (bool): enables/disables verbose.\n        sourcer_params (dict): provides the flexibility to control supported internal and FFmpeg parameters.\n    \"\"\"\n    # checks if machine in-use is running windows os or not\n    self.__machine_OS = platform.system()\n\n    # define internal parameters\n    self.__verbose_logs = (  # enable verbose if specified\n        verbose if (verbose and isinstance(verbose, bool)) else False\n    )\n\n    # handle metadata received\n    self.__ffsp_output = None\n\n    # sanitize sourcer_params\n    self.__sourcer_params = {\n        str(k).strip(): (\n            str(v).strip()\n            if not isinstance(v, (dict, list, int, float, tuple))\n            else v\n        )\n        for k, v in sourcer_params.items()\n    }\n\n    # handle whether to force validate source\n    self.__forcevalidatesource = self.__sourcer_params.pop(\n        \"-force_validate_source\", False\n    )\n    if not isinstance(self.__forcevalidatesource, bool):\n        # reset improper values\n        self.__forcevalidatesource = False\n\n    # handle user defined ffmpeg pre-headers(parameters such as `-re`) parameters (must be a list)\n    self.__ffmpeg_prefixes = self.__sourcer_params.pop(\"-ffprefixes\", [])\n    if not isinstance(self.__ffmpeg_prefixes, list):\n        # log it\n        logger.warning(\n            \"Discarding invalid `-ffprefixes` value of wrong type `{}`!\".format(\n                
type(self.__ffmpeg_prefixes).__name__\n            )\n        )\n        # reset improper values\n        self.__ffmpeg_prefixes = []\n\n    # handle where to save the downloaded FFmpeg Static assets on Windows(if specified)\n    __ffmpeg_download_path = self.__sourcer_params.pop(\"-ffmpeg_download_path\", \"\")\n    if not isinstance(__ffmpeg_download_path, str):\n        # reset improper values\n        __ffmpeg_download_path = \"\"\n\n    # validate the FFmpeg assets and return location (also downloads static assets on windows)\n    self.__ffmpeg = get_valid_ffmpeg_path(\n        str(custom_ffmpeg),\n        True if self.__machine_OS == \"Windows\" else False,\n        ffmpeg_download_path=__ffmpeg_download_path,\n        verbose=self.__verbose_logs,\n    )\n\n    # check if valid FFmpeg path returned\n    if self.__ffmpeg:\n        self.__verbose_logs and logger.debug(\n            \"Found valid FFmpeg executable: `{}`.\".format(self.__ffmpeg)\n        )\n    else:\n        # else raise error\n        raise RuntimeError(\n            \"[DeFFcode:ERROR] :: Failed to find FFmpeg assets on this system. Kindly compile/install FFmpeg or provide a valid custom FFmpeg binary path!\"\n        )\n\n    # sanitize externally accessible parameters and assign them\n    # handles source demuxer\n    if source is None:\n        # first check if source value is empty\n        # raise error if true\n        raise ValueError(\"Input `source` parameter is empty!\")\n    elif isinstance(source_demuxer, str):\n        # assign if valid demuxer value\n        self.__source_demuxer = source_demuxer.strip().lower()\n        # assign if valid demuxer value\n        assert self.__source_demuxer != \"auto\" or validate_device_index(\n            source\n        ), \"Invalid `source_demuxer='auto'` value detected with source: `{}`. 
Aborting!\".format(\n            source\n        )\n    else:\n        # otherwise find valid default source demuxer value\n        # enforce \"auto\" if valid index device\n        self.__source_demuxer = \"auto\" if validate_device_index(source) else None\n        # log if not valid index device and invalid type\n        self.__verbose_logs and not self.__source_demuxer in [\n            \"auto\",\n            None,\n        ] and logger.warning(\n            \"Discarding invalid `source_demuxer` parameter value of wrong type: `{}`\".format(\n                type(source_demuxer).__name__\n            )\n        )\n        # log if not valid index device and invalid type\n        self.__verbose_logs and self.__source_demuxer == \"auto\" and logger.critical(\n            \"Given source `{}` is a valid device index. Enforcing 'auto' demuxer.\".format(\n                source\n            )\n        )\n\n    # handles source stream\n    self.__source = source\n\n    # creates shallow copy for further usage #TODO\n    self.__source_org = copy.copy(self.__source)\n    self.__source_demuxer_org = copy.copy(self.__source_demuxer)\n\n    # handles all extracted devices names/paths list\n    # when source_demuxer = \"auto\"\n    self.__extracted_devices_list = []\n\n    # various source stream params\n    self.__default_video_resolution = \"\"  # handles stream resolution\n    self.__default_video_orientation = \"\"  # handles stream's video orientation\n    self.__default_video_framerate = \"\"  # handles stream framerate\n    self.__default_video_bitrate = \"\"  # handles stream's video bitrate\n    self.__default_video_pixfmt = \"\"  # handles stream's video pixfmt\n    self.__default_video_decoder = \"\"  # handles stream's video decoder\n    self.__default_source_duration = \"\"  # handles stream's video duration\n    self.__approx_video_nframes = \"\"  # handles approx stream frame number\n    self.__default_audio_bitrate = \"\"  # handles stream's audio bitrate\n    
self.__default_audio_samplerate = \"\"  # handles stream's audio samplerate\n\n    # handle various stream flags\n    self.__contains_video = False  # contains video\n    self.__contains_audio = False  # contains audio\n    self.__contains_images = False  # contains image-sequence\n\n    # handles output parameters through filters\n    self.__metadata_output = None  # handles output stream metadata\n    self.__output_frames_resolution = \"\"  # handles output stream resolution\n    self.__output_framerate = \"\"  # handles output stream framerate\n    self.__output_frames_pixfmt = \"\"  # handles output frame pixel format\n    self.__output_orientation = \"\"  # handles output frame orientation\n\n    # check whether metadata probed or not?\n    self.__metadata_probed = False\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.probe_stream","title":"probe_stream(self, default_stream_indexes=(0, 0))","text":"

This method Parses/Probes FFmpeg subprocess pipe's Standard Output for given input source and Populates the information in private class variables.

Parameters:

Name Type Description Default default_stream_indexes list, tuple

selects specific video and audio stream index in case of multiple ones. Value can be of format: (int,int). For example (0,1) is (\"0th video stream\", \"1st audio stream\").

(0, 0)

Returns: Reference to the instance object.

Source code in deffcode/sourcer.py
def probe_stream(self, default_stream_indexes=(0, 0)):\n    \"\"\"\n    This method Parses/Probes FFmpeg `subprocess` pipe's Standard Output for given input source and Populates the information in private class variables.\n\n    Parameters:\n        default_stream_indexes (list, tuple): selects specific video and audio stream index in case of multiple ones. Value can be of format: `(int,int)`. For example `(0,1)` is (\"0th video stream\", \"1st audio stream\").\n\n    **Returns:** Reference to the instance object.\n    \"\"\"\n    assert (\n        isinstance(default_stream_indexes, (list, tuple))\n        and len(default_stream_indexes) == 2\n        and all(isinstance(x, int) for x in default_stream_indexes)\n    ), \"Invalid default_stream_indexes value!\"\n    # validate source and extract metadata\n    self.__ffsp_output = self.__validate_source(\n        self.__source,\n        source_demuxer=self.__source_demuxer,\n        forced_validate=(\n            self.__forcevalidatesource if self.__source_demuxer is None else True\n        ),\n    )\n    # parse resolution and framerate\n    video_rfparams = self.__extract_resolution_framerate(\n        default_stream=default_stream_indexes[0]\n    )\n    if video_rfparams:\n        self.__default_video_resolution = video_rfparams[\"resolution\"]\n        self.__default_video_framerate = video_rfparams[\"framerate\"]\n        self.__default_video_orientation = video_rfparams[\"orientation\"]\n\n    # parse output parameters through filters (if available)\n    if not (self.__metadata_output is None):\n        # parse output resolution and framerate\n        out_video_rfparams = self.__extract_resolution_framerate(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n        if out_video_rfparams:\n            self.__output_frames_resolution = out_video_rfparams[\"resolution\"]\n            self.__output_framerate = out_video_rfparams[\"framerate\"]\n            
self.__output_orientation = out_video_rfparams[\"orientation\"]\n        # parse output pixel-format\n        self.__output_frames_pixfmt = self.__extract_video_pixfmt(\n            default_stream=default_stream_indexes[0], extract_output=True\n        )\n\n    # parse pixel-format\n    self.__default_video_pixfmt = self.__extract_video_pixfmt(\n        default_stream=default_stream_indexes[0]\n    )\n\n    # parse video decoder\n    self.__default_video_decoder = self.__extract_video_decoder(\n        default_stream=default_stream_indexes[0]\n    )\n    # parse rest of metadata\n    if not self.__contains_images:\n        # parse video bitrate\n        self.__default_video_bitrate = self.__extract_video_bitrate(\n            default_stream=default_stream_indexes[0]\n        )\n        # parse audio bitrate and samplerate\n        audio_params = self.__extract_audio_bitrate_nd_samplerate(\n            default_stream=default_stream_indexes[1]\n        )\n        if audio_params:\n            self.__default_audio_bitrate = audio_params[\"bitrate\"]\n            self.__default_audio_samplerate = audio_params[\"samplerate\"]\n        # parse video duration\n        self.__default_source_duration = self.__extract_duration()\n        # calculate all flags\n        if (\n            self.__default_video_bitrate\n            or (self.__default_video_framerate and self.__default_video_resolution)\n        ) and (self.__default_audio_bitrate or self.__default_audio_samplerate):\n            self.__contains_video = True\n            self.__contains_audio = True\n        elif self.__default_video_bitrate or (\n            self.__default_video_framerate and self.__default_video_resolution\n        ):\n            self.__contains_video = True\n        elif self.__default_audio_bitrate or self.__default_audio_samplerate:\n            self.__contains_audio = True\n        else:\n            raise ValueError(\n                \"Invalid source with no decodable audio or video stream 
provided. Aborting!\"\n            )\n    # calculate approximate number of video frame\n    if self.__default_video_framerate and self.__default_source_duration:\n        self.__approx_video_nframes = np.rint(\n            self.__default_video_framerate * self.__default_source_duration\n        ).astype(int, casting=\"unsafe\")\n\n    # signal metadata has been probed\n    self.__metadata_probed = True\n\n    # return reference to the instance object.\n    return self\n
"},{"location":"reference/sourcer/#deffcode.sourcer.Sourcer.retrieve_metadata","title":"retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False)","text":"

This method returns Parsed/Probed Metadata of the given source.

Parameters:

Name Type Description Default pretty_json bool

whether to return metadata as JSON string(if True) or Dictionary(if False) type?

False force_retrieve_missing bool

whether to also return metadata missing in current Pipeline. This method returns (metadata, metadata_missing) tuple if force_retrieve_missing=True instead of metadata.

False

Returns: metadata or (metadata, metadata_missing), formatted as JSON string or python dictionary.

Source code in deffcode/sourcer.py
def retrieve_metadata(self, pretty_json=False, force_retrieve_missing=False):\n    \"\"\"\n    This method returns Parsed/Probed Metadata of the given source.\n\n    Parameters:\n        pretty_json (bool): whether to return metadata as JSON string(if `True`) or Dictionary(if `False`) type?\n        force_retrieve_output (bool): whether to also return metadata missing in current Pipeline. This method returns `(metadata, metadata_missing)` tuple if `force_retrieve_output=True` instead of `metadata`.\n\n    **Returns:** `metadata` or `(metadata, metadata_missing)`, formatted as JSON string or python dictionary.\n    \"\"\"\n    # check if metadata has been probed or not\n    assert (\n        self.__metadata_probed\n    ), \"Source Metadata not been probed yet! Check if you called `probe_stream()` method.\"\n    # log it\n    self.__verbose_logs and logger.debug(\"Extracting Metadata...\")\n    # create metadata dictionary from information populated in private class variables\n    metadata = {\n        \"ffmpeg_binary_path\": self.__ffmpeg,\n        \"source\": self.__source,\n    }\n    metadata_missing = {}\n    # Only either `source_demuxer` or `source_extension` attribute can be\n    # present in metadata.\n    if self.__source_demuxer is None:\n        metadata.update({\"source_extension\": os.path.splitext(self.__source)[-1]})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_demuxer\": \"\"})\n    else:\n        metadata.update({\"source_demuxer\": self.__source_demuxer})\n        # update missing\n        force_retrieve_missing and metadata_missing.update({\"source_extension\": \"\"})\n    # add source video metadata properties\n    metadata.update(\n        {\n            \"source_video_resolution\": self.__default_video_resolution,\n            \"source_video_pixfmt\": self.__default_video_pixfmt,\n            \"source_video_framerate\": self.__default_video_framerate,\n            \"source_video_orientation\": 
self.__default_video_orientation,\n            \"source_video_decoder\": self.__default_video_decoder,\n            \"source_duration_sec\": self.__default_source_duration,\n            \"approx_video_nframes\": (\n                int(self.__approx_video_nframes)\n                if self.__approx_video_nframes\n                and not any(\n                    \"loop\" in x for x in self.__ffmpeg_prefixes\n                )  # check if any loops in prefix\n                and not any(\n                    \"loop\" in x for x in dict2Args(self.__sourcer_params)\n                )  # check if any loops in filters\n                else None\n            ),\n            \"source_video_bitrate\": self.__default_video_bitrate,\n            \"source_audio_bitrate\": self.__default_audio_bitrate,\n            \"source_audio_samplerate\": self.__default_audio_samplerate,\n            \"source_has_video\": self.__contains_video,\n            \"source_has_audio\": self.__contains_audio,\n            \"source_has_image_sequence\": self.__contains_images,\n        }\n    )\n    # add output metadata properties (if available)\n    if not (self.__metadata_output is None):\n        metadata.update(\n            {\n                \"output_frames_resolution\": self.__output_frames_resolution,\n                \"output_frames_pixfmt\": self.__output_frames_pixfmt,\n                \"output_framerate\": self.__output_framerate,\n                \"output_orientation\": self.__output_orientation,\n            }\n        )\n    else:\n        # since output stream metadata properties are only available when additional\n        # FFmpeg parameters(such as filters) are defined manually, thereby missing\n        # output stream properties are handled by assigning them counterpart source\n        # stream metadata property values\n        force_retrieve_missing and metadata_missing.update(\n            {\n                \"output_frames_resolution\": self.__default_video_resolution,\n       
         \"output_frames_pixfmt\": self.__default_video_pixfmt,\n                \"output_framerate\": self.__default_video_framerate,\n                \"output_orientation\": self.__default_video_orientation,\n            }\n        )\n    # log it\n    self.__verbose_logs and logger.debug(\n        \"Metadata Extraction completed successfully!\"\n    )\n    # parse as JSON string(`json.dumps`), if defined\n    metadata = json.dumps(metadata, indent=2) if pretty_json else metadata\n    metadata_missing = (\n        json.dumps(metadata_missing, indent=2) if pretty_json else metadata_missing\n    )\n    # return `metadata` or `(metadata, metadata_missing)`\n    return metadata if not force_retrieve_missing else (metadata, metadata_missing)\n
"},{"location":"reference/sourcer/params/","title":"Sourcer API Parameters","text":""},{"location":"reference/sourcer/params/#source","title":"source","text":"

This parameter defines the input source (-i) for probing.

Sourcer API will throw AssertionError if source provided is invalid or missing.

Sourcer API checks for video bitrate or frame-size and framerate in video's metadata to ensure given input source has usable video stream available. Thereby, it will throw ValueError if it fails to find those parameters.

Multiple video inputs are not yet supported!

Data-Type: String.

Its valid input can be one of the following:

  • Filepath: Valid path of the video file, for e.g \"/home/foo.mp4\" as follows:

    # initialize the sourcer and probe it\nsourcer = Sourcer('/home/foo.mp4').probe_stream()\n
  • Image Sequence: Valid image sequence such as sequential('img%03d.png') or glob pattern('*.png') or single (looping) image as input:

    SequentialGlob patternSingle (loop) image How to start with specific number image?

    You can use -start_number FFmpeg parameter if you want to start with specific number image:

    # define `-start_number` such as `5`\nsourcer_params = {\"-ffprefixes\":[\"-start_number\", \"5\"]}\n\n# initialize the sourcer with define parameters\nsourcer = Sourcer('img%03d.png', verbose=True, **sourcer_params).probe_stream()\n
    # initialize the sourcer and probe it\nsourcer = Sourcer('img%03d.png', verbose=True).probe_stream()\n

    Bash-style globbing (* represents any number of any characters) is useful if your images are sequential but not necessarily in a numerically sequential order.

    The glob pattern is not available on Windows builds.

    # define `-pattern_type glob` for accepting glob pattern\nsourcer_params = {\"-ffprefixes\":[\"-pattern_type\", \"glob\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img*.png', verbose=True, **sourcer_params).probe_stream()\n
    # define `-loop 1` for looping\nsourcer_params = {\"-ffprefixes\":[\"-loop\", \"1\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('img.jpg', verbose=True, **sourcer_params).probe_stream()\n
  • Network Address: Valid (http(s), rtp, rtsp, rtmp, mms, etc.) incoming network stream address such as 'rtsp://xx:yy@192.168.1.ee:fd/av0_0' as input:

    # define `rtsp_transport` or necessary parameters \nsourcer_params = {\"-ffprefixes\":[\"-rtsp_transport\", \"tcp\"]}\n\n# initialize the sourcer with define parameters and probe it\nsourcer = Sourcer('rtsp://xx:yy@192.168.1.ee:fd/av0_0', verbose=True, **sourcer_params).probe_stream()\n
  • Camera Device Index: Valid \"device index\" or \"camera index\" of the connected Camera Device. For example, for using \"0\" index device as source on Windows, we can do as follows in Sourcer API:

    Requirement for using Camera Device as source in Sourcer API
    • MUST have appropriate FFmpeg binaries, Drivers, and Softwares installed:

      Internally, DeFFcode APIs achieve Index based Camera Device Capturing by employing some specific FFmpeg demuxers on different platforms(OSes). These platform specific demuxers are as follows:

      Platform(OS) Demuxer Windows OS dshow (or DirectShow) Linux OS video4linux2 (or its alias v4l2) Mac OS avfoundation

      Important: Kindly make sure your FFmpeg binaries support these platform specific demuxers as well as system have the appropriate video drivers and related softwares installed.

    • The source parameter value MUST be any Camera Device index that can be of either integer (e.g. -1,0,1, etc.) or string of integer (e.g. \"-1\",\"0\",\"1\", etc.) type.

    • The source_demuxer parameter value MUST be either None(also means empty) or \"auto\".

    # initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", verbose=True).probe_stream()\n
  • Video Capture Devices: Valid video probe device's name (e.g. \"USB2.0 Camera\") or its path (e.g. \"/dev/video0\" on linux) or its index (e.g. \"0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"USB2.0 Camera\" named device with dshow source demuxer on Windows, we can do as follows in Sourcer API:

    Identifying and Specifying Device name/path/index and suitable Demuxer on different OSes Windows Linux MacOS

    Windows OS users can use the dshow (DirectShow) to list video input device which is the preferred option for Windows users. You can refer following steps to identify and specify your input video device's name:

    • Identify Video Devices: You can locate your video device's name (already connected to your system) using dshow as follows:

      c:\\> ffmpeg.exe -list_devices true -f dshow -i dummy\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[dshow @ 03ACF580] DirectShow video devices\n[dshow @ 03ACF580]  \"Integrated Camera\"\n[dshow @ 03ACF580]  \"USB2.0 Camera\"\n[dshow @ 03ACF580] DirectShow audio devices\n[dshow @ 03ACF580]  \"Microphone (Realtek High Definition Audio)\"\n[dshow @ 03ACF580]  \"Microphone (USB2.0 Camera)\"\ndummy: Immediate exit requested\n
    • Specify Video Device's name: Then, you can specify and initialize your located Video device's name in Sourcer API as follows:

      # initialize the sourcer with \"USB2.0 Camera\" source and probe it\nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
    • [OPTIONAL] Specify Video Device's index along with name: If there are multiple Video devices with similar name, then you can use -video_device_number parameter to specify the arbitrary index of the particular device. For instance, to open second video device with name \"Camera\" you can do as follows:

      # define video_device_number as 1 (numbering start from 0)\nsourcer_params = {\"-ffprefixes\":[\"-video_device_number\", \"1\"]}\n\n# initialize the sourcer with \"Camera\" source and probe it\nsourcer = Sourcer(\"Camera\", source_demuxer=\"dshow\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the video4linux2 (or its alias v4l2) to list to all video capture devices such as from an USB webcam. You can refer following steps to identify and specify your probe video device's path:

    • Identify Video Devices: Linux systems tend to automatically create file device node/path when the device (e.g. an USB webcam) is plugged into the system, and has a name of the kind '/dev/videoN', where N is an index associated to the device. To get the list of all available file device node/path on your Linux machine, you can use the v4l2-ctl command.

      You can use sudo apt install v4l-utils APT command to install v4l2-ctl tool on Debian-based Linux distros.

      $ v4l2-ctl --list-devices\n\nUSB2.0 PC CAMERA (usb-0000:00:1d.7-1):\n        /dev/video1\n\nUVC Camera (046d:0819) (usb-0000:00:1d.7-2):\n        /dev/video0\n
    • Specify Video Device's path: Then, you can specify and initialize your located Video device's path in Sourcer API as follows:

      # initialize the sourcer with \"/dev/video0\" source and probe it\nsourcer = Sourcer(\"/dev/video0\", source_demuxer=\"v4l2\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index on MacOS/OSX machines:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    • Identify Video Devices: You can locate your Video device's name and index using avfoundation as follows:

      $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n
    • Specify Video Device's name or index: Then, you can specify and initialize your located Video device in Sourcer API using its either the name or the index shown in the device listing:

      Using device's indexUsing device's name
      # initialize the sourcer with `1` index source and probe it\nsourcer = Sourcer(\"1\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

      When specifying device's name, abbreviations using just the beginning of the device name are possible. Thus, to probe from a device named \"Integrated iSight-camera\" just \"Integrated\" is sufficient:

      # initialize the sourcer with \"Integrated iSight-camera\" source \nsourcer = Sourcer(\"Integrated\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps don't work for you, reach out to us on the Gitter \u27b6 Community channel

    # initialize the sourcer with \"USB2.0 Camera\" source \nsourcer = Sourcer(\"USB2.0 Camera\", source_demuxer=\"dshow\", verbose=True).probe_stream()\n
  • Screen Capturing/Recording: Valid screen probe device's name (e.g. \"desktop\") or its index (e.g. \":0.0\") as input w.r.t source_demuxer parameter value in use. For example, for probing \"0:\" indexed device with avfoundation source demuxer on MacOS, we can do as follows in Sourcer API:

    Specifying suitable Parameter(s) and Demuxer for Capturing your Desktop on different OSes Windows Linux MacOS

    Windows OS users can use the gdigrab to grab video from the Windows screen. You can refer following steps to specify source for probing:

    For Windows OS users, dshow is also available for grabbing frames from your desktop. But it is highly unreliable and doesn't work most of the time.

    # define framerate\nsourcer_params = {\"-framerate\": \"30\"}\n\n# initialize the sourcer with \"desktop\" source and probe it\nsourcer = Sourcer(\"desktop\", source_demuxer=\"gdigrab\", verbose=True, **sourcer_params).probe_stream()\n

    Linux OS users can use the x11grab to probe an X11 display. You can refer following steps to specify source for probing:

    # initialize the sourcer with \":0.0\" desktop source and probe it\nsourcer = Sourcer(\":0.0\", source_demuxer=\"x11grab\", verbose=True).probe_stream()\n

    MacOS users can use the AVFoundation to list input devices and is the currently recommended framework by Apple for streamgrabbing on Mac OSX-10.7 (Lion) and later as well as on iOS. You can refer following steps to identify and specify your probe video device's name or index in Sourcer API:

    QTKit is also available for streamgrabbing on Mac OS X 10.4 (Tiger) and later, but has been marked deprecated since OS X 10.7 (Lion) and may not be available on future releases.

    You can enumerate all the available input devices including screens ready to be probed using avfoundation as follows:

    $ ffmpeg -f avfoundation -list_devices true -i \"\"\n\nffmpeg version N-45279-g6b86dd5... --enable-runtime-cpudetect\n  libavutil      51. 74.100 / 51. 74.100\n  libavcodec     54. 65.100 / 54. 65.100\n  libavformat    54. 31.100 / 54. 31.100\n  libavdevice    54.  3.100 / 54.  3.100\n  libavfilter     3. 19.102 /  3. 19.102\n  libswscale      2.  1.101 /  2.  1.101\n  libswresample   0. 16.100 /  0. 16.100\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation video devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] FaceTime HD camera (built-in)\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Capture screen 0\n[AVFoundation input device @ 0x7f8e2540ef20] AVFoundation audio devices:\n[AVFoundation input device @ 0x7f8e2540ef20] [0] Blackmagic Audio\n[AVFoundation input device @ 0x7f8e2540ef20] [1] Built-in Microphone\n

    Then, you can specify and initialize your located screens in Sourcer API using its index shown:

    # initialize the sourcer with `0:` index desktop screen and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n

    If these steps don't work for you, reach out to us on the Gitter \u27b6 Community channel

    # initialize the sourcer with \"0:\" source and probe it\nsourcer = Sourcer(\"0:\", source_demuxer=\"avfoundation\", verbose=True).probe_stream()\n
  • Virtual Sources: Valid filtergraph to use as input with lavfi (Libavfilter input virtual device) source that reads data from the open output pads of a libavfilter filtergraph. For example, for generating and probing Mandelbrot graph of 1280x720 frame size and 30 framerate using lavfi input virtual device, we can do as follows in Sourcer API:

    # initialize the sourcer with \"mandelbrot\" source of\n# `1280x720` frame size and `30` framerate and probe it\nsourcer = Sourcer(\n    \"mandelbrot=size=1280x720:rate=30\",\n    source_demuxer=\"lavfi\",\n    frame_format=\"bgr24\",\n).probe_stream()\n

"},{"location":"reference/sourcer/params/#source_demuxer","title":"source_demuxer","text":"

This parameter specifies the demuxer(-f) for the input source (such as dshow, v4l2, gdigrab etc.) to support Live Feed Devices, as well as lavfi (Libavfilter input virtual device) that reads data from the open output pads of a libavfilter filtergraph.

Any invalid or unsupported value to source_demuxer parameter value will raise Assertion error!

Use the ffmpeg -demuxers terminal command to list all FFmpeg supported demuxers.

Specifying source_demuxer for using Camera Device Index as source in Sourcer API

For using Camera Device Index as source in Sourcer API, the source_demuxer parameter value MUST be either None(also means empty) or \"auto\":

source_demuxer=None (Default and Recommended)source_demuxer=\"auto\"
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\").probe_stream()\n
# initialize the sourcer with \"0\" index source and probe it\nsourcer = Sourcer(\"0\", source_demuxer=\"auto\").probe_stream()\n

Data-Type: String

Default Value: Its default value is None.

Usage:

# initialize the sourcer with `dshow` demuxer and probe it\nsourcer = Sourcer(\"foo.mp4\", source_demuxer=\"dshow\").probe_stream()\n

"},{"location":"reference/sourcer/params/#custom_ffmpeg","title":"custom_ffmpeg","text":"

This parameter can be used to manually assign the system file-path/directory where the custom or downloaded FFmpeg executable is located.

Behavior on Windows

If custom FFmpeg executable binary file-path/directory is not assigned through custom_ffmpeg parameter on Windows machine, then Sourcer API will automatically attempt to download and extract suitable Static FFmpeg binaries at suitable location on your windows machine. More information can be found here \u27b6.

How to change FFmpeg Static Binaries download directory?

You can use -ffmpeg_download_path exclusive parameter in Sourcer API to set the custom directory for downloading FFmpeg Static Binaries during the Auto-Installation step on Windows Machines. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows in Sourcer API:

# # define suitable parameter to download at \"C:/User/foo/foo1\"\nsourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"}\n\n# initialize the sourcer\nSourcer(\"foo.mp4\", verbose=True, **sourcer_params).probe_stream()\n

If binaries were not found at the manually specified path, DeFFcode APIs will throw RuntimeError!

Data-Type: String

Default Value: Its default value is None.

Usage:

# If ffmpeg executables are located at \"/foo/foo1/ffmpeg\"\nSourcer(\"foo.mp4\", custom_ffmpeg=\"/foo/foo1/ffmpeg\").probe_stream()\n

"},{"location":"reference/sourcer/params/#verbose","title":"verbose","text":"

This parameter enables verbose logs (if True), essential for debugging.

Data-Type: Boolean

Default Value: Its default value is False.

Usage:

# initialize the sourcer with verbose logs\nSourcer(\"foo.mp4\", verbose=True).probe_stream()\n

"},{"location":"reference/sourcer/params/#sourcer_params","title":"sourcer_params","text":"

This dictionary parameter accepts all Exclusive Parameters formatted as its attributes:

Additional FFmpeg parameters

In addition to Exclusive Parameters, Sourcer API supports almost any FFmpeg parameter (supported by installed FFmpeg), and thereby can be passed as dictionary attributes in sourcer_params parameter.

Kindly read FFmpeg Docs carefully before passing any additional values to sourcer_params parameter. Wrong invalid values may result in undesired errors or no output at all.

All FFmpeg parameters are case-sensitive. Remember to double check every parameter if any error(s) occurred.

Data-Type: Dictionary

Default Value: Its default value is {}.

"},{"location":"reference/sourcer/params/#exclusive-parameters","title":"Exclusive Parameters","text":"

Sourcer API supports a few Exclusive Parameters to allow users to flexibly change its probing properties and handle some special FFmpeg parameters.

These parameters are discussed below:

  • -ffprefixes (list): This attribute sets the special FFmpeg parameters that generally occur at the very beginning (such as -re) before the input (-i) source. The FFmpeg parameters defined with this attribute can be repeated more than once and maintain their original order in the FFmpeg command. Its value can be of datatype list only and its usage is as follows:

    Turn on verbose parameter (verbose = True) to see the FFmpeg command that is being executed in Sourcer's pipeline. This helps you debug/address any issues and make adjustments accordingly.

    # define suitable parameter\nsourcer_params = {\"-ffprefixes\": ['-re']} # executes as `ffmpeg -re <rest of command>`\n

  • -ffmpeg_download_path (string): sets the custom directory for downloading FFmpeg Static Binaries in Compression Mode, during the Auto-Installation on Windows Machines Only. If this parameter is not altered, then these binaries will auto-save to the default temporary directory (for e.g. C:/User/temp) on your windows machine. It can be used as follows:

    sourcer_params = {\"-ffmpeg_download_path\": \"C:/User/foo/foo1\"} # will be saved to \"C:/User/foo/foo1\"\n

  • -force_validate_source (bool): forcefully passes validation test for given source which is required for some special cases with unusual input. It can be used as follows:

    sourcer_params = {\"-force_validate_source\": True} # will pass validation test forcefully\n

"}]} \ No newline at end of file