forked from dusty-nv/jetson-utils
python: added jetson.utils.VideoLoader class
1 parent 2694381, commit a557a85
Showing 2 changed files with 157 additions and 0 deletions.
__init__.py (jetson.utils package):

@@ -3,4 +3,6 @@
from jetson_utils_python import *

from .video_loader import *

VERSION = '1.0.0'
video_loader.py (new file):

@@ -0,0 +1,155 @@
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#

import queue

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)


class VideoLoader:

    def __init__(self, logger, width=1280, height=720, framerate="5/1"):
        self.__logger = logger
        self.__width = width
        self.__height = height
        self.__framerate = framerate

        self.__pipeline = None
        self.__queue = queue.Queue()

    def load(self, filename):
        # build and start a new pipeline for the given file.
        if self.__pipeline:
            raise Exception("Already processing a file")
        self.__pipeline = Gst.Pipeline.new("video-loader")

        bus = self.__pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.__on_message)

        self.__build_pipeline(filename)
        self.__pipeline.set_state(Gst.State.PLAYING)

    def stop(self):
        if self.__pipeline:
            self.__pipeline.set_state(Gst.State.NULL)
            self.__pipeline = None

    def is_loading(self):
        return self.__pipeline is not None

    def sample_next(self):
        # blocks until the next decoded sample is available.
        return self.__queue.get()

    def sample_done(self):
        self.__queue.task_done()

    def __build_pipeline(self, filename):
        source = Gst.ElementFactory.make("filesrc")
        source.set_property("location", filename)
        self.__pipeline.add(source)

        decodebin = Gst.ElementFactory.make("decodebin")
        decodebin.connect("pad-added", self.__on_pad_added)
        self.__pipeline.add(decodebin)

        source.link(decodebin)

    def __on_message(self, bus, message):
        t = message.type
        if t == Gst.MessageType.EOS:
            self.stop()
        elif t == Gst.MessageType.ERROR:
            self.stop()
            err, debug = message.parse_error()
            self.__logger.error("Error: %s (%s)" % (err, debug))

    def __on_pad_added(self, decodebin, pad):
        # decodebin exposes its pads dynamically, once the streams are found.
        caps_str = pad.query_caps(None).to_string()
        if caps_str.startswith("video"):
            self.__logger.info("Detected video stream, processing...")
        elif caps_str.startswith("audio"):
            self.__logger.info("Detected audio stream, ignoring.")
            return

        queue = Gst.ElementFactory.make("queue")
        self.__pipeline.add(queue)

        # rate first so we don't process as many frames.
        videorate = Gst.ElementFactory.make("videorate")
        self.__pipeline.add(videorate)

        caps = "video/x-raw,framerate=%s" % self.__framerate
        ratecaps = Gst.ElementFactory.make("capsfilter")
        ratecaps.set_property("caps", Gst.Caps(caps))
        self.__pipeline.add(ratecaps)

        # scale image in GPU if needed.
        videoscale = Gst.ElementFactory.make("nvvidconv")
        self.__pipeline.add(videoscale)

        caps = "video/x-raw(memory:NVMM),format=NV12"
        if self.__width > 0:
            caps += ",width=%d" % self.__width
        if self.__height > 0:
            caps += ",height=%d" % self.__height

        scalecaps = Gst.ElementFactory.make("capsfilter")
        scalecaps.set_property("caps", Gst.Caps(caps))
        self.__pipeline.add(scalecaps)

        # copy to main memory.
        videoconvert = Gst.ElementFactory.make("nvvidconv")
        self.__pipeline.add(videoconvert)

        convcaps = Gst.ElementFactory.make("capsfilter")
        convcaps.set_property("caps", Gst.Caps("video/x-raw"))
        self.__pipeline.add(convcaps)

        # finally, add the sample sink.
        appsink = Gst.ElementFactory.make("appsink")
        appsink.set_property("emit-signals", True)
        appsink.set_property("max-buffers", 10)
        appsink.set_property("drop", True)
        appsink.set_property("sync", True)
        appsink.connect("new-sample", self.__on_new_sample)
        self.__pipeline.add(appsink)

        # link everything together.
        sinkpad = queue.get_static_pad("sink")
        pad.link(sinkpad)

        queue.link(videorate)
        videorate.link(ratecaps)
        ratecaps.link(videoscale)
        videoscale.link(scalecaps)
        scalecaps.link(videoconvert)
        videoconvert.link(convcaps)
        convcaps.link(appsink)

    def __on_new_sample(self, appsink):
        sample = appsink.emit("pull-sample")
        self.__queue.put(sample)
        return Gst.FlowReturn.OK
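
For context, a minimal usage sketch of the class added above (the script below is illustrative and not part of the commit; the logger setup, file path, and frame count are placeholders):

import logging

from jetson.utils import VideoLoader

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("video-loader-demo")

loader = VideoLoader(logger, width=1280, height=720, framerate="5/1")
loader.load("/path/to/video.mp4")  # placeholder path

# pull a handful of decoded samples; sample_next() blocks until one arrives.
for _ in range(10):
    if not loader.is_loading():
        break
    sample = loader.sample_next()
    logger.info("got sample with caps: %s" % sample.get_caps().to_string())
    loader.sample_done()

loader.stop()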
Hey Aleix,
I built jetson-inference using your patch and inference on an mp4 file works like a charm. I was wondering if you could help me with something, though: I am using detectnet and I'd like to push detections to an upstream HTTP API for further analysis, but I'm having trouble dumping the GStreamer sample to a JPEG file.
I tried converting it to a numpy array (np.float32) and it keeps saying the buffer size is too big. I also tried dumping the image using the jetson.utils.saveImageRGBA binding, but I constantly get this exception:
jetson.utils.cudaToNumpy throws the same exception as above.
I'd appreciate any thoughts on this you might be able to share.
Thank you,
Raul
@lrauldan glad to hear it's helpful to someone. Can you share the place where you are calling jetson.utils.saveImageRGBA?
Just before synchronizing the cuda device:
Oh, I think it's because this is still not supported. See the comment (// TODO support GPU-only memory) in the binding PyImageIO_SaveRGBA: the memory used by cudaFromGstSample is GPU-only.
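For illustration only (this is not from the thread): when a sample's caps are plain video/x-raw in system memory, its buffer can be inspected on the CPU by mapping it, and the raw bytes are uint8, which is why viewing them as np.float32 overruns the expected size. A rough sketch with a hypothetical helper name; it does not apply to GPU-only (NVMM) buffers, which is the case described above:

import numpy as np
from gi.repository import Gst

def sample_to_bytes(sample):
    # map the sample's GstBuffer into host memory and copy it out as uint8.
    buf = sample.get_buffer()
    structure = sample.get_caps().get_structure(0)
    width = structure.get_value("width")
    height = structure.get_value("height")
    ok, mapinfo = buf.map(Gst.MapFlags.READ)
    if not ok:
        raise RuntimeError("failed to map sample buffer")
    try:
        data = np.frombuffer(mapinfo.data, dtype=np.uint8).copy()
    finally:
        buf.unmap(mapinfo)
    return data, width, height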
@lrauldan I just updated my PR dusty-nv#10 to use mapped memory. It should work now.
Bummer :/ it's not working for me on a Jetson TX1. It just sits there, shows nothing, and there is no sign of activity (CPU/GPU). As far as I can tell __on_new_sample should be called, but it never is.
Here is my info:
NVIDIA Jetson NANO/TX1 - Jetpack 4.2.1 [L4T 32.2.0]
python3 jetson-video.py ~/devel/Videos/Indoor1.mp4 --render
jetson.inference.__init__.py
jetson.inference -- initializing Python 3.6 bindings...
jetson.inference -- registering module types...
jetson.inference -- done registering module types
jetson.inference -- done Python 3.6 binding initialization
jetson.utils.__init__.py
jetson.utils -- initializing Python 3.6 bindings...
jetson.utils -- registering module functions...
jetson.utils -- done registering module functions
jetson.utils -- registering module types...
jetson.utils -- done registering module types
jetson.utils -- done Python 3.6 binding initialization
/media/goview/goview/devel/goview/src/tools/video_loader.py:23: PyGIWarning: Gst was imported without specifying a version first. Use gi.require_version('Gst', '1.0') before import to ensure that the right version gets loaded.
from gi.repository import Gst
jetson.inference -- PyTensorNet_New()
jetson.inference -- PyDetectNet_Init()
jetson.inference -- detectNet loading build-in network 'pednet'
detectNet -- loading detection network model from:
-- prototxt networks/ped-100/deploy.prototxt
-- model networks/ped-100/snapshot_iter_70800.caffemodel
-- input_blob 'data'
-- output_cvg 'coverage'
-- output_bbox 'bboxes'
-- mean_pixel 0.000000
-- mean_binary NULL
-- class_labels networks/ped-100/class_labels.txt
-- threshold 0.500000
-- batch_size 1
[TRT] TensorRT version 5.1.6
[TRT] loading NVIDIA plugins...
[TRT] Plugin Creator registration succeeded - GridAnchor_TRT
[TRT] Plugin Creator registration succeeded - NMS_TRT
[TRT] Plugin Creator registration succeeded - Reorg_TRT
[TRT] Plugin Creator registration succeeded - Region_TRT
[TRT] Plugin Creator registration succeeded - Clip_TRT
[TRT] Plugin Creator registration succeeded - LReLU_TRT
[TRT] Plugin Creator registration succeeded - PriorBox_TRT
[TRT] Plugin Creator registration succeeded - Normalize_TRT
[TRT] Plugin Creator registration succeeded - RPROI_TRT
[TRT] Plugin Creator registration succeeded - BatchedNMS_TRT
[TRT] completed loading NVIDIA plugins.
[TRT] detected model format - caffe (extension '.caffemodel')
[TRT] desired precision specified for GPU: FASTEST
[TRT] requested fasted precision for device GPU without providing valid calibrator, disabling INT8
[TRT] native precisions detected for GPU: FP32, FP16
[TRT] selecting fastest native precision for GPU: FP16
[TRT] attempting to open engine cache file /usr/local/bin/networks/ped-100/snapshot_iter_70800.caffemodel.1.1.GPU.FP16.engine
[TRT] loading network profile from engine cache... /usr/local/bin/networks/ped-100/snapshot_iter_70800.caffemodel.1.1.GPU.FP16.engine
[TRT] device GPU, /usr/local/bin/networks/ped-100/snapshot_iter_70800.caffemodel loaded
[TRT] device GPU, CUDA engine context initialized with 3 bindings
[TRT] binding -- index 0
-- name 'data'
-- type FP32
-- in/out INPUT
-- # dims 3
-- dim #0 3 (CHANNEL)
-- dim #1 512 (SPATIAL)
-- dim #2 1024 (SPATIAL)
[TRT] binding -- index 1
-- name 'coverage'
-- type FP32
-- in/out OUTPUT
-- # dims 3
-- dim #0 1 (CHANNEL)
-- dim #1 32 (SPATIAL)
-- dim #2 64 (SPATIAL)
[TRT] binding -- index 2
-- name 'bboxes'
-- type FP32
-- in/out OUTPUT
-- # dims 3
-- dim #0 4 (CHANNEL)
-- dim #1 32 (SPATIAL)
-- dim #2 64 (SPATIAL)
[TRT] binding to input 0 data binding index: 0
[TRT] binding to input 0 data dims (b=1 c=3 h=512 w=1024) size=6291456
[TRT] binding to output 0 coverage binding index: 1
[TRT] binding to output 0 coverage dims (b=1 c=1 h=32 w=64) size=8192
[TRT] binding to output 1 bboxes binding index: 2
[TRT] binding to output 1 bboxes dims (b=1 c=4 h=32 w=64) size=32768
device GPU, /usr/local/bin/networks/ped-100/snapshot_iter_70800.caffemodel initialized.
detectNet -- number object classes: 1
detectNet -- maximum bounding boxes: 2048
detectNet -- loaded 1 class info entries
detectNet -- number of object classes: 1
------ True
jetson.utils -- PyDisplay_New()
jetson.utils -- PyDisplay_Init()
[OpenGL] glDisplay -- X screen 0 resolution: 1920x1080
[OpenGL] glDisplay -- display device initialized
before while
sample_next...
Opening in BLOCKING MODE
NvMMLiteOpen : Block : BlockType = 261
NVMEDIA: Reading vendor.tegra.display-size : status: 6
NvMMLiteBlockCreate : Block : BlockType = 261
[INFO][detectnet-video] 2019-08-21 15:25:18,132 - Detected video stream, processing...
end pipeline
[INFO][detectnet-video] 2019-08-21 15:25:18,143 - Detected audio stream, ignoring.
^C
User interruption. Exiting...
PyTensorNet_Dealloc()
jetson.utils -- PyDisplay_Dealloc()
Thank you @aconchillo. It works beautifully on a TX2 with JetPack 4.2.
@blitzvb it looks like your queue is not receiving samples from GStreamer. Try setting a timeout on queue.get to see if it exits with a queue.Empty exception:
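A sketch of what that could look like (the 10-second timeout is arbitrary; queue is already imported at the top of video_loader.py):

# defined inside the VideoLoader class, replacing the blocking sample_next():
def sample_next(self, timeout=10):
    try:
        return self.__queue.get(timeout=timeout)  # seconds
    except queue.Empty:
        return None  # no sample arrived in time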
The queue is empty. I tried with both an mp4 and an h264 video.
[INFO][detectnet-video] 2019-08-21 16:07:45,857 - Detected video stream, processing...
end pipeline
[INFO][detectnet-video] 2019-08-21 16:07:45,867 - Detected audio stream, ignoring.
Traceback (most recent call last):
File "jetson-video.py", line 79, in
sample = loader.sample_next()
File "/media/devel/g/src/tools/video_loader.py", line 64, in sample_next
return self.__queue.get(timeout=10) # in seconds
File "/usr/lib/python3.6/queue.py", line 172, in get
raise Empty
queue.Empty
PyTensorNet_Dealloc()
jetson.utils -- PyDisplay_Dealloc()
Disclaimer: I am merely a beginner in this field, so my advice could be off.
Have you tried checking whether the nvvidconv and avdec_mpeg4 plugins are available in GStreamer?
CLI:
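The same check can also be done from Python against the GStreamer registry; a rough sketch (the element names come from the question above, and gst-inspect-1.0 is the usual command-line equivalent):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# ask the registry whether these elements can be created.
for name in ("nvvidconv", "avdec_mpeg4", "decodebin"):
    factory = Gst.ElementFactory.find(name)
    print("%-12s %s" % (name, "available" if factory else "MISSING"))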
Yep, all good. I can actually play those files with gst-launch-1.0.
Maybe it's because I was not able to get your branch (VideoLoader was missing), so I copied only VideoLoader and the sample. Am I missing something?
When I git clone this repo, I don't get video_loader.py.
Here is what I do:
You need to check out the add-pygst-support branch. I built the jetson-inference package by replacing the utils submodule with this fork (branch changed from master to the specified one).
Oh, my bad! I didn't realize you had another branch on this fork. I got this this time:
jetson-utils/python/bindings/PyGst.cpp:29:10: fatal error: pygobject.h: No such file or directory
 #include <pygobject.h>
          ^~~~~~~~~~~~~
compilation terminated.
I faced this issue myself. I solved it by installing pygobject (https://github.com/GNOME/pygobject) version 3.0.0. If you install it in /usr/local you might need to create a symlink in /usr/include:
lrwxrwxrwx 1 root root 45 aug 18 16:44 /usr/include/pygobject.h -> /usr/local/include/pygobject-3.0/pygobject.h
Don't forget to run "ldconfig" as root before attempting to rebuild jetson inference.
I don't remember having to create a link. I think I just installed python-gi-dev (sudo apt-get install python-gi-dev). That should do it.
Indeed, that did the trick. Thanks!
I have updated dusty-nv/jetson-inference#389 so it pulls in python-gi-dev automatically (I forgot to add it originally), so you won't have to install anything by hand.
@lrauldan I would not recommend modifying system files by hand; it's easy to end up with a messy system and hard to roll back (you will forget what you've done).
So even with your branch, including the latest VideoLoader, and the latest jetson-inference with the video example, it never starts playing the video file after detecting the video stream.
I am running it on a freshly installed TX1 with the latest JetPack. I can play those videos with gst-launch.
Here is the log, with debug info added to your example:
0:00:04.083898842 31958 0x7f242b0590 WARN GST_PADS gstpad.c:4226:gst_pad_peer_query:<decodebin0:src_0> could not send sticky events
Any idea?
Can you run the example with GST_DEBUG=4 and send the output somehow? It will be big.
Sure, here you go:
I just checked, and the h264 decoder being chosen for you is nvv4l2decoder. In my case, it's omxh264dec. The difference is that nvv4l2decoder uses the V4L2 API to decode and omxh264dec uses OpenMAX. I checked which package installs omxh264dec, but it doesn't come from any package, which means it comes from the Jetson Nano image I downloaded.
I will check removing the OpenMAX plugins and see if I get your issue.
Using nvv4l2decoder I'm getting the same behavior and also this error:
Investigating.
Interesting. I am on a TX1 and it's a brand new install. What Jetson/JetPack are you running it on?
Also, even though I cannot check at this time, I am pretty sure that I have the omxh264dec decoder (at least with the former JetPack). Maybe there is a way to force it?
The first thing would be to know whether you have the OMX plugins:
Then you can check the plugin rank (higher ranks are chosen first). So, in this case omxh264dec is chosen instead of nvv4l2decoder.
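A rough sketch of such a rank check from Python (the decoder names come from the discussion; the set_rank() note is an assumption about how one could force a choice, not something done in the thread):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# decodebin picks the decoder factory with the highest rank.
for name in ("omxh264dec", "nvv4l2decoder"):
    factory = Gst.ElementFactory.find(name)
    if factory is None:
        print("%s: not installed" % name)
    else:
        print("%s: rank %d" % (name, factory.get_rank()))

# to prefer one decoder at runtime, its rank could be raised before building
# the pipeline, e.g. factory.set_rank(Gst.Rank.PRIMARY + 1).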
I flashed JetPack 4.2.1 and I get both omxh264dec and nvv4l2decoder, which now come in the nvidia-l4t-gstreamer package. So, I'm not sure why nvv4l2decoder is being used in your case. When I have some time I'll check why nvv4l2decoder doesn't work.
OK, the reason is that in JetPack 4.2.1, nvv4l2decoder has a higher rank.
This is now fixed for both nvv4l2decoder and omxh264dec. You should update my jetson-utils branch.