How to push OpenCV-processed frames back into a GStreamer pipeline?

Problem Description

I'm running into some trouble getting my OpenCV-processed frames back into the GStreamer pipeline. Here is my source code:

import timeit
act_start = timeit.default_timer()
import os, sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import numpy as np
import torchvision
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
import shutil
from random import randrange

frame_count = 1
frame_time = 0
FPS = 0

frame_format, pixel_bytes = 'RGBA', 4
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

Gst.init()
pipeline = Gst.parse_launch(f'''
    filesrc location=2m.mp4 !
    decodebin !
    nvvideoconvert !
    video/x-raw,format={frame_format} !
    nvvideoconvert name=s !
    x264enc !
    filesink location=output.mp4 
''')

def on_frame_probe(pad, info):
    # Runs once for every buffer reaching the sink pad of the element named 's'.
    start = timeit.default_timer()
    buf = info.get_buffer()
    #print(dir(info))
    global frame_count
    global frame_time
    global FPS

    image_tensor = buffer_to_image_tensor(buf, pad.get_current_caps())
    results = model(image_tensor)
    # results.render() draws the detections on the copied frame returned by
    # buffer_to_image_tensor, not on the GStreamer buffer itself.
    results_np = np.array(results.render())
    result_image = cv2.cvtColor(results_np[0], cv2.COLOR_BGR2RGB)
    stop = timeit.default_timer()
    time_taken = stop - start
    print('Time taken on this frame: ', time_taken)
    frame_time += time_taken
    fps = frame_count/frame_time
    FPS = fps
    print('FPS: ', fps)
    frame_count += 1
    return Gst.PadProbeReturn.OK

def buffer_to_image_tensor(buf, caps):
    caps_structure = caps.get_structure(0)
    height, width = caps_structure.get_value('height'), caps_structure.get_value('width')

    # Map the buffer read-only and return an RGB copy of the RGBA frame.
    is_mapped, map_info = buf.map(Gst.MapFlags.READ)
    if is_mapped:
        try:
            image_array = np.ndarray(
                (height, width, pixel_bytes),
                dtype=np.uint8,
                buffer=map_info.data
            ).copy()
            return (image_array[:,:,:3])
        finally:
            buf.unmap(map_info)

pipeline.get_by_name('s').get_static_pad('sink').add_probe(
    Gst.PadProbeType.BUFFER,
    on_frame_probe
)

pipeline.set_state(Gst.State.PLAYING)

try:
    while True:
        msg = pipeline.get_bus().timed_pop_filtered(
            Gst.SECOND,
            Gst.MessageType.EOS | Gst.MessageType.ERROR
        )
        if msg:
            text = msg.get_structure().to_string() if msg.get_structure() else ''
            msg_type = Gst.message_type_get_name(msg.type)
            #print(f'{msg.src.name}: [{msg_type}] {text}')
            break
finally:
    pipeline.set_state(Gst.State.NULL)

act_end = timeit.default_timer()
total_time_taken = act_end - act_start
print("Total time taken: ",total_time_taken)
print("Average FPS: ", FPS)

I want to push the result_image variable, which holds the NumPy image with the detections drawn on it, back into the pipeline so that the filesink receives the processed frames and the output video contains the bounding boxes. At the moment the output video is identical to the input, with no bounding boxes drawn. If I save result_image with cv2.imwrite, I do get frames with the boxes drawn. Why is it not being pushed into the pipeline, and how can I push it?
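For reference, a minimal sketch of one way to do the write-back the question is asking about: copy the annotated pixels back into the GStreamer buffer inside the probe, so that the downstream encoder and filesink see the modified frame. This is only a sketch; it assumes the buffer is writable at this pad and that the RGBA frame has no row padding, and annotate() is a hypothetical stand-in for the model() / results.render() steps above.

def on_frame_probe(pad, info):
    buf = info.get_buffer()
    caps_structure = pad.get_current_caps().get_structure(0)
    height, width = caps_structure.get_value('height'), caps_structure.get_value('width')

    is_mapped, map_info = buf.map(Gst.MapFlags.READ)
    if not is_mapped:
        return Gst.PadProbeReturn.OK
    try:
        # Pull the RGBA frame out of the buffer as a copy.
        rgba = np.ndarray((height, width, 4), dtype=np.uint8,
                          buffer=map_info.data).copy()
    finally:
        buf.unmap(map_info)

    # annotate() is a placeholder for the model() + results.render() steps;
    # it must return an array with the same shape and dtype as its input.
    rgba[:, :, :3] = annotate(rgba[:, :, :3])

    # Copy the annotated pixels back into the buffer that continues downstream.
    # Gst.Buffer.fill writes into the buffer's memory; it needs the buffer to
    # be writable at this point in the pipeline.
    buf.fill(0, rgba.tobytes())
    return Gst.PadProbeReturn.OK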

Tags: opencv, gstreamer, python-gstreamer

Solution
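One common pattern for this kind of processing, instead of modifying buffers inside a probe, is to pull decoded frames out with appsink, run inference in Python, and push the annotated frames into a second pipeline through appsrc. Below is a minimal sketch of that pattern, reusing the YOLOv5 model from the question; the WIDTH, HEIGHT and framerate values are placeholders that have to match the real stream, and the element properties shown are just one plausible configuration.

import numpy as np
import torch
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Placeholders: these must match the actual video.
WIDTH, HEIGHT, FPS_NUM = 1280, 720, 30

# Pipeline 1: decode the file and hand RGB frames to Python through appsink.
pull = Gst.parse_launch(
    'filesrc location=2m.mp4 ! decodebin ! videoconvert ! '
    f'video/x-raw,format=RGB,width={WIDTH},height={HEIGHT} ! '
    'appsink name=sink emit-signals=true max-buffers=2 sync=false'
)

# Pipeline 2: take annotated RGB frames from appsrc, encode and mux them.
# mp4mux is included so output.mp4 is a playable container rather than a raw
# H.264 elementary stream.
push = Gst.parse_launch(
    'appsrc name=src format=time ! videoconvert ! x264enc ! mp4mux ! '
    'filesink location=output.mp4'
)

appsink = pull.get_by_name('sink')
appsrc = push.get_by_name('src')
appsrc.set_property('caps', Gst.Caps.from_string(
    f'video/x-raw,format=RGB,width={WIDTH},height={HEIGHT},framerate={FPS_NUM}/1'
))

def on_new_sample(sink):
    sample = sink.emit('pull-sample')
    buf = sample.get_buffer()
    ok, map_info = buf.map(Gst.MapFlags.READ)
    if not ok:
        return Gst.FlowReturn.ERROR
    try:
        # Assumes the RGB rows are not padded at this resolution.
        frame = np.ndarray((HEIGHT, WIDTH, 3), dtype=np.uint8,
                           buffer=map_info.data).copy()
    finally:
        buf.unmap(map_info)

    # Run YOLOv5 and take the rendered (annotated) frame.
    annotated = np.array(model(frame).render())[0]

    # Wrap the annotated frame in a new buffer, keep the original timestamps,
    # and push it into the output pipeline.
    out = Gst.Buffer.new_wrapped(annotated.tobytes())
    out.pts = buf.pts
    out.duration = buf.duration
    return appsrc.emit('push-buffer', out)

appsink.connect('new-sample', on_new_sample)
push.set_state(Gst.State.PLAYING)
pull.set_state(Gst.State.PLAYING)

When the first pipeline reports EOS on its bus, appsrc.emit('end-of-stream') should be called and the second pipeline allowed to drain before setting both to NULL, so that mp4mux can finalize the file; the bus-polling loop from the question can be reused for that.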

