Stitching two videos with OpenCV

Problem description

I want to build a panorama from two camera videos by stitching them together. I found the real-time panorama and image stitching tutorial at https://www.pyimagesearch.com/2016/01/25/real-time-panorama-and-image-stitching-with-opencv/, but I cannot get it to work with video files.

I am using Python 3.9 and OpenCV 4.4, and I get a different error every time I run it. Please help.

basicmotiondetector.py

  import imutils
  import cv2

  class BasicMotionDetector:
      def __init__(self, accumWeight=0.5, deltaThresh=5, minArea=5000):
          # determine the OpenCV version, followed by storing the
          # frame accumulation weight, the fixed threshold for
          # the delta image, and finally the minimum area required
          # for "motion" to be reported
          self.isv2 = imutils.is_cv2()
          self.accumWeight = accumWeight
          self.deltaThresh = deltaThresh
          self.minArea = minArea
          # initialize the average image for motion detection
          self.avg = None
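
The class as posted is truncated: it stores its parameters but never detects anything. For reference, here is a minimal sketch of an `update()` method in the spirit of the PyImageSearch tutorial (a reconstruction, not the original code); it assumes the caller passes in an already grayscaled and blurred frame:

      # sketch of BasicMotionDetector.update(); assumes `image` is a
      # grayscale frame (the caller converts and blurs it first)
      def update(self, image):
          # initialize the list of locations containing motion
          locs = []
          # if the average image is None, initialize it
          if self.avg is None:
              self.avg = image.astype("float")
              return locs
          # accumulate the weighted average between the current frame
          # and the previous frames, then compute the pixel-wise
          # difference between the current frame and the running average
          cv2.accumulateWeighted(image, self.avg, self.accumWeight)
          frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
          # threshold the delta image and dilate to fill in holes
          thresh = cv2.threshold(frameDelta, self.deltaThresh, 255,
              cv2.THRESH_BINARY)[1]
          thresh = cv2.dilate(thresh, None, iterations=2)
          # collect contours whose area exceeds the minimum
          cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
              cv2.CHAIN_APPROX_SIMPLE)
          cnts = imutils.grab_contours(cnts)
          for c in cnts:
              if cv2.contourArea(c) > self.minArea:
                  locs.append(c)
          # return the set of motion locations
          return locs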

panorama.py

# import the necessary packages
import numpy as np
import imutils
import cv2

class Stitcher:
    def __init__(self):
        # determine if we are using OpenCV v3.X and initialize the
        # cached homography matrix
        self.isv3 = imutils.is_cv3()
        self.cachedH = None

    def stitch(self, images, ratio=0.75, reprojThresh=4.0):
        # unpack the images
        (imageB, imageA) = images
        # if the cached homography matrix is None, then we need to
        # apply keypoint matching to construct it
        if self.cachedH is None:
            # detect keypoints and extract local invariant descriptors
            (kpsA, featuresA) = self.detectAndDescribe(imageA)
            (kpsB, featuresB) = self.detectAndDescribe(imageB)
            # match features between the two images
            M = self.matchKeypoints(kpsA, kpsB,
                featuresA, featuresB, ratio, reprojThresh)
            # if the match is None, then there aren't enough matched
            # keypoints to create a panorama
            if M is None:
                return None
            # cache the homography matrix
            self.cachedH = M[1]
        # apply a perspective transform to stitch the images together
        # using the cached homography matrix
        result = cv2.warpPerspective(imageA, self.cachedH,
            (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        # return the stitched image
        return result

    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # check to see if we are using OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image using the
            # SIFT algorithm
            descriptor = cv2.xfeatures2d.SIFT_create()
            # SURF is an alternative; ~300-500 is a reasonable range
            # for its Hessian threshold
            #descriptor = cv2.xfeatures2d.SURF_create()
            # upright SURF is faster and adequate for panorama
            # stitching, i.e. our case
            #descriptor.upright = True
            print(descriptor.descriptorSize())
            (kps, features) = descriptor.detectAndCompute(image, None)
            print(len(kps), features.shape)

        # otherwise, we are using OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to a NumPy array
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features)
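
As posted, this class cannot run on Python 3.9 / OpenCV 4.4, which is the likely source of the changing errors: `imutils.is_cv3()` returns False on OpenCV 4.x, so execution falls into the OpenCV 2.4 branch, and `cv2.FeatureDetector_create` no longer exists there. OpenCV 4.4 also moved SIFT out of `xfeatures2d` into the main module. The question additionally omits the tutorial's `matchKeypoints` method, which `stitch` calls. Below is a sketch of both methods (to live inside the `Stitcher` class) adapted for OpenCV 4.x; it keeps the tutorial's ratio-test-plus-RANSAC approach but is a reconstruction under those assumptions, not the original code:

    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # on OpenCV 4.4+ SIFT lives in the main module; fall back to
        # xfeatures2d for contrib builds of OpenCV 3.x
        if hasattr(cv2, "SIFT_create"):
            descriptor = cv2.SIFT_create()
        else:
            descriptor = cv2.xfeatures2d.SIFT_create()
        (kps, features) = descriptor.detectAndCompute(gray, None)
        # convert the keypoints from KeyPoint objects to a NumPy array
        kps = np.float32([kp.pt for kp in kps])
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
            ratio, reprojThresh):
        # compute the raw matches with a brute-force matcher and
        # apply Lowe's ratio test to prune ambiguous matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []
        for m in rawMatches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))
        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                reprojThresh)
            return (matches, H, status)
        # otherwise, no homography could be computed
        return None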

Main file

    # import the necessary packages
    from __future__ import print_function
    from basicmotiondetector import BasicMotionDetector
    from panorama import Stitcher
    from imutils.video import VideoStream
    import numpy as np
    import datetime
    import imutils
    import time
    import cv2

    # initialize the video streams and allow them to warm up
    print("[INFO] starting cameras...")
    leftStream = cv2.VideoCapture("a2.mp4")
    rightStream = cv2.VideoCapture("a1.mp4")

    time.sleep(2.0)

    # initialize the image stitcher, motion detector, and total
    # number of frames read
    stitcher = Stitcher()
    motion = BasicMotionDetector(minArea=500)
    total = 0

    # loop over frames from the video streams
    while True:
        # grab the frames from their respective video streams and
        # stop once either file runs out of frames
        ret, left = leftStream.read()
        ret1, right = rightStream.read()
        if not ret or not ret1:
            break

        # stitch() returns a single image (or None), not a
        # (flag, image) tuple
        result = stitcher.stitch([left, right])
        # no homography could be computed
        if result is None:
            print("[INFO] homography could not be computed")
            break
        # convert the panorama to grayscale, blur it slightly, and
        # update the motion detector
        gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # increment the total number of frames read (the tutorial's
        # timestamp drawing is omitted here)
        total += 1

        # show the output images
        cv2.imshow("Result", result)
        cv2.imshow("Left Frame", left)
        cv2.imshow("Right Frame", right)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    print("[INFO] cleaning up...")
    cv2.destroyAllWindows()
    # cv2.VideoCapture objects are released, not stopped
    leftStream.release()
    rightStream.release()

Tags: python, opencv-python

Solution

There is nothing wrong with the code you posted, but I could not get it to work under OpenCV 3.4 either. I need code like this for a similar project, and I have not found anything that works yet.
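
If adapting the tutorial code keeps failing, one alternative worth trying is OpenCV's built-in high-level Stitcher API, which exists in both 3.x and 4.x and sidesteps the manual SIFT/homography pipeline. Below is a minimal per-frame sketch; the file names follow the question, and real footage needs enough visual overlap between the two views for stitching to succeed:

    import cv2

    # open the two input videos from the question
    leftStream = cv2.VideoCapture("a2.mp4")
    rightStream = cv2.VideoCapture("a1.mp4")

    # OpenCV's high-level stitcher; handles feature detection,
    # matching, and blending internally
    stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)

    while True:
        ret, left = leftStream.read()
        ret1, right = rightStream.read()
        if not ret or not ret1:
            break
        # stitch the current frame pair; status is Stitcher_OK (0)
        # on success
        status, pano = stitcher.stitch([left, right])
        if status != cv2.Stitcher_OK:
            print("[INFO] frame could not be stitched, status:", status)
            continue
        cv2.imshow("Panorama", pano)
        if (cv2.waitKey(1) & 0xFF) == ord("q"):
            break

    leftStream.release()
    rightStream.release()
    cv2.destroyAllWindows()

Note that this re-estimates the registration on every frame, which is slow; for fixed cameras you would normally compute the transform once and reuse it, which is exactly what the tutorial's cached homography matrix is for.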

