首页 > 解决方案 > 如何记录/导出模型检测到的情绪?

问题描述

如何在下面的代码中记录/导出模型分类的情绪?

我的意思是,如果这个模型用于在线会议,我如何可视化检测到的情绪并看到最主要的情绪?

例如,我想生成某种会议报告/分析。

谢谢!

这是代码的视频捕获/情感显示部分:

# --- Video capture / emotion classification loop ---
# To record/export the detected emotions (the question being asked), every
# accepted prediction is appended to emotion_log; after the loop a CSV report
# is written and the dominant emotion is printed.
import csv
from collections import Counter
from datetime import datetime

cap = cv2.VideoCapture(0)

if args["isVideoWriter"] == True:
    fourrcc = cv2.VideoWriter_fourcc("M", "J", "P", "G")
    capWidth = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
    capHeight = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
    videoWrite = cv2.VideoWriter("output.avi", fourrcc, 22,
                                 (capWidth, capHeight))

# One (timestamp, emotion, probability) entry per classified face.
emotion_log = []

while True:
    ret, frame = cap.read()
    # BUG FIX: check ret BEFORE touching frame -- the original resized first,
    # which raises cv2.error when the grab fails and frame is None.
    if not ret:
        break
    frame = cv2.resize(frame, (720, 480))

    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(grayFrame, 0)
    for rect in rects:
        shape = predictor(grayFrame, rect)
        points = shapePoints(shape)
        (x, y, w, h) = rectPoints(rect)
        grayFace = grayFrame[y:y + h, x:x + w]
        try:
            grayFace = cv2.resize(grayFace, (emotionTargetSize))
        except cv2.error:
            # Face crop was empty/degenerate (e.g. detection touching the
            # frame border) -- skip it instead of swallowing every exception.
            continue

        # Normalize pixels to [-1, 1] and add batch + channel axes, matching
        # the classifier's expected input.
        grayFace = grayFace.astype('float32')
        grayFace = grayFace / 255.0
        grayFace = (grayFace - 0.5) * 2.0
        grayFace = np.expand_dims(grayFace, 0)
        grayFace = np.expand_dims(grayFace, -1)
        emotion_prediction = emotionClassifier.predict(grayFace)
        emotion_probability = np.max(emotion_prediction)
        if (emotion_probability > 0.36):  # confidence threshold
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_name = emotions[emotion_label_arg]['emotion']
            # Record the detection so a meeting report can be produced later.
            emotion_log.append((datetime.now().isoformat(),
                                emotion_name,
                                float(emotion_probability)))
            color = emotions[emotion_label_arg]['color']
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.line(frame, (x, y + h), (x + 20, y + h + 20),
                     color,
                     thickness=2)
            cv2.rectangle(frame, (x + 20, y + h + 20), (x + 110, y + h + 40),
                          color, -1)
            cv2.putText(frame, emotion_name,
                        (x + 25, y + h + 36), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1, cv2.LINE_AA)
        else:
            # Low confidence: draw a neutral white box, do not log.
            color = (255, 255, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)

    if args["isVideoWriter"] == True:
        videoWrite.write(frame)

    cv2.imshow("Emotion Recognition", frame)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # ESC quits
        break

cap.release()
if args["isVideoWriter"] == True:
    videoWrite.release()
cv2.destroyAllWindows()

# --- Export the meeting report ---
# Raw per-detection log, one row per classified face.
with open("emotion_log.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["timestamp", "emotion", "probability"])
    writer.writerows(emotion_log)

# Aggregate counts and report the dominant emotion of the session.
counts = Counter(label for _, label, _ in emotion_log)
if counts:
    dominant, dominant_count = counts.most_common(1)[0]
    print("Dominant emotion: {} ({} of {} detections)".format(
        dominant, dominant_count, len(emotion_log)))

标签: opencv, sentiment-analysis, face-recognition, opencv-python

解决方案


您好,您可以按照下面的方式实现。注意:我没有实际运行过这段代码,请先自行调试和调整,再用于您的项目。

# Helper snippet: save a video clip to ./clips while an emotion is detected.
# The answer admitted it was unchecked; this version fixes the concrete bugs:
# missing `import os`, the nonexistent `VideoCapture.get_frame()` method, the
# empty (syntactically invalid) `else:` branch, and a new VideoWriter being
# created for every single frame so each "clip" was one frame long.
import datetime
import os
import time

import cv2

FPS = 5.0  # target frames per second for the saved clips

os.makedirs('./clips', exist_ok=True)  # directory for stored videos

camera = cv2.VideoCapture(0)

# Request MJPG @ 640x480 @ FPS; whether the camera honours these depends on
# the device/driver.
camera.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
camera.set(cv2.CAP_PROP_FPS, FPS)

frame_width = int(camera.get(3))   # CAP_PROP_FRAME_WIDTH
frame_height = int(camera.get(4))  # CAP_PROP_FRAME_HEIGHT
size = (frame_width, frame_height)

out = None  # open cv2.VideoWriter while recording, None otherwise

while True:
    # VideoCapture has no get_frame(); read() returns (ok, frame).
    ok, img = camera.read()
    if not ok:
        break

    ### your detection code here; it must set rec = True while an emotion
    ### is being detected (rec is not defined in this snippet itself).

    if rec:
        if out is None:
            # Open ONE writer per recording session, not one per frame.
            now = datetime.datetime.now()
            filename = "vid_{}.mp4".format(str(now).replace(":", ''))
            path = os.path.sep.join(['clips', filename])
            fourcc = cv2.VideoWriter_fourcc(*'FMP4')
            out = cv2.VideoWriter(path, fourcc, FPS, size)
        img = cv2.putText(img, "Recording...", (0, 25),
                          cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
        out.write(img)
        time.sleep(1 / FPS)  # crude pacing toward ~FPS frames per second
    else:
        if out is not None:
            # Detection stopped: finalize the clip so the file is playable.
            out.release()
            out = None
推荐阅读