Sending a message to a specific client from flask-socketio

Problem description

My application has a problem. It works like this: I capture the webcam stream from a web client, I send the frames to the server with socketio, I run an algorithm on the frames that gives me heart rate, SpO2 and breathing rate, and then I emit the resulting values back to the browser. This runs in an endless loop.

The problem is that if I start two web clients at the same time, the values I return are sent to all connected clients, so the individual clients are not really independent. How can I fix this? I want each connected client to send its frames to the server and have the results sent back only to the right client, not to everyone, so that each client is independent and can run the algorithm without being affected by the others.
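In Flask-SocketIO, an emit() issued inside an event handler is addressed to the requesting client by default (unless broadcast=True is passed), and request.sid identifies that client; the cross-talk in the code below comes mainly from the shared globals (image_queue, bpm, rpm, spo2) that every connected client feeds and reads. A minimal sketch of keeping the state per client and replying only to the sender, assuming a plain dict keyed by request.sid (the clients dict and process_frame stub are hypothetical placeholders, not part of the original app):

# Sketch only: per-client state keyed by request.sid, results emitted back to that client alone.
from flask import Flask, request
from flask_socketio import SocketIO, emit

app = Flask(__name__)
socketio = SocketIO(app)

clients = {}  # request.sid -> this client's frame buffer and last results


def process_frame(state):
    # Hypothetical stand-in for the HR/RR/SpO2 pipeline, run only on this client's buffer.
    return 0.0, 0.0, 0.0


@socketio.on('connect', namespace='/image')
def on_connect():
    clients[request.sid] = {'frames': []}   # isolated state for this client


@socketio.on('disconnect', namespace='/image')
def on_disconnect():
    clients.pop(request.sid, None)          # drop only this client's state


@socketio.on('send image', namespace='/image')
def on_image(json):
    state = clients[request.sid]
    state['frames'].append(json['data'])
    bpm, rpm, spo2 = process_frame(state)
    # Inside a handler, emit() without broadcast=True already goes only to the sender;
    # to=request.sid (room= in older Flask-SocketIO releases) makes the target explicit
    # and is also what socketio.emit() needs when called from a background task.
    emit('return bpm', "{0:.0f}".format(bpm), to=request.sid)
    emit('return rpm', "{0:.0f}".format(rpm), to=request.sid)
    emit('return spo2', "{0:.0f}".format(spo2), to=request.sid)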

app.py

# -*- coding: utf-8 -*-
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, disconnect
import base64
import numpy as np
import cv2
from flask_httpauth import HTTPDigestAuth
import os
from dotenv import load_dotenv
from engineio.payload import Payload
from queue import Queue, Empty
from custom_flask import CustomFlask
import dlib
from numpy.linalg import norm

from datetime import datetime
from scipy import signal
import pip
import math
import time
from skimage.color import rgb2hsv
from skimage.color import hsv2rgb


Payload.max_decode_packets = 500
load_dotenv(verbose=True)

image_queue = Queue(maxsize=50)
processed_queue = Queue(maxsize=50)


#  for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera
# Constants
WINDOW_TITLE = 'ReHe Rate '
BUFFER_MAX_SIZE = 200       # Number of ROI values to keep in the buffer
countfile=0

MIN_FRAMES = 100    # Minimum number of frames required for the first signal-acquisition phase
finalrpm=[]
flagrilevazione = 0
nodetectface = 1
signalpercent = ""
bpm = 0
rpm = 0
spo2 = 0


# Butterworth band-pass filter
# Reference:  http://scipy.github.io/old-wiki/pages/Cookbook/ButterworthBandpass
def butterworth_filter(data, low, high, sample_rate, order):
    nyquist_rate = sample_rate * 0.5
    low /= nyquist_rate
    high /= nyquist_rate
    b, a = signal.butter(order, [low, high], btype='band')
    return signal.lfilter(b, a, data)


# Get the forehead ROI from the facial landmarks
# Reference:  https://matthewearl.github.io/2015/07/28/switching-eds-with-python/

def get_forehead_roi(face_points):
    points = np.zeros((len(face_points.parts()), 2))
    for i, part in enumerate(face_points.parts()):
        points[i] = (part.x, part.y)

    
    min_x = int(points[21, 0])
    min_y = int(min(points[21, 1], points[22, 1]))
    max_x = int(points[22, 0])
    max_y = int(max(points[21, 1], points[22, 1]))
    left = min_x
    right = max_x
    top = min_y - (max_x - min_x)
    bottom = max_y * 0.98
    return int(left), int(right), int(top), int(bottom)




# Sliding-window demeaning
def sliding_window_demean(signal_values, num_windows):
    window_size = int(round(len(signal_values) / num_windows))
    demeaned = np.zeros(signal_values.shape)
    for i in range(0, len(signal_values), window_size):
        if i + window_size > len(signal_values):
            window_size = len(signal_values) - i
        curr_slice = signal_values[i: i + window_size]
        
        demeaned[i:i + window_size] = curr_slice - np.mean(curr_slice)
    return demeaned



  
# Average of the green channel, used for HR (heart rate)
def get_avg_green(roi1):
    roi1_green = roi1[:,:,1]
    avg = (np.mean(roi1_green))
    return avg
# Average of the blue channel, used for BR (breathing rate)
def get_avg_blue(roi1):
    roi1_blue= roi1[:,:,0]
    avg = (np.mean(roi1_blue))
    return avg


# Compute BPM and RPM
def compute(filtered_values, fps, buffer_size, last_value,hzmin,hzmax):
    # FFT magnitude of the filtered signal
    fft = np.abs(np.fft.rfft(filtered_values))

    # Generate the list of frequencies corresponding to the FFT bins
    freqs = fps / buffer_size * np.arange(buffer_size / 2 + 1)

    # Zero out every frequency outside the chosen band (hzmin-hzmax, e.g. ~0.8-2.22 Hz for heart rate and 0.14-0.7 Hz for breathing rate)
    while True:
        max_idx = fft.argmax()
        bps = freqs[max_idx]
        if bps < hzmin or bps > hzmax:
           
            fft[max_idx] = 0
        else:
            value = bps * 60.0
            break

    # Heart rate and breathing rate cannot change by more than about 5% this quickly, so the new value is weighted against the previous one
    if last_value > 0:
        value = (last_value * 0.95) + (value * 0.05)

    return value


# Signal filtering
def filter_signal_data(values, fps, hzmin, hzmax,order):
    # Make sure the channel array contains no infinite or NaN values
    values = np.array(values)
    np.nan_to_num(values, copy=False)

    # Clean up the signal through detrending and demeaning
    detrended = signal.detrend(values, type='linear')
    demeaned = sliding_window_demean(detrended, 15)
    # Band-pass the signal with the Butterworth filter
    filtered = butterworth_filter(demeaned, hzmin, hzmax, fps, order)
    return filtered
    


# Forehead ROI average for the green channel. Optionally draws a rectangle around the region of interest
def get_roi_avg_green(frame, view, face_points, draw_rect=True):
    # Region-of-interest coordinates
    fh_left, fh_right, fh_top, fh_bottom = get_forehead_roi(face_points)
    # Draw a rectangle over the region of interest
    if draw_rect:
        cv2.rectangle(view, (fh_left, fh_top), (fh_right, fh_bottom), color=(255, 0, 0), thickness=2)
    # Crop the region of interest (ROI) and compute its average
    fh_roi = frame[fh_top:fh_bottom, fh_left:fh_right]
    return get_avg_green(fh_roi)
    
    
    
# Forehead ROI average for the blue channel. Optionally draws a rectangle around the region of interest
def get_roi_avg_blue(frame, view, face_points, draw_rect=True):
    # Region-of-interest coordinates
    fh_left, fh_right, fh_top, fh_bottom = get_forehead_roi(face_points)
    # Draw a rectangle over the region of interest
    if draw_rect:
        cv2.rectangle(view, (fh_left, fh_top), (fh_right, fh_bottom), color=(255, 0, 0), thickness=2)
    # Crop the region of interest (ROI) and compute its average
    fh_roi = frame[fh_top:fh_bottom, fh_left:fh_right]
    return get_avg_blue(fh_roi)


# Red-channel DC (mean) and AC (standard deviation) components, used for SpO2
def get_avg_red_sp(roi1):
    roi1 = roi1[:,:,2]
    #avg = np.mean(roi1)
    DcRed = np.mean(roi1)
    AcRed = np.std(roi1)
             
    
  
    return DcRed, AcRed

# Blue-channel DC (mean) and AC (standard deviation) components, used for SpO2
def get_avg_blue_sp(roi1):
   
    roi1 = roi1[:,:,0]
    #avg = np.mean(roi1)
    DcBlue = np.mean(roi1)
    AcBlue = np.std(roi1)
    return DcBlue,AcBlue

# Forehead ROI blue-channel DC/AC components for SpO2. Optionally draws a rectangle around the region of interest
def get_roi_avg_blue_sp(frame, view, face_points, draw_rect=True):
    # Region-of-interest coordinates
    fh_left, fh_right, fh_top, fh_bottom = get_forehead_roi(face_points)
    # Draw a rectangle over the region of interest
    if draw_rect:
        cv2.rectangle(view, (fh_left, fh_top), (fh_right, fh_bottom), color=(255, 0, 0), thickness=2)
    # Crop the region of interest (ROI) and compute its statistics
    fh_roi = frame[fh_top:fh_bottom, fh_left:fh_right]
    return get_avg_blue_sp(fh_roi)


  



# Forehead ROI red-channel DC/AC components for SpO2. Optionally draws a rectangle around the region of interest
def get_roi_avg_red_sp(frame, view, face_points, draw_rect=True):
    # Region-of-interest coordinates
    fh_left, fh_right, fh_top, fh_bottom = get_forehead_roi(face_points)
    # Draw a rectangle over the region of interest
    if draw_rect:
        cv2.rectangle(view, (fh_left, fh_top), (fh_right, fh_bottom), color=(255, 0, 0), thickness=2)
    # Crop the region of interest (ROI) and compute its statistics
    fh_roi = frame[fh_top:fh_bottom, fh_left:fh_right]
    return get_avg_red_sp(fh_roi)


def _base64_encode(img):
    # OpenCV image -> base64 data URL to send to the browser
    _, buffer = cv2.imencode(".jpg", img)
    base64_data = base64.b64encode(buffer)
    base64_data = "data:image/jpg;base64," + base64_data.decode('utf-8')
    return base64_data


def _base64_decode(img_base64):
    # base64-encoded JPEG from the browser -> OpenCV image
    img_binary = base64.b64decode(img_base64)
    jpg = np.frombuffer(img_binary, dtype=np.uint8)
    img = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
    return img





def loop_emit():
        print("start loop")
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
        roi_avg_values_green = []
        roi_avg_values_blue = []
        times = []
        last_bpm = 0
        last_rpm=0
        #k=0
        spo2array = []
        countfile=0
        #global finalrpm
   
        times1 = []
    
        
        global spo2
        first = 0
        global signalpercent
        global flagrilevazione
        bluevalue = []
        redvalue = []
        global bpm
        global rpm
        global nodetectface
        while True:
                
            try:
                # without a timeout, Queue.get() blocks forever and never raises Empty
                frame = image_queue.get(timeout=1)
            except Empty:
                continue

            view = np.array(frame)
            faces = detector(frame, 0)
            if len(faces) == 1:
                nodetectface=1
                face_points = predictor(frame, faces[0])
                roi_avg_green = get_roi_avg_green(frame, view, face_points, draw_rect=True)
                roi_avg_blue = get_roi_avg_blue(frame, view, face_points, draw_rect=True)
                roi_avg_values_blue.append(roi_avg_blue)
                roi_avg_values_green.append(roi_avg_green)
                
                bluevalue = get_roi_avg_blue_sp(frame, view, face_points, draw_rect=True)
                #roi_avg_values_blue_sp.append(roi_avg_blue_sp)
            
                redvalue = get_roi_avg_red_sp(frame, view, face_points, draw_rect=True)
                #roi_avg_values_red_sp.append(roi_avg_red_sp)
            
                spo2_prov = 100 - 5 * ((redvalue[1]/redvalue[0])/(bluevalue[1]/bluevalue[0]))                 
                #print(int(round(spo2_prov)))
                if np.isnan(spo2_prov):
                    print("nan")
                    spo2_prov = 0
               
                spo2array.append(spo2_prov)

                if first == 0:
                    text = ""
                    #print(text)
                else:
                    text = "SpO2: "+str(int(round(spo2)))+"%"
                    #print(text)
                    #view = draw_ris_spo2(view,text)
                
                times.append(time.time())
                times1.append(time.time())                
                if len(times) > BUFFER_MAX_SIZE:
                    roi_avg_values_green.pop(0)
                    roi_avg_values_blue.pop(0)
                    times.pop(0)

                curr_buffer_size = len(times)
                buffer_spo2 = len(times1)
                # Compute HR and RR only once the minimum number of frames is available
                if curr_buffer_size > MIN_FRAMES:
                    flagrilevazione = 1
                    order = 3
                    time_elapsed = times[-1] - times[0]
                    fps = curr_buffer_size / time_elapsed
                    #print("fps: ",fps) 
                    # frames per second (fixed fallback, disabled)
                    #fps = 30
                    # Clean up the breathing-rate signal
                    try:
                        filtered = filter_signal_data(roi_avg_values_blue, fps,0.14,0.7,20)
                    # Clean up the heart-rate signal
                        filtered1 = filter_signal_data(roi_avg_values_green, fps,0.8,2.22,5)

                
                    # Compute BPM
                        bpm = compute(filtered1, fps, curr_buffer_size, last_bpm,0.8,2.22)
                    #print(bpm)
                    # Compute RPM
                        rpm = compute(filtered, fps, curr_buffer_size, last_rpm,0.14,0.7)
                    # Store bpm and rpm so the next iteration can check how much the values changed
                        last_bpm = bpm
                        last_rpm = rpm
               
                        # draw bpm and rpm on the frame (disabled)
                        #view = draw_ris_bpm(view,bpm)
                       # print("Battito cardiaco: ",bpm)
                        #view = draw_ris_rpm(view,rpm)
                       # print("Respirazione: ",rpm)
                        currentTime= datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    except Exception:
                        # processing failed: reset all buffers and start over
                        print("avviso errore")
                        nodetectface = 0
                        del roi_avg_values_green[:]
                        del times[:]
                        del times1[:]
                        bluevalue = []
                        last_bpm = 0
                        last_rpm=0 
                        first = 0
                        redvalue = []
                        del roi_avg_values_blue[:]
                        del spo2array[:]
                        buffer_spo2 = 0
                        

                    
                    
                else:
                
                    signalpercent = 'Rilevazione segnale ' + str(int(round(float(curr_buffer_size) / MIN_FRAMES * 100.0))) + '%'
                    #print(signalpercent)
                    flagrilevazione=0
                    #view = draw_nodetect(view,text)
                    
                
                if buffer_spo2 == MIN_FRAMES:
                    first = first+1
                    flagrilevazione=1
                    
                    spo2array = [i for i in spo2array if i != 0]
                    spo2 = np.mean(spo2array)
                    spo2 = int(round(spo2))
                    text = "SpO2: "+str(spo2)+"%"
                    #view = draw_ris_spo2(view,text)
                    #print("SpO2: ",text)

                    bluevalue = []
                
                    redvalue = []
                    del spo2array[:]
                    del times1[:]
            else:
                # If no face is detected, clear all accumulated values
                nodetectface = 0
                del roi_avg_values_green[:]
                del times[:]
                del times1[:]
                bluevalue = []
                last_bpm = 0
                last_rpm=0 
                first = 0
                redvalue = []
                del roi_avg_values_blue[:]
                del spo2array[:]
            


    
app = CustomFlask(__name__, background_task=loop_emit)


socketio = SocketIO(app)

@app.route('/health_check')
def health_check():
    return "Status OK"


@app.route('/')
def sender():
    return render_template("sender.html")

@socketio.on('disconnect', namespace="/image")
def on_disconnect():

    print('-----------------------')
    print('Client disconnected - %s' % request.sid)
    print('-----------------------')
    



@socketio.on('connect', namespace="/image")
def test_connect():
    print('-----------------------')
    print('Client connected - %s' % request.sid)
    print('-----------------------')
    
    referer = request.referrer

    if referer is None or 'sender' not in referer:
        image_queue.queue.clear()
        processed_queue.queue.clear()

@socketio.on("send image", namespace="/image")
def parse_image(json):
    #global bpm
    #global rpm
    #global spo2
    img_base64 = json["data"].split(',')[1]
    img = _base64_decode(img_base64)
    image_queue.put(img)
    #try:
    #base64_data = processed_queue.get()
    #except Empty:
    #    return
    #else:
    if nodetectface==0:
    #emit('return face', "Faccia non rilevata, assicurati di avere il volto libero", broadcast=True)
    #emit('return status', "", broadcast=True)
        emit('return status', "Volto non rilevato", broadcast=False)#, room=sessionid)
        emit('return bpm', "", broadcast=False)#,room=sessionid)
        emit('return rpm', "", broadcast=False)#,room=sessionid)
        emit('return spo2', "", broadcast=False)#,room=sessionid)
    else:
        if flagrilevazione==0: 
            #emit('return percent', signalpercent, broadcast=True)
            #emit('return face', "", broadcast=True)
            emit('return status', signalpercent, broadcast=False)#,room=sessionid)
            emit('return bpm', "", broadcast=False)#,room=sessionid)
            emit('return rpm', "", broadcast=False)#,room=sessionid)
            emit('return spo2', "", broadcast=False)#,room=sessionid)
        
        else: 
            #emit('return percent', "", broadcast=True)
            emit('return status', "In corso", broadcast=False)#,room=sessionid)
            emit('return bpm', "{0:.0f}".format(bpm), broadcast=False)#,room=sessionid)
            emit('return rpm', "{0:.0f}".format(rpm), broadcast=False)#,room=sessionid)
            try:
                emit('return spo2', "{0:.0f}".format(round(spo2)), broadcast=False)#,room=sessionid)
            except: 
                emit('return spo2', "Ricalcolo", broadcast=False)#,room=sessionid)   
    


if __name__ == '__main__':
    
    socketio.run(app, debug=False, host='0.0.0.0', port=5000)

sender.html (this is where I open the socketio client side)

{% extends "layout.html" %}
{% block content %}
    
    
    <header>
  <nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
   <img src="https://i.ibb.co/2h3MqP2/logo.png" alt="logo" border="0" width="60px" style="margin-right: 30px;"><a class="navbar-brand" href="#">Università degli Studi Aldo Moro</a>
    <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarCollapse" aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">
      <span class="navbar-toggler-icon"></span>
    </button>
    <div class="collapse navbar-collapse" id="navbarCollapse">
      <ul class="navbar-nav mr-auto">
        <li class="nav-item active">
          <a class="nav-link" href="#">Home <span class="sr-only">(current)</span></a>
        </li>
        <li class="nav-item">
          <a class="nav-link" href="#">Chi sono</a>
        </li>
        
      </ul>
      
    </div>
  </nav>
</header>

    <!-- Begin page content -->
    <main role="main" class="container">
    
    <br><br>
    <div class="card mt-5 mb-5" style="width: 60%;margin: auto;">
  <div class="card-header" style="text-align: center;">
    <strong>Parametri vitali</strong>
  </div>
  <div class="card-body">
    <div class="alert alert-warning" role="alert">
    <b>Segui queste istruzioni per il corretto funzionamento di questo applicativo:<br></b>
    <ul>
  <li>Assicurati di aver la fronte libera </li>
  <li>Assicurati di essere in una stanza ben luminosa e non in controluce</li>
  <li>Poni il flash di un cellulare ad una distanza di circa 30cm dal tuo volto</li>
  <li> Resta fermo il più possibile per aumentare l'accuratezza della misurazione </li>
</ul>
</div>
<div style="text-align: center">
        <video id="local_video" autoplay="" width="400" height="350"></video><br>
        
<div style="text-align: center; width: 90%; margin: auto;">
        
    <table class="table">
   <thead class="thead-dark">
    <tr>
      <th scope="col" style="width: 40%" >STATO</th>

      <th scope="col" style="width: 20%">BPM</th>
      <th scope="col" style="width: 20%">RPM</th>
      <th scope="col" style="width: 20%">SPo2</th>
    </tr>
  </thead>
  <tbody>
    <tr class="table-secondary">
      <td id="status"></td>
      <td id="bpm"></td>
      <td id="rpm"></td>
      <td id="spo2"></td>

    </tr>

  </tbody>
</table>
    
  
  </div>
    </div>
    <canvas id="local_canvas" width="400" height="350" style="display: none"></canvas>
  </div>
</div>
          <br>

      
    </main>
    
    
    
    

{% endblock %}
{% block js %}
    <script type="text/javascript" charset="utf-8">

       

        const socket = io("/image");

        const videoElem = document.getElementById("local_video");
        const constraint = {audio: false, video: {width: {ideal: 640}, height: {ideal: 480}}}
        navigator.mediaDevices.getUserMedia(constraint)
            .then((stream) => {
                videoElem.srcObject = stream;
            });

        const canvasElem = document.getElementById("local_canvas");
        const canvasCtx = canvasElem.getContext('2d');

        function _canvasUpdate() {
            canvasCtx.drawImage(videoElem, 0, 0, canvasElem.width, canvasElem.height);
            const base64Data = canvasElem.toDataURL("image/jpeg")
            socket.emit('send image', {data: base64Data});
        }

        setInterval(_canvasUpdate, 150);
        //_canvasUpdate();
         const bpmElem = document.getElementById("bpm")
        const rpmElem = document.getElementById("rpm")
        const spo2Elem = document.getElementById("spo2")
        //const rilevazione = document.getElementById("rilevazione")
        //const nodetect = document.getElementById("nodetect")
        const statusE = document.getElementById("status")


        

        socket.on("return bpm", (data) => {
            bpmElem.innerHTML = data
        })
        //socket.on("return face", (data) => {
        //    nodetect.innerHTML = data
        //})
        socket.on("return rpm", (data) => {
            rpmElem.innerHTML = data
        })
        socket.on("return spo2", (data) => {
            spo2Elem.innerHTML = data
        })
        socket.on("return status", (data) => {
            statusE.innerHTML = data
        })
    </script>
    
    <script type="text/javascript" charset="utf-8">
       
    </script>
{% endblock %}

Tags: python, flask, flask-socketio
