首页 > 解决方案 > Tkinter Pyimage37 不存在

问题描述

我正在尝试以多线程模式运行 opencv 应用程序。我有一个显示来自网络摄像头或输入文件的视频帧的进程。另一个并行运行的进程对视频帧进行后处理并生成输出图像。当线程以独立模式运行时,它们工作正常。但是,当它在多线程模式下运行时,它会给出一个错误,因为“pyimage37 不存在”。

#input video file

cap1 = cv2.VideoCapture(INPUT_FILE)
cap2 = cv2.VideoCapture(INPUT_FILE)

#This class displays the video from file
class App1(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)
        self.start()

    def run(self):
     
        def quit_program():
            self.root.destroy()
   
        def show_frame():
            ret, frame = cap1.read()
            frame = cv2.flip(frame, 1)
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
            img = Image.fromarray(cv2image)
            imgtk = ImageTk.PhotoImage(image=img)
            self.label1.imgtk = imgtk
            self.label1.configure(image=imgtk)
            while(pauseFlag == True):
                time.sleep(0.001)
            self.label1.after(10, show_frame)

        self.root = Tk()
        self.root.geometry("900x700")
        self.root.resizable(0,0)
        left = Frame(self.root, borderwidth=2, relief="solid")
        self.label1 = Label(left, text="I could be an image, but right now I'm a label")
        
        #to display text
        label2 = Label(left)
        
        #packing all entities
        left.pack(side="left", expand=False, fill="both")
        self.label1.pack()
        label2.pack(side="bottom")
        
        v = StringVar()
        #show live video frame
        show_frame()   
             
        #Quit button
        helv36 = font.Font(family="Helvetica",size=16,weight="bold")
        Q = Button(label2, text ="Quit", command = quit_program, fg='red', height = 2, width=8, font=helv36)
        Q.pack(side = 'bottom')
           
        self.root.title("HII THIS IS a test window")   
        self.root.mainloop()

#This class displays the seen images and also indicates if there is seen, unseen, not seen
class App2(threading.Thread):
   
    def __init__(self):
        threading.Thread.__init__(self)
        self.start()

    def run(self):
         
        def cap_frame():
            seen_images = []        
            ret, img = cap2.read() #captures images from video
            detected_face = []
            y_test_newExp = []
            x_test_newExp = []
            if ret == 0:
                return
            print("DEBUG: Image detected")
            img = cv2.resize(img,(640,360))
            gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #color to gray
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x,y,w,h) in faces: #detected faces in video
                if w > 130: #trick: ignore small faces #capture only dominant face
                    detected_face = img[int(y):int(y+h), int(x): int(x+w)] #crop detected face
                    detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY) #transform to gray scale            
                    detected_face = cv2.resize(detected_face, (48,48)) #resize to 48x48            
                    detected_face = detected_face.flatten() #flatten image          
                    x_test_newExp.append(detected_face)
                    x_test_newExp = np.array(x_test_newExp, 'float32')
                    x_test_newExp = x_test_newExp.reshape(x_test_newExp.shape[0],48,48,1)
                    x_test_newExp = x_test_newExp.astype('float32')
                    x_test_newExp/=255
                    y_test_newExp.append(7)
                    y_test_newExp = np.asarray(y_test_newExp)
                    seen_unseen_notsure_flag = "notsure"
                    seen_images = []
                    if len(detected_face) != 0:  
                        print("DEBUG: face detected!!!!")
                        x_test_fullImg.append(img)        
                        
                        #Now check if this image is seen before in the list images that were stored in the previous instance
                        img_feature = model().predict([x_test_newExp, y_test_newExp])
                        x_test_feature.append(img_feature)
                                        
                        for i in range(0, len(x_test_feature) - 1):
                            #calculate eucledian distance
                            dist = np.linalg.norm(x_test_feature[i] - img_feature)
                                
                            if(dist < THRESHOLD_LB):                        
                                if(seen_unseen_notsure_flag != "seen"):
                                    seen_unseen_notsure_flag = "seen"                        
                                seen_images.append(x_test_fullImg[i])
                                
                            elif(dist > THRESHOLD_UB):
                                if(seen_unseen_notsure_flag != "unseen"):
                                    seen_unseen_notsure_flag = "unseen" 
                                    
                            else:
                                seen_unseen_notsure_flag = "notsure"
                        
                        print("DEBUG: flag = " + seen_unseen_notsure_flag)
                        self.v.set(seen_unseen_notsure_flag)
                        if(seen_unseen_notsure_flag == "seen"): 
                            #pauseFlag = True
                            for i in range(0, len(seen_images)):
                                seenImg = cv2.cvtColor(seen_images[i], cv2.COLOR_BGR2RGBA)
                                seenImg = Image.fromarray(seenImg)
                                imgtk = ImageTk.PhotoImage(image=seenImg)
                                self.label1.imgtk = imgtk
                                self.label1.configure(image=imgtk) 
                        #else:
                            #pauseFlag = False 
            print("DEBUG: looping caputre frame")
            self.label1.after(10, cap_frame) 


        def quit_program():
                self.root.destroy()
   
        self.root = Tk()
        self.root.geometry("600x800")
        self.root.resizable(0,0)
        left = Frame(self.root, borderwidth=2, relief="solid")
       
        #display seen images
        self.label1 = Label(left)
 
        #to display text
        label2 = Label(left)
        
        #packing all entities
        left.pack(side="left", expand=False, fill="both")
        self.label1.pack(side="left")
        label2.pack(side="left")
        
        self.v = StringVar()
        #cap live video frame
        cap_frame()   
             
        #Display text
        helv36 = font.Font(family="Helvetica",size=16,weight="bold")
        T = Label(label2, textvariable=self.v, font=helv36)
        T.pack() 

        self.root.title("display seen images")   
        self.root.mainloop()
 
app1 = App1()
app2 = App2()

错误输出

Exception in Tkinter callback
Traceback (most recent call last):
  File "/home/sahulphaniraj/anaconda/envs/tflow2/lib/python3.6/tkinter/__init__.py", line 1705, in __call__
    return self.func(*args)
  File "/home/sahulphaniraj/anaconda/envs/tflow2/lib/python3.6/tkinter/__init__.py", line 749, in callit
    func(*args)
  File "gui_fer_v4.py", line 269, in cap_frame
    self.label1.configure(image=imgtk)
  File "/home/sahulphaniraj/anaconda/envs/tflow2/lib/python3.6/tkinter/__init__.py", line 1485, in configure
    return self._configure('configure', cnf, kw)
  File "/home/sahulphaniraj/anaconda/envs/tflow2/lib/python3.6/tkinter/__init__.py", line 1476, in _configure
    self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
_tkinter.TclError: image "pyimage37" doesn't exist

标签: pythonmultithreadingopencvtkinter

解决方案


推荐阅读