The code for this tutorial is here
OpenCV provides a useful, but limited, method of building a GUI. A much more complete system can be achieved with PyQt.
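To follow along you will need PyQt5, OpenCV and numpy installed; the standard PyPI packages should be enough, for example pip install PyQt5 opencv-python numpy.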
The question is, how do we display images? There are quite a few possible routes, but perhaps the easiest is to use QLabel, since it has a setPixmap function. Below is some code that creates two labels, then creates a grey pixmap and displays it in one of the labels. code: staticLabel1.py
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap, QColor
import sys


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Qt static label demo")
        width = 640
        height = 480
        # create the label that holds the image
        self.image_label = QLabel(self)
        # create a text label
        self.textLabel = QLabel('Demo')
        # create a vertical box layout and add the two labels
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # set the vbox layout as the widget's layout
        self.setLayout(vbox)
        # create a grey pixmap
        grey = QPixmap(width, height)
        grey.fill(QColor('darkGray'))
        # set the image label to the grey pixmap
        self.image_label.setPixmap(grey)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
It should look like this:
OK, so how do we display an image? We could use Qt to load the image directly, but I want to do this with OpenCV so it can be integrated into a computer vision app. To do this, load the image with cv2.imread, then convert it to a QPixmap and rescale it. Here is an example:
def convert_cv_qt(self, cv_img):
    """Convert from an opencv image to QPixmap"""
    rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    h, w, ch = rgb_image.shape
    bytes_per_line = ch * w
    convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
    p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
    return QPixmap.fromImage(p)
This is part of a class that defines self.display_width and self.display_height. The full code is here (staticLabel2.py). The result should look like this:
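If you want to see how this slots into a complete program, here is a minimal sketch of what staticLabel2.py might look like. It is an assumption rather than a copy of that file, and the image path 'test.png' is a placeholder for your own file.

from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import cv2
import sys


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Qt static image demo")
        self.display_width = 640
        self.display_height = 480
        # label that will hold the image, plus a caption underneath
        self.image_label = QLabel(self)
        self.textLabel = QLabel('Image')
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        self.setLayout(vbox)
        # load the image with OpenCV, convert it and display it
        cv_img = cv2.imread('test.png')  # placeholder path - use your own image
        self.image_label.setPixmap(self.convert_cv_qt(cv_img))

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap (as above)"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())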
So can we now use this to display video? We could open the webcam and update the image every frame. liveLabel1.py attempts to do this, but it does not work: the video capture loop blocks the Qt event loop, so the message system Qt uses to do things like draw the widgets never gets a chance to run. The fix is to capture from the webcam in a separate thread.
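To see why the naive version fails, here is a rough sketch of the kind of loop liveLabel1.py presumably runs straight from App.__init__ (the details are an assumption, not the actual file):

        # sketch of the blocking approach - this loop runs on the GUI thread,
        # so control never returns to Qt's event loop and nothing is repainted
        cap = cv2.VideoCapture(0)
        while True:
            ret, cv_img = cap.read()
            if ret:
                self.image_label.setPixmap(self.convert_cv_qt(cv_img))

Moving the capture loop into a QThread and handing each frame back to the GUI thread through a signal fixes this. Here's the code: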
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np


class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def run(self):
        # capture from web cam
        cap = cv2.VideoCapture(0)
        while True:
            ret, cv_img = cap.read()
            if ret:
                self.change_pixmap_signal.emit(cv_img)


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Qt live label demo")
        self.display_width = 640
        self.display_height = 480
        # create the label that holds the image
        self.image_label = QLabel(self)
        self.image_label.resize(self.display_width, self.display_height)
        # create a text label
        self.textLabel = QLabel('Webcam')
        # create a vertical box layout and add the two labels
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # set the vbox layout as the widget's layout
        self.setLayout(vbox)
        # create the video capture thread
        self.thread = VideoThread()
        # connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # start the thread
        self.thread.start()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
Note that we send a numpy array through PyQt's signal/slot mechanism to ensure thread safety. This should now display live images from your webcam. There might, however, be an error when you close the app, because the capture device has not been shut down correctly. (On my Mac this does not happen, but on my Linux machine it does.) To solve this we need to override App's closeEvent function and signal the thread to shut down.
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np


class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def __init__(self):
        super().__init__()
        self._run_flag = True

    def run(self):
        # capture from web cam
        cap = cv2.VideoCapture(0)
        while self._run_flag:
            ret, cv_img = cap.read()
            if ret:
                self.change_pixmap_signal.emit(cv_img)
        # shut down capture system
        cap.release()

    def stop(self):
        """Sets run flag to False and waits for thread to finish"""
        self._run_flag = False
        self.wait()


class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Qt live label demo")
        self.display_width = 640
        self.display_height = 480
        # create the label that holds the image
        self.image_label = QLabel(self)
        self.image_label.resize(self.display_width, self.display_height)
        # create a text label
        self.textLabel = QLabel('Webcam')
        # create a vertical box layout and add the two labels
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # set the vbox layout as the widget's layout
        self.setLayout(vbox)
        # create the video capture thread
        self.thread = VideoThread()
        # connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # start the thread
        self.thread.start()

    def closeEvent(self, event):
        # stop the video thread before the window closes so the
        # capture device is released cleanly
        self.thread.stop()
        event.accept()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
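A final note on extending this: because each frame arrives in VideoThread.run as an ordinary numpy array, that method is the natural place to add any OpenCV processing before the frame is emitted. As a sketch (the Canny edge detector here is just an arbitrary illustration, not part of the tutorial code), run could be modified like this:

    def run(self):
        cap = cv2.VideoCapture(0)
        while self._run_flag:
            ret, cv_img = cap.read()
            if ret:
                # example processing step: detect edges in the frame
                gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
                edges = cv2.Canny(gray, 100, 200)
                # convert back to 3 channels so convert_cv_qt still works
                cv_img = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
                self.change_pixmap_signal.emit(cv_img)
        # shut down capture system
        cap.release()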