上一篇博客(Python开发非金属涡轮表面缺陷检测上位机)我们主要讲述了对涡轮表面缺陷检测上位机的一些开发流程,并对涡轮表面缺陷检测的效果进行了展示。这一篇博客我们主要对所开发的上位机的程序进行一些讲解(文章内附带上位机程序)。

我们首先将上位机开发后的功能演示视频进行一下展示,然后后面再详细介绍上位机代码,上位机功能演示视频如图1所示。

图1 上位机功能演示视频

上位机代码:

上位机文件格式如图2所示。

1、考虑后面将上位机软件进行打包,所以进行了虚拟环境配置,当然大家电脑本地有配置好的Python环境的话可以不用关注这个;

2、文件/models和/models_tiny是训练好的两个模型,分开进行存放;

3、文件/outputData保存的是检测之后的图片或者视频,上位机软件中也包含了数据回放功能;

4、文件/preloadImages为上位机软件预加载图片;

5、文件/BEEP.mp3为录制的警报声音,当检测到涡轮缺陷时上位机界面提示NG,并发出警报声音提示;

6、文件/PyQt_Form.py和/TurbineSurfaceDefectDetection.py为上位机程序,下面主要对其进行介绍;

图2 上位机文件

下面是完整的主程序,在程序内部做了详细的批注,为便于对上位机界面进行调整,主程序和上位机软件界面搭载程序分为两个文件。

1、上位机软件使用PyQt5进行界面搭载;

2、软件界面优化采用的是qdarkstyle库函数;

3、图像处理采用的opencv-python(opencv可以直接加载YOLO训练好的模型);

4、软件中包含警报提示,声音输出采用pygame;

主程序:

1)需要加载的库函数,本地缺少的库函数可以使用pip进行安装。

#coding: UTF-8
import sys, os
# When running as a PyInstaller-frozen executable, sys.frozen is set and the
# bundled native libraries live in the sys._MEIPASS temp directory — prepend
# it to PATH so DLLs (OpenCV, Qt plugins, ...) resolve at runtime.
if hasattr(sys, 'frozen'):
    os.environ['PATH'] = sys._MEIPASS + ";" + os.environ['PATH']
# PyQt GUI framework
from PyQt5 import QtWidgets
from PyQt_Form import Ui_Form  # generated UI layout class (PyQt_Form.py)
from PyQt5.QtWidgets import QFileDialog, QMessageBox # file picker / pop-up dialogs
from PyQt5.QtGui import QImage, QPixmap   # displaying 2-D image data
from PyQt5.QtCore import Qt, QTimer
import qdarkstyle               # dark stylesheet for the whole application
import cv2 as cv                # OpenCV: capture, drawing and DNN inference
import numpy as np
import threading                # background inference / alarm worker threads
import inspect
import ctypes
import datetime 
import time
from pygame import mixer        # alarm sound playback

2)在类函数中预定义全局变量(提示:在程序开发过程中应尽量减少全局变量使用,在类函数中定义全局变量,可与主函数分别存放于不同的py文件中)。

# Predefined "global" parameters, grouped as class attributes so they can be
# shared across modules without loose module-level globals.
class global_param_init():
    """Shared configuration and state for the detector (class-level attributes)."""
    # Initialize the parameters
    confThreshold = 0.2     # Confidence threshold
    nmsThreshold = 0.4      # Non-maximum suppression threshold
    inpWidth = 416          # Width of network's input image
    inpHeight = 416         # Height of network's input image
    PLC_send2me = 0         # Flag 0: PLC signal received (new workpiece placed)
                            # Flag 1: workpiece is ready
                            # Flag 2: workpiece inspection finished
                    
    # modelConfiguration = "models/yolov3-tiny.cfg"
    # modelWeights = "models/yolov3-tiny.weights"
    # labels = "models/coco.names"

    # For still images 2,5,6  TODO: ~75% accuracy, poor real-time performance
    modelConfiguration = "models/my_yolov4.cfg"
    modelWeights = "models/my_yolov4_best.weights"
    labels = "models/coco.names"

    # # For videos 1,6  TODO: good real-time performance, lower accuracy (~55%)
    # modelConfiguration = "models_tiny/my_yolov4_tiny.cfg"
    # modelWeights = "models_tiny/my_yolov4_tiny_best.weights"
    # labels = "models_tiny/coco.names"

    # image_video_sign = 0    # flag 0: initial state
    #                         # flag 1: image input
    #                         # flag 2: video input / camera opened
    # Class-label list; filled by MyPyQT_Form.getClasses().
    classes = None
    # NOTE: these two calls run once at class-definition (import) time and
    # require BEEP.mp3 next to the executable — a failure here aborts import.
    mixer.init()
    mixer.music.load('BEEP.mp3')

3)软件界面初始化。其中加载YOLO模型使用的是opencv的DNN模块,主要用到 cv.dnn.DNN_BACKEND_OPENCV 和 cv.dnn.DNN_TARGET_CPU 两个常量来配置推理后端。

# Run inference through OpenCV's own DNN backend on the CPU.
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# self.net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)    # faster (OpenCL / GPU)

其中:

          函数 def comboxSelectionchange(self):  # 定义下拉框用于选择训练好的深度学习模型。

          函数 def getClasses(self, classesFile):   # 定义训练识别目标种类个数。

class MyPyQT_Form(QtWidgets.QWidget,Ui_Form):
    """Main window: wires the generated Ui_Form widgets to the detection logic."""
    def __init__(self):
        """Build the UI, load the initially selected YOLO model, reset state."""
        super(MyPyQT_Form,self).__init__()
        self.setupUi(self)
        # pixmap = QPixmap("F:\Study_self\python test\PYQT_template\kaiji.jpg")  # load splash image from an absolute path
        # self.label.setPixmap(pixmap)  # show the image on the label
        # self.label.setScaledContents(True)  # let the image scale with the label
        self.comboBox.currentIndexChanged.connect(self.comboxSelectionchange)
     
        # Load the Darknet model currently selected in global_param_init via
        # OpenCV's DNN module, running on the CPU backend.
        self.net = cv.dnn.readNetFromDarknet(cfgFile=global_param_init.modelConfiguration, darknetModel=global_param_init.modelWeights)
        self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
        # self.net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)
        self.classes = self.getClasses(classesFile=global_param_init.labels)
        # Flag: whether a capture source (camera or file) is currently open.
        self.cap_flag = False   

    def comboxSelectionchange(self):
        """
        Combo-box slot: point global_param_init at the files of the chosen
        model, then reload the Darknet network and its class labels.
        """
        # Model name -> (cfg, weights, class-name file).
        model_files = {
            # For still images 2,5,6  TODO: ~75% accuracy, poor real-time performance
            "YOLOV4": ("models/my_yolov4.cfg",
                       "models/my_yolov4_best.weights",
                       "models/coco.names"),
            # For videos 1,6  TODO: good real-time performance, lower accuracy (~55%)
            "YOLOV4tiny": ("models_tiny/my_yolov4_tiny.cfg",
                           "models_tiny/my_yolov4_tiny_best.weights",
                           "models_tiny/coco.names"),
        }
        selection = self.comboBox.currentText()
        if selection in model_files:
            (global_param_init.modelConfiguration,
             global_param_init.modelWeights,
             global_param_init.labels) = model_files[selection]

        # Rebuild the network from the newly selected files (CPU backend).
        self.net = cv.dnn.readNetFromDarknet(cfgFile=global_param_init.modelConfiguration, darknetModel=global_param_init.modelWeights)
        self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
        # self.net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)
        self.classes = self.getClasses(classesFile=global_param_init.labels)
        # Any previously opened capture is considered closed after a model switch.
        self.cap_flag = False

    # Load names of classes
    def getClasses(self, classesFile):
        """Read one class name per line from classesFile; cache the list on
        global_param_init.classes and return it."""
        # Reset first so a failed read leaves no stale labels behind.
        global_param_init.classes = None
        with open(classesFile, 'rt') as name_file:
            class_names = name_file.read().rstrip('\n').split('\n')
        global_param_init.classes = class_names
        return class_names

4)opencv对识别后的目标物体绘制BBox包围框,并在BBox的上方提示识别的类别Label(可对BBox的粗细,颜色等,Label的字体、大小、颜色等进行更改)。

####################################################################################
# Define some methods
####################################################################################
    def getOutputsNames(self, net_):
        """
        Return the names of the network's output layers.

        net_: a cv.dnn Net (anything exposing getLayerNames() and
              getUnconnectedOutLayers()).
        """
        # Get the names of all the layers in the network
        layersNames = net_.getLayerNames()
        # getUnconnectedOutLayers() returns Nx1 arrays (e.g. [[200], [227]])
        # in OpenCV < 4.5.4 but a flat array (e.g. [200, 227]) in newer
        # releases; indices are 1-based either way. Handle both layouts —
        # the original `i[0]` raises IndexError on the flat form.
        names = []
        for i in net_.getUnconnectedOutLayers():
            idx = int(i[0]) if hasattr(i, '__len__') else int(i)
            names.append(layersNames[idx - 1])
        return names

    def drawPred(self, classId, conf, left, top, right, bottom):
        """
        Draw one predicted bounding box on self.frame and caption it.

        Returns the caption string ("<class>:<conf>" when class names are
        loaded, otherwise just the formatted confidence).
        """
        # Bounding box in blue, thickness 5.
        cv.rectangle(self.frame, (left, top), (right, bottom), (255, 0, 0), 5)

        caption = '%.2f' % conf
        if self.classes:
            # Prefix the confidence with the class name.
            assert(classId < len(self.classes))
            caption = '%s:%s' % (self.classes[classId], caption)

        # Keep the caption on-screen when the box touches the top edge.
        text_size, _baseline = cv.getTextSize(caption, cv.FONT_HERSHEY_SIMPLEX, 0.5, 2)
        anchor_y = max(top, text_size[1])
        cv.putText(self.frame, caption, (left, anchor_y), cv.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 4)
        return caption

    def postprocess(self, frame, outs):
        """
        Filter raw YOLO outputs with the confidence threshold and NMS, and
        draw the surviving boxes on self.frame.

        frame: the frame the detections refer to (used only for its size).
        outs:  output-layer blobs; each row is
               [cx, cy, w, h, objectness, class scores...] in relative coords.
        Returns (kept_boxes, boxes_with_positive_confidence) — both serve as
        "defect present" signals for the caller.
        """
        frameHeight = frame.shape[0]
        frameWidth = frame.shape[1]

        # Scan through all the bounding boxes output from the network and keep only the
        # ones with high confidence scores. Assign the box's class label as the class with the highest score.
        classIds = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                classId = np.argmax(scores)
                confidence = scores[classId]
                if confidence > global_param_init.confThreshold:
                    # Convert relative center/size to absolute top-left + size.
                    center_x = int(detection[0] * frameWidth)
                    center_y = int(detection[1] * frameHeight)
                    width = int(detection[2] * frameWidth)
                    height = int(detection[3] * frameHeight)
                    left = int(center_x - width / 2)
                    top = int(center_y - height / 2)
                    classIds.append(classId)
                    confidences.append(float(confidence))
                    boxes.append([left, top, width, height])

        # Perform non maximum suppression to eliminate redundant overlapping boxes with
        # lower confidences.
        indices = cv.dnn.NMSBoxes(boxes, confidences, global_param_init.confThreshold, global_param_init.nmsThreshold)
        label_cout = 0
        # NMSBoxes returns Nx1 arrays in OpenCV < 4.5.4 and a flat array in
        # newer releases; flatten so both layouts index correctly (the
        # original `i = i[0]` fails on the flat form).
        for i in np.asarray(indices, dtype=int).reshape(-1):
            left, top, width, height = boxes[i]
            return_label = self.drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
            if float(return_label.split(":")[-1]) > 0.0:
                label_cout = label_cout + 1
        return len(indices), label_cout

5)下面函数为上位机界面中各按钮控件的功能定义。

其中:   

          函数 def program_init(self):                             # 将上位机软件进行初始化,即将所有的参数都进行初始化;
          函数 def reback_images_videos(self):           # 函数中定义定时器QTimer,调用函数 timer_reback_video_display(self): ,将保存的图片或者视频文件进行重新播放,实现数据回放的功能;
          函数 def timer_reback_video_display(self):  # QTimer,在上位机界面中播放录制的数据;
          函数 def load_images_videos(self):               # 加载图片或者视频离线数据,支持jpg、mp4等众多格式;
          函数 def load_webcam(self):                           # 打开本地摄像头,调用函数 camera_video_threading(self): 进行实时缺陷检测识别;
          函数 def camera_video_threading(self):        # threading,检测涡轮是否存在缺陷,如果加载的是图片只检测一次即可判断结果是OK/NG;如果是数据流格式(视频或者WebCam),那么需要连续检测两帧都是NG状态才在上位机界面提示NG,并发出警报声音;
          函数 def alarm_sound_threading(self):          # 通过pygame库函数,发出警报声音;
          函数 def video_display(self, frame_):             # 上位机界面中显示视频;
          函数 def OKOrNG_remind(self, sign):            # 上位机界面提示OK/NG;
####################################################################################
####################################################################################
####################################################################################
    def program_init(self):
        """Emergency-stop button handler: reset the whole detection state."""
        print("program_init")
        # NOTE(review): quit_all is not defined in this file — presumably a
        # teardown helper defined elsewhere; confirm before refactoring.
        self.quit_all(None)

    def reback_images_videos(self):
        """
        "Data replay" handler: let the user pick a previously saved result
        file under outputData/ and replay it on the display label.
        """
        print("save_images_videos")
        self.quit_all(None)
        try:
            reback_files_path = os.path.join(os.getcwd(), "outputData")
            fileName_choose, filetype = QFileDialog.getOpenFileName(
                self,
                "请选择PNG或JPG文件",                  # dialog title (typo fixed: 情 -> 请)
                reback_files_path,                     # starting directory
                "All Files (*);;Text Files (*.jpg)")   # extension filter, ';;'-separated
            # Bug fix: `is not ""` compared identity, not equality — use !=.
            if fileName_choose != "":
                self.capReback = cv.VideoCapture(fileName_choose)
                # A QTimer drives the playback frame by frame on the GUI thread.
                self.timer_reback_display = QTimer(self)
                self.timer_reback_display.timeout.connect(self.timer_reback_video_display)
                self.timer_reback_display.start(20)    # 20 ms interval (~50 fps)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
            print("数据追溯失败")

    def timer_reback_video_display(self):
        """QTimer slot: show the next replay frame; stop at end of stream."""
        try:
            self.hasFrameReback, self.frameReback = self.capReback.read()
            if self.hasFrameReback:
                self.video_display(self.frameReback)
            else:
                # Stream exhausted: release the capture and stop the timer.
                self.capReback.release()
                self.timer_reback_display.stop()
        except Exception:
            # Narrowed from a bare `except:`; still best-effort cleanup.
            print("reback error")
            self.capReback.release()
            self.timer_reback_display.stop()
    
    def load_images_videos(self):
        """
        "Offline data" handler: let the user pick an image/video file and
        start a background inference thread on it. Sets the PLC flag to 1
        ("workpiece ready").
        """
        print("load_images_videos")
        self.quit_all(None)
        global_param_init.PLC_send2me = 1

        try:
            try:
                fileName_choose, filetype = QFileDialog.getOpenFileName(
                    self,
                    "请选择PNG或JPG文件",                  # dialog title (typo fixed: 情 -> 请)
                    os.getcwd(),                           # starting directory
                    "All Files (*);;Text Files (*.jpg)")   # extension filter, ';;'-separated
                print(fileName_choose)
                # Bug fix: `is not ""` compared identity, not equality — use !=.
                if fileName_choose != "":
                    self.cap = cv.VideoCapture(fileName_choose)
                    self.cap_flag = True    # self.cap.isOpened()

                    # Run detection in a worker thread so the UI stays responsive.
                    try:
                        self.threading_camera = threading.Thread(target=self.camera_video_threading)
                        self.threading_camera.start()
                    except Exception:
                        print("图片/视频识别出现错误")
                else:
                    # "waring" -> "warning" for consistency with the other dialogs.
                    QMessageBox.critical(self, "warning", "系统未检测到任何文件")
            except Exception:
                fileName_choose = ""
                QMessageBox.critical(self, "warning", "打开文件失败")

        except Exception:
            QMessageBox.critical(self, "warning", "离线数据加载程序出现严重错误")

    def load_webcam(self):
        """"Load camera" handler: open the local webcam and start live detection."""
        self.quit_all(None)

        if self.cap_flag:
            QMessageBox.critical(self, "warning", "已打开摄像头 \n 请勿重复打开")
        else:
            # self.quit_all(None)
            # self.stackedWidget.setCurrentIndex(2)
            try:
                # CAP_DSHOW: DirectShow backend, avoids the slow MSMF startup on Windows.
                self.cap = cv.VideoCapture(0, cv.CAP_DSHOW)
                self.cap_flag = self.cap.isOpened()

                # Run detection in a worker thread so the UI stays responsive.
                if self.cap_flag:
                    try:
                        self.threading_camera = threading.Thread(target=self.camera_video_threading)
                        self.threading_camera.start()
                        # self.camera_video_threading()
                    except Exception:
                        # Narrowed from bare `except:`.
                        QMessageBox.critical(self, "warning", "未知错误 YB-101")
            except Exception:
                QMessageBox.critical(self, "warning", "相机程序出现严重错误")

    def camera_video_threading(self):
        """
        Inference worker (runs in a background thread).

        Reads frames from self.cap while self.cap_flag is set, runs the YOLO
        network on each frame, updates the OK/NG verdict, triggers the alarm,
        and records the annotated result under outputData/ (a writer named
        *.jpg for a single image, *.avi for a stream).
        """
        NG_sign_cout = 0    # number of NG (defect) frames for the current workpiece
        input_data_cout = 0 # frames inspected for the current workpiece (restarts at 0 for a new one)
        alarm_sound_sign = 0 # 1 = alarm sounding, 0 = alarm off
        while self.cap_flag:
            try: 
                # get frame from the video
                self.hasFrame, self.frame = self.cap.read()
                # print("self.cap.read()", self.hasFrame, self.frame.shape)
                if self.hasFrame:
                    # Create a 4D blob from a frame.
                    blob = cv.dnn.blobFromImage(self.frame, 1/255, (global_param_init.inpWidth, global_param_init.inpHeight), [0,0,0], 1, crop=False)
                    # Sets the input to the network
                    self.net.setInput(blob)

                    # Runs the forward pass to get output of the output layers
                    outs = self.net.forward(self.getOutputsNames(self.net))

                    # Remove the bounding boxes with low confidence
                    NG_sign, label_cout_sign = self.postprocess(self.frame, outs)

                    if NG_sign > 0 and label_cout_sign > 0: # NG_sign: number of defects found in this frame
                        NG_sign_cout = NG_sign_cout + 1

                    # Two or more defective frames -> show NG immediately.
                    if NG_sign_cout >= 2:
                        self.label_2.setStyleSheet("background-color: red ")
                        self.label_2.setText("NG")
                        self.label_2.setAlignment(Qt.AlignCenter) 

                    # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
                    t, _ = self.net.getPerfProfile()
                    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
                    cv.putText(self.frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

                    self.video_display(self.frame)
                    # print("global_param_init.PLC_send2me, input_data_cout", global_param_init.PLC_send2me, input_data_cout)
                    input_data_cout = input_data_cout + 1   # frame counter for the current workpiece
                    # PLC flag 1: workpiece ready — keep recording; alarm once if NG.
                    if global_param_init.PLC_send2me == 1:
                        if (input_data_cout > 2):
                            try:
                                vid_writer.write(self.frame.astype(np.uint8))
                            except:
                                print("reback error YB-104")
                            if (NG_sign_cout > 1):
                                self.OKOrNG_remind(2)
                                alarm_sound_sign = alarm_sound_sign + 1

                                # Start the alarm thread only once per workpiece.
                                if alarm_sound_sign == 1:
                                    self.threading_alarm = threading.Thread(target=self.alarm_sound_threading)
                                    self.threading_alarm.start()

                    if global_param_init.PLC_send2me == 2:   # PLC signals inspection finished: publish verdict, then clear NG counters
                        # Single image: one NG frame suffices; stream: needs > 1 NG frame.
                        if (input_data_cout == 1) and (NG_sign_cout > 0):
                            self.OKOrNG_remind(2)
                            self.threading_alarm = threading.Thread(target=self.alarm_sound_threading)
                            self.threading_alarm.start()
                            # playsound("BEEP.wav")
                        elif (input_data_cout > 1) and (NG_sign_cout > 1):
                            self.OKOrNG_remind(2)
                            self.threading_alarm = threading.Thread(target=self.alarm_sound_threading)
                            self.threading_alarm.start()
                            # playsound("BEEP.wav")
                        else:
                            self.OKOrNG_remind(1)
                        try:
                            vid_writer.release()
                        except:
                            print("vid_writer.release() 未知错误 YB-103")
                        try:
                            mixer.music.stop()
                        except:
                            print("警报声音关闭失败")
                        try:
                            # NOTE(review): self.stop_thread is not defined in this
                            # file — presumably a ctypes-based thread killer defined
                            # elsewhere; confirm before relying on it.
                            self.stop_thread(self.threading_alarm)
                        except:
                            print("警报退出失败1(定时器)")
                        global_param_init.PLC_send2me = 0
                        input_data_cout = 0 
                        NG_sign_cout = 0
                        alarm_sound_sign = 0 
                        
                    if input_data_cout == 1:
                        # TODO: persist the result — first frame goes to a .jpg-named writer.
                        # NOTE(review): cv.VideoWriter with a .jpg target is unusual —
                        # confirm it actually produces a readable file.
                        outputFile_video = str(datetime.datetime.now()).replace(":", "_").replace(" ", "_") + ".jpg"
                        old_outputFilePath = os.path.join(os.getcwd(), "outputData")
                        if not os.path.exists(old_outputFilePath):
                            os.mkdir(old_outputFilePath)
                        new_outputFilePath = os.path.join(old_outputFilePath, outputFile_video)
                        print("new_outputFilePath1", new_outputFilePath)
                        vid_writer = cv.VideoWriter(new_outputFilePath, cv.VideoWriter_fourcc('M','J','P','G'), 30, (round(self.cap.get(cv.CAP_PROP_FRAME_WIDTH)),round(self.cap.get(cv.CAP_PROP_FRAME_HEIGHT))))
                        single_frame_image = self.frame

                    if input_data_cout == 2:
                        # TODO: persist the result — from the second frame on, record the stream to an .avi file.
                        outputFile_video = str(datetime.datetime.now()).replace(":", "_").replace(" ", "_") + ".avi"
                        old_outputFilePath = os.path.join(os.getcwd(), "outputData")
                        if not os.path.exists(old_outputFilePath):
                            os.mkdir(old_outputFilePath)
                        new_outputFilePath = os.path.join(old_outputFilePath, outputFile_video)
                        print("new_outputFilePath2", new_outputFilePath)
                        vid_writer = cv.VideoWriter(new_outputFilePath, cv.VideoWriter_fourcc('M','J','P','G'), 30, (round(self.cap.get(cv.CAP_PROP_FRAME_WIDTH)),round(self.cap.get(cv.CAP_PROP_FRAME_HEIGHT))))
                else:
                    # End of stream (image fully processed or video exhausted):
                    # publish the final OK/NG verdict, flush the writer, reset state.
                    if input_data_cout == 1:
                        if NG_sign_cout > 0:
                            self.OKOrNG_remind(2)  
                            self.threading_alarm = threading.Thread(target=self.alarm_sound_threading)
                            self.threading_alarm.start()
                            # playsound("BEEP.wav")
                        else:
                            self.OKOrNG_remind(1)
                        vid_writer.write(single_frame_image.astype(np.uint8))    
                        vid_writer.release()
            
                    elif input_data_cout > 1:
                        if NG_sign_cout <= 1:
                            self.OKOrNG_remind(1)
                        vid_writer.release()
                    try:
                        self.stop_thread(self.threading_alarm)
                    except:
                        print("警报退出失败1(定时器)")
                    global_param_init.PLC_send2me = 0
                    input_data_cout = 0 
                    NG_sign_cout = 0
                    alarm_sound_sign = 0 
                    break
            except:
                # Any unexpected error: silence the alarm, reset counters, leave the loop.
                try:
                    mixer.music.stop()
                except:
                    print("警报声音关闭失败")
                try:
                    self.stop_thread(self.threading_alarm)
                except:
                    print("警报退出失败1(定时器)")
                # global_param_init.PLC_send2me = 0
                input_data_cout = 0 
                NG_sign_cout = 0
                alarm_sound_sign = 0 
                break
    
    def alarm_sound_threading(self):
        """Alarm-thread worker: play the BEEP.mp3 loaded at import time."""
        try:
            mixer.music.play()
        except:
            # Best effort: if playback cannot start, make sure nothing keeps sounding.
            mixer.music.stop()

    def video_display(self, frame_):
        """
        Show an OpenCV frame on the main QLabel, scaled to the label.

        frame_: HxWx3 BGR color image or HxW 8-bit grayscale image.
        """
        if frame_.ndim == 3:
            # Qt expects RGB byte order; OpenCV delivers BGR.
            img_show = cv.cvtColor(frame_, cv.COLOR_BGR2RGB)
            QImage_Format = QImage.Format_RGB888
        elif frame_.ndim == 2:
            # Bug fix: COLOR_BGR2RGB rejects single-channel input — a
            # grayscale frame can be handed to QImage directly.
            img_show = frame_
            QImage_Format = QImage.Format_Grayscale8
        else:
            # Previously this fell through to an UnboundLocalError; fail clearly.
            raise ValueError("video_display expects a 2-D gray or 3-D BGR frame")
        img = QImage(img_show.flatten(), img_show.shape[1], img_show.shape[0],
                    img_show.strides[0], QImage_Format)
        # img = img.rgbSwapped()    # would swap the red and blue channels
        self.label.setPixmap(QPixmap.fromImage(img))
        self.label.setScaledContents(True)

    def OKOrNG_remind(self, sign):
        """
        Show the inspection verdict on the status label.

        sign == 2 -> red "NG"; sign == 1 -> green "OK"; anything else clears it.
        """
        indicator = self.label_2
        if sign == 2:
            indicator.setText("NG")
            indicator.setStyleSheet("background-color: red ")
            indicator.setAlignment(Qt.AlignCenter)
            return
        if sign == 1:
            indicator.setStyleSheet("background-color: green ")
            indicator.setText("OK")
            indicator.setAlignment(Qt.AlignCenter)
            return
        indicator.setText("")
        indicator.setStyleSheet("background-color:   ")

上位机软件界面程序:

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'e:\Study_self\python_lesson_ybsteer\object_detection_yolo_opencv-wzf\ENV\PyQt_Form.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_Form(object):
    """
    Auto-generated window layout (pyuic5 output from PyQt_Form.ui).

    Do not edit by hand beyond comments — regenerate from the .ui file
    instead; the generator header warns manual changes are lost.
    """
    def setupUi(self, Form):
        """Build all widgets, lay them out, and wire button signals on Form."""
        Form.setObjectName("Form")
        Form.resize(788, 509)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("preloadImages/favicon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Form.setWindowIcon(icon)
        # NOTE(review): Qt clamps window opacity to [0.0, 1.0], so 3.0 behaves
        # as fully opaque — confirm the intended value.
        Form.setWindowOpacity(3.0)
        self.gridLayout = QtWidgets.QGridLayout(Form)
        self.gridLayout.setObjectName("gridLayout")
        # Left pane: stretchable frame holding the video/image display label.
        self.frame = QtWidgets.QFrame(Form)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
        self.frame.setSizePolicy(sizePolicy)
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.frame)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setSpacing(0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Main display label; the application paints video frames onto it.
        self.label = QtWidgets.QLabel(self.frame)
        font = QtGui.QFont()
        font.setFamily("AcadEref")
        font.setPointSize(28)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        self.gridLayout.addWidget(self.frame, 0, 0, 1, 1)
        # Right pane: fixed-width control column (combo box, buttons, verdict).
        self.frame_2 = QtWidgets.QFrame(Form)
        self.frame_2.setMaximumSize(QtCore.QSize(150, 16777215))
        self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_2.setObjectName("frame_2")
        # "Offline data" button.
        self.pushButton = QtWidgets.QPushButton(self.frame_2)
        self.pushButton.setGeometry(QtCore.QRect(10, 60, 131, 51))
        font = QtGui.QFont()
        font.setFamily("宋体")
        font.setPointSize(18)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        # "Load camera" button.
        self.pushButton_3 = QtWidgets.QPushButton(self.frame_2)
        self.pushButton_3.setGeometry(QtCore.QRect(10, 120, 131, 51))
        font = QtGui.QFont()
        font.setFamily("宋体")
        font.setPointSize(18)
        self.pushButton_3.setFont(font)
        self.pushButton_3.setObjectName("pushButton_3")
        # "Data replay" button.
        self.pushButton_4 = QtWidgets.QPushButton(self.frame_2)
        self.pushButton_4.setGeometry(QtCore.QRect(10, 180, 131, 51))
        font = QtGui.QFont()
        font.setFamily("宋体")
        font.setPointSize(18)
        self.pushButton_4.setFont(font)
        self.pushButton_4.setObjectName("pushButton_4")
        # Emergency-stop button (red background).
        self.pushButton_5 = QtWidgets.QPushButton(self.frame_2)
        self.pushButton_5.setGeometry(QtCore.QRect(10, 240, 131, 91))
        font = QtGui.QFont()
        font.setFamily("宋体")
        font.setPointSize(18)
        self.pushButton_5.setFont(font)
        self.pushButton_5.setStyleSheet("background-color: rgb(255, 35, 39);")
        self.pushButton_5.setObjectName("pushButton_5")
        # OK/NG verdict label, driven by MyPyQT_Form.OKOrNG_remind.
        self.label_2 = QtWidgets.QLabel(self.frame_2)
        self.label_2.setGeometry(QtCore.QRect(10, 340, 131, 141))
        font = QtGui.QFont()
        font.setFamily("宋体")
        font.setPointSize(48)
        self.label_2.setFont(font)
        self.label_2.setText("")
        self.label_2.setObjectName("label_2")
        # Model-selection combo box (item texts filled in retranslateUi).
        self.comboBox = QtWidgets.QComboBox(self.frame_2)
        self.comboBox.setGeometry(QtCore.QRect(10, 10, 131, 41))
        font = QtGui.QFont()
        font.setFamily("宋体")
        font.setPointSize(16)
        self.comboBox.setFont(font)
        self.comboBox.setObjectName("comboBox")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.gridLayout.addWidget(self.frame_2, 0, 1, 1, 1)

        self.retranslateUi(Form)
        # Wire the buttons to the handlers the Form (MyPyQT_Form) implements.
        self.pushButton.clicked.connect(Form.load_images_videos)
        self.pushButton_3.clicked.connect(Form.load_webcam)
        self.pushButton_4.clicked.connect(Form.reback_images_videos)
        self.pushButton_5.clicked.connect(Form.program_init)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Apply all user-visible (translatable) texts to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "涡轮表面缺陷检测"))
        self.label.setText(_translate("Form", "深度学习\n"
"涡轮表面缺陷检测"))
        self.pushButton.setText(_translate("Form", "离线数据"))
        self.pushButton_3.setText(_translate("Form", "加载相机"))
        self.pushButton_4.setText(_translate("Form", "数据回放"))
        self.pushButton_5.setText(_translate("Form", "急  停"))
        self.comboBox.setItemText(0, _translate("Form", "YOLOV4"))
        self.comboBox.setItemText(1, _translate("Form", "YOLOV4tiny"))

前面也提到本程序支持二次开发,大家也可以将自己训练好的模型(如人脸识别等)放置到/models文件夹进行替换,便可以实现对应的检测识别功能。

写在最后

目前程序仍然存在很多需要优化的地方,还有就是关于博客如何写便于大家阅读,大家有什么需要交流或者有好的建议可以在评论区留言哦~