YOLO配合PYQT做自定义虚拟电子围栏
【摘要】 YOLO配合PYQT做自定义虚拟电子围栏 介绍YOLO(You Only Look Once)是一种实时目标检测算法,其速度和准确性使其成为计算机视觉领域的热门选择。PyQt是Python的一个库,用于开发图形用户界面(GUI)。将YOLO与PyQt结合,可以实现自定义绘制多边形的虚拟电子围栏,用于监控特定区域内的物体移动。 应用使用场景仓库管理:监控高价值货物区域,防止未经授权的访问。安...
YOLO配合PYQT做自定义虚拟电子围栏
介绍
YOLO(You Only Look Once)是一种实时目标检测算法,其速度和准确性使其成为计算机视觉领域的热门选择。PyQt是Python的一个库,用于开发图形用户界面(GUI)。将YOLO与PyQt结合,可以实现自定义绘制多边形的虚拟电子围栏,用于监控特定区域内的物体移动。
应用使用场景
- 仓库管理:监控高价值货物区域,防止未经授权的访问。
- 安防监控:在住宅或商业区域内设置虚拟围栏,检测未经许可的进入。
- 交通管理:监控特定道路或停车区域,自动识别违规停放车辆。
- 农业监控:在农田间设置围栏,监测牲畜或野生动物的活动。
需要安装OpenCV库才能运行这些示例:
pip install opencv-python
仓库管理
监控高价值货物区域,防止未经授权的访问:
import cv2
import numpy as np

# Load the pre-trained YOLO model and its configuration files.
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns plain scalars in OpenCV >= 4.5.4 but
# nested 1-element arrays in older releases; flatten() handles both shapes
# (the original `i[0] - 1` indexing crashes on modern OpenCV).
output_layers = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

# Open the video source to monitor.
cap = cv2.VideoCapture("warehouse_feed.mp4")
def is_in_high_value_area(x, y, x_min=100, x_max=300, y_min=100, y_max=300):
    """Return True if the point (x, y) lies strictly inside the high-value
    goods region.

    The region defaults to the original hard-coded rectangle
    (100, 100)-(300, 300); the bounds are now parameters so other zones can
    reuse the predicate without editing it.

    Args:
        x: horizontal pixel coordinate of a detection centre.
        y: vertical pixel coordinate of a detection centre.
        x_min, x_max, y_min, y_max: exclusive rectangle bounds in pixels.
    """
    # Bounds are exclusive, matching the original chained comparisons.
    return x_min < x < x_max and y_min < y < y_max
# Process frames until the stream ends or the user presses 'q'.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    height, width, channels = frame.shape
    # Normalise the frame into a 416x416 blob (scale 1/255, BGR->RGB swap).
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:  # confidence threshold; tune as needed
                # YOLO emits centre/size normalised to [0, 1]; scale to pixels.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                # Flag only detections whose centre falls in the guarded zone.
                if is_in_high_value_area(center_x, center_y):
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(frame, "Unauthorized Access", (x, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
    cv2.imshow('Warehouse Monitor', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
安防监控
设置虚拟围栏,检测未经许可的进入:
import cv2
import numpy as np

# Load the pre-trained YOLO model and its configuration files.
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns plain scalars in OpenCV >= 4.5.4 but
# nested 1-element arrays in older releases; flatten() handles both shapes
# (the original `i[0] - 1` indexing crashes on modern OpenCV).
output_layers = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

# Open the video source to monitor.
cap = cv2.VideoCapture("security_feed.mp4")
def is_in_virtual_fence(x, y, x_min=200, x_max=600, y_min=200, y_max=400):
    """Return True if the point (x, y) lies strictly inside the virtual
    fence rectangle.

    Defaults reproduce the original hard-coded fence (200, 200)-(600, 400);
    the bounds are parameters so alternative fences can reuse the predicate.

    Args:
        x: horizontal pixel coordinate of a detection centre.
        y: vertical pixel coordinate of a detection centre.
        x_min, x_max, y_min, y_max: exclusive rectangle bounds in pixels.
    """
    # Bounds are exclusive, matching the original chained comparisons.
    return x_min < x < x_max and y_min < y < y_max
# Process frames until the stream ends or the user presses 'q'.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    height, width, channels = frame.shape
    # Normalise the frame into a 416x416 blob (scale 1/255, BGR->RGB swap).
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:  # confidence threshold; tune as needed
                # YOLO emits centre/size normalised to [0, 1]; scale to pixels.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                # Raise an intrusion alert when the centre enters the fence.
                if is_in_virtual_fence(center_x, center_y):
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(frame, "Intrusion Detected", (x, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
    cv2.imshow('Security Monitor', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
交通管理
监控特定道路或停车区域,自动识别违规停放车辆:
import cv2
import numpy as np

# Load the pre-trained YOLO model and its configuration files.
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns plain scalars in OpenCV >= 4.5.4 but
# nested 1-element arrays in older releases; flatten() handles both shapes
# (the original `i[0] - 1` indexing crashes on modern OpenCV).
output_layers = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

# Open the video source to monitor.
cap = cv2.VideoCapture("traffic_feed.mp4")
def is_in_no_parking_zone(x, y, x_min=100, x_max=400, y_min=100, y_max=300):
    """Return True if the point (x, y) lies strictly inside the no-parking
    zone rectangle.

    Defaults reproduce the original hard-coded zone (100, 100)-(400, 300);
    the bounds are parameters so other zones can reuse the predicate.

    Args:
        x: horizontal pixel coordinate of a detection centre.
        y: vertical pixel coordinate of a detection centre.
        x_min, x_max, y_min, y_max: exclusive rectangle bounds in pixels.
    """
    # Bounds are exclusive, matching the original chained comparisons.
    return x_min < x < x_max and y_min < y < y_max
# Process frames until the stream ends or the user presses 'q'.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    height, width, channels = frame.shape
    # Normalise the frame into a 416x416 blob (scale 1/255, BGR->RGB swap).
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # class_id 2 is treated as the vehicle class (COCO ordering);
            # only vehicle detections are considered.
            if confidence > 0.5 and class_id == 2:
                # YOLO emits centre/size normalised to [0, 1]; scale to pixels.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                # Flag vehicles whose centre falls inside the no-parking zone.
                if is_in_no_parking_zone(center_x, center_y):
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv2.putText(frame, "Illegal Parking", (x, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
    cv2.imshow('Traffic Monitor', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
农业监控
在农田间设置围栏,监测牲畜或野生动物的活动:
import cv2
import numpy as np

# Load the pre-trained YOLO model and its configuration files.
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns plain scalars in OpenCV >= 4.5.4 but
# nested 1-element arrays in older releases; flatten() handles both shapes
# (the original `i[0] - 1` indexing crashes on modern OpenCV).
output_layers = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

# Open the video source to monitor.
cap = cv2.VideoCapture("farm_feed.mp4")
def is_inside_farm_area(x, y, x_min=50, x_max=450, y_min=50, y_max=350):
    """Return True if the point (x, y) lies strictly inside the fenced
    farmland rectangle.

    Defaults reproduce the original hard-coded area (50, 50)-(450, 350);
    the bounds are parameters so other plots can reuse the predicate.

    Args:
        x: horizontal pixel coordinate of a detection centre.
        y: vertical pixel coordinate of a detection centre.
        x_min, x_max, y_min, y_max: exclusive rectangle bounds in pixels.
    """
    # Bounds are exclusive, matching the original chained comparisons.
    return x_min < x < x_max and y_min < y < y_max
# Process frames until the stream ends or the user presses 'q'.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    height, width, channels = frame.shape
    # Normalise the frame into a 416x416 blob (scale 1/255, BGR->RGB swap).
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:  # confidence threshold; tune as needed
                # YOLO emits centre/size normalised to [0, 1]; scale to pixels.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                # Flag detections whose centre falls inside the farm area.
                if is_inside_farm_area(center_x, center_y):
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(frame, "Animal Activity", (x, y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
    # The original fused imshow and the key check on one line (a syntax
    # error); they are two separate statements.
    cv2.imshow('Farm Monitor', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
这些代码示例展示了如何使用 OpenCV 和预训练的 YOLO 模型来实现不同类型的监控应用。
原理解释
虚拟电子围栏通过目标检测算法(如YOLO)识别视频流中的物体,并借助PyQt创建交互式界面供用户绘制多边形围栏。系统持续检测物体位置,一旦物体进入或离开多边形围栏区域,就触发相应的警报或操作。
算法原理流程图
算法原理解释
- 启动应用:初始化系统并加载资源。
- 加载YOLO模型:加载预训练的YOLO模型进行目标检测。
- 初始化PyQt界面:创建GUI,使用户能够绘制多边形围栏。
- 用户绘制多边形围栏:通过鼠标点击绘制围栏区域。
- 读取视频流:从摄像头或视频文件中获取视频帧。
- 进行目标检测:使用YOLO模型检测视频帧中的物体。
- 检查物体是否在围栏内:计算物体的位置并判断是否在围栏内。
- 触发警报/操作:如果物体在围栏内,执行预定义的操作(如报警)。
- 继续检测:重复步骤5至8。
实际应用代码示例实现
导入必要的库
import cv2
import numpy as np
from PyQt5.QtCore import Qt, QPoint
from PyQt5.QtGui import QPixmap, QImage, QPainter, QPen
from PyQt5.QtWidgets import (
    QApplication,
    QFileDialog,
    QLabel,
    QMainWindow,
    QPushButton,
    QVBoxLayout,
    QWidget,
)
初始化YOLO模型
class YOLO:
    """Thin wrapper around an OpenCV DNN Darknet/YOLO network."""

    def __init__(self, config_path, weights_path, classes_path):
        """Load the network and the class-name list.

        Args:
            config_path: path to the Darknet .cfg network description.
            weights_path: path to the pre-trained .weights file.
            classes_path: text file with one class name per line.
        """
        self.net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
        self.layer_names = self.net.getLayerNames()
        # flatten() copes with both the scalar (OpenCV >= 4.5.4) and the
        # nested-array (older OpenCV) return shapes of
        # getUnconnectedOutLayers().
        self.output_layers = [
            self.layer_names[i - 1]
            for i in np.array(self.net.getUnconnectedOutLayers()).flatten()
        ]
        with open(classes_path, 'r') as f:
            self.classes = f.read().strip().split('\n')

    def detect(self, image):
        """Run one forward pass over *image* and return the raw output
        tensors from the network's unconnected output layers."""
        # Scale 1/255, resize to 416x416, swap BGR->RGB, no cropping.
        blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        self.net.setInput(blob)
        outs = self.net.forward(self.output_layers)
        return outs
创建PyQt界面
class PolygonDrawer(QMainWindow):
    """Main window that displays video frames and lets the user click
    points to build a polygonal virtual fence."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the UI: one QLabel (the video surface) in a layout on a
        central widget."""
        self.setGeometry(100, 100, 800, 600)
        self.setWindowTitle('YOLO 配合 PYQT 绘制多边形电子围栏')
        self.label = QLabel(self)
        self.label.resize(800, 600)
        # QMainWindow silently ignores setLayout(); layouts must be placed
        # on a central widget instead (the original called self.setLayout,
        # which is a no-op on QMainWindow).
        central = QWidget(self)
        vbox = QVBoxLayout(central)
        vbox.addWidget(self.label)
        self.setCentralWidget(central)
        self.points = []       # clicked polygon vertices (QPoint)
        self.drawing = False   # reserved flag; not used yet

    def mousePressEvent(self, event):
        """Append a polygon vertex on every left click and repaint."""
        if event.button() == Qt.LeftButton:
            self.points.append(event.pos())
            self.update()

    def paintEvent(self, event):
        """Draw the fence polygon and a small dot at each vertex."""
        painter = QPainter(self)
        painter.setPen(QPen(Qt.red, 2))
        if len(self.points) > 1:
            painter.drawPolygon(*self.points)
        for point in self.points:
            painter.drawEllipse(point, 3, 3)
集成YOLO与PyQt
def main():
    """Capture webcam frames, run YOLO detection, and display the
    annotated frames inside the PolygonDrawer window."""
    app = QApplication([])
    window = PolygonDrawer()
    # Show the window once, up front — the original re-called show() on
    # every frame inside the loop.
    window.show()

    # Load YOLO model
    yolo = YOLO("yolov3.cfg", "yolov3.weights", "coco.names")

    # Capture input video (device 0 = default webcam)
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        outs = yolo.detect(frame)
        height, width, _ = frame.shape

        # Collect candidate boxes above the confidence threshold.
        boxes = []
        confidences = []
        class_ids = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # YOLO emits centre/size normalised to [0, 1]; scale
                    # to pixel coordinates.
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # Non-maximum suppression removes overlapping duplicate boxes;
        # flatten() normalises the return shape across OpenCV versions.
        indexes = np.array(cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)).flatten()
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(yolo.classes[class_ids[i]])
                color = (0, 255, 0)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        # Convert image for PyQt display (OpenCV is BGR, Qt wants RGB).
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qt_image)
        window.label.setPixmap(pixmap)

        # Pump the Qt event loop so the window actually repaints —
        # app.exec_() never runs while this capture loop is active, so
        # without this the GUI would stay frozen.
        app.processEvents()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    app.exec_()


if __name__ == '__main__':
    main()
测试代码
测试时只需运行以上的 main 函数即可启动程序:程序会捕获视频流、对画面中的物体进行检测,并将结果实时展示在 PyQt 窗口中。
部署场景
该应用可以部署在具有摄像头的PC或嵌入式设备上,如树莓派等,通过网络连接进行远程监控。
材料链接
总结
结合YOLO与PyQt,我们可以构建一个强大的自定义虚拟电子围栏系统。这不仅能提高安全性,还能实现实时监控和报警,同时具备良好的用户交互体验。
未来展望
未来可以考虑引入更多高级功能,例如:
- 使用深度学习进行更精准的物体识别。
- 融合机器学习算法以预测异常行为。
- 增加云端数据存储和分析功能,实现更全面的监控解决方案。
【版权声明】本文为华为云社区用户原创内容,转载时必须标注文章的来源(华为云社区)、文章链接、文章作者等基本信息, 否则作者和本社区有权追究责任。如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱:
cloudbbs@huaweicloud.com
- 点赞
- 收藏
- 关注作者
评论(0)