OpenCV helmet detection

Posted by 风吹稻花香 on 2021/06/05 23:40:16
[Abstract] The script below can only detect helmets; it cannot detect heads, so it cannot tell whether a person is actually wearing one. Code: https://github.com/BlcaKHat/yolov3-Helmet-Detection/blob/master/Helmet_detection_YOLOV3.py  Weights: https://github.com/rezabonyadi/Helmet_Detection_YOLO

This script can only detect helmets; it cannot detect heads, so it cannot judge whether someone is actually wearing a helmet.

https://github.com/BlcaKHat/yolov3-Helmet-Detection/blob/master/Helmet_detection_YOLOV3.py

Weights:

https://github.com/rezabonyadi/Helmet_Detection_YOLO
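Before running the script, the files and folders it references need to be in place: obj.names, yolov3-obj.cfg and yolov3-obj_2400.weights next to the script, input images in images/, and an output folder test_out/. A minimal pre-flight check might look like the sketch below (the file names are taken from the listing; the check itself is not part of the original script):

import os
from glob import glob

# These names come from the listing further down.
required = ["obj.names", "yolov3-obj.cfg", "yolov3-obj_2400.weights"]
missing = [name for name in required if not os.path.isfile(name)]
if missing:
    raise FileNotFoundError("Missing model files: " + ", ".join(missing))

# The script reads inputs from images/ and writes annotated images to test_out/.
os.makedirs("test_out", exist_ok=True)
if not glob("images/*.jpg"):
    print("No .jpg files found in images/ - nothing to process.")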


  
from time import sleep
import cv2 as cv
import argparse
import sys
import numpy as np
import os.path
from glob import glob
#from PIL import image

frame_count = 0         # used in the main loop over images, and in drawPred (called by postprocess)
frame_count_out = 0     # used in the postprocess loop to count detections of the target class

# Initialize the parameters
confThreshold = 0.5     # Confidence threshold
nmsThreshold = 0.4      # Non-maximum suppression threshold
inpWidth = 416          # Width of the network's input image
inpHeight = 416         # Height of the network's input image

# Load the names of the classes
classesFile = "obj.names"
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')

# Give the configuration and weight files for the model and load the network with them.
modelConfiguration = "yolov3-obj.cfg"
modelWeights = "yolov3-obj_2400.weights"

net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

# Get the names of the output layers
def getOutputsNames(net):
    # Get the names of all the layers in the network
    layersNames = net.getLayerNames()
    # Get the names of the output layers, i.e. the layers with unconnected outputs
    return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom):
    global frame_count
    # Draw a bounding box.
    cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)

    label = '%.2f' % conf
    # Get the label for the class name and its confidence
    if classes:
        assert(classId < len(classes))
        label = '%s:%s' % (classes[classId], label)

    # Display the label at the top of the bounding box
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])

    label_name, label_conf = label.split(':')   # split into class name and confidence
    if label_name == 'Helmet':
        # Only draw the label box for helmet detections;
        # frame_count tracks how many helmets were drawn in this frame.
        cv.rectangle(frame, (left, top - round(1.5 * labelSize[1])), (left + round(1.5 * labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
        cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
        frame_count += 1

    if frame_count > 0:
        return frame_count

# Remove the bounding boxes with low confidence using non-maximum suppression
def postprocess(frame, outs):
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]
    global frame_count_out
    frame_count_out = 0

    # Scan through all the bounding boxes output from the network and keep only the
    # ones with high confidence scores. Assign each box the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non-maximum suppression to eliminate redundant, overlapping boxes
    # with lower confidences.
    indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    count_person = 0    # counts detections of the target class in this loop
    for i in indices:
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        # drawPred is called for every surviving box; it returns the running helmet count.
        frame_count_out = drawPred(classIds[i], confidences[i], left, top, left + width, top + height)

        # Check the class of the current box (the original used the leftover loop
        # variable classId here; classIds[i] is the class of this detection).
        my_class = 'Helmet'
        unknown_class = classes[classIds[i]]
        if my_class == unknown_class:
            count_person += 1

    print(frame_count_out)
    if count_person >= 1:
        path = 'test_out/'
        frame_name = os.path.basename(fn)            # trim the path, keep only the file name
        cv.imwrite(str(path) + frame_name, frame)    # write the annotated image to the output folder
        cv.imshow('img', frame)
        cv.waitKey(800)

# Process inputs
winName = 'Deep learning object detection in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)

for fn in glob('images/*.jpg'):
    frame = cv.imread(fn)
    frame_count = 0

    # Create a 4D blob from a frame.
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)

    # Set the input to the network
    net.setInput(blob)

    # Run the forward pass to get the output of the output layers
    outs = net.forward(getOutputsNames(net))

    # Remove the bounding boxes with low confidence
    postprocess(frame, outs)

    # Put efficiency information. getPerfProfile returns the overall inference time (t)
    # and the timings for each of the layers (in layersTimes).
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
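One portability note: in newer OpenCV releases, net.getUnconnectedOutLayers() returns plain integer indices instead of one-element arrays, so the i[0] indexing in getOutputsNames() and in the NMS loop raises an error. A version-tolerant variant could look like this sketch (not part of the original script):

def getOutputsNames(net):
    layersNames = net.getLayerNames()
    # getUnconnectedOutLayers() gives 1-based indices; depending on the OpenCV
    # version they come back as scalars or as one-element arrays, so flatten first.
    out_ids = np.array(net.getUnconnectedOutLayers()).flatten()
    return [layersNames[i - 1] for i in out_ids]

# The same applies to the NMS result in postprocess():
#     for i in np.array(indices).flatten():
#         box = boxes[i]
#         ...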

 

Source: blog.csdn.net, author: 网奇. Copyright belongs to the original author; please contact the author for reprints.

Original link: blog.csdn.net/jacke121/article/details/90647925
