yolo.py
import time

import cv2
import numpy as np


class YOLO:
    """Thin wrapper around OpenCV's DNN module for running Darknet YOLO models."""

    def __init__(self, config, model, labels, size=416, confidence=0.5, threshold=0.3):
        self.confidence = confidence  # minimum detection confidence
        self.threshold = threshold    # non-maxima suppression overlap threshold
        self.size = size              # network input size in pixels
        self.labels = labels          # list of class names
        self.net = cv2.dnn.readNetFromDarknet(config, model)

    def inference_from_file(self, file):
        # read the image from disk and run inference on it
        mat = cv2.imread(file)
        return self.inference(mat)

    def inference(self, image):
        ih, iw = image.shape[:2]

        # determine the output layer names of the YOLO network
        # (getUnconnectedOutLayers() returns a column vector in older OpenCV
        # releases and a flat array in newer ones, so flatten it first)
        ln = self.net.getLayerNames()
        ln = [ln[i - 1] for i in self.net.getUnconnectedOutLayers().flatten()]

        # construct a blob from the input image and perform a forward pass,
        # timing how long the inference takes
        blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (self.size, self.size), swapRB=True, crop=False)
        self.net.setInput(blob)
        start = time.time()
        layerOutputs = self.net.forward(ln)
        end = time.time()
        inference_time = end - start

        boxes = []
        confidences = []
        classIDs = []

        # loop over each of the layer outputs
        for output in layerOutputs:
            # loop over each of the detections
            for detection in output:
                # extract the class ID and confidence (i.e., probability) of
                # the current object detection
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]

                # filter out weak predictions by ensuring the detected
                # probability is greater than the minimum probability
                if confidence > self.confidence:
                    # scale the bounding box coordinates back relative to the
                    # size of the image, keeping in mind that YOLO actually
                    # returns the center (x, y)-coordinates of the bounding
                    # box followed by the box's width and height
                    box = detection[0:4] * np.array([iw, ih, iw, ih])
                    (centerX, centerY, width, height) = box.astype("int")

                    # use the center (x, y)-coordinates to derive the top
                    # and left corner of the bounding box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))

                    # update our list of bounding box coordinates,
                    # confidences, and class IDs
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(int(classID))

        # apply non-maxima suppression to suppress weak, overlapping boxes
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence, self.threshold)

        results = []
        if len(idxs) > 0:
            for i in idxs.flatten():
                # extract the bounding box coordinates
                x, y = (boxes[i][0], boxes[i][1])
                w, h = (boxes[i][2], boxes[i][3])
                class_id = classIDs[i]
                confidence = confidences[i]
                results.append((class_id, self.labels[class_id], confidence, x, y, w, h))

        return iw, ih, inference_time, results
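

# --- Example usage (illustrative sketch, not part of the original module) ---
# The file names below ("yolov3.cfg", "yolov3.weights", "coco.names",
# "example.jpg") are placeholder assumptions: substitute the Darknet config,
# weights, label file, and test image you actually have on disk.
if __name__ == "__main__":
    # load the class labels, one name per line
    with open("coco.names") as f:
        labels = [line.strip() for line in f if line.strip()]

    yolo = YOLO("yolov3.cfg", "yolov3.weights", labels)
    width, height, inference_time, results = yolo.inference_from_file("example.jpg")

    print(f"image: {width}x{height}, inference took {inference_time:.3f}s")
    for class_id, name, confidence, x, y, w, h in results:
        print(f"  {name} ({confidence:.2f}) at x={x}, y={y}, w={w}, h={h}")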