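"""Blur license plates in images (number_plate_redaction.py).

Each image (and, with --split-image, additional top-left and top-right crops)
is sent to recognition_api from plate_recognition, using either a cloud API
key or a local SDK URL. The per-crop detections are merged back into
full-image coordinates, de-duplicated, padded, and optionally shown or saved
as a blurred copy of the original image.
"""
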
import io
import json
import math
import re
from itertools import combinations
from pathlib import Path

from PIL import Image, ImageFilter

from plate_recognition import draw_bb, parse_arguments, recognition_api
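
# The helpers below operate on recognition results of roughly this shape
# (illustrative only; it lists just the fields this script actually reads,
# not the full API response):
#
#     {
#         "results": [
#             {
#                 "plate": "abc1234",
#                 "score": 0.9,    # OCR (character reading) score
#                 "dscore": 0.8,   # plate detection score
#                 "box": {"xmin": 0, "ymin": 0, "xmax": 100, "ymax": 40},
#                 "vehicle": {"score": 0.7, "box": {...}},
#             }
#         ]
#     }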


def blur(im, blur_amount, api_res, ignore_no_bb=False, ignore_list=None):
    for res in api_res.get("results", []):
        if ignore_no_bb and res["vehicle"]["score"] == 0.0:
            continue
        if ignore_list:
            skip_blur = False
            for ignore_regex in ignore_list:
                if re.search(ignore_regex, res["plate"]):
                    skip_blur = True
                    break
            if skip_blur:
                continue
        b = res["box"]
        width, height = b["xmax"] - b["xmin"], b["ymax"] - b["ymin"]
        crop_box = (b["xmin"], b["ymin"], b["xmax"], b["ymax"])
        ic = im.crop(crop_box)
        # Increase the amount of blur with the size of the bounding box
        blur_image = ic.filter(
            ImageFilter.GaussianBlur(
                radius=math.sqrt(width * height) * 0.3 * blur_amount / 10
            )
        )
        im.paste(blur_image, crop_box)
    return im
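
# bb_iou and clean_objs de-duplicate detections: when the same plate is found
# in both the full image and one of the crops (--split-image), only the
# detection with the higher score is kept.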


def bb_iou(a, b):
    # Determine the (x, y)-coordinates of the intersection rectangle
    x_a = max(a["xmin"], b["xmin"])
    y_a = max(a["ymin"], b["ymin"])
    x_b = min(a["xmax"], b["xmax"])
    y_b = min(a["ymax"], b["ymax"])
    # Compute the area of both rectangles
    area_a = (a["xmax"] - a["xmin"]) * (a["ymax"] - a["ymin"])
    area_b = (b["xmax"] - b["xmin"]) * (b["ymax"] - b["ymin"])
    # Compute the area of the intersection rectangle
    area_inter = max(0, x_b - x_a) * max(0, y_b - y_a)
    return area_inter / float(max(area_a + area_b - area_inter, 1))


def clean_objs(objects, threshold=0.1):
    # Only keep the detections with the best score or with no overlap
    for o1, o2 in combinations(objects, 2):
        if (
            "remove" in o1
            or "remove" in o2
            or bb_iou(o1["box"], o2["box"]) <= threshold
        ):
            continue
        if o1["score"] > o2["score"]:
            o2["remove"] = True
        else:
            o1["remove"] = True
    return [x for x in objects if "remove" not in x]


def merge_results(images):
    result = dict(results=[])
    for data in images:
        for item in data["prediction"]["results"]:
            result["results"].append(item)
            # Shift boxes from crop coordinates back to full-image coordinates
            for b in [item["box"], item["vehicle"].get("box", {})]:
                if not b:
                    continue  # No vehicle box was returned for this plate
                b["ymin"] += data["y"]
                b["xmin"] += data["x"]
                b["ymax"] += data["y"]
                b["xmax"] += data["x"]
    result["results"] = clean_objs(result["results"])
    return result
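
# A low-confidence detection that fully contains another detection is usually
# a duplicate produced by the extra crops; post_processing drops it.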


def inside(a, b):
    # True when box a lies strictly inside box b
    return (
        a["xmin"] > b["xmin"]
        and a["ymin"] > b["ymin"]
        and a["xmax"] < b["xmax"]
        and a["ymax"] < b["ymax"]
    )


def post_processing(results):
    new_list = []
    for item in results["results"]:
        if item["score"] < 0.2 and any(
            inside(x["box"], item["box"]) for x in results["results"] if x != item
        ):
            continue
        new_list.append(item)
    results["results"] = new_list
    return results


def process_image(path, args, i):
    config = dict(
        threshold_d=args.detection_threshold,
        threshold_o=args.ocr_threshold,
        mode="redaction",
    )

    # Predictions
    source_im = Image.open(path)
    if source_im.mode != "RGB":
        source_im = source_im.convert("RGB")
    images = [((0, 0), source_im)]  # Entire image
    # Top left and top right crops
    if args.split_image:
        y = 0
        win_size = 0.55
        width, height = source_im.width * win_size, source_im.height * win_size
        for x in [0, int((1 - win_size) * source_im.width)]:
            images.append(((x, y), source_im.crop((x, y, x + width, y + height))))

    # Inference
    results = []
    for (x, y), im in images:
        im_bytes = io.BytesIO()
        im.save(im_bytes, "JPEG", quality=95)
        im_bytes.seek(0)
        im_results = recognition_api(
            im_bytes, args.regions, args.api_key, args.sdk_url, config=config
        )
        results.append(dict(prediction=im_results, x=x, y=y))
    results = post_processing(merge_results(results))
    results["filename"] = Path(path).name

    # Set bounding box padding
    for item in results["results"]:
        # Decrease padding size for large bounding boxes
        b = item["box"]
        width, height = b["xmax"] - b["xmin"], b["ymax"] - b["ymin"]
        padding_x = int(
            max(0, width * (0.3 * math.exp(-10 * width / source_im.width)))
        )
        padding_y = int(
            max(0, height * (0.3 * math.exp(-10 * height / source_im.height)))
        )
        b["xmin"] = b["xmin"] - padding_x
        b["ymin"] = b["ymin"] - padding_y
        b["xmax"] = b["xmax"] + padding_x
        b["ymax"] = b["ymax"] + padding_y

    if args.show_boxes or args.save_blurred:
        im = blur(
            source_im,
            5,
            results,
            ignore_no_bb=args.ignore_no_bb,
            ignore_list=args.ignore_regexp,
        )
        if args.show_boxes:
            im.show()
        if args.save_blurred:
            filename = Path(path)
            im.save(filename.parent / f"{filename.stem}_blurred{filename.suffix}")
    if 0:  # Debug: show the detection boxes instead of blurring
        draw_bb(source_im, results["results"]).show()
    return results


def custom_args(parser):
    parser.epilog += (
        "To analyse the image for redaction: "
        "python number_plate_redaction.py --api-key MY_API_KEY --split-image /tmp/car.jpg"
    )
    parser.add_argument(
        "--split-image",
        action="store_true",
        help="Do extra lookups on parts of the image. Useful for high resolution images.",
    )
    parser.add_argument(
        "--show-boxes",
        action="store_true",
        help="Display the resulting blurred image.",
    )
    parser.add_argument(
        "--save-blurred",
        action="store_true",
        help="Blur the license plates and save a copy of the image with a _blurred suffix in the filename.",
    )
    parser.add_argument(
        "--ignore-regexp",
        action="append",
        help="Plate regex to skip during blurring. Usually invalid plate numbers.",
    )
    parser.add_argument(
        "--ignore-no-bb",
        action="store_true",
        help="Skip detections without a vehicle bounding box during blurring.",
    )
    parser.add_argument(
        "--detection-threshold",
        type=float,
        default=0.2,
        help="Keep all detections above this threshold. Between 0 and 1.",
    )
    parser.add_argument(
        "--ocr-threshold",
        type=float,
        default=0.5,
        help="Keep a plate only if its character reading score is above this threshold. Between 0 and 1.",
    )


def main():
    args = parse_arguments(custom_args)
    result = []
    for i, path in enumerate(args.files):
        if Path(path).is_file():
            result.append(process_image(path, args, i))
    if 0:  # Debug: keep only the scores and boxes in the printed output
        for im_result in result:
            for i, x in enumerate(im_result["results"]):
                im_result["results"][i] = dict(
                    dscore=x["dscore"], score=x["score"], box=x["box"]
                )
    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()