-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathSave_Detections_Faster-RCNN.py
135 lines (93 loc) · 4.29 KB
/
Save_Detections_Faster-RCNN.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
#!/usr/bin/env python
# coding: utf-8
# # Tutorial adapted from the Detectron2 colab example
# # Install detectron2
# https://github.com/facebookresearch/detectron2
#
# https://detectron2.readthedocs.io/en/latest/tutorials/install.html
# In[2]:
# check pytorch installation:
import torch, torchvision
print(torch.__version__, torch.cuda.is_available())
# In[2]:
# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
# # Train on a iSAID dataset
# This section fine-tunes an existing detectron2 model on the iSAID dataset.
#
# ## Prepare the dataset
# The annotations are COCO-format JSON, so detectron2's built-in
# register_coco_instances hook is enough to expose them as named datasets.
# In[3]:
from detectron2.data.datasets import register_coco_instances

# (dataset name, COCO-format annotation json, image root directory) triples.
_DATASET_SPECS = [
    ("data_train",
     "/home/muhammad.ali/Desktop/dataset/material_version/instances_train_trashcan.json",
     "/home/muhammad.ali/Desktop/dataset/Enhanced/RGHS/material_version/train"),
    ("data_val",
     "/home/muhammad.ali/Desktop/dataset/material_version/instances_val_trashcan.json",
     "/home/muhammad.ali/Desktop/dataset/Enhanced/RGHS/material_version/val"),
]
for _name, _json_file, _image_root in _DATASET_SPECS:
    register_coco_instances(_name, {}, _json_file, _image_root)
# ## Train!
#
# Fine-tune a COCO-pretrained R101-C4 Faster R-CNN on the registered dataset.
# In[4]:
from detectron2.engine import DefaultTrainer

cfg = get_cfg()
cfg.OUTPUT_DIR = 'NewFRCNNMATERIALENHANCED'
# cfg.MODEL.RETINANET.NUM_CLASSES = 22

# Same model-zoo yaml drives both the config and the pretrained checkpoint.
_MODEL_YAML = "COCO-Detection/faster_rcnn_R_101_C4_3x.yaml"
cfg.merge_from_file(model_zoo.get_config_file(_MODEL_YAML))

# Datasets: train on the registered train split; no eval set during training.
cfg.DATASETS.TRAIN = ("data_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2

# Initialize from the matching COCO-pretrained weights in the model zoo.
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(_MODEL_YAML)

# Solver: constant learning rate (empty STEPS disables decay).
# The original run used BASE_LR = 0.0005.
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.0025
#cfg.build_lr_scheduler(cfg.SOLVER.BASE_LR)
cfg.SOLVER.MAX_ITER = 40000
cfg.SOLVER.STEPS = []

# ROI heads: 512 proposals sampled per image (the default), 16 classes.
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 16

# Training is disabled for this inference-only run; uncomment to retrain.
#os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
#trainer = DefaultTrainer(cfg)
#trainer.resume_or_load(resume=False)
#trainer.train()
# ### Look at training curves in tensorboard by running in the terminal:
# tensorboard --logdir output_fasterrcnn
# ## Inference & evaluation using the trained model
# Run the trained model over every image in the validation directory and
# save the visualized detections to disk.
# In[5]:
# Inference reuses the training config; only the weights path and the
# score threshold change.
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")  # trained checkpoint
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # custom testing threshold
predictor = DefaultPredictor(cfg)

# Kept for parity with the tutorial; the test loader itself is not needed
# for the per-image loop below (the unused loader variable was removed).
from detectron2.data import build_detection_test_loader

metadata = MetadataCatalog.get("data_val")

VAL_DIR = "/home/muhammad.ali/Desktop/dataset/Enhanced/RGHS/material_version/val/"
# NOTE(review): the original output path was ".../TAC_Res" + iname with no
# trailing separator, so results are written directly under Desktop with a
# "TAC_Res" filename prefix. Preserved as-is; confirm whether a "TAC_Res/"
# directory was actually intended.
OUT_PREFIX = "/home/muhammad.ali/Desktop/TAC_Res"

# sorted() makes the processing order deterministic across filesystems.
for iname in sorted(os.listdir(VAL_DIR)):
    im = cv2.imread(os.path.join(VAL_DIR, iname))
    if im is None:
        # os.listdir returns every entry, including subdirectories and
        # non-image files; skip them instead of crashing in the predictor.
        print("skipping unreadable entry:", iname)
        continue
    outputs = predictor(im)
    # Visualizer expects RGB; OpenCV loads BGR, hence the channel reversal
    # on the way in and again on the way out.
    v = Visualizer(
        im[:, :, ::-1],
        metadata=metadata,
        scale=1
    )
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    if not cv2.imwrite(OUT_PREFIX + iname, out.get_image()[:, :, ::-1]):
        # cv2.imwrite fails silently (returns False) e.g. when the target
        # directory does not exist — surface that instead of losing output.
        print("failed to write detections for:", iname)