forked from tensorflow/tensorrt
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path base_run_inference.sh
executable file
·157 lines (128 loc) · 4.43 KB
/
base_run_inference.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
#!/bin/bash
# Wrapper that benchmarks a TF object-detection SavedModel with infer.py.
# Usage: base_run_inference.sh --model_name=... --data_dir=... \
#            --input_saved_model_dir=... [--batch_size=N] [extra flags...]

# Absolute directory holding this script, resolved even when invoked
# through a relative path or `source`.
BASE_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)

# Log the visible GPUs for the benchmark record.
nvidia-smi

# Runtime parameters (filled in by the argument loop below).
MODEL_NAME=""
DATA_DIR=""
MODEL_DIR=""

# Defaults: unrecognized flags are collected and forwarded to infer.py.
BYPASS_ARGUMENTS=""
BATCH_SIZE=8
#######################################
# Parse the command line for the flags this wrapper understands.
# Globals:   MODEL_NAME, BATCH_SIZE, DATA_DIR, MODEL_DIR, BYPASS_ARGUMENTS (written)
# Arguments: the script's argument list ("$@")
# Note: the original loop called `shift` per flag, but inside
# `for arg in "$@"` the loop iterates an already-expanded list, so the
# shifts had no effect on parsing and only mutated $@ — removed.
#######################################
parse_args() {
    local arg
    for arg in "$@"; do
        case "${arg}" in
            --model_name=*)
                MODEL_NAME="${arg#*=}"
                ;;
            --batch_size=*)
                BATCH_SIZE="${arg#*=}"
                ;;
            --data_dir=*)
                DATA_DIR="${arg#*=}"
                ;;
            --total_max_samples=*)
                # Swallowed: MAX_SAMPLES is fixed by this wrapper below.
                ;;
            --output_tensors_name=*)
                # Swallowed: OUTPUT_TENSORS_NAME is fixed by this wrapper below.
                ;;
            --input_saved_model_dir=*)
                MODEL_DIR="${arg#*=}"
                ;;
            *)
                # Anything else is passed through to infer.py verbatim.
                BYPASS_ARGUMENTS="${BYPASS_ARGUMENTS} ${arg}"
                ;;
        esac
    done
}

parse_args "$@"

# Collapse repeated spaces and trim leading/trailing whitespace
# (unquoted expansion through `echo` performs the trim, as before).
BYPASS_ARGUMENTS=$(echo ${BYPASS_ARGUMENTS} | tr -s " ")
# ============== Set model specific parameters ============= #
# Defaults shared by every model in this benchmark suite.
INPUT_SIZE=640
MAX_SAMPLES=5000
OUTPUT_TENSORS_NAME="boxes,classes,num_detections,scores"

# TensorRT max workspace: 2^(32+1) bytes by default, reduced to 2^(24+1)
# for the two models below. (+ 1 necessary compared to python)
if [[ "${MODEL_NAME}" == "faster_rcnn_resnet50_coco" || \
      "${MODEL_NAME}" == "ssd_mobilenet_v1_fpn_coco" ]]; then
    MAX_WORKSPACE_SIZE=$((1 << 25))
else
    MAX_WORKSPACE_SIZE=$((1 << 33))
fi
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #

# Echo the effective configuration so every run is self-describing in logs.
printf '\n********************************************************************\n'
printf '[*] MODEL_NAME: %s\n' "${MODEL_NAME}"
printf '\n'
printf '[*] DATA_DIR: %s\n' "${DATA_DIR}"
printf '[*] MODEL_DIR: %s\n' "${MODEL_DIR}"
printf '\n'
# Custom Object Detection Task Flags
printf '[*] BATCH_SIZE: %s\n' "${BATCH_SIZE}"
printf '[*] INPUT_SIZE: %s\n' "${INPUT_SIZE}"
printf '[*] MAX_WORKSPACE_SIZE: %s\n' "${MAX_WORKSPACE_SIZE}"
printf '[*] MAX_SAMPLES: %s\n' "${MAX_SAMPLES}"
printf '[*] OUTPUT_TENSORS_NAME: %s\n' "${OUTPUT_TENSORS_NAME}"
printf '\n'
printf '[*] BYPASS_ARGUMENTS: %s\n' "${BYPASS_ARGUMENTS}"
printf '********************************************************************\n\n'
# ======================= ARGUMENT VALIDATION ======================= #

# ---------------------- Dataset Directory --------------
# Guard clauses: the COCO root must be supplied, exist, and contain the
# val2017 images plus the instance annotation file.
[[ -n "${DATA_DIR}" ]] || {
    echo "ERROR: \`--data_dir=/path/to/directory\` is missing."
    exit 1
}

[[ -d "${DATA_DIR}" ]] || {
    echo "ERROR: \`--data_dir=/path/to/directory\` does not exist. [Received: \`${DATA_DIR}\`]"
    exit 1
}

VAL_DATA_DIR="${DATA_DIR}/val2017"
ANNOTATIONS_DATA_FILE="${DATA_DIR}/annotations/instances_val2017.json"

[[ -d "${VAL_DATA_DIR}" ]] || {
    echo "ERROR: the directory \`${VAL_DATA_DIR}\` does not exist."
    exit 1
}

[[ -f "${ANNOTATIONS_DATA_FILE}" ]] || {
    echo "ERROR: the file \`${ANNOTATIONS_DATA_FILE}\` does not exist."
    exit 1
}
# ---------------------- Model Directory --------------
# Guard clauses: the SavedModel root must be supplied and exist, and
# follow the layout <root>/<model_name>_640_bs<batch_size>.
[[ -n "${MODEL_DIR}" ]] || {
    echo "ERROR: \`--input_saved_model_dir=/path/to/directory\` is missing."
    exit 1
}

[[ -d "${MODEL_DIR}" ]] || {
    echo "ERROR: \`--input_saved_model_dir=/path/to/directory\` does not exist. [Received: \`${MODEL_DIR}\`]"
    exit 1
}

INPUT_SAVED_MODEL_DIR="${MODEL_DIR}/${MODEL_NAME}_640_bs${BATCH_SIZE}"

[[ -d "${INPUT_SAVED_MODEL_DIR}" ]] || {
    echo "ERROR: the directory \`${INPUT_SAVED_MODEL_DIR}\` does not exist."
    exit 1
}
# %%%%%%%%%%%%%%%%%%%%%%% ARGUMENT VALIDATION %%%%%%%%%%%%%%%%%%%%%%% #

# Run from the script's own directory; abort rather than execute from an
# unexpected cwd if the cd fails.
BENCH_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "${BENCH_DIR}" || exit 1

# Step 1: Installing dependencies if needed:
# Probe for pycocotools and install it only when the import fails.
if ! python -c "from pycocotools.coco import COCO" > /dev/null 2>&1; then
    bash "${BASE_DIR}/../helper_scripts/install_pycocotools.sh"
fi

# Unpredictable, collision-free scratch directory for the converted model.
# The previous /tmp/$RANDOM was guessable (symlink attack surface) and
# could collide across concurrent benchmark runs.
OUTPUT_SAVED_MODEL_DIR="$(mktemp -d)"

set -x

# Step 2: Execute the example
# All path/value expansions are quoted so paths with spaces survive.
# ${BYPASS_ARGUMENTS} is intentionally unquoted: it holds zero or more
# whitespace-separated extra flags that must word-split.
python "${BASE_DIR}/infer.py" \
    --data_dir "${VAL_DATA_DIR}" \
    --calib_data_dir "${VAL_DATA_DIR}" \
    --annotation_path "${ANNOTATIONS_DATA_FILE}" \
    --input_saved_model_dir "${INPUT_SAVED_MODEL_DIR}" \
    --output_saved_model_dir "${OUTPUT_SAVED_MODEL_DIR}" \
    --model_name "${MODEL_NAME}" \
    --model_source "tf_models_object" \
    --batch_size "${BATCH_SIZE}" \
    --input_size "${INPUT_SIZE}" \
    --max_workspace_size "${MAX_WORKSPACE_SIZE}" \
    --total_max_samples="${MAX_SAMPLES}" \
    --output_tensors_name="${OUTPUT_TENSORS_NAME}" \
    ${BYPASS_ARGUMENTS}