Skip to content

Commit

Permalink
fix all demo
Browse files Browse the repository at this point in the history
  • Loading branch information
sunjiahao1999 committed Oct 9, 2023
1 parent 40bd7b1 commit 5c51f1a
Show file tree
Hide file tree
Showing 13 changed files with 453 additions and 206 deletions.
133 changes: 48 additions & 85 deletions demo/inference_demo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -2,117 +2,83 @@
"cells": [
{
"cell_type": "code",
"execution_count": null,
"source": [
"from mmdet3d.apis import inference_detector, init_model\n",
"from mmdet3d.registry import VISUALIZERS\n",
"from mmdet3d.utils import register_all_modules"
],
"outputs": [],
"execution_count": 25,
"metadata": {
"pycharm": {
"is_executing": false
}
}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# register all modules in mmdet3d into the registries\n",
"register_all_modules()"
],
},
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 8,
"source": [
"config_file = '../configs/second/hv_second_secfpn_6x8_80e_kitti-3d-car.py'\n",
"# download the checkpoint from model zoo and put it in `checkpoints/`\n",
"checkpoint_file = '../work_dirs/second/epoch_40.pth'"
],
"outputs": [],
"metadata": {
"pycharm": {
"is_executing": false
}
}
"from mmdet3d.apis import LidarDet3DInferencer"
]
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# build the model from a config file and a checkpoint file\n",
"model = init_model(config_file, checkpoint_file, device='cuda:0')"
],
"metadata": {},
"outputs": [],
"metadata": {}
"source": [
"# initialize inferencer\n",
"inferencer = LidarDet3DInferencer('pointpillars_kitti-3class')"
]
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# init visualizer\n",
"visualizer = VISUALIZERS.build(model.cfg.visualizer)\n",
"visualizer.dataset_meta = {\n",
" 'CLASSES': model.CLASSES,\n",
" 'PALETTE': model.PALETTE\n",
"}"
],
"outputs": [],
"metadata": {
"pycharm": {
"is_executing": false
}
}
},
"outputs": [],
"source": [
"# inference\n",
"inputs = dict(points='./data/kitti/000008.bin')\n",
"inferencer(inputs)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"source": [
"# test a single sample\n",
"pcd = './data/kitti/000008.bin'\n",
"result, data = inference_detector(model, pcd)\n",
"points = data['inputs']['points']\n",
"data_input = dict(points=points)"
],
"execution_count": null,
"metadata": {},
"outputs": [],
"metadata": {
"pycharm": {
"is_executing": false
}
}
"source": [
"# inference and visualize\n",
"# NOTE: use the `Esc` key to exit Open3D window in Jupyter Notebook Environment\n",
"inferencer(inputs, show=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# show the results\n",
"out_dir = './'\n",
"visualizer.add_datasample(\n",
" 'result',\n",
" data_input,\n",
" data_sample=result,\n",
" draw_gt=False,\n",
" show=True,\n",
" wait_time=0,\n",
" out_file=out_dir,\n",
" vis_task='det')"
],
"metadata": {},
"outputs": [],
"metadata": {
"pycharm": {
"is_executing": false
}
}
"source": [
"# If your operating environment does not have a display device,\n",
"# (e.g. a remote server), you can save the predictions and visualize\n",
"# them in local devices.\n",
"inferencer(inputs, show=False, out_dir='./remote_outputs')\n",
"\n",
"# Simulate the migration process\n",
"%mv ./remote_outputs ./local_outputs\n",
"\n",
"# Visualize the predictions from the saved files\n",
"# NOTE: use the `Esc` key to exit Open3D window in Jupyter Notebook Environment\n",
"local_inferencer = LidarDet3DInferencer('pointpillars_kitti-3class')\n",
"inputs = local_inferencer._inputs_to_list(inputs)\n",
"local_inferencer.visualize_preds_fromfile(inputs, ['local_outputs/preds/000008.json'], show=True)"
]
}
],
"metadata": {
"interpreter": {
"hash": "a0c343fece975dd89087e8c2194dd4d3db28d7000f1b32ed9ed9d584dd54dbbe"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.6 64-bit ('torch1.7-cu10.1': conda)"
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
Expand All @@ -124,19 +90,16 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
"version": "3.9.16"
},
"pycharm": {
"stem_cell": {
"cell_type": "raw",
"source": [],
"metadata": {
"collapsed": false
}
},
"source": []
}
},
"interpreter": {
"hash": "a0c343fece975dd89087e8c2194dd4d3db28d7000f1b32ed9ed9d584dd54dbbe"
}
},
"nbformat": 4,
Expand Down
98 changes: 98 additions & 0 deletions demo/new_mono_det_demo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
from argparse import ArgumentParser

from mmengine.logging import print_log

from mmdet3d.apis import MonoDet3DInferencer


def parse_args():
    """Parse CLI arguments and split them into inferencer init/call kwargs.

    Returns:
        tuple[dict, dict]: ``(init_args, call_args)`` where ``init_args``
        holds ``model``/``weights``/``device`` used to construct the
        inferencer, and ``call_args`` holds ``inputs`` plus the runtime
        options forwarded to the inferencer call.
    """
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('infos', help='Infos file with annotations')
    parser.add_argument('model', help='Config file')
    parser.add_argument('weights', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--cam-type',
        type=str,
        default='CAM_BACK',
        help='choose camera type to inference')
    parser.add_argument(
        '--pred-score-thr',
        type=float,
        default=0.3,
        help='bbox score threshold')
    parser.add_argument(
        '--out-dir',
        type=str,
        default='outputs',
        help='Output directory of prediction results.')
    parser.add_argument(
        '--show',
        action='store_true',
        help='Show online visualization results')
    parser.add_argument(
        '--wait_time',
        type=float,
        default=-1,
        # BUG FIX: original implicit concatenation rendered "showingresults";
        # a trailing space is required on the first fragment.
        help='The interval of show (s). Demo will be blocked in showing '
        'results, if wait_time is -1. Defaults to -1.')
    parser.add_argument(
        '--no-save-vis',
        action='store_true',
        help='Do not save detection vis results')
    parser.add_argument(
        '--no-save-pred',
        action='store_true',
        help='Do not save detection json results')
    parser.add_argument(
        '--print-result',
        action='store_true',
        help='Whether to print the results.')
    call_args = vars(parser.parse_args())

    call_args['inputs'] = dict(
        img=call_args.pop('img'), infos=call_args.pop('infos'))
    # `--cam-type` is accepted for CLI compatibility but is not consumed by
    # the mono inferencer call path here — presumably resolved via the infos
    # file; TODO confirm.
    call_args.pop('cam_type')

    # Nothing would be written in this case, so disable the output directory.
    if call_args['no_save_vis'] and call_args['no_save_pred']:
        call_args['out_dir'] = ''

    init_kws = ['model', 'weights', 'device']
    init_args = {init_kw: call_args.pop(init_kw) for init_kw in init_kws}

    # NOTE: If your operating environment does not have a display device,
    # (e.g. a remote server), you can save the predictions and visualize
    # them in local devices.
    if os.environ.get('DISPLAY') is None and call_args['show']:
        print_log(
            'Display device not found. `--show` is forced to False',
            logger='current',
            level=logging.WARNING)
        call_args['show'] = False

    return init_args, call_args


def main():
    """Run monocular 3D detection from the command line."""
    # TODO: Support inference of point cloud numpy file.
    init_args, call_args = parse_args()

    inferencer = MonoDet3DInferencer(**init_args)
    inferencer(**call_args)

    # Report the save location only when something could have been written.
    saved_anything = not (call_args['no_save_vis']
                          and call_args['no_save_pred'])
    if call_args['out_dir'] != '' and saved_anything:
        print_log(
            f'results have been saved at {call_args["out_dir"]}',
            logger='current')
101 changes: 101 additions & 0 deletions demo/new_multi_modality_demo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
from argparse import ArgumentParser

from mmengine.logging import print_log

from mmdet3d.apis import MultiModalityDet3DInferencer


def parse_args():
    """Parse CLI arguments and split them into inferencer init/call kwargs.

    Returns:
        tuple[dict, dict]: ``(init_args, call_args)`` where ``init_args``
        holds ``model``/``weights``/``device`` used to construct the
        inferencer, and ``call_args`` holds ``inputs`` (points, image and
        infos) plus the runtime options forwarded to the inferencer call.
    """
    parser = ArgumentParser()
    parser.add_argument('pcd', help='Point cloud file')
    parser.add_argument('img', help='Image file')
    parser.add_argument('infos', help='Infos file with annotations')
    parser.add_argument('model', help='Config file')
    parser.add_argument('weights', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--cam-type',
        type=str,
        default='CAM_BACK',
        help='choose camera type to inference')
    parser.add_argument(
        '--pred-score-thr',
        type=float,
        default=0.3,
        help='bbox score threshold')
    parser.add_argument(
        '--out-dir',
        type=str,
        default='outputs',
        help='Output directory of prediction results.')
    parser.add_argument(
        '--show',
        action='store_true',
        help='Show online visualization results')
    parser.add_argument(
        '--wait_time',
        type=float,
        default=-1,
        # BUG FIX: original implicit concatenation rendered "showingresults";
        # a trailing space is required on the first fragment.
        help='The interval of show (s). Demo will be blocked in showing '
        'results, if wait_time is -1. Defaults to -1.')
    parser.add_argument(
        '--no-save-vis',
        action='store_true',
        help='Do not save detection vis results')
    parser.add_argument(
        '--no-save-pred',
        action='store_true',
        help='Do not save detection json results')
    parser.add_argument(
        '--print-result',
        action='store_true',
        help='Whether to print the results.')
    call_args = vars(parser.parse_args())

    call_args['inputs'] = dict(
        points=call_args.pop('pcd'),
        img=call_args.pop('img'),
        infos=call_args.pop('infos'))
    # `--cam-type` is accepted for CLI compatibility but is not consumed by
    # the multi-modality inferencer call path here — presumably resolved via
    # the infos file; TODO confirm.
    call_args.pop('cam_type')

    # Nothing would be written in this case, so disable the output directory.
    if call_args['no_save_vis'] and call_args['no_save_pred']:
        call_args['out_dir'] = ''

    init_kws = ['model', 'weights', 'device']
    init_args = {init_kw: call_args.pop(init_kw) for init_kw in init_kws}

    # NOTE: If your operating environment does not have a display device,
    # (e.g. a remote server), you can save the predictions and visualize
    # them in local devices.
    if os.environ.get('DISPLAY') is None and call_args['show']:
        print_log(
            'Display device not found. `--show` is forced to False',
            logger='current',
            level=logging.WARNING)
        call_args['show'] = False

    return init_args, call_args


def main():
    """Run multi-modality (point cloud + image) 3D detection from the CLI."""
    # TODO: Support inference of point cloud numpy file.
    init_args, call_args = parse_args()

    inferencer = MultiModalityDet3DInferencer(**init_args)
    inferencer(**call_args)

    # Report the save location only when something could have been written.
    saved_anything = not (call_args['no_save_vis']
                          and call_args['no_save_pred'])
    if call_args['out_dir'] != '' and saved_anything:
        print_log(
            f'results have been saved at {call_args["out_dir"]}',
            logger='current')
Loading

0 comments on commit 5c51f1a

Please sign in to comment.