# run_multiclass_inference.py — forked from GillianGrayson/dnam (Fork 0)
# 41 lines (31 loc) · 1.35 KB
import dotenv
import hydra
from omegaconf import DictConfig

# Load environment variables from a `.env` file if one exists.
# `dotenv` recursively searches for `.env` in all folders starting from the
# working directory; `override=True` lets `.env` values replace any variables
# already present in the process environment.
dotenv.load_dotenv(override=True)
@hydra.main(config_path="configs/", config_name="main.yaml")
def main(config: DictConfig):
    """Entry point: run multiclass inference with the Hydra-composed config.

    Args:
        config: The fully composed Hydra configuration (from configs/main.yaml).

    Returns:
        Whatever `experiment.multiclass.inference.inference` returns.
    """
    # Imports are deliberately nested inside the @hydra.main-decorated function
    # to keep shell tab completion fast.
    # See: https://github.com/facebookresearch/hydra/issues/934
    import torch

    from experiment.multiclass.inference import inference
    from src.utils import utils

    # Optional conveniences (disable Python warnings, easier debug-mode access,
    # debug-friendly config forcing). Safe to remove if unwanted.
    utils.extras(config)

    # Report GPU details when CUDA is available.
    if torch.cuda.is_available():
        print('CUDNN VERSION:', torch.backends.cudnn.version())
        print('Number CUDA Devices:', torch.cuda.device_count())
        print('CUDA Device Name:', torch.cuda.get_device_name(0))
        print('CUDA Device Total Memory [GB]:', torch.cuda.get_device_properties(0).total_memory / 1024**3)

    # Pretty-print the resolved config using the Rich library when enabled.
    if config.get("print_config"):
        utils.print_config(config, resolve=True)

    return inference(config)
# Standard script entry guard: run main() only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()