prepare.py
# Standard library
import hashlib
import json
import os
import random
import socket
import time

# Third-party
import numpy as np
import torch

# Project-local
from util import *
from argument import *
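# `parser` and `unimportant_args` are assumed to come from argument.py and
# `find_free_port` from util.py (neither module is shown here). In case util
# did not define it, a minimal sketch asks the OS for an ephemeral port by
# binding to port 0:
if 'find_free_port' not in globals():
    def find_free_port():
        # Bind to port 0 so the OS assigns a free port; return that port.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]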
args = parser.parse_args()

# Coerce the literal strings 'True'/'False'/'None' back to Python values
# (an explicit lookup is safer than eval on command-line input).
literals = {'True': True, 'False': False, 'None': None}
for name in args.__dict__:
    value = getattr(args, name)
    if isinstance(value, str) and value in literals:
        setattr(args, name, literals[value])
    # Hyperparameter samplers are passed as callables; resolve them to
    # concrete values with the generation seed.
    if callable(getattr(args, name)):
        setattr(args, name, getattr(args, name)(args.hparams_gen_seed))
important_args = {k: getattr(args, k) for k in args.__dict__ if k not in unimportant_args}
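# `hashlib` and `json` are unused in this file; presumably `important_args`
# is serialized and hashed elsewhere to derive a stable run identifier.
# A hypothetical sketch of that usage:
#
#     run_id = hashlib.md5(
#         json.dumps(important_args, sort_keys=True, default=str).encode()
#     ).hexdigest()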
# Pick the compute device and record how many GPUs this node exposes.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.device = device
args.gpus_per_node = torch.cuda.device_count()
if args.distributed:
    # Under SLURM the global rank comes from the scheduler; otherwise it is
    # the base rank passed on the command line plus this node's local rank.
    if 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        args.rank = args.rank + args.local_rank  # base rank + local rank
        args.gpu = args.local_rank % torch.cuda.device_count()

    # File-based rendezvous: rank 0 publishes its TCP endpoint in a hostfile
    # that the other ranks poll for.
    jobid = os.environ.get("SLURM_JOBID", '-1')
    hostfile = os.path.join(args.experiment_path, jobid + ".txt")
    if args.rank == 0:
        ip = socket.gethostbyname(socket.gethostname())
        port = find_free_port()
        endpoint = 'tcp://{}:{}'.format(ip, port)
        os.environ['MASTER_ADDR'] = endpoint
        with open(hostfile, "w") as f:
            f.write(endpoint)
    else:
        while not os.path.exists(hostfile):
            time.sleep(1)
        with open(hostfile, "r") as f:
            os.environ['MASTER_ADDR'] = f.read().strip()

    print(args.rank, args.world_size, os.environ['MASTER_ADDR'])
    torch.distributed.init_process_group(backend='nccl',
                                         init_method=os.environ['MASTER_ADDR'],
                                         world_size=args.world_size, rank=args.rank)
    # init_process_group blocks until every rank has joined, so the hostfile
    # is no longer needed; ignore races between ranks removing it.
    try:
        os.remove(hostfile)
    except OSError:
        pass
    torch.cuda.set_device(args.gpu)
else:
    args.gpu = None
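# Design note: the hostfile hand-off is a simple file-system rendezvous. A
# common alternative is init_method='env://', which reads MASTER_ADDR and
# MASTER_PORT from the environment, but that requires agreeing on a port
# before launch; find_free_port avoids that constraint.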
print(args)

# Seed every RNG involved so runs with the same seed are comparable.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Note: benchmark=True lets cuDNN auto-tune kernels, which may select
# non-deterministic algorithms and partially undermine deterministic=True;
# set benchmark=False if exact reproducibility matters more than speed.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
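# Presumed usage (hypothetical): training scripts import this module for its
# side effects and the objects it prepares, e.g.
#
#     from prepare import args, device, important_args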