-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
166 lines (146 loc) · 7.14 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
import os, sys, stat
import sys
import optparse
import configparser
import pickle
import shutil
import platform
from time import time
from subprocess import call
from os import system, rename
# Simulator imports
from simulator.Simulator import *
from simulator.environment.AzureFog import *
from simulator.environment.BitbrainFog import *
from simulator.workload.StaticWorkload_StaticDistribution import *
from simulator.workload.BitbrainWorkload2 import *
from simulator.workload.Azure2017Workload import *
from simulator.workload.Azure2019Workload import *
# Scheduler imports
from scheduler.IQR_MMT_Random import IQRMMTRScheduler
from scheduler.MAD_MMT_Random import MADMMTRScheduler
from scheduler.MAD_MC_Random import MADMCRScheduler
from scheduler.LR_MMT_Random import LRMMTRScheduler
from scheduler.Random_Random_FirstFit import RFScheduler
from scheduler.Random_Random_LeastFull import RLScheduler
from scheduler.Threshold_MMT_Random import TMMTRScheduler
from scheduler.Threshold_MMT_LeastFull import TMMTLScheduler
from scheduler.RLR_MMT_Random import RLRMMTRScheduler
from scheduler.Threshold_MC_Random import TMCRScheduler
from scheduler.Random_Random_Random import RandomScheduler
from scheduler.HGP_LBFGS import HGPScheduler
# Provisioner imports
from provisioner.Provisioner import Provisioner
from provisioner.Random_Provisioner import RandomProvisioner
from provisioner.DecisionNN import DecisionNNProvisioner
from provisioner.ACOLSTM import ACOLSTMProvisioner
from provisioner.ACOARIMA import ACOARIMAProvisioner
from provisioner.UAHS import UAHSProvisioner
from provisioner.CAHS import CAHSProvisioner
from provisioner.SemiDirect import SemiDirectProvisioner
from provisioner.Narya import NaryaProvisioner
from provisioner.CILP import CILPProvisioner
from provisioner.CILP_IL import CILP_ILProvisioner
from provisioner.CILP_Trans import CILP_TransProvisioner
# Auxiliary imports
from stats.Stats import *
from utils.Utils import *
from pdb import set_trace as bp
from sys import argv
import argparse
# --- Command-line interface ---------------------------------------------
# Both flags are plain strings; their values are suffixed with 'Workload' /
# 'Provisioner' and resolved to classes inside initalizeEnvironment().
usage = "usage: python main.py --provisioner <provisioner> --workload <workload>"
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('--provisioner',
                    help='Name of provisioner. One of ACOARIMA, ACOLSTM, DecisionNN, SemiDirect, UAHS, Narya, CAHS, or CILP, CILP_IL, CILP_Trans.')
parser.add_argument('--workload',
                    help='Name of workload. One of Azure2017, Azure2019 or Bitbrain.')
args = parser.parse_args()

# --- Global constants ----------------------------------------------------
NUM_SIM_STEPS = 200        # number of simulation intervals run in __main__
HOSTS = 10 * 5             # host count passed to AzureFog
CONTAINERS = HOSTS         # container capacity passed to provisioner/Simulator
TOTAL_POWER = 1000         # passed to Simulator; presumably total power budget — TODO confirm units
ROUTER_BW = 10000          # passed to Simulator; presumably router bandwidth — TODO confirm units
INTERVAL_TIME = 300 # seconds
NEW_CONTAINERS = 7         # containers the workload generates per interval

# Proposed: CILP. Ablations: CILP_IL, CILP_Trans
# Baselines: ACOARIMA, ACOLSTM, DecisionNN, SemiDirect, UAHS, Narya, CAHS
# NOTE(review): the original 'initalizeEnvironment' spelling (sic) is kept
# because __main__ calls the function under this exact name.
def initalizeEnvironment(workload, provisioner):
	"""Build datacenter, workload, scheduler, provisioner and simulator,
	deploy the first batch of containers and record the first interval.

	Args:
		workload (str): workload family name ('Bitbrain', 'Azure2017' or
			'Azure2019'); resolved to the class '<workload>Workload'.
		provisioner (str): provisioner name (e.g. 'CILP', 'UAHS', ...);
			resolved to the class '<provisioner>Provisioner'.

	Returns:
		tuple: (datacenter, workload, scheduler, provisioner, env, stats).
	"""
	# Initialize simple fog datacenter
	''' Can be SimpleFog, BitbrainFog, AzureFog '''
	datacenter = AzureFog(HOSTS)
	# Initialize workload
	''' Can be Bitbrain, Azure2017, Azure2019 '''
	# Resolve the class by a module-namespace lookup instead of eval():
	# identical result for valid names, but a malicious --workload string
	# can no longer execute arbitrary code.
	workload = globals()[workload + 'Workload'](NEW_CONTAINERS, 1.5)
	# Initialize scheduler
	''' Can be LRMMTR, RF, RL, RM, Random, RLRMMTR, TMCR, TMMR, TMMTR, GA, GOBI (arg = 'energy_latency_'+str(HOSTS)) '''
	scheduler = RLScheduler()
	# Initialize provisioner (same safe class lookup as the workload)
	''' Can be CILP, ACOARIMA, ACOLSTM, DecisionNN, SemiDirect, UAHS, Narya, CAHS '''
	provisioner = globals()[provisioner + 'Provisioner'](datacenter, CONTAINERS)
	# Initialize Environment
	hostlist = datacenter.generateHosts()
	env = Simulator(TOTAL_POWER, ROUTER_BW, scheduler, provisioner, CONTAINERS, INTERVAL_TIME, hostlist)
	# Execute first step: deploy, place and allocate the initial containers
	newcontainerinfos = workload.generateNewContainers(env.interval) # New containers info
	deployed = env.addContainersInit(newcontainerinfos) # Deploy new containers and get container IDs
	start = time()
	decision = scheduler.placement(deployed) # Decide placement using container ids
	schedulingTime = time() - start  # only the placement decision is timed
	migrations = env.allocateInit(decision) # Schedule containers
	workload.updateDeployedContainers(env.getCreationIDs(migrations, deployed)) # Update workload allocated using creation IDs
	print("Deployed containers' creation IDs:", env.getCreationIDs(migrations, deployed))
	print("Containers in host:", env.getContainersInHosts())
	print("Schedule:", env.getActiveContainerList())
	printDecisionAndMigrations(decision, migrations)
	# Initialize stats and record the first interval (no destroyed containers yet)
	stats = Stats(env, workload, datacenter, scheduler)
	stats.saveStats(deployed, migrations, [], deployed, decision, provisioner.decision, schedulingTime)
	return datacenter, workload, scheduler, provisioner, env, stats
def stepSimulation(workload, scheduler, provisioner, env, stats):
	"""Run one simulation interval: provision, deploy/destroy containers,
	select and place containers, then record the interval's statistics."""
	# New containers requested by the workload for this interval
	new_infos = workload.generateNewContainers(env.interval)
	provision_decision, orphaned = provisioner.provision()
	deployed, destroyed = env.addContainers(new_infos)
	# Time only the scheduling decision itself
	t_start = time()
	selected = scheduler.selection()
	decision = scheduler.filter_placement(scheduler.placement(selected + deployed))
	elapsed = time() - t_start
	# Apply the placement and propagate creation IDs back to the workload
	migrations = env.simulationStep(decision)
	workload.updateDeployedContainers(env.getCreationIDs(migrations, deployed))
	# Per-interval diagnostics
	print("Deployed containers' creation IDs:", env.getCreationIDs(migrations, deployed))
	print("Deployed:", len(env.getCreationIDs(migrations, deployed)), "of", len(new_infos), [i[0] for i in new_infos])
	print("Destroyed:", len(destroyed), "of", env.getNumActiveContainers())
	print("Containers in host:", env.getContainersInHosts())
	print("Num active containers:", env.getNumActiveContainers())
	print("Host allocation:", [(c.getHostID() if c else -1) for c in env.containerlist])
	print("Num Hosts:", len(env.hostlist))
	printDecisionAndMigrations(decision, migrations)
	stats.saveStats(deployed, migrations, destroyed, selected, decision, provision_decision, elapsed)
def saveStats(stats, datacenter, workload, env, end=True):
	"""Persist simulation results under a run-specific logs directory.

	The directory name encodes the datacenter/workload class names and all
	global constants, so runs with different configurations never collide.
	The directory is always wiped and recreated; datasets, graphs and the
	pickled Stats object are written only when `end` is True.

	Args:
		stats: Stats object holding the recorded simulation data.
		datacenter: datacenter object; its class name goes into the dir name.
		workload: workload object; its class name goes into the dir name.
		env: simulation environment. Unused here; kept for interface
			compatibility with existing callers.
		end (bool): False = just reset the output directory (mid-run
			checkpoint); True = also dump datasets, graphs and the pickle.

	Returns:
		The table from stats.generateGraphs() when `end` is True, else None.
	"""
	# Build "logs/<Datacenter>_<Workload>_<consts...>" in one join instead of
	# nine incremental '+=' concatenations.
	run_params = [datacenter.__class__.__name__, workload.__class__.__name__,
				  NUM_SIM_STEPS, HOSTS, CONTAINERS, TOTAL_POWER, ROUTER_BW,
				  INTERVAL_TIME, NEW_CONTAINERS]
	dirname = "logs/" + "_".join(str(p) for p in run_params)
	os.makedirs("logs", exist_ok=True)  # race-free vs. exists()+mkdir()
	if os.path.exists(dirname):
		shutil.rmtree(dirname, ignore_errors=True)
	os.mkdir(dirname)
	if not end:
		return
	stats.generateDatasets(dirname)
	table = stats.generateGraphs(dirname)
	# stats.generateCompleteDatasets(dirname)
	# Drop back-references so the pickle is small and self-contained.
	stats.env, stats.workload, stats.datacenter, stats.scheduler = None, None, None, None
	with open(dirname + '/' + dirname.split('/')[1] + '.pk', 'wb') as handle:
		pickle.dump(stats, handle)
	return table
if __name__ == '__main__':
	# Build the full simulation stack from the CLI-selected workload and
	# provisioner (function name keeps the original spelling).
	datacenter, workload, scheduler, provisioner, env, stats = initalizeEnvironment(args.workload, args.provisioner)
	for step in range(NUM_SIM_STEPS):
		# `color` presumably comes from the utils.Utils star import — verify.
		print(color.BOLD+"Simulation Interval:", step, color.ENDC)
		stepSimulation(workload, scheduler, provisioner, env, stats)
		# NOTE(review): source indentation was lost; the end=False checkpoint
		# call is assumed to sit inside the loop (it only resets the output
		# directory) — confirm against the upstream repository.
		saveStats(stats, datacenter, workload, env, False)
	# Final save: datasets, graphs and the pickled Stats object.
	saveStats(stats, datacenter, workload, env)