-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathdream.py
159 lines (128 loc) · 5.68 KB
/
dream.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import argparse
import numpy as np
import pickle
import random
import time
import os
import yaml
from yaml import Loader
from pytheus import fancy_classes as fc, theseus as th, help_functions as hf
import csv
import pandas as pd
import torch
import re
from pytheus.lossfunctions import fidelity, make_lossString_entanglement
from datagen import generatorGraphFidelity, constructGraph
from neuralnet import prep_data, load_model, dream_model, neuron_selector
# This function obtains the N maximum elements in a list. This is used for dreaming with the best fidelity examples
def maxNElems(listor, N):
    """Return the indices of the N largest values in `listor`, best first.

    Args:
        listor: sequence of comparable values (list or 1-D numpy array).
        N: number of indices to return (clipped to len(listor) if larger).

    Returns:
        List of int indices, sorted by descending value; ties broken by
        lowest index (first occurrence wins), matching the original
        selection order.

    Unlike the previous implementation, the input is NOT mutated (the old
    version zeroed out entries of the caller's array) and values <= 0,
    including negatives, are ranked correctly.
    """
    # Python's sort is stable even with reverse=True, so equal values keep
    # ascending-index order — reproducing "first occurrence wins".
    order = sorted(range(len(listor)), key=lambda i: listor[i], reverse=True)
    return order[:N]
# --- Configuration ---------------------------------------------------------
# Read the dreaming hyperparameters from the YAML config file.
# NOTE(review): yaml.load with the plain Loader will construct arbitrary
# Python objects from tagged YAML; prefer yaml.safe_load if the config file
# is ever not fully trusted. The stream is also never closed — consider a
# `with` block.
stream = open("configs/dream.yaml", 'r')
cnfg = yaml.load(stream, Loader=Loader)
learnRate = cnfg['learnRate'] # learning rate of inverse training
num_of_epochs = cnfg['num_of_epochs'] # for how many epochs should we run the inverse training?
nnType = cnfg['nnType'] # the type of neural network we wish to examine
modelname = cnfg['modelname'] # The trained neural network
# Only the 'random' start-graph mode uses more than one starting graph.
num_start_graphs = cnfg['num_start_graphs'] if cnfg['start_graph'] == 'random' else 1
seed = cnfg['seed']
# Seed the stdlib RNG so the random start-graph indices drawn below are
# reproducible for a given config.
random.seed(seed)
# load data
# Two supported formats: a pickle containing (data, results), or a CSV with
# two ';'-separated columns: 'weights' (stringified list of graph edge
# weights) and 'res' (the associated property value).
if cnfg['datafile'].split('.')[-1] == 'pkl':
    # Load up the training dataset
    with open(cnfg['datafile'], 'rb') as f:
        data_full, res_full = pickle.load(f)
    data = data_full[:]
    res = res_full[:]
else:
    df = pd.read_csv(cnfg['datafile'], names=['weights', 'res'], delimiter=";", nrows=cnfg['num_of_examples_fixed'])
    # The 'weights' column stores a list repr; eval parses it back to a list.
    # NOTE(review): eval on file contents is unsafe for untrusted data and
    # the bare except hides unrelated errors — consider ast.literal_eval and
    # `except (SyntaxError, NameError)`.
    try:
        data = np.array([eval(graph) for graph in df['weights']])
    except:
        # Fall back for numpy-style reprs: strip newlines and collapse the
        # whitespace separators into commas before evaluating.
        data = np.array(
            [eval(re.sub(r" *", ',', graph.replace('\n', '').replace('[ ', '['))) for graph in df['weights']])
    res = df['res'].to_numpy()
# Split into train/test arrays; 0.95 is presumably the train fraction —
# TODO confirm against prep_data's definition in neuralnet.py.
vals_train_np, vals_test_np, res_train_np, res_test_np = prep_data(data, res, 0.95)
# --- Choose the starting graph and the target neuron -----------------------
best_graph = np.argmax(res_train_np) # Index pertaining to the graph with the highest fidelity in the dataset
# Draw the candidate random start-graph indices up front (reproducible via
# the seeded RNG). Use randrange so every draw is a valid index: the old
# random.randint(0, len(res_train_np)) had an INCLUSIVE upper bound and
# could yield len(res_train_np), an out-of-range index into vals_train_np.
randinds = []
for ii in range(num_start_graphs):
    randinds.append(random.randrange(len(res_train_np)))
proc_id = 2
# replace with slurm parser
# choose start graph
start_graph_id = proc_id % num_start_graphs
if cnfg['start_graph'] == 'best':
    ind = best_graph
elif cnfg['start_graph'] == 'random':
    ind = randinds[start_graph_id]
else:
    ind = start_graph_id
# choose neuron from array given in config
# neuron_array is stored as a string of (layer, neuron) pairs in the config.
# NOTE(review): eval on config text is fine for trusted configs; otherwise
# prefer ast.literal_eval.
neuron_id = proc_id // num_start_graphs
neuron_array = eval(cnfg['neuron_array'])
neuron_id = neuron_id % len(neuron_array)
cnfg['layer'], cnfg['neuron'] = neuron_array[neuron_id]
# Build the per-run output path: one folder per (layer, seed), one CSV per
# (start graph, neuron) combination.
cnfg['dream_file'] += f"_layer{cnfg['layer']}"
cnfg['dream_file'] += f'_{seed}'
dreamfolder = cnfg['dream_file']
cnfg['dream_file'] += f'/dream{start_graph_id}_{neuron_id}.csv'
print(cnfg['dream_file'])
# --- Build the target quantum state and load the trained model -------------
kets = hf.makeState(cnfg['state'])
state = fc.State(kets, normalize=True)
cnfg['dims'] = th.stateDimensions(state.kets)
# We generate a graph for the purposes of obtaining some additional properties about the graphs we are generating (e.g. we have 24 edge)
input_graph, ket_amplitudes, output_fidelity = generatorGraphFidelity(cnfg['dims'], state, num_edges=None,
                                                                      short_output=False)
NN_INPUT = len(input_graph.weights)  # one network input per graph edge weight
NN_OUTPUT = 1  # single scalar output (the predicted property value)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:", device)
# Load up our trained neural network
direc = os.getcwd() + f'/models/{modelname}'
model = load_model(direc, device, NN_INPUT, NN_OUTPUT, nnType)
# Here, we look over our training examples and choose the one
# which activates the neuron the most.
startPred = np.zeros(len(vals_train_np))
sysDict = hf.get_sysdict(cnfg['dims'])
cnfgfid = {"heralding_out": False, "imaginary": False}
if(cnfg['prop'] == 'concurrence'): # Computes the concurrence of a random graph
    fid = make_lossString_entanglement(input_graph,sysDict)
else: # Computes the fidelity, though this can be reconfigured for any other quantum property we wanna measure
    fid = fidelity(input_graph,state,cnfgfid)
if (cnfg['bestExamples']):
    # Truncated model whose forward pass returns the activation of the
    # chosen (layer, neuron) instead of the final prediction.
    intermediateModel = neuron_selector(model,device, cnfg['layer'],cnfg['neuron'])
    for ii in range(len(vals_train_np)):
        # NOTE(review): `fid` is rebound on every iteration, replacing the
        # loss object built above — confirm constructGraph returns it
        # unchanged; otherwise the `fid` used later is the last loop value.
        fid, temp_graph = constructGraph(vals_train_np[ii], cnfg['dims'], fid, cnfg['prop'])
        # Evaluate starting prediction
        startPred[ii] = intermediateModel(torch.tensor(temp_graph.weights, dtype=torch.float).to(device))
    # If best examples is enabled, we choose the graph that triggers the maximum activation on the neuron.
    bestInds = maxNElems(startPred,num_start_graphs)
    ind = bestInds[start_graph_id]
    print(ind)
    print(bestInds)
    print(start_graph_id)
# We proceed to generate an initial set of edges from the dreaming process.
# 'zero' starts from an all-zero-weight graph; otherwise we start from the
# training example chosen above (best / random / fixed index).
if cnfg['start_graph'] == 'zero':
    fidel, start_graph = constructGraph([0] * len(input_graph), cnfg['dims'], fid, cnfg['prop'])
else:
    fidel, start_graph = constructGraph(vals_train_np[ind], cnfg['dims'], fid, cnfg['prop'])
start_res = fidel  # property value constructGraph reports for the start graph
# Model's prediction for the starting graph, logged alongside start_res.
start_pred = model(torch.tensor(start_graph.weights, dtype=torch.float).to(device)).item()
if not os.path.exists(dreamfolder):
    os.makedirs(dreamfolder)
# Record the starting point; opened in append mode so repeated runs with the
# same path accumulate rows in the same CSV.
with open(cnfg['dream_file'], 'a') as f:
    writer = csv.writer(f, delimiter=";")
    writer.writerow([start_res, start_pred, start_graph.weights])
start_time = time.time()
# Run the inverse-training ("dreaming") loop; it appends its own rows to
# the dream file as it optimizes the graph weights.
dream_model(model, state, start_graph, cnfg, fid)
print(f"--- done in {time.time() - start_time} seconds ---")