Skip to content

Commit

Permalink
Merge pull request #224 from NREL/dev-circfutures_liaison
Browse files Browse the repository at this point in the history
Updates issue branch for #208 with recent commits
  • Loading branch information
rjhanes authored Jan 21, 2025
2 parents c86cd9d + 3840eca commit 8eb8b40
Show file tree
Hide file tree
Showing 13 changed files with 118 additions and 176 deletions.
Empty file modified celavi/__init__.py
100755 → 100644
Empty file.
4 changes: 2 additions & 2 deletions celavi/component.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ def create_pathway_queue(self, from_facility_id: int):
from_facility_id: int
        The starting location of the component.
"""
path_choices = self.context.cost_graph.choose_paths(source=self.in_use_facility)
path_choices = self.context.cost_graph.choose_paths(source_node=self.in_use_facility)
path_choices_dict = {
path_choice["source"]: path_choice for path_choice in path_choices
}
Expand Down Expand Up @@ -192,7 +192,7 @@ def bol_process(self, env):
yield env.timeout(self.initial_lifespan_timesteps)

# Component's next steps are determined and stored in self.pathway
self.create_pathway_queue(self.in_use_facility_id)
self.create_pathway_queue(self.in_use_facility)

# Component is decremented from in use inventories
count_inventory.increment_quantity(self.kind, -1, env.now)
Expand Down
34 changes: 18 additions & 16 deletions celavi/costgraph.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ def list_of_tuples(
else:
return list(map(lambda x, y: (x, y), list1, list2))

def find_nearest(self, source: str, crit: str):
def find_nearest(self, source_node: str, crit: str):
"""
Method that finds the nearest nodes to source and returns that node name,
the path length to the nearest node, and the path to the nearest node as
Expand All @@ -251,7 +251,7 @@ def find_nearest(self, source: str, crit: str):
Parameters
----------
source
source_node
Name of node where this path begins.
crit
Criteria to calculate path "length". May be cost or dict.
Expand All @@ -267,10 +267,10 @@ def find_nearest(self, source: str, crit: str):

# Calculate the length of paths from fromnode to all other nodes
lengths = nx.single_source_bellman_ford_path_length(
self.supply_chain, source, weight=crit
self.supply_chain, source_node, weight=crit
)

short_paths = nx.single_source_bellman_ford_path(self.supply_chain, source)
short_paths = nx.single_source_bellman_ford_path(self.supply_chain, source_node)

# We are only interested in a particular type(s) of node
targets = list(
Expand All @@ -282,7 +282,10 @@ def find_nearest(self, source: str, crit: str):
# return the smallest of all lengths to get to typeofnode
if subdict:
# dict of shortest paths to all targets
nearest = min(subdict, key=subdict.get)
try:
nearest = min(subdict, key=subdict.get)
except NameError:
pdb.set_trace()
timeout = nx.get_node_attributes(self.supply_chain, "timeout")
timeout_list = [
value for key, value in timeout.items() if key in short_paths[nearest]
Expand All @@ -303,13 +306,12 @@ def find_nearest(self, source: str, crit: str):

# create dictionary for this preferred pathway cost and decision
# criterion and append to the pathway_crit_history
_fac_id = self.supply_chain.nodes[source]["facility_id"]
_fac_id = self.supply_chain.nodes[source_node]["facility_id"]
_loc_line = self.loc_df[self.loc_df.facility_id == _fac_id]
_bol_crit = nx.shortest_path_length(
self.supply_chain,
source="manufacturing_"
+ str(self.find_upstream_neighbor(node_id=_fac_id, crit="cost")),
target=str(source),
source=self.find_upstream_neighbor(node_id=_fac_id, crit="cost"),
target=source_node,
weight=crit,
method="bellman-ford",
)
Expand Down Expand Up @@ -739,7 +741,7 @@ def build_supplychain_graph(self):
flush=True,
)

def choose_paths(self, source: str = None, crit: str = "cost"):
def choose_paths(self, source_node: str = None, crit: str = "cost"):
"""
Calculate total pathway costs (sum of all node and edge costs) over
all possible pathways between source and target nodes. Other "costs"
Expand All @@ -748,7 +750,7 @@ def choose_paths(self, source: str = None, crit: str = "cost"):
Parameters
----------
source : str
source_node : str
Node name in the format "facilitytype_facilityid".
crit : str
Criterion on which "shortest" path is defined. Defaults to cost.
Expand All @@ -762,17 +764,17 @@ def choose_paths(self, source: str = None, crit: str = "cost"):
# Since all edges now contain both processing costs (for the u node)
# as well as transport costs (including distances), all we need to do
# is get the shortest path using the 'cost' attribute as the edge weight
if source is None:
if source_node is None:
raise ValueError(f"CostGraph.choose_paths: source node cannot be None")
else:
if source not in self.supply_chain.nodes():
raise ValueError(f"CostGraph.choose_paths: {source} not in CostGraph")
if source_node not in self.supply_chain.nodes():
raise ValueError(f"CostGraph.choose_paths: {source_node} not in CostGraph")
else:
_paths = []
_chosen_path = self.find_nearest(source=source, crit=crit)
_chosen_path = self.find_nearest(source_node=source_node, crit=crit)
_paths.append(
{
"source": source,
"source": source_node,
"target": _chosen_path[0],
"path": _chosen_path[2],
"cost": _chosen_path[1],
Expand Down
38 changes: 8 additions & 30 deletions celavi/costmethods.py
Original file line number Diff line number Diff line change
Expand Up @@ -622,10 +622,7 @@ def glass_wool_manufacturing(self, path_dict):
_learn_dict['learn rate'],
self.run
)
_loss = apply_array_uncertainty(
path_dict['path_split']['glass wool manufacturing']['fraction'],
self.run
)
_loss = 0.0
_initial_cost = apply_array_uncertainty(
path_dict['cost uncertainty']['glass wool manufacturing']['initial cost'],
self.run
Expand All @@ -637,10 +634,7 @@ def glass_wool_manufacturing(self, path_dict):

elif path_dict['cost uncertainty']['glass wool manufacturing']['uncertainty'] == 'stochastic':
if path_dict['year'] == self.start_year:
_loss = apply_stoch_uncertainty(
path_dict['path_split']['glass wool manufacturing']['fraction'],
seed=self.seed
)
_loss = 0.0
_learn_rate = -1.0 * apply_stoch_uncertainty(
_learn_dict['learn rate'],
seed=self.seed
Expand All @@ -653,26 +647,21 @@ def glass_wool_manufacturing(self, path_dict):
path_dict['cost uncertainty']['glass wool manufacturing']['revenue'],
seed=self.seed
)
if isinstance(path_dict['path_split']['glass wool manufacturing']['fraction'],dict):
path_dict['path_split']['glass wool manufacturing']['fraction']['value'] = _loss
if isinstance(_learn_dict['learn rate'], dict):
_learn_dict['learn rate']['value'] = _learn_rate
if isinstance(path_dict['cost uncertainty']['glass wool manufacturing']['initial cost'],dict):
path_dict['cost uncertainty']['glass wool manufacturing']['initial cost']['value'] = _initial_cost
if isinstance(path_dict['cost uncertainty']['glass wool manufacturing']['revenue'], dict):
path_dict['cost uncertainty']['glass wool manufacturing']['revenue']['value'] = _revenue
else:
_loss = path_dict['path_split']['glass wool manufacturing']['fraction']['value']
_loss = 0.0
_learn_rate = _learn_dict['learn rate']['value']
_initial_cost = path_dict['cost uncertainty']['glass wool manufacturing']['initial cost']['value']
_revenue = path_dict['cost uncertainty']['glass wool manufacturing']['revenue']['value']
else:
# No uncertainty
_learn_rate = apply_array_uncertainty(_learn_dict['learn rate'], self.run)
_loss = apply_array_uncertainty(
path_dict['path_split']['glass wool manufacturing']['fraction'],
self.run
)
_loss = 0.0
_initial_cost = path_dict['cost uncertainty']['glass wool manufacturing']['initial cost']
_revenue = path_dict['cost uncertainty']['glass wool manufacturing']['revenue']

Expand Down Expand Up @@ -732,10 +721,7 @@ def scm_manufacturing(self, path_dict):
_learn_dict['learn rate'],
self.run
)
_loss = apply_array_uncertainty(
path_dict['path_split']['scm manufacturing']['fraction'],
self.run
)
_loss = 0.0
_initial_cost = apply_array_uncertainty(
path_dict['cost uncertainty']['scm manufacturing']['initial cost'],
self.run
Expand All @@ -747,10 +733,7 @@ def scm_manufacturing(self, path_dict):

elif path_dict['cost uncertainty']['scm manufacturing']['uncertainty'] == 'stochastic':
if path_dict['year'] == self.start_year:
_loss = apply_stoch_uncertainty(
path_dict['path_split']['scm manufacturing']['fraction'],
seed=self.seed
)
_loss = 0.0
_learn_rate = -1.0 * apply_stoch_uncertainty(
_learn_dict['learn rate'],
seed=self.seed
Expand All @@ -763,26 +746,21 @@ def scm_manufacturing(self, path_dict):
path_dict['cost uncertainty']['scm manufacturing']['revenue'],
seed=self.seed
)
if isinstance(path_dict['path_split']['scm manufacturing']['fraction'],dict):
path_dict['path_split']['scm manufacturing']['fraction']['value'] = _loss
if isinstance(_learn_dict['learn rate'], dict):
_learn_dict['learn rate']['value'] = _learn_rate
if isinstance(path_dict['cost uncertainty']['scm manufacturing']['initial cost'],dict):
path_dict['cost uncertainty']['scm manufacturing']['initial cost']['value'] = _initial_cost
if isinstance(path_dict['cost uncertainty']['scm manufacturing']['revenue'], dict):
path_dict['cost uncertainty']['scm manufacturing']['revenue']['value'] = _revenue
else:
_loss = path_dict['path_split']['scm manufacturing']['fraction']['value']
_loss = 0.0
_learn_rate = _learn_dict['learn rate']['value']
_initial_cost = path_dict['cost uncertainty']['scm manufacturing']['initial cost']['value']
_revenue = path_dict['cost uncertainty']['scm manufacturing']['revenue']['value']
else:
# No uncertainty
_learn_rate = apply_array_uncertainty(_learn_dict['learn rate'], self.run)
_loss = apply_array_uncertainty(
path_dict['path_split']['scm manufacturing']['fraction'],
self.run
)
_loss = 0.0
_initial_cost = path_dict['cost uncertainty']['scm manufacturing']['initial cost']
_revenue = path_dict['cost uncertainty']['scm manufacturing']['revenue']

Expand Down
62 changes: 17 additions & 45 deletions celavi/pylca_celavi/des_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,12 @@
class PylcaCelavi:
def __init__(
self,
data_dir,
liaison_params,
lcia_des_filename,
shortcutlca_filename,
intermediate_demand_filename,
dynamic_lci_filename,
electricity_grid_spatial_level,
static_lci_filename,
uslci_tech_filename,
uslci_emission_filename,
uslci_process_filename,
stock_filename,
emissions_lci_filename,
traci_lci_filename,
use_shortcut_lca_calculations,
verbose,
substitution_rate,
run=0,
):
"""
Expand All @@ -40,55 +31,36 @@ def __init__(
lcia_des_filename: str
Path to file that stores calculated impacts for passing back to the
discrete event simulation.
data_dir: str
Path to outer directory of data repository.
liaison_params: Dict
Dictionary of liaison-specific parameters
shortcutlca_filename: str
Path to file where previously calculated impacts are stored. This file
can be used instead of re-calculating impacts from the inventory.
intermediate_demand_filename: str
Path to file that stores the final demand vector every time the LCIA
calculations are run. For debugging purposes only.
dynamic_lci_filename: str
Path to the LCI dataset which changes with time.
electricity_grid_spatial_level: str
Specification of grid spatial level used for lca calculations. Must be
"state" or "national".
static_lci_filename: str
Path to the LCI dataset which does not change with time.
uslci_filename: str
Path to the U.S. LCI dataset pickle file.
stock_filename: str
Filename for storage pickle variable.
emissions_lci_filename: str
Filename for emissions inventory.
traci_lci_filename: str
Filename for TRACI 2.0 characterization factor dataset.
use_shortcut_lca_calculations: Boolean
Boolean flag for using previously calculating impact data or running the
optimization code to re-calculate impacts.
verbose: int
0 to suppress detailed print statements
1 to allow print statements
substitution_rate: Dict
Dictionary of material name: substitution rates for materials displaced by the
circular component.
run: int
Model run. Defaults to zero.
"""
# filepaths for files used in the pylca calculations
self.generated_dir = os.path.join(data_dir, 'generated','liaison')
self.inputs_dir = os.path.join(data_dir, 'inputs','liaison')
# create liaison-specific input directories if they don't exist
for _dir in [self.generated_dir, self.inputs_dir]:
if not os.path.isdir(_dir):
os.makedirs(
_dir
)
self.liaison_params = liaison_params
self.lcia_des_filename = lcia_des_filename
self.shortcutlca_filename = shortcutlca_filename
self.intermediate_demand_filename = intermediate_demand_filename
self.dynamic_lci_filename = dynamic_lci_filename
self.electricity_grid_spatial_level = electricity_grid_spatial_level
self.static_lci_filename = static_lci_filename
self.uslci_tech_filename = uslci_tech_filename
self.uslci_emission_filename = uslci_emission_filename
self.uslci_process_filename = uslci_process_filename
self.stock_filename = stock_filename
self.emissions_lci_filename = emissions_lci_filename
self.traci_lci_filename = traci_lci_filename
self.use_shortcut_lca_calculations = use_shortcut_lca_calculations
self.verbose = verbose
self.substitution_rate = substitution_rate
self.run = run

# The results file should be removed if present. The LCA results are appended to the results file.
Expand Down Expand Up @@ -283,7 +255,7 @@ def pylca_run_main(self, df, verbose=0):
index=False,
header=False,
)
res.to_csv('results.csv',mode='a', index=False)
res.to_csv('results_checked_to_be_deleted.csv',mode='a', index=False)
res_calculated = res


Expand Down Expand Up @@ -312,7 +284,7 @@ def pylca_run_main(self, df, verbose=0):
res_df['impacts'] = res_df['lcia']
res_df['impact'] = res_df['value']
res_df2 = res_df[['year','facility_id','material','route_id','state','stage','impacts','impact','run']]
res_df2.to_csv(self.lcia_des_filename, mode='a', header=False, index=False)
res_df2.to_csv(self.lcia_des_filename, mode='a', header=True, index=False)

# This is the result that needs to be analyzed every timestep.
return res_df2
14 changes: 6 additions & 8 deletions celavi/pylca_celavi/liaison/edit_activity_ecoinvent.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def electricity_correction(exchange_ob):
"""
if 'electricity' in exchange_ob['name']:
name_of_flow = 'ReEDS_State_Grid_Mix'
name_of_flow = 'market group for electricity, high voltage'
else:
name_of_flow = exchange_ob['name']

Expand All @@ -39,7 +39,7 @@ def user_controlled_editing_ecoinvent_activity(process_selected_as_foreground,ye
-------
"""
print('Editing activities within ecoinvent to US location and US state wise grid mix',flush=True)
new_location = location_under_study
new_location = str(location_under_study)

# These variables are used to create inventory dataframe
process = []
Expand All @@ -64,16 +64,14 @@ def user_controlled_editing_ecoinvent_activity(process_selected_as_foreground,ye


#Extracting ecoinvent database for activity and flows and creating a LiAISON friendly dataframe
for key in process_selected_as_foreground.keys():
for exch in process_selected_as_foreground[key].exchanges():
process.append(process_selected_as_foreground[key]['name'])
for exch in process_selected_as_foreground.exchanges():
process.append(process_selected_as_foreground['name'])
value.append(exch['amount'])
unit.append(exch['unit'])

#Changing name of electricity flow
#Changing name of electricity flow
#name_of_flow = electricity_correction(exch)
name_of_flow = exch['name']
name_of_flow = electricity_correction(exch)
flow.append(name_of_flow)

if exch['type'] == 'production':
Expand Down Expand Up @@ -128,7 +126,7 @@ def user_controlled_editing_ecoinvent_activity(process_selected_as_foreground,ye
example['code'] = flow_code

#Sanity check to write the dataframe. Can be deleted later
example.to_csv(data_dir+process_selected_as_foreground+str(year_of_study)+location_under_study+'.csv',index=False)
example.to_csv(data_dir+process_selected_as_foreground['name']+str(year_of_study)+location_under_study+'.csv',index=False)
run_filename = example


Expand Down
Loading

0 comments on commit 8eb8b40

Please sign in to comment.