
Commit

Merge pull request Pyomo#3245 from adowling2/pyomo-doe-fixes
Pyomo.DoE bug fixes
blnicho authored May 7, 2024
2 parents 6e08021 + 87e6cc4 commit 662c843
Showing 2 changed files with 120 additions and 24 deletions.
116 changes: 105 additions & 11 deletions pyomo/contrib/doe/doe.py
@@ -38,6 +38,9 @@
from pyomo.contrib.sensitivity_toolbox.sens import get_dsdp
from pyomo.contrib.doe.scenario import ScenarioGenerator, FiniteDifferenceStep
from pyomo.contrib.doe.result import FisherResults, GridSearchResult
+import collections.abc
+
+import inspect


class CalculationMode(Enum):
@@ -101,6 +104,8 @@ def __init__(
"""

# parameters
+        if not isinstance(param_init, collections.abc.Mapping):
+            raise ValueError("param_init should be a dictionary.")
self.param = param_init
# design variable name
self.design_name = design_vars.variable_names
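The guard above accepts any dict-like object rather than only dict. A minimal standalone sketch of the pattern (not part of the commit; the parameter names and values are hypothetical):

    import collections.abc
    from collections import OrderedDict

    param_init = OrderedDict(A1=84.79, E1=7.78)  # hypothetical parameters
    # Any Mapping subclass (dict, OrderedDict, ...) passes; a list of pairs does not
    if not isinstance(param_init, collections.abc.Mapping):
        raise ValueError("param_init should be a dictionary.")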
@@ -226,6 +231,7 @@ def stochastic_program(
        # FIM = Jacobian.T @ Jacobian; if the Jacobian is scaled by a constant, the FIM is scaled by that constant squared
self.fim_scale_constant_value = self.scale_constant_value**2

+        # Start timer
sp_timer = TicTocTimer()
sp_timer.tic(msg=None)

@@ -236,14 +242,17 @@
m, analysis_square = self._compute_stochastic_program(m, optimize_opt)

if self.optimize:
+            # If set to optimize, solve the optimization problem (with degrees of freedom)
analysis_optimize = self._optimize_stochastic_program(m)
dT = sp_timer.toc(msg=None)
-            self.logger.info("elapsed time: %0.1f" % dT)
+            self.logger.info("elapsed time: %0.1f seconds" % dT)
+            # Return both square problem and optimization problem results
return analysis_square, analysis_optimize

else:
dT = sp_timer.toc(msg=None)
-            self.logger.info("elapsed time: %0.1f" % dT)
+            self.logger.info("elapsed time: %0.1f seconds" % dT)
+            # Return only square problem results
return analysis_square

def _compute_stochastic_program(self, m, optimize_option):
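For reference, a minimal sketch of the TicTocTimer calls used above (not part of the commit): tic(msg=None) resets the timer without logging, and toc(msg=None) returns the elapsed time in seconds without logging.

    from pyomo.common.timing import TicTocTimer

    timer = TicTocTimer()
    timer.tic(msg=None)          # start/reset the timer silently
    # ... build and solve the model ...
    dT = timer.toc(msg=None)     # elapsed seconds since the last tic/toc
    print("elapsed time: %0.1f seconds" % dT)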
@@ -387,7 +396,7 @@ def compute_FIM(
FIM_analysis = self._direct_kaug()

dT = square_timer.toc(msg=None)
-        self.logger.info("elapsed time: %0.1f" % dT)
+        self.logger.info("elapsed time: %0.1f seconds" % dT)

return FIM_analysis

@@ -587,12 +596,37 @@ def _create_block(self):
# Set for block/scenarios
mod.scenario = pyo.Set(initialize=self.scenario_data.scenario_indices)

+        # Determine if create_model takes theta as an optional input
+        pass_theta_to_initialize = (
+            'theta' in inspect.getfullargspec(self.create_model).args
+        )

# Allow user to self-define complex design variables
self.create_model(mod=mod, model_option=ModelOptionLib.stage1)

+        # Fix parameter values in the copy of the stage1 model (if they exist)
+        for par in self.param:
+            cuid = pyo.ComponentUID(par)
+            var = cuid.find_component_on(mod)
+            if var is not None:
+                # Fix the parameter value
+                # Otherwise, the parameter does not exist on the stage 1 model
+                var.fix(self.param[par])

def block_build(b, s):
# create block scenarios
-            self.create_model(mod=b, model_option=ModelOptionLib.stage2)
+            # idea: check if create_model takes theta as an optional input, if so, pass parameter values to create_model

+            if pass_theta_to_initialize:
+                # Grab the values of theta for this scenario/block
+                theta_initialize = self.scenario_data.scenario[s]
+                # Add model on block with theta values
+                self.create_model(
+                    mod=b, model_option=ModelOptionLib.stage2, theta=theta_initialize
+                )
+            else:
+                # Otherwise add model on block without theta values
+                self.create_model(mod=b, model_option=ModelOptionLib.stage2)

# fix parameter values to perturbed values
for par in self.param:
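Two techniques drive the block above: inspect.getfullargspec reveals whether the user's create_model accepts a theta argument, and pyo.ComponentUID(name).find_component_on(model) resolves a string name to a component, returning None when it is absent. A self-contained sketch (not part of the commit; the callback and variable are hypothetical):

    import inspect
    import pyomo.environ as pyo

    def create_model(mod, model_option=None, theta=None):  # hypothetical callback
        mod.A1 = pyo.Var(initialize=84.79)

    # True, because 'theta' appears in the callback's signature
    pass_theta = 'theta' in inspect.getfullargspec(create_model).args

    m = pyo.ConcreteModel()
    create_model(m)

    var = pyo.ComponentUID("A1").find_component_on(m)  # None if not found
    if var is not None:
        var.fix(84.79)  # pin the parameter at its nominal value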
@@ -603,7 +637,7 @@ def block_build(b, s):
mod.block = pyo.Block(mod.scenario, rule=block_build)

# discretize the model
-        if self.discretize_model:
+        if self.discretize_model is not None:
mod = self.discretize_model(mod)

# force design variables in blocks to be equal to global design values
@@ -775,7 +809,7 @@ def run_grid_search(
# update the controlled value of certain time points for certain design variables
for i, names in enumerate(design_dimension_names):
# if the element is a list, all design variables in this list share the same values
-                if type(names) is list or type(names) is tuple:
+                if isinstance(names, collections.abc.Sequence):
for n in names:
design_iter[n] = list(design_set_iter)[i]
else:
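The broadened isinstance test accepts any list-like container of names that share one value. One subtlety, shown in a standalone sketch (not part of the commit; the names are hypothetical): str is itself a collections.abc.Sequence, so bare names must be caught before the container branch.

    import collections.abc

    def assign(design_iter, names, value):
        if isinstance(names, str):
            # a bare string is also a Sequence, so handle it first
            design_iter[names] = value
        elif isinstance(names, collections.abc.Sequence):
            # all design variables in the container share the same value
            for n in names:
                design_iter[n] = value

    d = {}
    assign(d, ["T[0]", "T[1]"], 300.0)  # grouped time points
    assign(d, "CA0[0]", 5.0)            # single variable name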
@@ -879,11 +913,36 @@ def identity_matrix(m, i, j):
else:
return 0

+        ### Initialize the Jacobian if provided by the user

+        # If the user provides an initial Jacobian, convert it to a dictionary
+        if self.jac_initial is not None:
+            dict_jac_initialize = {}
+            for i, bu in enumerate(model.regression_parameters):
+                for j, un in enumerate(model.measured_variables):
+                    if isinstance(self.jac_initial, dict):
+                        # Jacobian is a dictionary of arrays or lists where the key is the regression parameter name
+                        dict_jac_initialize[(bu, un)] = self.jac_initial[bu][j]
+                    elif isinstance(self.jac_initial, np.ndarray):
+                        # Jacobian is a numpy array, rows are regression parameters, columns are measured variables
+                        dict_jac_initialize[(bu, un)] = self.jac_initial[i][j]

+        # Initialize the Jacobian matrix
+        def initialize_jac(m, i, j):
+            # If provided by the user, use the values now stored in the dictionary
+            if self.jac_initial is not None:
+                return dict_jac_initialize[(i, j)]
+            # Otherwise initialize to 0.1 (which is an arbitrary non-zero value)
+            else:
+                return 0.1

model.sensitivity_jacobian = pyo.Var(
-            model.regression_parameters, model.measured_variables, initialize=0.1
+            model.regression_parameters,
+            model.measured_variables,
+            initialize=initialize_jac,
)
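The initialize argument of pyo.Var accepts a rule that is called once per index, which is how a user-supplied Jacobian (dict or numpy array) seeds the variable. A self-contained sketch (not part of the commit; parameter and measurement names are hypothetical):

    import numpy as np
    import pyomo.environ as pyo

    params = ["A1", "E1"]      # hypothetical regression parameters
    measured = ["CA", "CB"]    # hypothetical measured variables
    jac_initial = np.array([[0.3, -0.1], [0.2, 0.4]])  # rows: params, cols: measurements

    dict_jac = {
        (p, y): jac_initial[i][j]
        for i, p in enumerate(params)
        for j, y in enumerate(measured)
    }

    m = pyo.ConcreteModel()
    m.params = pyo.Set(initialize=params)
    m.measured = pyo.Set(initialize=measured)

    def initialize_jac(m, p, y):
        # use the supplied value when present, else an arbitrary non-zero 0.1
        return dict_jac.get((p, y), 0.1)

    m.jac = pyo.Var(m.params, m.measured, initialize=initialize_jac)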

-        if self.fim_initial:
+        if self.fim_initial is not None:
dict_fim_initialize = {}
for i, bu in enumerate(model.regression_parameters):
for j, un in enumerate(model.regression_parameters):
@@ -892,7 +951,7 @@ def identity_matrix(m, i, j):
def initialize_fim(m, j, d):
return dict_fim_initialize[(j, d)]

-        if self.fim_initial:
+        if self.fim_initial is not None:
model.fim = pyo.Var(
model.regression_parameters,
model.regression_parameters,
@@ -1011,6 +1070,32 @@ def fim_rule(m, p, q):
return model

def _add_objective(self, m):

+        ### Initialize the Cholesky decomposition matrix
+        if self.Cholesky_option:

+            # Assemble the FIM matrix
+            fim = np.zeros((len(self.param), len(self.param)))
+            for i, bu in enumerate(m.regression_parameters):
+                for j, un in enumerate(m.regression_parameters):
+                    fim[i][j] = m.fim[bu, un].value

+            # Calculate the eigenvalues of the FIM matrix
+            eig = np.linalg.eigvals(fim)

+            # If the smallest eigenvalue is (practically) negative, add a diagonal matrix to make it positive definite
+            small_number = 1e-10
+            if min(eig) < small_number:
+                fim = fim + np.eye(len(self.param)) * (small_number - min(eig))

+            # Compute the Cholesky decomposition of the FIM matrix
+            L = np.linalg.cholesky(fim)

+            # Initialize the Cholesky matrix
+            for i, c in enumerate(m.regression_parameters):
+                for j, d in enumerate(m.regression_parameters):
+                    m.L_ele[c, d].value = L[i, j]
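The diagonal shift above guarantees the factorization exists: adding (small_number - min(eig)) to every diagonal entry raises the smallest eigenvalue to small_number. A numeric sketch with a hypothetical, singular FIM (not part of the commit):

    import numpy as np

    fim = np.array([[4.0, 1.0], [1.0, 0.25]])  # hypothetical FIM, det = 0
    eig = np.linalg.eigvals(fim)

    small_number = 1e-10
    if min(eig) < small_number:
        # shift the diagonal so the smallest eigenvalue becomes small_number
        fim = fim + np.eye(len(fim)) * (small_number - min(eig))

    L = np.linalg.cholesky(fim)  # succeeds on the shifted matrix
    assert np.allclose(L @ L.T, fim)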

def cholesky_imp(m, c, d):
"""
Calculate Cholesky L matrix using algebraic constraints
@@ -1101,14 +1186,20 @@ def _fix_design(self, m, design_val, fix_opt=True, optimize_option=None):
m: model
"""
for name in self.design_name:
+            # Loop over design variables
+            # Get Pyomo variable object
cuid = pyo.ComponentUID(name)
var = cuid.find_component_on(m)
if fix_opt:
+                # If fix_opt is True, fix the design variable
var.fix(design_val[name])
else:
+                # Otherwise check optimize_option
if optimize_option is None:
+                    # If optimize_option is None, unfix all design variables
var.unfix()
else:
+                    # Otherwise, unfix only the design variables listed in optimize_option with value True
if optimize_option[name]:
var.unfix()
return m
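The square/optimization switch rests on Pyomo's fix/unfix: fixing holds a variable at a value (removing its degree of freedom) without deleting it. A minimal sketch (not part of the commit; the variable is hypothetical):

    import pyomo.environ as pyo

    m = pyo.ConcreteModel()
    m.T = pyo.Var(initialize=300.0)  # hypothetical design variable

    m.T.fix(350.0)   # square problem: hold the design at its experimental value
    assert m.T.fixed and pyo.value(m.T) == 350.0

    m.T.unfix()      # optimization: give the degree of freedom back to the solver
    assert not m.T.fixed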
@@ -1124,7 +1215,7 @@ def _get_default_ipopt_solver(self):
def _solve_doe(self, m, fix=False, opt_option=None):
"""Solve DOE model.
If it's a square problem, fix design variable and solve.
-        Else, fix design variable and solve square problem firstly, then unfix them and solve the optimization problem
+        Else, fix design variable and solve square problem first, then unfix them and solve the optimization problem
Parameters
----------
Expand All @@ -1138,7 +1229,10 @@ def _solve_doe(self, m, fix=False, opt_option=None):
-------
solver_results: solver results
"""
-        ### Solve square problem
+        # if fix = False, solve the optimization problem
+        # if fix = True, solve the square problem

+        # either fix or unfix the design variables
mod = self._fix_design(
m, self.design_values, fix_opt=fix, optimize_option=opt_option
)
28 changes: 15 additions & 13 deletions pyomo/contrib/doe/measurements.py
@@ -26,6 +26,8 @@
# ___________________________________________________________________________

import itertools
+import collections.abc
+from pyomo.common.numeric_types import native_numeric_types


class VariablesWithIndices:
@@ -93,18 +95,18 @@ def add_variables(
upper_bounds,
)

-        if values:
+        if values is not None:
            # the keys of this dictionary are the added variable names; the values are their values
self.variable_names_value.update(zip(added_names, values))

# if a scalar (int or float) is given, set it as the lower bound for all variables
-        if lower_bounds:
-            if type(lower_bounds) in [int, float]:
+        if lower_bounds is not None:
+            if type(lower_bounds) in native_numeric_types:
lower_bounds = [lower_bounds] * len(added_names)
self.lower_bounds.update(zip(added_names, lower_bounds))

-        if upper_bounds:
-            if type(upper_bounds) in [int, float]:
+        if upper_bounds is not None:
+            if type(upper_bounds) in native_numeric_types:
upper_bounds = [upper_bounds] * len(added_names)
self.upper_bounds.update(zip(added_names, upper_bounds))
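native_numeric_types (a set of recognized numeric classes, broader than [int, float]) lets one scalar bound be broadcast across all added variables. A standalone sketch (not part of the commit; the names are hypothetical):

    from pyomo.common.numeric_types import native_numeric_types

    added_names = ["CA[0]", "CA[0.5]", "CA[1]"]  # hypothetical flattened names
    lower_bounds = 0.0                           # one scalar bound for all of them

    if type(lower_bounds) in native_numeric_types:
        lower_bounds = [lower_bounds] * len(added_names)

    bounds = dict(zip(added_names, lower_bounds))
    # {'CA[0]': 0.0, 'CA[0.5]': 0.0, 'CA[1]': 0.0}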

@@ -171,26 +173,26 @@ def _check_valid_input(
"""
Check if the measurement information provided are valid to use.
"""
-        assert type(var_name) is str, "var_name should be a string."
+        assert isinstance(var_name, str), "var_name should be a string."

if time_index_position not in indices:
raise ValueError("time index cannot be found in indices.")

# if given a list, check if bounds have the same length with flattened variable
-        if values and len(values) != len_indices:
+        if values is not None and len(values) != len_indices:
raise ValueError("Values is of different length with indices.")

if (
-            lower_bounds
-            and type(lower_bounds) == list
-            and len(lower_bounds) != len_indices
+            lower_bounds is not None  # ensure not None
+            and isinstance(lower_bounds, collections.abc.Sequence)  # ensure list-like
+            and len(lower_bounds) != len_indices  # ensure same length
):
raise ValueError("Lowerbounds is of different length with indices.")

if (
-            upper_bounds
-            and type(upper_bounds) == list
-            and len(upper_bounds) != len_indices
+            upper_bounds is not None  # ensure not None
+            and isinstance(upper_bounds, collections.abc.Sequence)  # ensure list-like
+            and len(upper_bounds) != len_indices  # ensure same length
):
raise ValueError("Upperbounds is of different length with indices.")

