Skip to content

Commit

Permalink
add prune mode to optimizer
Browse files Browse the repository at this point in the history
  • Loading branch information
Yan Georget committed Dec 12, 2024
1 parent 5e0a6f4 commit eb9971a
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 34 deletions.
12 changes: 5 additions & 7 deletions nucs/examples/tsp/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@
###############################################################################
import argparse

from nucs.constants import LOG_LEVEL_INFO, LOG_LEVELS, OPT_MODES, OPT_RESET
from nucs.constants import LOG_LEVEL_INFO, LOG_LEVELS, OPT_MODES, OPT_PRUNE
from nucs.examples.tsp.tsp_instances import TSP_INSTANCES
from nucs.examples.tsp.tsp_problem import TSPProblem
from nucs.heuristics.heuristics import DOM_HEURISTIC_MIN_COST, VAR_HEURISTIC_MAX_REGRET
from nucs.heuristics.heuristics import VAR_HEURISTIC_MAX_REGRET, DOM_HEURISTIC_MIN_COST
from nucs.solvers.backtrack_solver import BacktrackSolver
from nucs.solvers.consistency_algorithms import CONSISTENCY_ALG_BC, CONSISTENCY_ALG_SHAVING

Expand All @@ -26,19 +26,17 @@
parser.add_argument("--log_level", choices=LOG_LEVELS, default=LOG_LEVEL_INFO)
parser.add_argument("--name", choices=["GR17", "GR21", "GR24"], default="GR17")
parser.add_argument("--shaving", type=bool, action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--opt_mode", choices=OPT_MODES, default=OPT_RESET)
parser.add_argument("--opt_mode", choices=OPT_MODES, default=OPT_PRUNE)
args = parser.parse_args()
tsp_instance = TSP_INSTANCES[args.name]
problem = TSPProblem(tsp_instance)
# mrp_var_heuristic_idx = register_var_heuristic(mrp_var_heuristic)
# mcp_dom_heuristic_idx = register_dom_heuristic(mcp_dom_heuristic)
solver = BacktrackSolver(
problem,
consistency_alg_idx=CONSISTENCY_ALG_SHAVING if args.shaving else CONSISTENCY_ALG_BC,
decision_domains=list(range(len(tsp_instance))),
var_heuristic_idx=VAR_HEURISTIC_MAX_REGRET,
var_heuristic_idx=VAR_HEURISTIC_MAX_REGRET, # register_var_heuristic(mrp_var_heuristic),
var_heuristic_params=tsp_instance,
dom_heuristic_idx=DOM_HEURISTIC_MIN_COST,
dom_heuristic_idx=DOM_HEURISTIC_MIN_COST, # register_dom_heuristic(mcp_dom_heuristic),
dom_heuristic_params=tsp_instance,
log_level=args.log_level,
)
Expand Down
17 changes: 8 additions & 9 deletions nucs/examples/tsp/mcp_dom_heuristic.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,39 +22,38 @@

@njit(cache=True)
def mcp_dom_heuristic(
    shr_domains_stack: NDArray,
    not_entailed_propagators_stack: NDArray,
    dom_update_stack: NDArray,
    stacks_top: NDArray,
    dom_idx: int,
    params: NDArray,
) -> int:
    """
    Chooses, for the given domain, the unused value that minimizes the (positive) cost,
    then delegates to value_dom_heuristic to apply that choice.
    :param shr_domains_stack: the stack of shared domains
    :param not_entailed_propagators_stack: the stack of not entailed propagators
    :param dom_update_stack: the stack of domain updates
    :param stacks_top: the index of the top of the stacks as a Numpy array
    :param dom_idx: the index of the shared domain
    :param params: a two-dimensional (first dimension corresponds to variables, second to values) costs array
    :return: the events
    """
    top = stacks_top[0]
    n = len(params)
    # Mark the values already taken by instantiated domains (MIN == MAX) so they
    # are not considered as candidates below.
    used = np.zeros(n, dtype=np.bool)
    for i in range(n):
        if shr_domains_stack[top, i, MIN] == shr_domains_stack[top, i, MAX]:
            used[shr_domains_stack[top, i, MIN]] = True
    best_cost = sys.maxsize
    best_value = -1  # NOTE(review): stays -1 if every value is used or has cost <= 0 — presumably cannot happen for a consistent TSP domain; confirm
    shr_domain = shr_domains_stack[top, dom_idx]
    for value in range(shr_domain[MIN], shr_domain[MAX] + 1):
        if not used[value]:
            cost = params[dom_idx][value]
            # Only strictly positive costs compete; 0 presumably encodes "no edge"/self-loop.
            if 0 < cost < best_cost:
                best_cost = cost
                best_value = value
    return value_dom_heuristic(
        shr_domains_stack, not_entailed_propagators_stack, dom_update_stack, stacks_top, dom_idx, best_value, params
    )
21 changes: 10 additions & 11 deletions nucs/examples/tsp/mrp_var_heuristic.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,30 +21,29 @@

@njit(cache=True)
def mrp_var_heuristic(
params: NDArray, decision_domains: NDArray, shr_domains_stack: NDArray, stacks_top: NDArray
decision_domains: NDArray, shr_domains_stack: NDArray, stacks_top: NDArray, params: NDArray
) -> int:
"""
Chooses the variable with the maximal regret (difference between best and second-best value).
:param params: a two-dimensional (first dimension correspond to variables, second to values) costs array
:param decision_domains: the indices of a subset of the shared domains
:param shr_domains_stack: the stack of shared domains
:param stacks_top: the index of the top of the stacks as a Numpy array
:param params: a two-dimensional (first dimension correspond to variables, second to values) costs array
:return: the index of the shared domain
"""
cp_top_idx = stacks_top[0]
used = np.zeros(len(decision_domains), dtype=np.bool)
for dom_idx in decision_domains:
used[shr_domains_stack[cp_top_idx, dom_idx, MIN]] = (
shr_domains_stack[cp_top_idx, dom_idx, MIN] == shr_domains_stack[cp_top_idx, dom_idx, MAX]
)
top = stacks_top[0]
n = len(decision_domains)
used = np.zeros(n, dtype=np.bool)
for i in range(n):
if shr_domains_stack[top, i, MIN] == shr_domains_stack[top, i, MAX]:
used[shr_domains_stack[top, i, MIN]] = True
max_regret = 0
best_idx = -1
for dom_idx in decision_domains:
shr_domain = shr_domains_stack[cp_top_idx, dom_idx]
shr_domain = shr_domains_stack[top, dom_idx]
size = shr_domain[MAX] - shr_domain[MIN] # actually this is size - 1
if 0 < size:
best_cost = sys.maxsize
second_cost = sys.maxsize
best_cost = second_cost = sys.maxsize
for value in range(shr_domain[MIN], shr_domain[MAX] + 1):
if not used[value]:
cost = params[dom_idx][value]
Expand Down
7 changes: 4 additions & 3 deletions nucs/examples/tsp/total_cost_propagator.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,14 +51,15 @@ def compute_domains_total_cost(domains: NDArray, parameters: NDArray) -> int:
n = len(domains) - 1
used = np.zeros(n, dtype=np.bool)
for i in range(n):
used[domains[i, MIN]] = domains[i, MIN] == domains[i, MAX]
if domains[i, MIN] == domains[i, MAX]:
used[domains[i, MIN]] = True
global_min = global_max = 0
for i in range(n):
local_min = sys.maxsize
local_max = 0
if domains[i, MIN] == domains[i, MAX]:
local_min = local_max = parameters[i * n + domains[i, MIN]]
else:
local_min = sys.maxsize
local_max = 0
for value in range(domains[i, MIN], domains[i, MAX] + 1):
if not used[value]:
cost = parameters[i * n + value]
Expand Down
5 changes: 1 addition & 4 deletions nucs/examples/tsp/tsp_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,10 @@ def __init__(self, cost_rows: List[List[int]]) -> None:
for i in range(n):
self.add_propagator(([i, cost_vars + i], ALG_ELEMENT_IV, cost_rows[i]))
self.add_propagator((list(range(cost_vars, cost_vars + n + 1)), ALG_AFFINE_EQ, [1] * n + [-1, 0]))
alg_total_cost = register_propagator(
get_triggers_total_cost, get_complexity_total_cost, compute_domains_total_cost
)
self.add_propagator(
(
list(range(0, n)) + [total_cost_var],
alg_total_cost,
register_propagator(get_triggers_total_cost, get_complexity_total_cost, compute_domains_total_cost),
[cost for cost_row in cost_rows for cost in cost_row],
)
)

0 comments on commit eb9971a

Please sign in to comment.