diff --git a/alns/ALNS.py b/alns/ALNS.py index cd2d2724..8521de8b 100644 --- a/alns/ALNS.py +++ b/alns/ALNS.py @@ -19,17 +19,17 @@ class _OperatorType(Protocol): def __call__( self, state: State, - rnd_state: rnd.RandomState, + rng: rnd.Generator, **kwargs, ) -> State: - pass # pragma: no cover + ... # pragma: no cover class _CallbackType(Protocol): __name__: str - def __call__(self, state: State, rnd_state: rnd.RandomState, **kwargs): - pass # pragma: no cover + def __call__(self, state: State, rng: rnd.Generator, **kwargs): + ... # pragma: no cover logger = logging.getLogger(__name__) @@ -47,7 +47,7 @@ class ALNS: callback functions (registered via :meth:`~alns.ALNS.ALNS.on_best`, :meth:`~alns.ALNS.ALNS.on_better`, :meth:`~alns.ALNS.ALNS.on_accept`, or :meth:`~alns.ALNS.ALNS.on_reject`) should take a candidate - :class:`~alns.State.State` and :class:`~numpy.random.RandomState` as + :class:`~alns.State.State` and :class:`~numpy.random.Generator` as arguments. Unlike the operators, no solution should be returned: if desired, the given candidate solution should be modified in-place instead. Note that this solution is **not** evaluated again (so a @@ -55,11 +55,11 @@ class ALNS: Parameters ---------- - rnd_state - Optional random state to use for random number generation. When - passed, this state is used for operator selection and general - computations requiring random numbers. It is also passed to the - destroy and repair operators, as a second argument. + rng + Optional random number generator (RNG). When passed, this generator + is used for operator selection and general computations requiring + random numbers. It is also passed to the destroy and repair operators, + as a second argument. References ---------- @@ -68,8 +68,8 @@ class ALNS: - 420). Springer. 
""" - def __init__(self, rnd_state: rnd.RandomState = rnd.RandomState()): - self._rnd_state = rnd_state + def __init__(self, rng: rnd.Generator = rnd.default_rng()): + self._rng = rng self._d_ops: Dict[str, _OperatorType] = {} self._r_ops: Dict[str, _OperatorType] = {} @@ -121,7 +121,7 @@ def add_destroy_operator( op An operator that, when applied to the current state, returns a new state reflecting its implemented destroy action. Its second - argument is the random state passed to the ALNS instance. + argument is the RNG passed to the ALNS instance. name Optional name argument, naming the operator. When not passed, the function name is used instead. @@ -140,7 +140,7 @@ def add_repair_operator( op An operator that, when applied to the destroyed state, returns a new state reflecting its implemented repair action. Its second - argument is the random state passed to the ALNS instance. + argument is the RNG passed to the ALNS instance. name Optional name argument, naming the operator. When not passed, the function name is used instead. @@ -212,16 +212,16 @@ class of vehicle routing problems with backhauls. 
*European stats.collect_objective(init_obj) stats.collect_runtime(time.perf_counter()) - while not stop(self._rnd_state, best, curr): - d_idx, r_idx = op_select(self._rnd_state, best, curr) + while not stop(self._rng, best, curr): + d_idx, r_idx = op_select(self._rng, best, curr) d_name, d_operator = self.destroy_operators[d_idx] r_name, r_operator = self.repair_operators[r_idx] logger.debug(f"Selected operators {d_name} and {r_name}.") - destroyed = d_operator(curr, self._rnd_state, **kwargs) - cand = r_operator(destroyed, self._rnd_state, **kwargs) + destroyed = d_operator(curr, self._rng, **kwargs) + cand = r_operator(destroyed, self._rng, **kwargs) best, curr, outcome = self._eval_cand( accept, best, curr, cand, **kwargs @@ -295,7 +295,7 @@ def _eval_cand( func = self._on_outcome.get(outcome) if callable(func): - func(cand, self._rnd_state, **kwargs) + func(cand, self._rng, **kwargs) if outcome == Outcome.BEST: return cand, cand, outcome @@ -317,7 +317,7 @@ def _determine_outcome( """ outcome = Outcome.REJECT - if accept(self._rnd_state, best, curr, cand): # accept candidate + if accept(self._rng, best, curr, cand): # accept candidate outcome = Outcome.ACCEPT if cand.objective() < curr.objective(): diff --git a/alns/State.py b/alns/State.py index caf2d3ec..c5b98600 100644 --- a/alns/State.py +++ b/alns/State.py @@ -13,6 +13,7 @@ def objective(self) -> float: """ Computes the state's associated objective value. """ + ... # pragma: no cover class ContextualState(State, Protocol): @@ -25,3 +26,4 @@ def get_context(self) -> np.ndarray: """ Computes a context vector for the current state. """ + ... 
# pragma: no cover diff --git a/alns/accept/AcceptanceCriterion.py b/alns/accept/AcceptanceCriterion.py index e30b94cf..5becb78a 100644 --- a/alns/accept/AcceptanceCriterion.py +++ b/alns/accept/AcceptanceCriterion.py @@ -1,6 +1,6 @@ from typing import Protocol -from numpy.random import RandomState +from numpy.random import Generator from alns.State import State @@ -11,7 +11,7 @@ class AcceptanceCriterion(Protocol): """ def __call__( - self, rnd: RandomState, best: State, current: State, candidate: State + self, rng: Generator, best: State, current: State, candidate: State ) -> bool: """ Determines whether to accept the proposed, candidate solution based on @@ -19,7 +19,7 @@ def __call__( Parameters ---------- - rnd + rng May be used to draw random numbers from. best The best solution state observed so far. @@ -33,3 +33,4 @@ def __call__( bool Whether to accept the candidate state (True), or not (False). """ + ... # pragma: no cover diff --git a/alns/accept/AlwaysAccept.py b/alns/accept/AlwaysAccept.py index 126cfd4f..be60a499 100644 --- a/alns/accept/AlwaysAccept.py +++ b/alns/accept/AlwaysAccept.py @@ -3,5 +3,5 @@ class AlwaysAccept: This criterion always accepts the candidate solution. """ - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): return True diff --git a/alns/accept/GreatDeluge.py b/alns/accept/GreatDeluge.py index 60abb94b..36dc0c69 100644 --- a/alns/accept/GreatDeluge.py +++ b/alns/accept/GreatDeluge.py @@ -50,7 +50,7 @@ def alpha(self): def beta(self): return self._beta - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): if self._threshold is None: self._threshold = self.alpha * best.objective() diff --git a/alns/accept/HillClimbing.py b/alns/accept/HillClimbing.py index bdc06920..9576a0f6 100644 --- a/alns/accept/HillClimbing.py +++ b/alns/accept/HillClimbing.py @@ -4,5 +4,5 @@ class HillClimbing: that result in a worse objective value. 
""" - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): return candidate.objective() <= current.objective() diff --git a/alns/accept/LateAcceptanceHillClimbing.py b/alns/accept/LateAcceptanceHillClimbing.py index 5d345546..f093442b 100644 --- a/alns/accept/LateAcceptanceHillClimbing.py +++ b/alns/accept/LateAcceptanceHillClimbing.py @@ -59,7 +59,7 @@ def greedy(self): def better_history(self): return self._better_history - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): if not self._history: self._history.append(current.objective()) return candidate.objective() < current.objective() diff --git a/alns/accept/MovingAverageThreshold.py b/alns/accept/MovingAverageThreshold.py index 741682c0..24b627a9 100644 --- a/alns/accept/MovingAverageThreshold.py +++ b/alns/accept/MovingAverageThreshold.py @@ -62,7 +62,7 @@ def gamma(self) -> int: def history(self) -> List[float]: return list(self._history) - def __call__(self, rnd, best, current, candidate) -> bool: + def __call__(self, rng, best, current, candidate) -> bool: self._history.append(candidate.objective()) recent_best = min(self._history) recent_avg = mean(self._history) diff --git a/alns/accept/NonLinearGreatDeluge.py b/alns/accept/NonLinearGreatDeluge.py index 664a1d32..040d8b4d 100644 --- a/alns/accept/NonLinearGreatDeluge.py +++ b/alns/accept/NonLinearGreatDeluge.py @@ -61,7 +61,7 @@ def gamma(self): def delta(self): return self._delta - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): if self._threshold is None: if best.objective() == 0: raise ValueError("Initial solution cannot have zero value.") diff --git a/alns/accept/RandomAccept.py b/alns/accept/RandomAccept.py index 02117cf9..c75b168a 100644 --- a/alns/accept/RandomAccept.py +++ b/alns/accept/RandomAccept.py @@ -72,12 +72,12 @@ def step(self) -> float: def method(self) -> str: return 
self._method - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): # Always accept better res = candidate.objective() < current.objective() if not res: # maybe accept worse - res = rnd.random() < self._prob + res = rng.random() < self._prob self._prob = max( self.end_prob, update(self._prob, self.step, self.method) diff --git a/alns/accept/RecordToRecordTravel.py b/alns/accept/RecordToRecordTravel.py index 0bd052d7..525cdaea 100644 --- a/alns/accept/RecordToRecordTravel.py +++ b/alns/accept/RecordToRecordTravel.py @@ -101,7 +101,7 @@ def step(self) -> float: def method(self) -> str: return self._method - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): # From [2] p. 87 (RRT; best), and [3] p. 162 (TA; current). baseline = best if self._cmp_best else current res = candidate.objective() - baseline.objective() <= self._threshold diff --git a/alns/accept/SimulatedAnnealing.py b/alns/accept/SimulatedAnnealing.py index e37b0c45..ec294e10 100644 --- a/alns/accept/SimulatedAnnealing.py +++ b/alns/accept/SimulatedAnnealing.py @@ -98,7 +98,7 @@ def step(self) -> float: def method(self) -> str: return self._method - def __call__(self, rnd, best, current, candidate): + def __call__(self, rng, best, current, candidate): probability = np.exp( (current.objective() - candidate.objective()) / self._temperature ) @@ -110,12 +110,7 @@ def __call__(self, rnd, best, current, candidate): update(self._temperature, self.step, self.method), ) - # TODO deprecate RandomState in favour of Generator - which uses - # random(), rather than random_sample(). 
- try: - return probability >= rnd.random() - except AttributeError: - return probability >= rnd.random_sample() + return probability >= rng.random() @classmethod def autofit( diff --git a/alns/accept/tests/test_great_deluge.py b/alns/accept/tests/test_great_deluge.py index ad8db4d7..2e6448a6 100644 --- a/alns/accept/tests/test_great_deluge.py +++ b/alns/accept/tests/test_great_deluge.py @@ -40,21 +40,21 @@ def test_accepts_below_threshold(): great_deluge = GreatDeluge(2.01, 0.5) # Initial threshold is set at 2.01, hence Two should be accepted - assert_(great_deluge(rnd.RandomState(), One(), Zero(), Two())) + assert_(great_deluge(rnd.default_rng(), One(), Zero(), Two())) def test_rejects_above_threshold(): great_deluge = GreatDeluge(1.01, 0.5) # Initial threshold is set at 1.01, hence Two should be rejected - assert_(not great_deluge(rnd.RandomState(), One(), Zero(), Two())) + assert_(not great_deluge(rnd.default_rng(), One(), Zero(), Two())) def test_rejects_equal_threshold(): great_deluge = GreatDeluge(2, 0.5) # Initial threshold is set at 2, hence Two should be rejected - assert_(not great_deluge(rnd.RandomState(), One(), Zero(), Two())) + assert_(not great_deluge(rnd.default_rng(), One(), Zero(), Two())) def test_evaluate_consecutive_solutions(): @@ -65,11 +65,11 @@ def test_evaluate_consecutive_solutions(): # The initial threshold is set at 2*0 = 0, so the first candidate with # value one is rejected. The threshold is updated to 0.01. - assert_(not great_deluge(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not great_deluge(rnd.default_rng(), Zero(), Zero(), One())) # The second candidate is below the threshold (0 < 0.01), hence accepted. # The threshold is updated to 0.0099. - assert_(great_deluge(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(great_deluge(rnd.default_rng(), Zero(), Zero(), Zero())) # The third candidate is below the threshold (0 < 0.0099), hence accepted. 
- assert_(great_deluge(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(great_deluge(rnd.default_rng(), Zero(), Zero(), Zero())) diff --git a/alns/accept/tests/test_hill_climbing.py b/alns/accept/tests/test_hill_climbing.py index e206cf0a..60e6117c 100644 --- a/alns/accept/tests/test_hill_climbing.py +++ b/alns/accept/tests/test_hill_climbing.py @@ -10,7 +10,7 @@ def test_accepts_better(): Tests if the hill climbing method accepts a better solution. """ hill_climbing = HillClimbing() - assert_(hill_climbing(rnd.RandomState(), One(), One(), Zero())) + assert_(hill_climbing(rnd.default_rng(), One(), One(), Zero())) def test_rejects_worse(): @@ -18,7 +18,7 @@ def test_rejects_worse(): Tests if the hill climbing method accepts a worse solution. """ hill_climbing = HillClimbing() - assert_(not hill_climbing(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not hill_climbing(rnd.default_rng(), Zero(), Zero(), One())) def test_accepts_equal(): @@ -27,4 +27,4 @@ def test_accepts_equal(): same objective value. 
""" hill_climbing = HillClimbing() - assert_(hill_climbing(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(hill_climbing(rnd.default_rng(), Zero(), Zero(), Zero())) diff --git a/alns/accept/tests/test_late_acceptance_hill_climbing.py b/alns/accept/tests/test_late_acceptance_hill_climbing.py index c360062b..edd7b807 100644 --- a/alns/accept/tests/test_late_acceptance_hill_climbing.py +++ b/alns/accept/tests/test_late_acceptance_hill_climbing.py @@ -40,10 +40,10 @@ def test_zero_lookback_period(greedy, better_history): """ lahc = LateAcceptanceHillClimbing(0, greedy, better_history) - assert_(lahc(rnd.RandomState(), Zero(), Two(), One())) - assert_(not lahc(rnd.RandomState(), Zero(), One(), One())) - assert_(not lahc(rnd.RandomState(), Zero(), Zero(), Zero())) - assert_(lahc(rnd.RandomState(), Zero(), Two(), One())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), One())) + assert_(not lahc(rnd.default_rng(), Zero(), One(), One())) + assert_(not lahc(rnd.default_rng(), Zero(), Zero(), Zero())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), One())) @mark.parametrize("lookback_period", [0, 3, 10, 50]) @@ -54,12 +54,12 @@ def test_accept(lookback_period): """ lahc = LateAcceptanceHillClimbing(lookback_period, False, False) - assert_(lahc(rnd.RandomState(), Zero(), Two(), One())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), One())) for _ in range(lookback_period): # The then-current solution `lookback_period` iterations ago is 2, so # the candidate solution with value 1 should be accepted. 
- assert_(lahc(rnd.RandomState(), Zero(), One(), One())) + assert_(lahc(rnd.default_rng(), Zero(), One(), One())) @mark.parametrize("lookback_period", [0, 3, 10, 50]) @@ -71,11 +71,11 @@ def test_reject(lookback_period): lahc = LateAcceptanceHillClimbing(lookback_period, False, False) for _ in range(lookback_period): - assert_(lahc(rnd.RandomState(), Zero(), One(), Zero())) + assert_(lahc(rnd.default_rng(), Zero(), One(), Zero())) # The then-current solution from `lookback_period` iterations ago has # value 1, so the candidate solution with value 1 should be rejected. - assert_(not lahc(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not lahc(rnd.default_rng(), Zero(), Zero(), One())) @mark.parametrize("lookback_period", [0, 3, 10, 50]) @@ -87,13 +87,13 @@ def test_greedy_accept(lookback_period): """ lahc = LateAcceptanceHillClimbing(lookback_period, True, False) - assert_(not lahc(rnd.RandomState(), Zero(), Zero(), Two())) + assert_(not lahc(rnd.default_rng(), Zero(), Zero(), Two())) for _ in range(lookback_period): # The candidate solution (1) is accepted because it is better than the # current solution (2), despite being worse than the compared # historial solution from `lookback_period` iterations ago (1). 
- assert_(lahc(rnd.RandomState(), Zero(), Two(), One())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), One())) def test_better_history_small_example(): @@ -103,14 +103,14 @@ def test_better_history_small_example(): """ lahc = LateAcceptanceHillClimbing(1, False, True) - assert_(lahc(rnd.RandomState(), Zero(), One(), Zero())) - assert_(lahc(rnd.RandomState(), Zero(), Two(), Zero())) + assert_(lahc(rnd.default_rng(), Zero(), One(), Zero())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), Zero())) # Previous current stays at 1 because 2 was not better - assert_(not lahc(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not lahc(rnd.default_rng(), Zero(), Zero(), One())) # Previous current is updated to Zero - assert_(not lahc(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(not lahc(rnd.default_rng(), Zero(), Zero(), Zero())) @mark.parametrize("lookback_period", [3, 10, 50]) @@ -123,16 +123,16 @@ def test_better_history_reject(lookback_period): lahc = LateAcceptanceHillClimbing(lookback_period, False, True) for _ in range(lookback_period): - assert_(not lahc(rnd.RandomState(), Zero(), One(), Two())) + assert_(not lahc(rnd.default_rng(), Zero(), One(), Two())) for _ in range(lookback_period): # The current solutions are not stored because they are worse # than the historial current solutions - assert_(not lahc(rnd.RandomState(), Zero(), Two(), Two())) + assert_(not lahc(rnd.default_rng(), Zero(), Two(), Two())) for _ in range(lookback_period): # The candidates (1) do not improve the historical solutions (1) - assert_(not lahc(rnd.RandomState(), Zero(), Two(), One())) + assert_(not lahc(rnd.default_rng(), Zero(), Two(), One())) def test_full_example(): @@ -144,17 +144,17 @@ def test_full_example(): # The first iteration compares candidate (1) against current (2) # and stores current in the history. 
- assert_(lahc(rnd.RandomState(), Zero(), Two(), One())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), One())) # The second candidate (1) is accepted based on late-acceptance (2). # The historical value is updated to 1. - assert_(lahc(rnd.RandomState(), Zero(), One(), One())) + assert_(lahc(rnd.default_rng(), Zero(), One(), One())) # The third candidate (1) is accepted based on greedy comparison with # the current solution (2). The historical value is not updated because # the current does not improve the historical value (1). - assert_(lahc(rnd.RandomState(), Zero(), Two(), One())) + assert_(lahc(rnd.default_rng(), Zero(), Two(), One())) # The fourth candidate (1) is not accepted because it does not improve # the current (1) nor does it improve the historical value (1). - assert_(not lahc(rnd.RandomState(), Zero(), One(), One())) + assert_(not lahc(rnd.default_rng(), Zero(), One(), One())) diff --git a/alns/accept/tests/test_moving_average_threshold.py b/alns/accept/tests/test_moving_average_threshold.py index 36713730..b21b46ba 100644 --- a/alns/accept/tests/test_moving_average_threshold.py +++ b/alns/accept/tests/test_moving_average_threshold.py @@ -39,49 +39,49 @@ def test_gamma(gamma): def test_accepts_below_threshold(): moving_average = MovingAverageThreshold(eta=0.5, gamma=4) - moving_average(rnd.RandomState(), One(), One(), One()) - moving_average(rnd.RandomState(), One(), One(), Zero()) + moving_average(rnd.default_rng(), One(), One(), One()) + moving_average(rnd.default_rng(), One(), One(), Zero()) # The threshold is set at 0 + 0.5 * (0.5 - 0) = 0.25 - assert_(moving_average(rnd.RandomState(), One(), One(), Zero())) + assert_(moving_average(rnd.default_rng(), One(), One(), Zero())) def test_rejects_above_threshold(): moving_average = MovingAverageThreshold(eta=0.5, gamma=4) - moving_average(rnd.RandomState(), One(), One(), Two()) - moving_average(rnd.RandomState(), One(), One(), Zero()) + moving_average(rnd.default_rng(), One(), One(), Two()) + 
moving_average(rnd.default_rng(), One(), One(), Zero()) # The threshold is set at 0 + 0.5 * (1 - 0) = 0.5 - assert_(not moving_average(rnd.RandomState(), One(), One(), One())) + assert_(not moving_average(rnd.default_rng(), One(), One(), One())) def test_accepts_equal_threshold(): moving_average = MovingAverageThreshold(eta=0.5, gamma=4) - moving_average(rnd.RandomState(), One(), One(), VarObj(7100)) - moving_average(rnd.RandomState(), One(), One(), VarObj(7200)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7100)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7200)) # The threshold is set at 7100 + 0.5 * (7140 - 7100) = 7120 - assert_(moving_average(rnd.RandomState(), One(), One(), VarObj(7120))) + assert_(moving_average(rnd.default_rng(), One(), One(), VarObj(7120))) def test_accepts_over_gamma_candidates(): moving_average = MovingAverageThreshold(eta=0.2, gamma=3) - moving_average(rnd.RandomState(), One(), One(), VarObj(7100)) - moving_average(rnd.RandomState(), One(), One(), VarObj(7200)) - moving_average(rnd.RandomState(), One(), One(), VarObj(7200)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7100)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7200)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7200)) # The threshold is set at 7000 + 0.2 * (7133.33 - 7000) = 7013.33 - assert_(moving_average(rnd.RandomState(), One(), One(), VarObj(7000))) + assert_(moving_average(rnd.default_rng(), One(), One(), VarObj(7000))) def test_rejects_over_gamma_candidates(): moving_average = MovingAverageThreshold(eta=0.2, gamma=3) for value in [7100, 7200, 7200, 7000]: - moving_average(rnd.RandomState(), One(), One(), VarObj(value)) + moving_average(rnd.default_rng(), One(), One(), VarObj(value)) # The threshold is set at 7000 + 0.2 * (7100 - 7000) = 7020 - result = moving_average(rnd.RandomState(), One(), One(), VarObj(7100)) + result = moving_average(rnd.default_rng(), One(), One(), VarObj(7100)) assert_(not result) @@ 
-92,14 +92,14 @@ def test_evaluate_consecutive_solutions(): moving_average = MovingAverageThreshold(eta=0.5, gamma=4) # The threshold is set at 7100, hence the solution is accepted. - assert_(moving_average(rnd.RandomState(), One(), One(), VarObj(7100))) + assert_(moving_average(rnd.default_rng(), One(), One(), VarObj(7100))) # The threshold is set at 7125, hence the solution is accepted. - result = moving_average(rnd.RandomState(), One(), One(), VarObj(7200)) + result = moving_average(rnd.default_rng(), One(), One(), VarObj(7200)) assert_(not result) # The threshold is set at 7120, hence the solution is accepted. - assert_(moving_average(rnd.RandomState(), One(), One(), VarObj(7120))) + assert_(moving_average(rnd.default_rng(), One(), One(), VarObj(7120))) def test_history(): @@ -108,17 +108,17 @@ def test_history(): """ moving_average = MovingAverageThreshold(eta=0.5, gamma=4) - moving_average(rnd.RandomState(), One(), One(), VarObj(7100)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7100)) assert_equal(moving_average.history, [7100]) - moving_average(rnd.RandomState(), One(), One(), VarObj(7200)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7200)) assert_equal(moving_average.history, [7100, 7200]) - moving_average(rnd.RandomState(), One(), One(), VarObj(7120)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7120)) assert_equal(moving_average.history, [7100, 7200, 7120]) - moving_average(rnd.RandomState(), One(), One(), VarObj(7100)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7100)) assert_equal(moving_average.history, [7100, 7200, 7120, 7100]) - moving_average(rnd.RandomState(), One(), One(), VarObj(7200)) + moving_average(rnd.default_rng(), One(), One(), VarObj(7200)) assert_equal(moving_average.history, [7200, 7120, 7100, 7200]) diff --git a/alns/accept/tests/test_non_linear_great_deluge.py b/alns/accept/tests/test_non_linear_great_deluge.py index 3a685a5a..e6ab94eb 100644 --- 
a/alns/accept/tests/test_non_linear_great_deluge.py +++ b/alns/accept/tests/test_non_linear_great_deluge.py @@ -51,14 +51,14 @@ def test_raise_zero_best(): nlgd = NonLinearGreatDeluge(2, 0.1, 0.01, 1) with assert_raises(ValueError): - nlgd(rnd.RandomState(), Zero(), One(), One()) + nlgd(rnd.default_rng(), Zero(), One(), One()) def test_accepts_below_threshold(): nlgd = NonLinearGreatDeluge(1.01, 0.5, 0.01, 1) # Initial threshold is set at 1.01, hence One should be accepted. - assert_(nlgd(rnd.RandomState(), One(), Zero(), One())) + assert_(nlgd(rnd.default_rng(), One(), Zero(), One())) def test_rejects_above_threshold(): @@ -66,7 +66,7 @@ def test_rejects_above_threshold(): # Initial threshold is set at 1.99 and the current solution is Zero, # hence Two should be rejected. - assert_(not nlgd(rnd.RandomState(), One(), Zero(), Two())) + assert_(not nlgd(rnd.default_rng(), One(), Zero(), Two())) def test_rejects_equal_threshold(): @@ -74,7 +74,7 @@ def test_rejects_equal_threshold(): # Initial threshold is set at 2 and the current solution is Zero, # hence Two should be rejected. - assert_(not nlgd(rnd.RandomState(), One(), Zero(), Two())) + assert_(not nlgd(rnd.default_rng(), One(), Zero(), Two())) def test_accepts_improving_current(): @@ -82,7 +82,7 @@ def test_accepts_improving_current(): # Candidate is not below the threshold (2 == 2) but does improve the # current solution (2 < 3), hence candidate should be accepted. - assert_(nlgd(rnd.RandomState(), One(), VarObj(3), Two())) + assert_(nlgd(rnd.default_rng(), One(), VarObj(3), Two())) def test_evaluate_consecutive_solutions(): @@ -96,6 +96,6 @@ def test_evaluate_consecutive_solutions(): # candidate is below the threshold (0 < 1.51), so the second should be # accepted. The threshold is then exponentially decreased to 1.20. The # third candidate is below the theshold (1 < 1.20) and is accepted. 
- assert_(not nlgd(rnd.RandomState(), One(), Zero(), Two())) - assert_(nlgd(rnd.RandomState(), One(), Zero(), Zero())) - assert_(nlgd(rnd.RandomState(), One(), Zero(), One())) + assert_(not nlgd(rnd.default_rng(), One(), Zero(), Two())) + assert_(nlgd(rnd.default_rng(), One(), Zero(), Zero())) + assert_(nlgd(rnd.default_rng(), One(), Zero(), One())) diff --git a/alns/accept/tests/test_random_accept.py b/alns/accept/tests/test_random_accept.py index 0bb8bb56..09f70db6 100644 --- a/alns/accept/tests/test_random_accept.py +++ b/alns/accept/tests/test_random_accept.py @@ -67,7 +67,7 @@ def test_zero_prob_accepts_better(): solutions. """ rnd_vals = [1] - rng = Mock(spec_set=rnd.RandomState, random=lambda: rnd_vals.pop(0)) + rng = Mock(spec_set=rnd.Generator, random=lambda: rnd_vals.pop(0)) random_accept = RandomAccept(0, 0, 0.1) assert_(random_accept(rng, Zero(), One(), Zero())) @@ -81,8 +81,8 @@ def test_zero_prob_never_accept_worse(): """ random_accept = RandomAccept(0, 0, 0, "linear") - assert_(not random_accept(rnd.RandomState(), Zero(), One(), One())) - assert_(not random_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not random_accept(rnd.default_rng(), Zero(), One(), One())) + assert_(not random_accept(rnd.default_rng(), Zero(), Zero(), One())) def test_one_prob_always_accept(): @@ -93,7 +93,7 @@ def test_one_prob_always_accept(): random_accept = RandomAccept(1, 0, 0, "linear") for _ in range(100): - assert_(random_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(random_accept(rnd.default_rng(), Zero(), Zero(), One())) def test_linear_consecutive_solutions(): @@ -102,7 +102,7 @@ def test_linear_consecutive_solutions(): rejects consecutive solutions. 
""" rnd_vals = [0.9, 0.8, 0.7, 0.6, 0.5, 1] - rng = Mock(spec_set=rnd.RandomState, random=lambda: rnd_vals.pop(0)) + rng = Mock(spec_set=rnd.Generator, random=lambda: rnd_vals.pop(0)) random_accept = RandomAccept(1, 0, 0.1, "linear") # For the first five, the probability is, resp., 1, 0.9, 0.8, 0.7, 0.6 @@ -121,7 +121,7 @@ def test_exponential_consecutive_solutions(): and rejects consecutive solutions. """ rnd_vals = [0.5, 0.25, 0.125, 1] - rng = Mock(spec_set=rnd.RandomState, random=lambda: rnd_vals.pop(0)) + rng = Mock(spec_set=rnd.Generator, random=lambda: rnd_vals.pop(0)) random_accept = RandomAccept(1, 0, 0.5, "exponential") # For the first three, the probability is, resp., 1, 0.5, 0.25 diff --git a/alns/accept/tests/test_random_walk.py b/alns/accept/tests/test_random_walk.py index c5bd47b3..f19268e5 100644 --- a/alns/accept/tests/test_random_walk.py +++ b/alns/accept/tests/test_random_walk.py @@ -10,7 +10,7 @@ def test_accepts_better(): Tests if the always accept method accepts a better solution. """ always_accept = AlwaysAccept() - assert_(always_accept(rnd.RandomState(), One(), One(), Zero())) + assert_(always_accept(rnd.default_rng(), One(), One(), Zero())) def test_accepts_worse(): @@ -18,7 +18,7 @@ def test_accepts_worse(): Tests if the always accept method accepts a worse solution. """ always_accept = AlwaysAccept() - assert_(always_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(always_accept(rnd.default_rng(), Zero(), Zero(), One())) def test_accepts_equal(): @@ -27,4 +27,4 @@ def test_accepts_equal(): same objective value. 
""" always_accept = AlwaysAccept() - assert_(always_accept(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(always_accept(rnd.default_rng(), Zero(), Zero(), Zero())) diff --git a/alns/accept/tests/test_record_to_record_travel.py b/alns/accept/tests/test_record_to_record_travel.py index 9259c0e6..ab6ce328 100644 --- a/alns/accept/tests/test_record_to_record_travel.py +++ b/alns/accept/tests/test_record_to_record_travel.py @@ -58,7 +58,7 @@ def test_properties(start, end, step, method): def test_accepts_better(): record_travel = RecordToRecordTravel(1, 0, 0.1) - assert_(record_travel(rnd.RandomState(), One(), Zero(), Zero())) + assert_(record_travel(rnd.default_rng(), One(), Zero(), Zero())) def test_rejects_worse(): @@ -66,7 +66,7 @@ def test_rejects_worse(): # This results in a relative worsening of plus one, which is bigger than # the threshold (0.5). - assert_(not record_travel(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not record_travel(rnd.default_rng(), Zero(), Zero(), One())) def test_accepts_equal(): @@ -74,7 +74,7 @@ def test_accepts_equal(): # Even a the strictest threshold, this should be accepted since the # relative improvement is zero (they are equal). - assert_(record_travel(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(record_travel(rnd.default_rng(), Zero(), Zero(), Zero())) def test_linear_threshold_update(): @@ -84,10 +84,10 @@ def test_linear_threshold_update(): # worsening is plus one, so this should be accepted (lower or equal to # threshold). for _ in range(5): - assert_(record_travel(rnd.RandomState(), Zero(), Zero(), One())) + assert_(record_travel(rnd.default_rng(), Zero(), Zero(), One())) # Threshold is now zero, so this should no longer be accepted. 
- assert_(not record_travel(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not record_travel(rnd.default_rng(), Zero(), Zero(), One())) def test_exponential_threshold_update(): @@ -96,8 +96,8 @@ def test_exponential_threshold_update(): # The relative worsening is plus one, so this should be accepted initially, # as the threshold is 5, resp. 0.5. In the second case, 1 > 0.5, so the # second should be rejected. - assert_(record_travel(rnd.RandomState(), Zero(), Zero(), One())) - assert_(not record_travel(rnd.RandomState(), Zero(), Zero(), One())) + assert_(record_travel(rnd.default_rng(), Zero(), Zero(), One())) + assert_(not record_travel(rnd.default_rng(), Zero(), Zero(), One())) @mark.parametrize( diff --git a/alns/accept/tests/test_simulated_annealing.py b/alns/accept/tests/test_simulated_annealing.py index f7742db9..14749650 100644 --- a/alns/accept/tests/test_simulated_annealing.py +++ b/alns/accept/tests/test_simulated_annealing.py @@ -99,7 +99,7 @@ def test_end_temperature(end: float): def test_accepts_better(): for _ in range(1, 100): simulated_annealing = SimulatedAnnealing(2, 1, 1) - assert_(simulated_annealing(rnd.RandomState(), One(), Zero(), Zero())) + assert_(simulated_annealing(rnd.default_rng(), One(), Zero(), Zero())) def test_accepts_equal(): @@ -108,7 +108,7 @@ def test_accepts_equal(): for _ in range(100): # This results in an acceptance probability of exp{0}, that is, one. # Thus, the candidate state should always be accepted. - assert_(simulated_annealing(rnd.RandomState(), One(), One(), One())) + assert_(simulated_annealing(rnd.default_rng(), One(), One(), One())) def test_linear_random_solutions(): @@ -118,14 +118,14 @@ def test_linear_random_solutions(): """ simulated_annealing = SimulatedAnnealing(2, 1, 1, "linear") - state = rnd.RandomState(0) + rng = rnd.default_rng(1) - # Using the above seed, the first two random numbers are 0.55 and .72, + # Using the above seed, the first two random numbers are 0.51 and 0.95, # respectively. 
The acceptance probability is 0.61 first, so the first - # should be accepted (0.61 > 0.55). Thereafter, it drops to 0.37, so the - # second should not (0.37 < 0.72). - assert_(simulated_annealing(state, Zero(), Zero(), One())) - assert_(not simulated_annealing(state, Zero(), Zero(), One())) + # should be accepted (0.61 > 0.51). Thereafter, it drops to 0.37, so the + # second should not (0.37 < 0.95). + assert_(simulated_annealing(rng, Zero(), Zero(), One())) + assert_(not simulated_annealing(rng, Zero(), Zero(), One())) def test_exponential_random_solutions(): @@ -136,33 +136,10 @@ def test_exponential_random_solutions(): """ simulated_annealing = SimulatedAnnealing(2, 1, 0.5, "exponential") - state = rnd.RandomState(0) + rng = rnd.default_rng(1) - assert_(simulated_annealing(state, Zero(), Zero(), One())) - assert_(not simulated_annealing(state, Zero(), Zero(), One())) - - -def test_accepts_generator_and_random_state(): - """ - Tests if SimulatedAnnealing works with both Generator and RandomState - randomness classes. 
- - See also https://numpy.org/doc/1.18/reference/random/index.html#quick-start - """ - - class Old: # old RandomState interface mock - def random_sample(self): # pylint: disable=no-self-use - return 0.5 - - simulated_annealing = SimulatedAnnealing(2, 1, 1) - assert_(simulated_annealing(Old(), One(), One(), Zero())) - - class New: # new Generator interface mock - def random(self): # pylint: disable=no-self-use - return 0.5 - - simulated_annealing = SimulatedAnnealing(2, 1, 1) - assert_(simulated_annealing(New(), One(), One(), Zero())) + assert_(simulated_annealing(rng, Zero(), Zero(), One())) + assert_(not simulated_annealing(rng, Zero(), Zero(), One())) @mark.parametrize( diff --git a/alns/accept/tests/test_threshold_accept.py b/alns/accept/tests/test_threshold_accept.py index 3adeb879..8fd7d738 100644 --- a/alns/accept/tests/test_threshold_accept.py +++ b/alns/accept/tests/test_threshold_accept.py @@ -11,7 +11,7 @@ def test_accepts_better(): threshold_accept = RecordToRecordTravel(1, 0, 0.1, cmp_best=False) - assert_(threshold_accept(rnd.RandomState(), Zero(), One(), Zero())) + assert_(threshold_accept(rnd.default_rng(), Zero(), One(), Zero())) def test_rejects_worse(): @@ -19,7 +19,7 @@ def test_rejects_worse(): # This results in a relative worsening of plus one, which is bigger than # the threshold (0.5). - assert_(not threshold_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not threshold_accept(rnd.default_rng(), Zero(), Zero(), One())) def test_accepts_equal(): @@ -27,7 +27,7 @@ def test_accepts_equal(): # Even at the strictest threshold, this should be accepted since the # relative improvement is zero (they are equal). - assert_(threshold_accept(rnd.RandomState(), Zero(), Zero(), Zero())) + assert_(threshold_accept(rnd.default_rng(), Zero(), Zero(), Zero())) def test_linear_threshold_update(): @@ -37,10 +37,10 @@ def test_linear_threshold_update(): # worsening is plus one, so this should be accepted (lower or equal to # threshold). 
for _ in range(5): - assert_(threshold_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(threshold_accept(rnd.default_rng(), Zero(), Zero(), One())) # Threshold is now zero, so this should no longer be accepted. - assert_(not threshold_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(not threshold_accept(rnd.default_rng(), Zero(), Zero(), One())) def test_exponential_threshold_update(): @@ -49,5 +49,5 @@ def test_exponential_threshold_update(): # The relative worsening is plus one, so this should be accepted initially, # as the threshold is 5, resp. 0.5. In the second case, 1 > 0.5, so the # second should be rejected. - assert_(threshold_accept(rnd.RandomState(), Zero(), Zero(), One())) - assert_(not threshold_accept(rnd.RandomState(), Zero(), Zero(), One())) + assert_(threshold_accept(rnd.default_rng(), Zero(), Zero(), One())) + assert_(not threshold_accept(rnd.default_rng(), Zero(), Zero(), One())) diff --git a/alns/select/AlphaUCB.py b/alns/select/AlphaUCB.py index 2381278c..6fadda3d 100644 --- a/alns/select/AlphaUCB.py +++ b/alns/select/AlphaUCB.py @@ -101,7 +101,7 @@ def scores(self) -> List[float]: def alpha(self) -> float: return self._alpha - def __call__(self, rnd, best, curr): + def __call__(self, rng, best, curr): """ Returns the (destroy, repair) operator pair that maximises the average reward and exploration bonus. 
diff --git a/alns/select/MABSelector.py b/alns/select/MABSelector.py index 8a1221a8..da1bd7d2 100644 --- a/alns/select/MABSelector.py +++ b/alns/select/MABSelector.py @@ -1,7 +1,7 @@ from typing import List, Optional, Tuple import numpy as np -from numpy.random import RandomState +from numpy.random import Generator from alns.Outcome import Outcome from alns.State import ContextualState @@ -125,7 +125,7 @@ def mab(self) -> "MAB": def __call__( # type: ignore[override] self, - rnd_state: RandomState, + rng: Generator, best: ContextualState, curr: ContextualState, ) -> Tuple[int, int]: @@ -137,7 +137,7 @@ def __call__( # type: ignore[override] # The MAB object has not yet been fit. In that case we return any # feasible operator index pair as a first observation. allowed = np.argwhere(self._op_coupling) - idx = rnd_state.randint(len(allowed)) + idx = rng.integers(len(allowed)) return allowed[idx][0], allowed[idx][1] has_ctx = self._mab.is_contextual diff --git a/alns/select/OperatorSelectionScheme.py b/alns/select/OperatorSelectionScheme.py index 4514e991..dba5e94c 100644 --- a/alns/select/OperatorSelectionScheme.py +++ b/alns/select/OperatorSelectionScheme.py @@ -2,7 +2,7 @@ from typing import Optional, Tuple import numpy as np -from numpy.random import RandomState +from numpy.random import Generator from alns.Outcome import Outcome from alns.State import State @@ -56,7 +56,7 @@ def op_coupling(self) -> np.ndarray: @abstractmethod def __call__( - self, rnd: RandomState, best: State, curr: State + self, rng: Generator, best: State, curr: State ) -> Tuple[int, int]: """ Determine which destroy and repair operator pair to apply in this @@ -64,8 +64,8 @@ def __call__( Parameters ---------- - rnd_state - Random state object, to be used for random number generation. + rng + Random number generator object. best The best solution state observed so far. 
current diff --git a/alns/select/RandomSelect.py b/alns/select/RandomSelect.py index dab21c14..82f2909e 100644 --- a/alns/select/RandomSelect.py +++ b/alns/select/RandomSelect.py @@ -9,12 +9,12 @@ class RandomSelect(OperatorSelectionScheme): pairs respect the operator coupling matrix. """ - def __call__(self, rnd, best, curr): + def __call__(self, rng, best, curr): """ Selects a (destroy, repair) operator pair with uniform probability. """ allowed = np.argwhere(self._op_coupling) - idx = rnd.randint(len(allowed)) + idx = rng.integers(len(allowed)) return tuple(allowed[idx]) diff --git a/alns/select/RouletteWheel.py b/alns/select/RouletteWheel.py index 84644283..c1499a6a 100644 --- a/alns/select/RouletteWheel.py +++ b/alns/select/RouletteWheel.py @@ -1,7 +1,7 @@ from typing import List, Optional, Tuple import numpy as np -from numpy.random import RandomState +from numpy.random import Generator from alns.State import State from alns.select.OperatorSelectionScheme import OperatorSelectionScheme @@ -102,7 +102,7 @@ def decay(self) -> float: return self._decay def __call__( - self, rnd_state: RandomState, best: State, curr: State + self, rng: Generator, best: State, curr: State ) -> Tuple[int, int]: """ Selects a destroy and repair operator pair to apply in this iteration. @@ -112,8 +112,8 @@ def __call__( Parameters ---------- - rnd_state - Random state object, to be used for random number generation. + rng + Random number generator. best The best solution state observed so far. 
current @@ -127,7 +127,7 @@ def __call__( def select(op_weights): probs = op_weights / np.sum(op_weights) - return rnd_state.choice(range(len(op_weights)), p=probs) + return rng.choice(range(len(op_weights)), p=probs) d_idx = select(self._d_weights) coupled_r_idcs = np.flatnonzero(self.op_coupling[d_idx]) diff --git a/alns/select/SegmentedRouletteWheel.py b/alns/select/SegmentedRouletteWheel.py index 8864cfff..54cff00f 100644 --- a/alns/select/SegmentedRouletteWheel.py +++ b/alns/select/SegmentedRouletteWheel.py @@ -78,7 +78,7 @@ def __init__( def seg_length(self): return self._seg_length - def __call__(self, rnd_state, best: State, curr: State): + def __call__(self, rng, best: State, curr: State): self._iter += 1 if self._iter % self._seg_length == 0: @@ -92,7 +92,7 @@ def __call__(self, rnd_state, best: State, curr: State): self._reset_segment_weights() - return super().__call__(rnd_state, best, curr) + return super().__call__(rng, best, curr) def update(self, cand, d_idx, r_idx, outcome): self._d_seg_weights[d_idx] += self._scores[outcome] diff --git a/alns/select/tests/test_alpha_ucb.py b/alns/select/tests/test_alpha_ucb.py index 275ccd4c..84732e95 100644 --- a/alns/select/tests/test_alpha_ucb.py +++ b/alns/select/tests/test_alpha_ucb.py @@ -58,21 +58,21 @@ def test_raises_invalid_arguments( def test_call_with_only_one_operator_pair(): # Only one operator pair, so the algorithm should select (0, 0). select = AlphaUCB([2, 1, 1, 0], 0.5, 1, 1) - state = rnd.RandomState() + rng = rnd.default_rng() - selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) assert_equal(selected, (0, 0)) def test_update_with_two_operator_pairs(): select = AlphaUCB([2, 1, 1, 0], 0.5, 2, 1) - state = rnd.RandomState() + rng = rnd.default_rng() # Avg. reward for (0, 0) after this is 2, for (1, 0) is still 1 (default). select.update(Zero(), 0, 0, outcome=Outcome.BEST) # So now (0, 0) is selected again. 
- selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) assert_equal(selected, (0, 0)) # One more update. Avg. reward goes to 1, and number of times to 2. @@ -80,5 +80,5 @@ def test_update_with_two_operator_pairs(): # The Q value of (0, 0) is now approx 1.432, and that of (1, 0) is now # approx 1.74. So (1, 0) is selected. - selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) assert_equal(selected, (1, 0)) diff --git a/alns/select/tests/test_mab_selector.py b/alns/select/tests/test_mab_selector.py index 0f37ab41..e3265127 100644 --- a/alns/select/tests/test_mab_selector.py +++ b/alns/select/tests/test_mab_selector.py @@ -73,42 +73,42 @@ def test_call_with_only_one_operator_pair(): select = MABSelector( [2, 1, 1, 0], 1, 1, LearningPolicy.EpsilonGreedy(0.15) ) - state = rnd.RandomState() + rng = rnd.default_rng() for _ in range(10): - selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) assert_equal(selected, (0, 0)) def test_mab_epsilon_greedy(): - state = rnd.RandomState() + rng = rnd.default_rng() # epsilon=0 is equivalent to greedy selection select = MABSelector([2, 1, 1, 0], 2, 1, LearningPolicy.EpsilonGreedy(0.0)) select.update(Zero(), 0, 0, outcome=Outcome.BETTER) - selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) for _ in range(10): - selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) assert_equal(selected, (0, 0)) select.update(Zero(), 1, 0, outcome=Outcome.BEST) for _ in range(10): - selected = select(state, Zero(), Zero()) + selected = select(rng, Zero(), Zero()) assert_equal(selected, (1, 0)) @mark.parametrize("alpha", [0.25, 0.5]) def test_mab_ucb1(alpha): - state = rnd.RandomState() + rng = rnd.default_rng() select = MABSelector([2, 1, 1, 0], 2, 1, LearningPolicy.UCB1(alpha)) select.update(Zero(), 0, 0, outcome=Outcome.BEST) - mab_select = select(state, Zero(), Zero()) + mab_select = select(rng, Zero(), 
Zero()) assert_equal(mab_select, (0, 0)) select.update(Zero(), 0, 0, outcome=Outcome.REJECT) - mab_select = select(state, Zero(), Zero()) + mab_select = select(rng, Zero(), Zero()) assert_equal(mab_select, (0, 0)) @@ -125,7 +125,7 @@ def test_contextual_mab_requires_context(): def text_contextual_mab_uses_context(): - state = rnd.RandomState() + rng = rnd.default_rng() select = MABSelector( [2, 1, 1, 0], 2, @@ -142,8 +142,8 @@ def text_contextual_mab_uses_context(): select.update(ZeroWithOneContext(), 1, 0, outcome=Outcome.REJECT) select.update(ZeroWithOneContext(), 0, 0, outcome=Outcome.BEST) - mab_select = select(state, ZeroWithZeroContext(), ZeroWithZeroContext()) + mab_select = select(rng, ZeroWithZeroContext(), ZeroWithZeroContext()) assert_equal(mab_select, (1, 0)) - mab_select = select(state, ZeroWithZeroContext(), ZeroWithZeroContext()) + mab_select = select(rng, ZeroWithZeroContext(), ZeroWithZeroContext()) assert_equal(mab_select, (0, 0)) diff --git a/alns/select/tests/test_random_select.py b/alns/select/tests/test_random_select.py index ffed712d..7b8d4658 100644 --- a/alns/select/tests/test_random_select.py +++ b/alns/select/tests/test_random_select.py @@ -7,7 +7,7 @@ def test_op_coupling(): - rnd_state = rnd.RandomState(1) + rng = rnd.default_rng(1) # For i in {1..5}, each destroy operator i is coupled with repair operator # i. So only (i, i) pairs can be selected. 
@@ -15,18 +15,18 @@ def test_op_coupling(): select = RandomSelect(5, 5, op_coupling) for _ in range(1_000): - d_idx, r_idx = select(rnd_state, Zero(), Zero()) + d_idx, r_idx = select(rng, Zero(), Zero()) assert_(d_idx == r_idx) def test_uniform_selection(): - rnd_state = rnd.RandomState(1) + rng = rnd.default_rng(1) histogram = np.zeros((2, 2)) select = RandomSelect(2, 2) for _ in range(10_000): - d_idx, r_idx = select(rnd_state, Zero(), Zero()) + d_idx, r_idx = select(rng, Zero(), Zero()) histogram[d_idx, r_idx] += 1 # There are four operator pair combinations, so each pair should have a @@ -37,7 +37,7 @@ def test_uniform_selection(): def test_uniform_selection_op_coupling(): - rnd_state = rnd.RandomState(1) + rng = rnd.default_rng(1) histogram = np.zeros((2, 2)) op_coupling = np.eye(2) @@ -46,7 +46,7 @@ def test_uniform_selection_op_coupling(): select = RandomSelect(2, 2, op_coupling) for _ in range(10_000): - d_idx, r_idx = select(rnd_state, Zero(), Zero()) + d_idx, r_idx = select(rng, Zero(), Zero()) histogram[d_idx, r_idx] += 1 # There are three OK operator pair combinations, so each such pair should @@ -63,8 +63,8 @@ def test_uniform_selection_op_coupling(): def test_single_operators(): - rnd_state = rnd.RandomState(1) + rng = rnd.default_rng(1) select = RandomSelect(1, 1) # Only one (destroy, repair) operator pair, so should return (0, 0). - assert_(select(rnd_state, Zero(), Zero()) == (0, 0)) + assert_(select(rng, Zero(), Zero()) == (0, 0)) diff --git a/alns/select/tests/test_roulette_wheel.py b/alns/select/tests/test_roulette_wheel.py index 2b43f211..95a46cbc 100644 --- a/alns/select/tests/test_roulette_wheel.py +++ b/alns/select/tests/test_roulette_wheel.py @@ -82,12 +82,12 @@ def test_select_coupled_operators(op_coupling): Test if the indices of the selected operators correspond to the ones that are given by the operator coupling. 
""" - rnd_state = rnd.RandomState() + rng = rnd.default_rng() n_destroy, n_repair = op_coupling.shape select = RouletteWheel( [0, 0, 0, 0], 0, n_destroy, n_repair, op_coupling=op_coupling ) - d_idx, r_idx = select(rnd_state, Zero(), Zero()) + d_idx, r_idx = select(rng, Zero(), Zero()) assert_((d_idx, r_idx) in np.argwhere(op_coupling == 1)) diff --git a/alns/select/tests/test_segmented_roulette_wheel.py b/alns/select/tests/test_segmented_roulette_wheel.py index b053785c..89cc8e6b 100644 --- a/alns/select/tests/test_segmented_roulette_wheel.py +++ b/alns/select/tests/test_segmented_roulette_wheel.py @@ -54,12 +54,12 @@ def test_does_not_raise_valid_decay(decay: float): ], ) def test_update(scores: List[float], decay: float, expected: List[float]): - rnd_state = np.random.RandomState(1) + rng = np.random.default_rng(1) select = SegmentedRouletteWheel(scores, decay, 1, 1, 1) # TODO other weights? select.update(Zero(), 0, 0, 1) - select(rnd_state, Zero(), Zero()) + select(rng, Zero(), Zero()) assert_almost_equal(select.destroy_weights[0], expected[0]) assert_almost_equal(select.repair_weights[0], expected[1]) diff --git a/alns/stop/MaxIterations.py b/alns/stop/MaxIterations.py index c01f5426..95af4472 100644 --- a/alns/stop/MaxIterations.py +++ b/alns/stop/MaxIterations.py @@ -1,4 +1,4 @@ -from numpy.random import RandomState +from numpy.random import Generator from alns.State import State @@ -19,7 +19,7 @@ def __init__(self, max_iterations: int): def max_iterations(self) -> int: return self._max_iterations - def __call__(self, rnd: RandomState, best: State, current: State) -> bool: + def __call__(self, rng: Generator, best: State, current: State) -> bool: self._current_iteration += 1 return self._current_iteration > self.max_iterations diff --git a/alns/stop/MaxRuntime.py b/alns/stop/MaxRuntime.py index 12248f66..298b441e 100644 --- a/alns/stop/MaxRuntime.py +++ b/alns/stop/MaxRuntime.py @@ -1,7 +1,7 @@ import time from typing import Optional -from numpy.random import 
RandomState +from numpy.random import Generator from alns.State import State @@ -22,7 +22,7 @@ def __init__(self, max_runtime: float): def max_runtime(self) -> float: return self._max_runtime - def __call__(self, rnd: RandomState, best: State, current: State) -> bool: + def __call__(self, rng: Generator, best: State, current: State) -> bool: if self._start_runtime is None: self._start_runtime = time.perf_counter() diff --git a/alns/stop/NoImprovement.py b/alns/stop/NoImprovement.py index e494be76..4285727e 100644 --- a/alns/stop/NoImprovement.py +++ b/alns/stop/NoImprovement.py @@ -1,6 +1,6 @@ from typing import Optional -from numpy.random import RandomState +from numpy.random import Generator from alns.State import State @@ -28,7 +28,7 @@ def __init__(self, max_iterations: int): def max_iterations(self) -> int: return self._max_iterations - def __call__(self, rnd: RandomState, best: State, current: State) -> bool: + def __call__(self, rng: Generator, best: State, current: State) -> bool: if self._target is None or best.objective() < self._target: self._target = best.objective() self._counter = 0 diff --git a/alns/stop/StoppingCriterion.py b/alns/stop/StoppingCriterion.py index 9669ed2c..c3bc3239 100644 --- a/alns/stop/StoppingCriterion.py +++ b/alns/stop/StoppingCriterion.py @@ -1,6 +1,6 @@ from typing import Protocol -from numpy.random import RandomState +from numpy.random import Generator from alns.State import State @@ -10,13 +10,13 @@ class StoppingCriterion(Protocol): Protocol describing a stopping criterion. """ - def __call__(self, rnd: RandomState, best: State, current: State) -> bool: + def __call__(self, rng: Generator, best: State, current: State) -> bool: """ Determines whether to stop. Parameters ---------- - rnd + rng May be used to draw random numbers from. best The best solution state observed so far. @@ -28,3 +28,4 @@ def __call__(self, rnd: RandomState, best: State, current: State) -> bool: bool Whether to stop iterating (True), or not (False). 
""" + ... # pragma: no cover diff --git a/alns/stop/tests/test_max_iterations.py b/alns/stop/tests/test_max_iterations.py index 66ed97ed..7e936967 100644 --- a/alns/stop/tests/test_max_iterations.py +++ b/alns/stop/tests/test_max_iterations.py @@ -1,5 +1,5 @@ import pytest -from numpy.random import RandomState +from numpy.random import default_rng from numpy.testing import assert_, assert_equal, assert_raises from alns.stop import MaxIterations @@ -34,18 +34,18 @@ def test_max_iterations(max_iterations): def test_before_max_iterations(): stop = MaxIterations(100) - rnd = RandomState(0) + rng = default_rng(0) for _ in range(100): - assert_(not stop(rnd, Zero(), Zero())) + assert_(not stop(rng, Zero(), Zero())) def test_after_max_iterations(): stop = MaxIterations(100) - rnd = RandomState() + rng = default_rng() for _ in range(100): - stop(rnd, Zero(), Zero()) + stop(rng, Zero(), Zero()) for _ in range(100): - assert_(stop(rnd, Zero(), Zero())) + assert_(stop(rng, Zero(), Zero())) diff --git a/alns/stop/tests/test_max_runtime.py b/alns/stop/tests/test_max_runtime.py index bb8bf21e..dd667cbf 100644 --- a/alns/stop/tests/test_max_runtime.py +++ b/alns/stop/tests/test_max_runtime.py @@ -1,7 +1,7 @@ import time import pytest -from numpy.random import RandomState +from numpy.random import default_rng from numpy.testing import assert_, assert_equal, assert_raises from alns.stop import MaxRuntime @@ -49,17 +49,17 @@ def test_max_runtime(max_runtime): @pytest.mark.parametrize("max_runtime", [0.01, 0.05, 0.10]) def test_before_max_runtime(max_runtime): stop = MaxRuntime(max_runtime) - rnd = RandomState() + rng = default_rng() for _ in range(100): - assert_(not stop(rnd, Zero(), Zero())) + assert_(not stop(rng, Zero(), Zero())) @pytest.mark.parametrize("max_runtime", [0.01, 0.05, 0.10]) def test_after_max_runtime(max_runtime): stop = MaxRuntime(max_runtime) - rnd = RandomState() - stop(rnd, Zero(), Zero()) # Trigger the first time measurement + rng = default_rng() + stop(rng, 
Zero(), Zero()) # Trigger the first time measurement sleep(max_runtime) for _ in range(100): - assert_(stop(rnd, Zero(), Zero())) + assert_(stop(rng, Zero(), Zero())) diff --git a/alns/stop/tests/test_no_improvement.py b/alns/stop/tests/test_no_improvement.py index d73e8d45..410db162 100644 --- a/alns/stop/tests/test_no_improvement.py +++ b/alns/stop/tests/test_no_improvement.py @@ -1,5 +1,5 @@ import pytest -from numpy.random import RandomState +from numpy.random import default_rng from numpy.testing import assert_, assert_equal, assert_raises from alns.stop import NoImprovement @@ -29,10 +29,10 @@ def test_zero_max_iterations(): Test if setting max_iterations to zero always stops. """ stop = NoImprovement(0) - rnd = RandomState() + rng = default_rng() - assert_(stop(rnd, One(), Zero())) - assert_(stop(rnd, Zero(), Zero())) + assert_(stop(rng, One(), Zero())) + assert_(stop(rng, Zero(), Zero())) def test_one_max_iterations(): @@ -41,11 +41,11 @@ def test_one_max_iterations(): best solution has been found. """ stop = NoImprovement(1) - rnd = RandomState() + rng = default_rng() - assert_(not stop(rnd, One(), Zero())) - assert_(not stop(rnd, Zero(), Zero())) - assert_(stop(rnd, Zero(), Zero())) + assert_(not stop(rng, One(), Zero())) + assert_(not stop(rng, Zero(), Zero())) + assert_(stop(rng, Zero(), Zero())) @pytest.mark.parametrize("n", [10, 100, 1000]) @@ -56,13 +56,13 @@ def test_n_max_iterations_non_improving(n): the criterion should stop. """ stop = NoImprovement(n) - rnd = RandomState() + rng = default_rng() for _ in range(n): - assert_(not stop(rnd, Zero(), Zero())) + assert_(not stop(rng, Zero(), Zero())) for _ in range(n): - assert_(stop(rnd, Zero(), Zero())) + assert_(stop(rng, Zero(), Zero())) @pytest.mark.parametrize("n, k", [(10, 2), (100, 20), (1000, 200)]) @@ -74,13 +74,13 @@ def test_n_max_iterations_with_single_improvement(n, k): the criterion should stop. 
""" stop = NoImprovement(n) - rnd = RandomState() + rng = default_rng() for _ in range(k): - assert_(not stop(rnd, One(), Zero())) + assert_(not stop(rng, One(), Zero())) for _ in range(n): - assert_(not stop(rnd, Zero(), Zero())) + assert_(not stop(rng, Zero(), Zero())) for _ in range(n): - assert_(stop(rnd, Zero(), Zero())) + assert_(stop(rng, Zero(), Zero())) diff --git a/alns/tests/test_alns.py b/alns/tests/test_alns.py index 701152b3..4decdb29 100644 --- a/alns/tests/test_alns.py +++ b/alns/tests/test_alns.py @@ -23,7 +23,7 @@ def get_alns_instance( """ Test helper method. """ - alns = ALNS(rnd.RandomState(seed)) + alns = ALNS(rnd.default_rng(seed)) if repair_operators is not None: for idx, repair_operator in enumerate(repair_operators): @@ -72,13 +72,13 @@ def test_on_best_is_called(): Tests if the callback is invoked when a new global best is found. """ alns = get_alns_instance( - [lambda state, rnd_state: Zero()], [lambda state, rnd_state: Zero()] + [lambda state, rng: Zero()], [lambda state, rng: Zero()] ) # Called when a new global best is found. In this case, that happens once: # in the only iteration below. 
We change the objective, and test whether # that is indeed the solution that is returned - def callback(state, rnd_state): + def callback(state, rng): state.obj = 1 alns.on_best(callback) @@ -91,19 +91,19 @@ def callback(state, rnd_state): def test_other_callbacks_are_called(): alns = get_alns_instance( - [lambda state, rnd: state], - [lambda state, rnd: VarObj(rnd.random())], + [lambda state, rng: state], + [lambda state, rng: VarObj(rng.random())], seed=1, ) registry = dict(on_better=False, on_accept=False, on_reject=False) - def mock_callback(state, rnd, key): + def mock_callback(state, rng, key): registry[key] = True - alns.on_better(lambda state, rnd: mock_callback(state, rnd, "on_better")) - alns.on_accept(lambda state, rnd: mock_callback(state, rnd, "on_accept")) - alns.on_reject(lambda state, rnd: mock_callback(state, rnd, "on_reject")) + alns.on_better(lambda state, rng: mock_callback(state, rng, "on_better")) + alns.on_accept(lambda state, rng: mock_callback(state, rng, "on_accept")) + alns.on_reject(lambda state, rng: mock_callback(state, rng, "on_reject")) select = RouletteWheel([1, 1, 1, 1], 0.5, 1, 1) accept = SimulatedAnnealing(1_000, 1, 0.05) @@ -125,7 +125,7 @@ def test_add_destroy_operator(): alns = ALNS() for count in [1, 2]: - alns.add_destroy_operator(lambda state, rnd: state, name=str(count)) + alns.add_destroy_operator(lambda state, rng: state, name=str(count)) assert_equal(len(alns.destroy_operators), count) @@ -156,7 +156,7 @@ def test_add_repair_operator(): alns = ALNS() for count in [1, 2]: - alns.add_repair_operator(lambda state, rnd: state, name=str(count)) + alns.add_repair_operator(lambda state, rng: state, name=str(count)) assert_equal(len(alns.repair_operators), count) @@ -186,7 +186,7 @@ def test_raises_missing_destroy_operator(): """ Tests if the algorithm raises when no destroy operators have been set. 
""" - alns = get_alns_instance(repair_operators=[lambda state, rnd: None]) + alns = get_alns_instance(repair_operators=[lambda state, rng: None]) # Pretend we have a destroy operator for the selection scheme, so that # does not raise an error. @@ -200,7 +200,7 @@ def test_raises_missing_repair_operator(): """ Tests if the algorithm raises when no repair operators have been set. """ - alns = get_alns_instance(destroy_operators=[lambda state, rnd: None]) + alns = get_alns_instance(destroy_operators=[lambda state, rng: None]) # Pretend we have a destroy operator for the selection scheme, so that # does not raise an error. @@ -216,7 +216,7 @@ def test_zero_max_iterations(): stopping criterion is zero max iterations. """ alns = get_alns_instance( - [lambda state, rnd: None], [lambda state, rnd: None] + [lambda state, rng: None], [lambda state, rng: None] ) initial_solution = One() @@ -235,7 +235,7 @@ def test_zero_max_runtime(): stopping criterion is zero max runtime. """ alns = get_alns_instance( - [lambda state, rnd: None], [lambda state, rnd: None] + [lambda state, rng: None], [lambda state, rng: None] ) initial_solution = One() @@ -249,11 +249,11 @@ def test_zero_max_runtime(): def test_iterate_kwargs_are_correctly_passed_to_operators(): - def test_operator(state, rnd, item): + def test_operator(state, rng, item): assert_(item is orig_item) return state - alns = get_alns_instance([lambda state, rnd, item: state], [test_operator]) + alns = get_alns_instance([lambda state, rng, item: state], [test_operator]) init_sol = One() select = RouletteWheel([1, 1, 1, 1], 0.5, 1, 1) @@ -270,12 +270,12 @@ def test_bugfix_pass_kwargs_to_on_best(): passed to iterate(). 
""" - def test_operator(state, rnd, item): + def test_operator(state, rng, item): assert_(item is orig_item) return Zero() # better, so on_best is triggered - alns = get_alns_instance([lambda state, rnd, item: state], [test_operator]) - alns.on_best(lambda state, rnd, item: state) + alns = get_alns_instance([lambda state, rng, item: state], [test_operator]) + alns.on_best(lambda state, rng, item: state) init_sol = One() select = RouletteWheel([1, 1, 1, 1], 0.5, 1, 1) @@ -295,7 +295,7 @@ def test_trivial_example(): solution is one, and any other operator returns zero. """ alns = get_alns_instance( - [lambda state, rnd: Zero()], [lambda state, rnd: Zero()] + [lambda state, rng: Zero()], [lambda state, rng: Zero()] ) select = RouletteWheel([1, 1, 1, 1], 0.5, 1, 1) @@ -304,15 +304,15 @@ def test_trivial_example(): assert_equal(result.best_state.objective(), 0) -@mark.parametrize("seed,desired", [(0, 0.01171), (1, 0.00011), (2, 0.01025)]) +@mark.parametrize("seed,desired", [(0, 0.00995), (1, 0.02648), (2, 0.00981)]) def test_fixed_seed_outcomes(seed: int, desired: float): """ Tests if fixing a seed results in deterministic outcomes even when using a 'random' acceptance criterion (here SA). """ alns = get_alns_instance( - [lambda state, rnd: VarObj(rnd.random_sample())], - [lambda state, rnd: None], + [lambda state, rng: VarObj(rng.random())], + [lambda state, rng: None], seed, ) @@ -329,7 +329,7 @@ def test_nonnegative_max_iterations(max_iterations): Test that the result statistics have size equal to max iterations (+1). """ alns = get_alns_instance( - [lambda state, rnd: Zero()], [lambda state, rnd: Zero()] + [lambda state, rng: Zero()], [lambda state, rng: Zero()] ) initial_solution = One() @@ -352,7 +352,7 @@ def test_nonnegative_max_runtime(max_runtime): Test that the result runtime statistics match the stopping criterion. 
""" alns = get_alns_instance( - [lambda state, rnd: Zero()], [lambda state, rnd: Zero()] + [lambda state, rng: Zero()], [lambda state, rng: Zero()] ) initial_solution = One() diff --git a/alns/tests/test_result.py b/alns/tests/test_result.py index 8e33150b..8defd43d 100644 --- a/alns/tests/test_result.py +++ b/alns/tests/test_result.py @@ -36,15 +36,15 @@ def get_statistics(): statistics.collect_objective(objective) # We should make sure these results are reproducible. - state = rnd.RandomState(1) + rng = rnd.default_rng(1) operators = ["test1", "test2", "test3"] for _ in range(100): - operator = state.choice(operators) + operator = rng.choice(operators) - statistics.collect_destroy_operator("d_" + operator, state.randint(4)) - statistics.collect_repair_operator("r_" + operator, state.randint(4)) + statistics.collect_destroy_operator("d_" + operator, rng.integers(4)) + statistics.collect_repair_operator("r_" + operator, rng.integers(4)) return statistics diff --git a/docs/source/examples/other single-trajectory heuristics.rst b/docs/source/examples/other single-trajectory heuristics.rst index bacc40b4..edf791ef 100644 --- a/docs/source/examples/other single-trajectory heuristics.rst +++ b/docs/source/examples/other single-trajectory heuristics.rst @@ -23,12 +23,12 @@ At a high level, one could thus implement the following: from alns import ALNS, State - def perturb(sol: State, rnd_state) -> State: + def perturb(sol: State, rng) -> State: return - def local_search(sol: State, rnd_state) -> State: + def local_search(sol: State, rng) -> State: return @@ -67,14 +67,14 @@ Then, a high-level implementation could look like: k: int - def perturb(sol: State, rnd_state, neighbourhood: Neighbourhood) -> State: + def perturb(sol: State, rng, neighbourhood: Neighbourhood) -> State: return def local_search( sol: State, - rnd_state, + rng, neighbourhood: Neighbourhood ) -> State: @@ -90,7 +90,7 @@ Then, a high-level implementation could look like: return - def on_best(sol: State, 
rnd_state, neighbourhood: Neighbourhood): + def on_best(sol: State, rng, neighbourhood: Neighbourhood): # New best solution: start again from first neighbourhood. neighbourhood.k = 1 @@ -123,12 +123,12 @@ At a high level, one could thus implement the following: from alns import ALNS, State - def destroy(sol: State, rnd_state) -> State: + def destroy(sol: State, rng) -> State: return - def greedy_randomised_repair(sol: State, rnd_state) -> State: + def greedy_randomised_repair(sol: State, rng) -> State: return diff --git a/docs/source/setup/template.rst b/docs/source/setup/template.rst index 0e03a16d..99aeded7 100644 --- a/docs/source/setup/template.rst +++ b/docs/source/setup/template.rst @@ -60,13 +60,13 @@ The following is a quickstart template that can help you get started: pass - def destroy(current: ProblemState, rnd_state: rnd.RandomState) -> ProblemState: + def destroy(current: ProblemState, rng: rnd.Generator) -> ProblemState: # TODO implement how to destroy the current state, and return the destroyed # state. Make sure to (deep)copy the current state before modifying! 
pass - def repair(destroyed: ProblemState, rnd_state: rnd.RandomState) -> ProblemState: + def repair(destroyed: ProblemState, rng: rnd.Generator) -> ProblemState: # TODO implement how to repair a destroyed state, and return it pass @@ -76,7 +76,7 @@ The following is a quickstart template that can help you get started: print(f"Initial solution objective is {init_sol.objective()}.") # Create ALNS and add one or more destroy and repair operators - alns = ALNS(rnd.RandomState(seed=42)) + alns = ALNS(rnd.default_rng(seed=42)) alns.add_destroy_operator(destroy) alns.add_repair_operator(repair) diff --git a/examples/alns_features.ipynb b/examples/alns_features.ipynb index fc2fe4ce..e44757d2 100644 --- a/examples/alns_features.ipynb +++ b/examples/alns_features.ipynb @@ -164,9 +164,9 @@ }, "outputs": [], "source": [ - "def random_remove(state: KnapsackState, rnd_state):\n", + "def random_remove(state: KnapsackState, rng):\n", " probs = state.x / state.x.sum()\n", - " to_remove = rnd_state.choice(np.arange(n), size=to_destroy(state), p=probs)\n", + " to_remove = rng.choice(np.arange(n), size=to_destroy(state), p=probs)\n", "\n", " assignments = state.x.copy()\n", " assignments[to_remove] = 0\n", @@ -184,7 +184,7 @@ }, "outputs": [], "source": [ - "def worst_remove(state: KnapsackState, rnd_state):\n", + "def worst_remove(state: KnapsackState, rng):\n", " merit = state.x * p / w\n", " by_merit = np.argsort(-merit)\n", " by_merit = by_merit[by_merit > 0]\n", @@ -220,9 +220,9 @@ }, "outputs": [], "source": [ - "def random_repair(state: KnapsackState, rnd_state):\n", + "def random_repair(state: KnapsackState, rng):\n", " unselected = np.argwhere(state.x == 0)\n", - " rnd_state.shuffle(unselected)\n", + " rng.shuffle(unselected)\n", "\n", " while True:\n", " can_insert = w[unselected] <= W - state.weight()\n", @@ -258,8 +258,8 @@ "outputs": [], "source": [ "def make_alns() -> ALNS:\n", - " rnd_state = np.random.RandomState(SEED)\n", - " alns = ALNS(rnd_state)\n", + " rng = 
np.random.default_rng(SEED)\n", + " alns = ALNS(rng)\n", "\n", " alns.add_destroy_operator(random_remove)\n", " alns.add_destroy_operator(worst_remove)\n", @@ -821,7 +821,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/examples/capacitated_vehicle_routing_problem.ipynb b/examples/capacitated_vehicle_routing_problem.ipynb index 4bc026bf..8509b4ba 100644 --- a/examples/capacitated_vehicle_routing_problem.ipynb +++ b/examples/capacitated_vehicle_routing_problem.ipynb @@ -21,7 +21,7 @@ "from alns import ALNS\n", "from alns.accept import RecordToRecordTravel\n", "from alns.select import RouletteWheel\n", - "from alns.stop import MaxRuntime" + "from alns.stop import MaxIterations" ] }, { @@ -258,13 +258,13 @@ }, "outputs": [], "source": [ - "def random_removal(state, rnd_state):\n", + "def random_removal(state, rng):\n", " \"\"\"\n", " Removes a number of randomly selected customers from the passed-in solution.\n", " \"\"\"\n", " destroyed = state.copy()\n", "\n", - " for customer in rnd_state.choice(\n", + " for customer in rng.choice(\n", " range(1, data[\"dimension\"]), customers_to_remove, replace=False\n", " ):\n", " destroyed.unassigned.append(customer)\n", @@ -302,12 +302,12 @@ }, "outputs": [], "source": [ - "def greedy_repair(state, rnd_state):\n", + "def greedy_repair(state, rng):\n", " \"\"\"\n", " Inserts the unassigned customers in the best route. 
If there are no\n", " feasible insertions, then a new route is created.\n", " \"\"\"\n", - " rnd_state.shuffle(state.unassigned)\n", + " rng.shuffle(state.unassigned)\n", "\n", " while len(state.unassigned) != 0:\n", " customer = state.unassigned.pop()\n", @@ -456,7 +456,7 @@ }, "outputs": [], "source": [ - "alns = ALNS(rnd.RandomState(SEED))\n", + "alns = ALNS(rnd.default_rng(SEED))\n", "\n", "alns.add_destroy_operator(random_removal)\n", "\n", @@ -472,10 +472,13 @@ }, "outputs": [], "source": [ + "num_iterations = 3000\n", "init = nearest_neighbor()\n", "select = RouletteWheel([25, 5, 1, 0], 0.8, 1, 1)\n", - "accept = RecordToRecordTravel.autofit(init.objective(), 0.02, 0, 9000)\n", - "stop = MaxRuntime(60)\n", + "accept = RecordToRecordTravel.autofit(\n", + " init.objective(), 0.02, 0, num_iterations\n", + ")\n", + "stop = MaxIterations(num_iterations)\n", "\n", "result = alns.iterate(init, select, accept, stop)" ] @@ -559,7 +562,7 @@ "MAX_STRING_SIZE = 12\n", "\n", "\n", - "def string_removal(state, rnd_state):\n", + "def string_removal(state, rng):\n", " \"\"\"\n", " Remove partial routes around a randomly chosen customer.\n", " \"\"\"\n", @@ -570,7 +573,7 @@ " max_string_removals = min(len(state.routes), MAX_STRING_REMOVALS)\n", "\n", " destroyed_routes = []\n", - " center = rnd_state.randint(1, data[\"dimension\"])\n", + " center = rng.integers(1, data[\"dimension\"])\n", "\n", " for customer in neighbors(center):\n", " if len(destroyed_routes) >= max_string_removals:\n", @@ -583,20 +586,20 @@ " if route in destroyed_routes:\n", " continue\n", "\n", - " customers = remove_string(route, customer, max_string_size, rnd_state)\n", + " customers = remove_string(route, customer, max_string_size, rng)\n", " destroyed.unassigned.extend(customers)\n", " destroyed_routes.append(route)\n", "\n", " return destroyed\n", "\n", "\n", - "def remove_string(route, cust, max_string_size, rnd_state):\n", + "def remove_string(route, cust, max_string_size, rng):\n", " \"\"\"\n", 
" Remove a string that constains the passed-in customer.\n", " \"\"\"\n", " # Find consecutive indices to remove that contain the customer\n", - " size = rnd_state.randint(1, min(len(route), max_string_size) + 1)\n", - " start = route.index(cust) - rnd_state.randint(size)\n", + " size = rng.integers(1, min(len(route), max_string_size) + 1)\n", + " start = route.index(cust) - rng.integers(size)\n", " idcs = [idx % len(route) for idx in range(start, start + size)]\n", "\n", " # Remove indices in descending order\n", @@ -616,7 +619,7 @@ }, "outputs": [], "source": [ - "alns = ALNS(rnd.RandomState(SEED))\n", + "alns = ALNS(rnd.default_rng(SEED))\n", "\n", "alns.add_destroy_operator(string_removal)\n", "\n", @@ -632,10 +635,13 @@ }, "outputs": [], "source": [ + "num_iterations = 3000\n", "init = nearest_neighbor()\n", "select = RouletteWheel([25, 5, 1, 0], 0.8, 1, 1)\n", - "accept = RecordToRecordTravel.autofit(init.objective(), 0.02, 0, 6000)\n", - "stop = MaxRuntime(60)\n", + "accept = RecordToRecordTravel.autofit(\n", + " init.objective(), 0.02, 0, num_iterations\n", + ")\n", + "stop = MaxIterations(num_iterations)\n", "\n", "result = alns.iterate(init, select, accept, stop)" ] @@ -696,18 +702,10 @@ "id": "7424ca3f-b4e7-459d-a307-be2d26426cdc", "metadata": {}, "source": [ - "In this notebook we implemented two heuristics for the CVRP, using the ALNS meta-heuristic framework. The first heuristic used a random customer destroy operator and we obtained a solution which is 6.8% worse than the best known solution. The second heuristic used a destroy operator which removes strings arround a randomly selected customer and we obtained a solution that is only 3% worse than the best known solution. \n", + "In this notebook we implemented two heuristics for the CVRP, using the ALNS meta-heuristic framework. The first heuristic used a random customer destroy operator and we obtained a solution which is 10% worse than the best known solution. 
The second heuristic used a destroy operator which removes strings around a randomly selected customer and we obtained a solution that is only 3% worse than the best known solution. \n", "\n", "This example shows that by constructing problem-specific operators, one can create even more powerful ALNS heuristics." ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a1cba864", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -718,7 +716,7 @@ "provenance": [] }, "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -732,7 +730,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/examples/cutting_stock_problem.ipynb b/examples/cutting_stock_problem.ipynb index f5146a0a..1c2cdf39 100644 --- a/examples/cutting_stock_problem.ipynb +++ b/examples/cutting_stock_problem.ipynb @@ -199,14 +199,14 @@ "metadata": {}, "outputs": [], "source": [ - "def random_removal(state, random_state):\n", + "def random_removal(state, rng):\n", " \"\"\"\n", " Iteratively removes randomly chosen beam assignments.\n", " \"\"\"\n", " state = state.copy()\n", "\n", " for _ in range(beams_to_remove(state.objective())):\n", - " idx = random_state.randint(state.objective())\n", + " idx = rng.integers(state.objective())\n", " state.unassigned.extend(state.assignments.pop(idx))\n", "\n", " return state" @@ -218,7 +218,7 @@ "metadata": {}, "outputs": [], "source": [ - "def worst_removal(state, random_state):\n", + "def worst_removal(state, rng):\n", " \"\"\"\n", " Removes beams in decreasing order of wastage, such that the\n", " poorest assignments are removed first.\n", @@ -250,12 +250,12 @@ "metadata": {}, "outputs": [], "source": [ - "def greedy_insert(state, random_state):\n", + "def greedy_insert(state, rng):\n", " \"\"\"\n", " Inserts the unassigned beams greedily into the 
first fitting\n", " beam. Shuffles the unassigned ordered beams before inserting.\n", " \"\"\"\n", - " random_state.shuffle(state.unassigned)\n", + " rng.shuffle(state.unassigned)\n", "\n", " while len(state.unassigned) != 0:\n", " beam = state.unassigned.pop(0)\n", @@ -276,7 +276,7 @@ "metadata": {}, "outputs": [], "source": [ - "def minimal_wastage(state, random_state):\n", + "def minimal_wastage(state, rng):\n", " \"\"\"\n", " For every unassigned ordered beam, the operator determines\n", " which beam would minimise that beam's waste once the ordered\n", @@ -317,10 +317,10 @@ "metadata": {}, "outputs": [], "source": [ - "rnd_state = rnd.RandomState(SEED)\n", + "rng = rnd.default_rng(SEED)\n", "\n", "state = CspState([], BEAMS.copy())\n", - "init_sol = greedy_insert(state, rnd_state)\n", + "init_sol = greedy_insert(state, rng)\n", "\n", "print(\"Initial solution has objective value:\", init_sol.objective())" ] @@ -347,7 +347,7 @@ "metadata": {}, "outputs": [], "source": [ - "alns = ALNS(rnd_state)\n", + "alns = ALNS(rng)\n", "\n", "alns.add_destroy_operator(random_removal)\n", "alns.add_destroy_operator(worst_removal)\n", @@ -469,7 +469,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/examples/permutation_flow_shop_problem.ipynb b/examples/permutation_flow_shop_problem.ipynb index 74ea58bc..fd57cc55 100644 --- a/examples/permutation_flow_shop_problem.ipynb +++ b/examples/permutation_flow_shop_problem.ipynb @@ -15,6 +15,7 @@ "import numpy.random as rnd\n", "import matplotlib.pyplot as plt\n", "\n", + "\n", "from alns import ALNS\n", "from alns.accept import SimulatedAnnealing\n", "from alns.select import AlphaUCB\n", @@ -152,7 +153,7 @@ " start = completion - DATA.processing_times\n", "\n", " # Plot each job using its start and completion time\n", - " cmap = plt.get_cmap(\"rainbow\", n_jobs)\n", + " cmap = plt.colormaps[\"rainbow\"].resampled(n_jobs)\n", 
" machines, length, start_job, job_colors = zip(\n", " *[\n", " (i, DATA.processing_times[i, j], start[i, j], cmap(j - 1))\n", @@ -169,6 +170,7 @@ " ax.set_xlabel(f\"Completion time\")\n", " ax.set_yticks(range(DATA.n_machines))\n", " ax.set_yticklabels(range(1, DATA.n_machines + 1))\n", + " ax.invert_yaxis()\n", "\n", " plt.show()" ] @@ -295,13 +297,13 @@ "metadata": {}, "outputs": [], "source": [ - "def random_removal(state: Solution, rnd_state, n_remove=2) -> Solution:\n", + "def random_removal(state: Solution, rng, n_remove=2) -> Solution:\n", " \"\"\"\n", " Randomly remove a number jobs from the solution.\n", " \"\"\"\n", " destroyed = deepcopy(state)\n", "\n", - " for job in rnd_state.choice(DATA.n_jobs, n_remove, replace=False):\n", + " for job in rng.choice(DATA.n_jobs, n_remove, replace=False):\n", " destroyed.unassigned.append(job)\n", " destroyed.schedule.remove(job)\n", "\n", @@ -315,13 +317,13 @@ "metadata": {}, "outputs": [], "source": [ - "def adjacent_removal(state: Solution, rnd_state, n_remove=2) -> Solution:\n", + "def adjacent_removal(state: Solution, rng, n_remove=2) -> Solution:\n", " \"\"\"\n", " Randomly remove a number adjacent jobs from the solution.\n", " \"\"\"\n", " destroyed = deepcopy(state)\n", "\n", - " start = rnd_state.randint(DATA.n_jobs - n_remove)\n", + " start = rng.integers(DATA.n_jobs - n_remove)\n", " jobs_to_remove = [state.schedule[start + idx] for idx in range(n_remove)]\n", "\n", " for job in jobs_to_remove:\n", @@ -347,7 +349,7 @@ "metadata": {}, "outputs": [], "source": [ - "def greedy_repair(state: Solution, rnd_state, **kwargs) -> Solution:\n", + "def greedy_repair(state: Solution, rng, **kwargs) -> Solution:\n", " \"\"\"\n", " Greedily insert the unassigned jobs back into the schedule. 
The jobs are\n", " inserted in non-decreasing order of total processing times.\n", @@ -432,7 +434,7 @@ "metadata": {}, "outputs": [], "source": [ - "alns = ALNS(rnd.RandomState(SEED))\n", + "alns = ALNS(rnd.default_rng(SEED))\n", "\n", "alns.add_destroy_operator(random_removal)\n", "alns.add_destroy_operator(adjacent_removal)\n", @@ -558,12 +560,12 @@ "metadata": {}, "outputs": [], "source": [ - "def greedy_repair_then_local_search(state: Solution, rnd_state, **kwargs):\n", + "def greedy_repair_then_local_search(state: Solution, rng, **kwargs):\n", " \"\"\"\n", " Greedily insert the unassigned jobs back into the schedule (using NEH\n", " ordering). Apply local search afterwards.\n", " \"\"\"\n", - " state = greedy_repair(state, rnd_state, **kwargs)\n", + " state = greedy_repair(state, rng, **kwargs)\n", " local_search(state, **kwargs)\n", " return state\n", "\n", @@ -599,7 +601,7 @@ "metadata": {}, "outputs": [], "source": [ - "alns = ALNS(rnd.RandomState(SEED))\n", + "alns = ALNS(rnd.default_rng(SEED))\n", "\n", "alns.add_destroy_operator(random_removal)\n", "alns.add_destroy_operator(adjacent_removal)\n", @@ -684,7 +686,7 @@ "metadata": {}, "outputs": [], "source": [ - "alns = ALNS(rnd.RandomState(SEED))\n", + "alns = ALNS(rnd.default_rng(SEED))\n", "\n", "alns.add_destroy_operator(random_removal)\n", "alns.add_destroy_operator(adjacent_removal)\n", @@ -736,7 +738,7 @@ "id": "d93bf45e-90fe-41ad-b78e-7aa7fdbe4bf6", "metadata": {}, "source": [ - "From this simple experiment, it looks like removing between 2 to 6 jobs works best. Our experiment is clearly too simple to draw serious conclusions from, but this example can be easily extended to include more instances and also more random seeds." + "From this simple experiment, it looks like removing 3 works best. Our experiment is clearly too simple to draw serious conclusions from, but this example can be easily extended to include more instances and also more random seeds." 
] }, { @@ -745,7 +747,7 @@ "metadata": {}, "source": [ "## Conclusions\n", - "In this notebook, we implemented several variants of adaptive large neighborhood search heuristic to solve the permutation flow shop problem. We obtained a solution that is only 1.5% worse than the best known solution. Furthermore, we showed several advanced features in ALNS, including:\n", + "In this notebook, we implemented several variants of adaptive large neighborhood search heuristic to solve the permutation flow shop problem. We obtained a solution that is only 1-2% worse than the best known solution. Furthermore, we showed several advanced features in ALNS, including:\n", "\n", "* Autofitting acceptance criteria\n", "* Adding local search to repair operators\n", @@ -770,7 +772,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/examples/resource_constrained_project_scheduling_problem.ipynb b/examples/resource_constrained_project_scheduling_problem.ipynb index 7158f43c..809afd2e 100644 --- a/examples/resource_constrained_project_scheduling_problem.ipynb +++ b/examples/resource_constrained_project_scheduling_problem.ipynb @@ -14,7 +14,6 @@ "import re\n", "from dataclasses import dataclass\n", "from functools import lru_cache\n", - "from typing import List, Tuple\n", "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", @@ -49,7 +48,7 @@ }, "outputs": [], "source": [ - "SEED = 5432" + "SEED = 42" ] }, { @@ -77,8 +76,7 @@ "In this notebook, we solve an instance of the RCPSP using ALNS.\n", "In particular, we solve instance `j9041_6` of the [PSPLib](http://www.om-db.wi.tum.de/psplib/library.html) benchmark suite.\n", "This instance consists of 90 jobs, and four resources.\n", - "The optimal makespan of this instance is known to be between 123 and 135.\n", - "We find a solution with a makespan of 141, just 4% above the best known solution to this instance.\n" + "The 
optimal makespan of this instance is known to be between 123 and 135." ] }, { @@ -110,8 +108,8 @@ " num_resources: int\n", "\n", " duration: np.ndarray # job durations\n", - " successors: List[List[int]] # job successors\n", - " predecessors: List[List[int]] # job predecessors\n", + " successors: list[list[int]] # job successors\n", + " predecessors: list[list[int]] # job predecessors\n", " needs: np.ndarray # job resource needs\n", " resources: np.ndarray # resource capacities\n", "\n", @@ -128,7 +126,7 @@ "\n", " @property\n", " @lru_cache(1)\n", - " def all_predecessors(self) -> List[List[int]]:\n", + " def all_predecessors(self) -> list[list[int]]:\n", " pred = [set() for _ in range(self.num_jobs)]\n", "\n", " for job, pre in enumerate(self.predecessors):\n", @@ -139,7 +137,7 @@ "\n", " @property\n", " @lru_cache(1)\n", - " def all_successors(self) -> List[List[int]]:\n", + " def all_successors(self) -> list[list[int]]:\n", " succ = [set() for _ in range(self.num_jobs)]\n", "\n", " for job, suc in zip(\n", @@ -262,7 +260,7 @@ "outputs": [], "source": [ "@lru_cache(32)\n", - "def schedule(jobs: Tuple[int]) -> Tuple[np.ndarray, np.ndarray]:\n", + "def schedule(jobs: tuple[int]) -> tuple[np.ndarray, np.ndarray]:\n", " \"\"\"\n", " Computes a serial schedule of the given list of jobs. See Figure 1\n", " in Fleszar and Hindi (2004) for the algorithm. Returns the schedule,\n", @@ -315,7 +313,7 @@ " topologically).\n", " \"\"\"\n", "\n", - " def __init__(self, jobs: List[int]):\n", + " def __init__(self, jobs: list[int]):\n", " self.jobs = jobs\n", "\n", " def __copy__(self):\n", @@ -325,9 +323,9 @@ " def indices(self) -> np.ndarray:\n", " \"\"\"\n", " Returns a mapping from job -> idx in the schedule. 
Unscheduled\n", - " jobs have index num_jobs.\n", + " jobs have index ``len(self.jobs)``.\n", " \"\"\"\n", - " indices = np.full(instance.num_jobs, instance.num_jobs, dtype=int)\n", + " indices = np.full(instance.num_jobs, len(self.jobs), dtype=int)\n", "\n", " for idx, job in enumerate(self.jobs):\n", " indices[job] = idx\n", @@ -335,7 +333,7 @@ " return indices\n", "\n", " @property\n", - " def unscheduled(self) -> List[int]:\n", + " def unscheduled(self) -> list[int]:\n", " \"\"\"\n", " All jobs that are not currently scheduled, in topological order.\n", " \"\"\"\n", @@ -409,7 +407,7 @@ }, "outputs": [], "source": [ - "def most_mobile_removal(state, rnd_state):\n", + "def most_mobile_removal(state, rng):\n", " \"\"\"\n", " This operator unschedules those jobs that are most mobile, that is, those\n", " that can be 'moved' most within the schedule, as determined by their\n", @@ -445,7 +443,7 @@ " mobility[[instance.first_job, instance.last_job]] = 0\n", " p = mobility / mobility.sum()\n", "\n", - " for job in rnd_state.choice(instance.num_jobs, Q, replace=False, p=p):\n", + " for job in rng.choice(instance.num_jobs, Q, replace=False, p=p):\n", " state.jobs.remove(job)\n", "\n", " return state" @@ -461,7 +459,7 @@ }, "outputs": [], "source": [ - "def non_peak_removal(state: RcpspState, rnd_state):\n", + "def non_peak_removal(state: RcpspState, rng):\n", " \"\"\"\n", " Removes up to Q jobs that are scheduled in periods with limited resource\n", " use. 
Those jobs might be grouped together better when they are rescheduled.\n", @@ -489,7 +487,7 @@ " if np.all((high_util <= start[job]) | (high_util >= end[job]))\n", " ]\n", "\n", - " for job in rnd_state.choice(jobs, min(len(jobs), Q), replace=False):\n", + " for job in rng.choice(jobs, min(len(jobs), Q), replace=False):\n", " state.jobs.remove(job)\n", "\n", " return state" @@ -505,12 +503,12 @@ }, "outputs": [], "source": [ - "def segment_removal(state, rnd_state):\n", + "def segment_removal(state, rng):\n", " \"\"\"\n", " Removes a whole segment of jobs from the current solution.\n", " \"\"\"\n", " state = copy.copy(state)\n", - " offset = rnd_state.randint(1, instance.num_jobs - Q)\n", + " offset = rng.integers(1, instance.num_jobs - Q)\n", "\n", " del state.jobs[offset : offset + Q]\n", "\n", @@ -591,7 +589,7 @@ }, "outputs": [], "source": [ - "def random_insert(state, rnd_state):\n", + "def random_insert(state, rng):\n", " \"\"\"\n", " Randomly inserts jobs into the schedule. The resulting solution state\n", " is guaranteed to be feasible.\n", @@ -606,7 +604,7 @@ " ll = np.max(indices[preds[job]], initial=-1) + 1\n", " rl = np.min(indices[succs[job]], initial=len(state.jobs))\n", "\n", - " idx = rnd_state.randint(ll, rl) if ll < rl else ll\n", + " idx = rng.integers(ll, rl) if ll < rl else ll\n", " state.jobs.insert(idx, job)\n", "\n", " indices[indices >= idx] += 1\n", @@ -680,7 +678,7 @@ }, "outputs": [], "source": [ - "rnd_state = rnd.RandomState(SEED)" + "rng = rnd.default_rng(SEED)" ] }, { @@ -693,7 +691,7 @@ }, "outputs": [], "source": [ - "alns = ALNS(rnd_state)\n", + "alns = ALNS(rng)\n", "\n", "alns.add_destroy_operator(most_mobile_removal)\n", "alns.add_destroy_operator(non_peak_removal)\n", @@ -760,13 +758,13 @@ "## Conclusion\n", "\n", "In this notebook we solved a challenging instance of the resource-constrained project scheduling problem, using several operators and enhancement techniques from the literature.\n", - "The resulting heuristic 
solution is competitive with other heuristics for this problem: the best known solution achieves a makespan of 135, and we find 141, just 4% higher." + "The resulting heuristic solution is competitive with other heuristics for this problem: the best known solution achieves a makespan of 135, and we find 142, just 5% higher." ] } ], "metadata": { "kernelspec": { - "display_name": ".venv", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -780,7 +778,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/examples/travelling_salesman_problem.ipynb b/examples/travelling_salesman_problem.ipynb index 6a0d7a77..209e6a48 100644 --- a/examples/travelling_salesman_problem.ipynb +++ b/examples/travelling_salesman_problem.ipynb @@ -193,7 +193,7 @@ "metadata": {}, "outputs": [], "source": [ - "def worst_removal(current, rnd_state):\n", + "def worst_removal(current, rng):\n", " \"\"\"\n", " Worst removal iteratively removes the 'worst' edges, that is,\n", " those edges that have the largest distance.\n", @@ -216,14 +216,14 @@ "metadata": {}, "outputs": [], "source": [ - "def path_removal(current, rnd_state):\n", + "def path_removal(current, rng):\n", " \"\"\"\n", " Removes an entire consecutive sub-path, that is, a series of\n", " contiguous edges.\n", " \"\"\"\n", " destroyed = copy.deepcopy(current)\n", "\n", - " node_idx = rnd_state.choice(len(destroyed.nodes))\n", + " node_idx = rng.choice(len(destroyed.nodes))\n", " node = destroyed.nodes[node_idx]\n", "\n", " for _ in range(edges_to_remove(current)):\n", @@ -238,13 +238,13 @@ "metadata": {}, "outputs": [], "source": [ - "def random_removal(current, rnd_state):\n", + "def random_removal(current, rng):\n", " \"\"\"\n", " Random removal iteratively removes random edges.\n", " \"\"\"\n", " destroyed = copy.deepcopy(current)\n", "\n", - " for idx in rnd_state.choice(\n", + " for idx in 
rng.choice(\n", " len(destroyed.nodes), edges_to_remove(current), replace=False\n", " ):\n", " del destroyed.edges[destroyed.nodes[idx]]\n", @@ -292,7 +292,7 @@ "metadata": {}, "outputs": [], "source": [ - "def greedy_repair(current, rnd_state):\n", + "def greedy_repair(current, rng):\n", " \"\"\"\n", " Greedily repairs a tour, stitching up nodes that are not departed\n", " with those not visited.\n", @@ -301,7 +301,7 @@ "\n", " # This kind of randomness ensures we do not cycle between the same\n", " # destroy and repair steps every time.\n", - " shuffled_idcs = rnd_state.permutation(len(current.nodes))\n", + " shuffled_idcs = rng.permutation(len(current.nodes))\n", " nodes = [current.nodes[idx] for idx in shuffled_idcs]\n", "\n", " while len(current.edges) != len(current.nodes):\n", @@ -343,10 +343,10 @@ "metadata": {}, "outputs": [], "source": [ - "random_state = rnd.RandomState(SEED)\n", + "rng = rnd.default_rng(SEED)\n", "state = TspState(CITIES, {})\n", "\n", - "init_sol = greedy_repair(state, random_state)\n", + "init_sol = greedy_repair(state, rng)\n", "print(f\"Initial solution objective is {init_sol.objective()}.\")" ] }, @@ -374,7 +374,7 @@ "metadata": {}, "outputs": [], "source": [ - "alns = ALNS(random_state)\n", + "alns = ALNS(rnd.default_rng(SEED))\n", "\n", "alns.add_destroy_operator(random_removal)\n", "alns.add_destroy_operator(path_removal)\n", @@ -448,7 +448,7 @@ "source": [ "## Conclusions\n", "\n", - "In the code above we implemented a very simple heuristic for the TSP, using the ALNS meta-heuristic framework. We did not tinker too much with the various hyperparameters available on the ALNS implementation, but even for these relatively basic heuristic methods and workflow we find a very good result - just 2% worse than the optimal tour.\n", + "In the code above we implemented a very simple heuristic for the TSP, using the ALNS meta-heuristic framework. 
We did not tinker too much with the various hyperparameters available on the ALNS implementation, but even for these relatively basic heuristic methods and workflow we find a very good result - just a few percent worse than the optimal tour.\n", "\n", "This notebook showcases how the ALNS library may be put to use to construct powerful, efficient heuristic pipelines from simple, locally greedy operators." ] @@ -470,7 +470,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.9" + "version": "3.9.19" } }, "nbformat": 4,