diff --git a/pypesto/profile/options.py b/pypesto/profile/options.py
index f2c9dc42a..bcc3b805e 100644
--- a/pypesto/profile/options.py
+++ b/pypesto/profile/options.py
@@ -32,9 +32,9 @@ class ProfileOptions(dict):
     reg_order:
         Maximum degree of regression polynomial used in regression
         based adaptive profile points proposal.
-    magic_factor_obj_value:
-        There is this magic factor in the old profiling code which slows down
-        profiling at small ratios (must be >= 0 and < 1).
+    adaptive_target_scaling_factor:
+        The scaling factor of the next_obj_target in next guess generation.
+        Larger values result in a larger next_guess step size (must be >= 1).
     whole_path:
         Whether to profile the whole bounds or only till we get below the
         ratio.
@@ -44,13 +44,13 @@ def __init__(
         self,
         default_step_size: float = 0.01,
         min_step_size: float = 0.001,
-        max_step_size: float = 1.0,
+        max_step_size: float = 0.1,
         step_size_factor: float = 1.25,
         delta_ratio_max: float = 0.1,
         ratio_min: float = 0.145,
         reg_points: int = 10,
         reg_order: int = 4,
-        magic_factor_obj_value: float = 0.5,
+        adaptive_target_scaling_factor: float = 1.5,
         whole_path: bool = False,
     ):
         super().__init__()
@@ -63,7 +63,7 @@ def __init__(
         self.delta_ratio_max = delta_ratio_max
         self.reg_points = reg_points
         self.reg_order = reg_order
-        self.magic_factor_obj_value = magic_factor_obj_value
+        self.adaptive_target_scaling_factor = adaptive_target_scaling_factor
         self.whole_path = whole_path

         self.validate()
@@ -112,5 +112,5 @@ def validate(self):
         if self.default_step_size < self.min_step_size:
             raise ValueError("default_step_size must be >= min_step_size.")

-        if self.magic_factor_obj_value < 0 or self.magic_factor_obj_value >= 1:
-            raise ValueError("magic_factor_obj_value must be >= 0 and < 1.")
+        if self.adaptive_target_scaling_factor < 1:
+            raise ValueError("adaptive_target_scaling_factor must be >= 1.")
diff --git a/pypesto/profile/profile.py b/pypesto/profile/profile.py
index e4e124964..2df0b4f99 100644
--- a/pypesto/profile/profile.py
+++ b/pypesto/profile/profile.py
@@ -24,7 +24,7 @@ def parameter_profile(
     profile_index: Iterable[int] = None,
     profile_list: int = None,
     result_index: int = 0,
-    next_guess_method: Union[Callable, str] = "adaptive_step_regression",
+    next_guess_method: Union[Callable, str] = "adaptive_step_order_1",
     profile_options: ProfileOptions = None,
     progress_bar: bool = None,
     filename: Union[str, Callable, None] = None,
@@ -93,7 +93,9 @@ def parameter_profile(
     profile_options = ProfileOptions.create_instance(profile_options)
     profile_options.validate()

-    # create a function handle that will be called later to get the next point
+    # Create a function handle that will be called later to get the next point.
+    # This function is used to generate the initial point of each
+    # optimization step during profiling in `walk_along_profile.py`.
     if isinstance(next_guess_method, str):

         def create_next_guess(
@@ -104,6 +106,8 @@ def create_next_guess(
             current_profile_,
             problem_,
             global_opt_,
+            min_step_increase_factor_,
+            max_step_reduce_factor_,
         ):
             return next_guess(
                 x,
@@ -114,6 +118,8 @@ def create_next_guess(
                 current_profile_,
                 problem_,
                 global_opt_,
+                min_step_increase_factor_,
+                max_step_reduce_factor_,
             )

     elif callable(next_guess_method):
diff --git a/pypesto/profile/profile_next_guess.py b/pypesto/profile/profile_next_guess.py
index fd523e062..dd05b6b8b 100644
--- a/pypesto/profile/profile_next_guess.py
+++ b/pypesto/profile/profile_next_guess.py
@@ -1,3 +1,4 @@
+import logging
 from typing import Callable, Literal

 import numpy as np
@@ -6,6 +7,8 @@
 from ..result import ProfilerResult
 from .options import ProfileOptions

+logger = logging.getLogger(__name__)
+
 __all__ = ["next_guess", "fixed_step", "adaptive_step"]
@@ -23,6 +26,8 @@ def next_guess(
     current_profile: ProfilerResult,
     problem: Problem,
     global_opt: float,
+    min_step_increase_factor: float = 1.0,
+    max_step_reduce_factor: float = 1.0,
 ) -> np.ndarray:
     """
     Create the next initial guess for the optimizer.
@@ -53,17 +58,22 @@ def next_guess(
         The problem to be solved.
     global_opt:
         Log-posterior value of the global optimum.
+    min_step_increase_factor:
+        Factor to increase the minimal step size bound. Used only in
+        :func:`adaptive_step`.
+    max_step_reduce_factor:
+        Factor to reduce the maximal step size bound. Used only in
+        :func:`adaptive_step`.

     Returns
     -------
     The next initial guess as base for the next profile point.
     """
     if update_type == "fixed_step":
-        return fixed_step(
+        next_initial_guess = fixed_step(
             x, par_index, par_direction, profile_options, problem
         )
-
-    if update_type == "adaptive_step_order_0":
+    elif update_type == "adaptive_step_order_0":
         order = 0
     elif update_type == "adaptive_step_order_1":
         order = 1
@@ -73,18 +83,28 @@ def next_guess(
         raise ValueError(
             f"Unsupported `update_type` {update_type} for `next_guess`."
         )
+    if update_type != "fixed_step":
+        next_initial_guess = adaptive_step(
+            x,
+            par_index,
+            par_direction,
+            profile_options,
+            current_profile,
+            problem,
+            global_opt,
+            order,
+            min_step_increase_factor,
+            max_step_reduce_factor,
+        )

-    return adaptive_step(
-        x,
-        par_index,
-        par_direction,
-        profile_options,
-        current_profile,
-        problem,
-        global_opt,
-        order,
+    logger.info(
+        f"Next guess for {problem.x_names[par_index]} in direction "
+        f"{par_direction} is {next_initial_guess[par_index]:.4f}. Step size: "
+        f"{next_initial_guess[par_index] - x[par_index]:.4f}."
     )

+    return next_initial_guess
+

 def fixed_step(
     x: np.ndarray,
@@ -138,6 +158,8 @@ def adaptive_step(
     problem: Problem,
     global_opt: float,
     order: int = 1,
+    min_step_increase_factor: float = 1.0,
+    max_step_reduce_factor: float = 1.0,
 ) -> np.ndarray:
     """Group of more complex methods for point proposal.
@@ -168,6 +190,10 @@ def adaptive_step(
         * ``1``: the last two points are used to extrapolate all parameters
         * ``np.nan``: indicates that a more complex regression should be used
           as determined by :attr:`pypesto.profile.ProfileOptions.reg_order`.
+    min_step_increase_factor:
+        Factor to increase the minimal step size bound.
+    max_step_reduce_factor:
+        Factor to reduce the maximal step size bound.

     Returns
     -------
@@ -177,9 +203,9 @@ def adaptive_step(

     # restrict step proposal to minimum and maximum step size
     def clip_to_minmax(step_size_proposal):
-        return np.clip(
-            step_size_proposal, options.min_step_size, options.max_step_size
-        )
+        min_step_size = options.min_step_size * min_step_increase_factor
+        max_step_size = options.max_step_size * max_step_reduce_factor
+        return np.clip(step_size_proposal, min_step_size, max_step_size)

     # restrict step proposal to bounds
     def clip_to_bounds(step_proposal):
@@ -193,6 +219,7 @@ def clip_to_bounds(step_proposal):
         delta_x_dir,
         reg_par,
         delta_obj_value,
+        last_delta_fval,
     ) = handle_profile_history(
         x,
         par_index,
@@ -206,15 +233,18 @@ def clip_to_bounds(step_proposal):

     # check whether we must make a minimum step anyway, since we're close to
     # the next bound
-    min_delta_x = x[par_index] + par_direction * options.min_step_size
+    min_delta_x = (
+        x[par_index]
+        + par_direction * options.min_step_size * min_step_increase_factor
+    )

     if par_direction == -1 and (min_delta_x < problem.lb_full[par_index]):
-        step_length = problem.lb_full[par_index] - x[par_index]
-        return x + step_length * delta_x_dir
+        step_length = abs(problem.lb_full[par_index] - x[par_index])
+        return clip_to_bounds(x + step_length * delta_x_dir)

     if par_direction == 1 and (min_delta_x > problem.ub_full[par_index]):
-        step_length = problem.ub_full[par_index] - x[par_index]
-        return x + step_length * delta_x_dir
+        step_length = abs(problem.ub_full[par_index] - x[par_index])
+        return clip_to_bounds(x + step_length * delta_x_dir)

     # parameter extrapolation function
     n_profile_points = len(current_profile.fval_path)
@@ -241,28 +271,58 @@ def par_extrapol(step_length):
                     x[par_index] + step_length * par_direction
                 )
             )
+            # Define a trust region for the step size in all directions
+            # to avoid overshooting
+            x_step = np.clip(
+                x_step, x - options.max_step_size, x + options.max_step_size
+            )
+
             return clip_to_bounds(x_step)
     else:
         # if not, we do simple extrapolation
         def par_extrapol(step_length):
-            x_step = x + step_length * delta_x_dir
-            return clip_to_bounds(x_step)
+            # Define a trust region for the step size in all directions
+            # to avoid overshooting
+            step_in_x = np.clip(
+                step_length * delta_x_dir,
+                -options.max_step_size,
+                options.max_step_size,
+            )
+            x_stepped = x + step_in_x
+            return clip_to_bounds(x_stepped)

     # compute proposal
     next_x = par_extrapol(step_size_guess)

     # next start point has to be searched
     # compute the next objective value which we aim for
-    next_obj_target = (
+    high_next_obj_target = (
         -np.log(1.0 - options.delta_ratio_max)
-        + options.magic_factor_obj_value * delta_obj_value
+        + options.adaptive_target_scaling_factor * abs(last_delta_fval)
+        + current_profile.fval_path[-1]
+    )
+    low_next_obj_target = (
+        +np.log(1.0 - options.delta_ratio_max)
+        - options.adaptive_target_scaling_factor * abs(last_delta_fval)
         + current_profile.fval_path[-1]
     )

+    # Clip both targets to within 0.5 * delta_obj_value of the last fval
+    # to avoid overshooting
+    if delta_obj_value != 0:
+        high_next_obj_target = min(
+            high_next_obj_target,
+            current_profile.fval_path[-1] + 0.5 * delta_obj_value,
+        )
+        low_next_obj_target = max(
+            low_next_obj_target,
+            current_profile.fval_path[-1] - 0.5 * delta_obj_value,
+        )
+
     # compute objective at the guessed point
     problem.fix_parameters(par_index, next_x[par_index])
     next_obj = problem.objective(problem.get_reduced_vector(next_x))
+    current_obj = current_profile.fval_path[-1]

     # iterate until good step size is found
     return do_line_search(
         next_x,
         step_size_guess,
         par_extrapol,
         next_obj,
-        next_obj_target,
+        current_obj,
+        high_next_obj_target,
+        low_next_obj_target,
         clip_to_minmax,
         clip_to_bounds,
         par_index,
         problem,
         options,
+        min_step_increase_factor,
+        max_step_reduce_factor,
     )
@@ -304,6 +368,8 @@ def handle_profile_history(
         The regression polynomial for profile extrapolation.
     delta_obj_value:
         The difference of the objective function value between the last point
         and `global_opt`.
+    last_delta_fval:
+        The difference of the objective function value between the last two points.
     """
     n_profile_points = len(current_profile.fval_path)
@@ -313,32 +379,53 @@ def handle_profile_history(
     reg_par = None

     # Is this the first step along this profile? If so, try a simple step.
-    if n_profile_points == 1:
+    # Do the same if the last two points are too close, to avoid division by
+    # very small numbers.
+    if n_profile_points == 1 or np.isclose(
+        current_profile.x_path[par_index, -1],
+        current_profile.x_path[par_index, -2],
+    ):
         # try to use the default step size
         step_size_guess = options.default_step_size
         delta_obj_value = 0.0
+        last_delta_fval = 0.0
     else:
         # try to reuse the previous step size
-        step_size_guess = np.abs(
+        last_delta_x_par_index = np.abs(
             current_profile.x_path[par_index, -1]
             - current_profile.x_path[par_index, -2]
        )
+        # Bound the step size guess by the default step size ...
+        step_size_guess = min(
+            last_delta_x_par_index, options.default_step_size
+        )
+        # ... but never go below the minimum step size
+        step_size_guess = max(step_size_guess, options.min_step_size)
+
         delta_obj_value = current_profile.fval_path[-1] - global_opt
+        last_delta_fval = (
+            current_profile.fval_path[-1] - current_profile.fval_path[-2]
+        )

         if order == 1 or (np.isnan(order) and n_profile_points < 3):
             # set the update direction (extrapolate with order 1)
             last_delta_x = (
                 current_profile.x_path[:, -1] - current_profile.x_path[:, -2]
             )
-            delta_x_dir = last_delta_x / step_size_guess
+            delta_x_dir = last_delta_x / last_delta_x_par_index
         elif np.isnan(order):
             # compute the regression polynomial for parameter extrapolation
             reg_par = get_reg_polynomial(
                 par_index, current_profile, problem, options
             )

-    return step_size_guess, delta_x_dir, reg_par, delta_obj_value
+    return (
+        step_size_guess,
+        delta_x_dir,
+        reg_par,
+        delta_obj_value,
+        last_delta_fval,
+    )


 def get_reg_polynomial(
@@ -395,12 +482,16 @@ def do_line_search(
     step_size_guess: float,
     par_extrapol: Callable,
     next_obj: float,
-    next_obj_target: float,
+    current_obj: float,
+    high_next_obj_target: float,
+    low_next_obj_target: float,
     clip_to_minmax: Callable,
     clip_to_bounds: Callable,
     par_index: int,
     problem: Problem,
     options: ProfileOptions,
+    min_step_increase_factor: float,
+    max_step_reduce_factor: float,
 ) -> np.ndarray:
     """Perform the line search.
@@ -429,14 +520,29 @@ def do_line_search(
         The parameter estimation problem.
     options:
         Profile likelihood options.
+    min_step_increase_factor:
+        Factor to increase the minimal step size bound.
+    max_step_reduce_factor:
+        Factor to reduce the maximal step size bound.

     Returns
     -------
-    Parameter vector that is expected to yield the objective function value
-    closest to `next_obj_target`.
+    Parameter vector that is expected to yield an objective function value
+    between `low_next_obj_target` and `high_next_obj_target`.
     """
-    # Was the initial step too big or too small?
-    direction = "decrease" if next_obj_target < next_obj else "increase"
+    decreasing_to_low_target = False
+    decreasing_to_high_target = False
+
+    # Determine the direction of the step
+    if low_next_obj_target < next_obj < high_next_obj_target:
+        direction = "increase"
+    elif next_obj <= low_next_obj_target:
+        direction = "decrease"
+        decreasing_to_low_target = True
+    elif next_obj >= high_next_obj_target:
+        direction = "decrease"
+        decreasing_to_high_target = True
+
     if direction == "increase":
         adapt_factor = options.step_size_factor
     else:
@@ -452,12 +558,14 @@ def do_line_search(
         # Check if we hit the bounds
         if (
             direction == "decrease"
-            and step_size_guess == options.min_step_size
+            and step_size_guess
+            == options.min_step_size * min_step_increase_factor
         ):
             return next_x
         if (
             direction == "increase"
-            and step_size_guess == options.max_step_size
+            and step_size_guess
+            == options.max_step_size * max_step_reduce_factor
         ):
             return next_x
@@ -467,11 +575,22 @@ def do_line_search(
         next_obj = problem.objective(problem.get_reduced_vector(next_x))

         # check for root crossing and compute correct step size in case
-        if (direction == "decrease" and next_obj_target >= next_obj) or (
-            direction == "increase" and next_obj_target <= next_obj
+        if (direction == "increase" and next_obj > high_next_obj_target) or (
+            direction == "decrease"
+            and next_obj < high_next_obj_target
+            and decreasing_to_high_target
+        ):
+            return next_x_interpolate(
+                next_obj, last_obj, next_x, last_x, high_next_obj_target
+            )
+
+        if (direction == "increase" and next_obj < low_next_obj_target) or (
+            direction == "decrease"
+            and next_obj > low_next_obj_target
+            and decreasing_to_low_target
         ):
             return next_x_interpolate(
-                next_obj, last_obj, next_x, last_x, next_obj_target
+                next_obj, last_obj, next_x, last_x, low_next_obj_target
             )
diff --git a/pypesto/profile/util.py b/pypesto/profile/util.py
index 6a87403f8..3ea7a0d00 100644
--- a/pypesto/profile/util.py
+++ b/pypesto/profile/util.py
@@ -189,6 +189,7 @@ def fill_profile_list(
             gradnorm_path=np.array([gradnorm]),
             exitflag_path=np.array([optimizer_result["exitflag"]]),
             time_path=np.array([0.0]),
+            color_path=np.array([[1, 0, 0, 1]]),
             time_total=0.0,
             n_fval=0,
             n_grad=0,
diff --git a/pypesto/profile/walk_along_profile.py b/pypesto/profile/walk_along_profile.py
index c4f610001..0478c0dc0 100644
--- a/pypesto/profile/walk_along_profile.py
+++ b/pypesto/profile/walk_along_profile.py
@@ -63,6 +63,7 @@ def walk_along_profile(
     while True:
         # get current position on the profile path
         x_now = current_profile.x_path[:, -1]
+        color_now = current_profile.color_path[-1]

         # check if the next profile point needs to be computed
         # ... check bounds
@@ -78,26 +79,164 @@ def walk_along_profile(
         ):
             break

-        # compute the new start point for optimization
-        x_next = create_next_guess(
-            x_now,
-            i_par,
-            par_direction,
-            options,
-            current_profile,
-            problem,
-            global_opt,
-        )
+        optimization_successful = False
+        max_step_reduce_factor = 1.0
+
+        while not optimization_successful:
+            # Check that max_step_size is not reduced below min_step_size
+            if (
+                options.max_step_size * max_step_reduce_factor
+                < options.min_step_size
+            ):
+                logger.warning(
+                    "Max step size reduced below min step size. "
+                    "Setting a lower min step size can help avoid this issue."
+                )
+                break
+
+            # compute the new start point for optimization
+            x_next = create_next_guess(
+                x_now,
+                i_par,
+                par_direction,
+                options,
+                current_profile,
+                problem,
+                global_opt,
+                1.0,
+                max_step_reduce_factor,
+            )
+
+            # fix current profiling parameter to current value and set start point
+            problem.fix_parameters(i_par, x_next[i_par])
+            startpoint = x_next[problem.x_free_indices]
+
+            if startpoint.size > 0:
+                optimizer_result = optimizer.minimize(
+                    problem=problem,
+                    x0=startpoint,
+                    id=str(0),
+                    optimize_options=OptimizeOptions(
+                        allow_failed_starts=False
+                    ),
+                )
+
+                if np.isfinite(optimizer_result.fval):
+                    optimization_successful = True
+                    if max_step_reduce_factor == 1.0:
+                        # The color of the point is set to black if no changes were made
+                        color_next = np.array([0, 0, 0, 1])
+                    else:
+                        # The color of the point is set to red if the max_step_size was reduced
+                        color_next = np.array([1, 0, 0, 1])
+                else:
+                    max_step_reduce_factor *= 0.5
+                    logger.warning(
+                        f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed. "
+                        f"Reducing max_step_size to {options.max_step_size * max_step_reduce_factor}."
+                    )
+            else:
+                # if too many parameters are fixed, there is nothing to do ...
+                fval = problem.objective(np.array([]))
+                optimizer_result = OptimizerResult(
+                    id="0",
+                    x=np.array([]),
+                    fval=fval,
+                    n_fval=0,
+                    n_grad=0,
+                    n_res=0,
+                    n_hess=0,
+                    n_sres=0,
+                    x0=np.array([]),
+                    fval0=fval,
+                    time=0,
+                )
+                optimizer_result.update_to_full(problem=problem)
+                optimization_successful = True
+                color_next = np.concatenate((color_now[:3], [0.3]))
+
+        if not optimization_successful:
+            # Could not optimize successfully by reducing max_step_size;
+            # try to optimize by increasing min_step_size instead.
+            logger.warning(
+                f"Failed to optimize at {problem.x_names[i_par]}={x_next[i_par]} "
+                "after reducing max_step_size. Trying to increase min_step_size."
+            )
+            min_step_increase_factor = 1.25
+            while not optimization_successful:
+                # Check that min_step_size is not increased above max_step_size
+                if (
+                    options.min_step_size * min_step_increase_factor
+                    > options.max_step_size
+                ):
+                    logger.warning(
+                        "Min step size increased above max step size. "
+                        "Setting a higher max step size can help avoid this issue."
+                    )
+                    break

-        # fix current profiling parameter to current value and set start point
-        problem.fix_parameters(i_par, x_next[i_par])
-        startpoint = x_next[problem.x_free_indices]
+                # compute the new start point for optimization
+                x_next = create_next_guess(
+                    x_now,
+                    i_par,
+                    par_direction,
+                    options,
+                    current_profile,
+                    problem,
+                    global_opt,
+                    min_step_increase_factor,
+                    1.0,
+                )
+
+                # fix current profiling parameter to current value and set start point
+                problem.fix_parameters(i_par, x_next[i_par])
+                startpoint = x_next[problem.x_free_indices]
+
+                optimizer_result = optimizer.minimize(
+                    problem=problem,
+                    x0=startpoint,
+                    id=str(0),
+                    optimize_options=OptimizeOptions(allow_failed_starts=False),
+                )
+
+                if np.isfinite(optimizer_result.fval):
+                    optimization_successful = True
+                    # The color of the point is set to blue if the min_step_size was increased
+                    color_next = np.array([0, 0, 1, 1])
+                else:
+                    min_step_increase_factor *= 1.25
+                    logger.warning(
+                        f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed. "
+                        f"Increasing min_step_size to {options.min_step_size * min_step_increase_factor}."
+                    )
+
+        if not optimization_successful:
+            # Could not optimize successfully by reducing max_step_size or
+            # increasing min_step_size. Sample new starting points for up to
+            # max_tries additional attempts.
+            logger.warning(
+                f"Failed to optimize at {problem.x_names[i_par]}={x_next[i_par]} "
+                f"after adjusting the step size bounds. "
+                f"Trying to sample {max_tries} new starting points."
+            )
+
+            x_next = create_next_guess(
+                x_now,
+                i_par,
+                par_direction,
+                options,
+                current_profile,
+                problem,
+                global_opt,
+                1.0,
+                1.0,
+            )
+
+            problem.fix_parameters(i_par, x_next[i_par])

-        # run optimization
-        if startpoint.size > 0:
-            # number of optimization attempts for the given value of i_par in case
-            # no finite solution is found
             for i_optimize_attempt in range(max_tries):
+                startpoint = problem.startpoint_method(
+                    n_starts=1, problem=problem
+                )[0]
+
                 optimizer_result = optimizer.minimize(
                     problem=problem,
                     x0=startpoint,
@@ -107,40 +246,22 @@ def walk_along_profile(
                     ),
                 )
                 if np.isfinite(optimizer_result.fval):
+                    # The color of the point is set to green if the parameter was resampled
+                    color_next = np.array([0, 1, 0, 1])
                     break

                 logger.warning(
                     f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed."
                 )
-                # sample a new starting point for another attempt
-                # might be preferable to stay close to the previous point, at least initially,
-                # but for now, we just sample from anywhere within the parameter bounds
-                # alternatively, run multi-start optimization
-                startpoint = problem.startpoint_method(
-                    n_starts=1, problem=problem
-                )[0]
             else:
                 raise RuntimeError(
                     f"Computing profile point failed. Could not find a finite solution after {max_tries} attempts."
                 )
-        else:
-            # if too many parameters are fixed, there is nothing to do ...
-            fval = problem.objective(np.array([]))
-            optimizer_result = OptimizerResult(
-                id="0",
-                x=np.array([]),
-                fval=fval,
-                n_fval=0,
-                n_grad=0,
-                n_res=0,
-                n_hess=0,
-                n_sres=0,
-                x0=np.array([]),
-                fval0=fval,
-                time=0,
-            )
-            optimizer_result.update_to_full(problem=problem)
+        logger.info(
+            f"Optimization successful for {problem.x_names[i_par]}={x_next[i_par]:.4f}. "
+            f"Start fval {problem.objective(x_next[problem.x_free_indices]):.6f}, end fval {optimizer_result.fval:.6f}."
+        )
         if optimizer_result[GRAD] is not None:
             gradnorm = np.linalg.norm(
                 optimizer_result[GRAD][problem.x_free_indices]
@@ -154,6 +275,7 @@ def walk_along_profile(
             ratio=np.exp(global_opt - optimizer_result.fval),
             gradnorm=gradnorm,
             time=optimizer_result.time,
+            color=color_next,
             exitflag=optimizer_result.exitflag,
             n_fval=optimizer_result.n_fval,
             n_grad=optimizer_result.n_grad,
diff --git a/pypesto/result/profile.py b/pypesto/result/profile.py
index af14e5f1a..5f8ce8405 100644
--- a/pypesto/result/profile.py
+++ b/pypesto/result/profile.py
@@ -38,6 +38,13 @@ class ProfilerResult(dict):
         Number of gradient evaluations.
     n_hess:
         Number of Hessian evaluations.
+    color_path:
+        The color of the profile path. Signifies types of steps made.
+        Red indicates a step for which max_step_size was reduced, blue
+        indicates a step for which min_step_size was increased, and green
+        indicates a step for which the profiler had to resample the parameter
+        vector because the previous two strategies failed. Black indicates a
+        step for which none of the above was necessary.
     message:
         Textual comment on the profile result.
@@ -55,6 +62,7 @@ def __init__(
         gradnorm_path: np.ndarray = None,
         exitflag_path: np.ndarray = None,
         time_path: np.ndarray = None,
+        color_path: np.ndarray = None,
         time_total: float = 0.0,
         n_fval: int = 0,
         n_grad: int = 0,
@@ -86,6 +94,13 @@ def __init__(
         else:
             self.time_path = time_path.copy()

+        if color_path is None:
+            self.color_path = np.full(
+                (x_path.shape[1], 4), np.array([1, 0, 0, 0.3])
+            )
+        else:
+            self.color_path = color_path.copy()
+
         if (
             not self.x_path.shape[1]
             == len(self.fval_path)
@@ -122,6 +137,7 @@ def append_profile_point(
         ratio: float,
         gradnorm: float = np.nan,
         time: float = np.nan,
+        color: np.ndarray = np.nan,
         exitflag: float = np.nan,
         n_fval: int = 0,
         n_grad: int = 0,
@@ -143,6 +159,8 @@ def append_profile_point(
             The gradient norm at `x`.
         time:
             The computation time to find `x`.
+        color:
+            The color of the profile path. Signifies types of steps made.
         exitflag:
             The exitflag of the optimizer (useful if an optimization
             was performed to find `x`).
@@ -159,6 +177,7 @@ def append_profile_point(
         self.gradnorm_path = np.hstack((self.gradnorm_path, gradnorm))
         self.exitflag_path = np.hstack((self.exitflag_path, exitflag))
         self.time_path = np.hstack((self.time_path, time))
+        self.color_path = np.vstack((self.color_path, color))

         # increment the time and f_eval counters
         self.time_total += time
@@ -180,6 +199,7 @@ def flip_profile(self) -> None:
         self.gradnorm_path = np.flip(self.gradnorm_path)
         self.exitflag_path = np.flip(self.exitflag_path)
         self.time_path = np.flip(self.time_path)
+        self.color_path = np.flip(self.color_path, axis=0)


 class ProfileResult:
diff --git a/pypesto/visualize/profiles.py b/pypesto/visualize/profiles.py
index f4ecb6443..5c11c3f23 100644
--- a/pypesto/visualize/profiles.py
+++ b/pypesto/visualize/profiles.py
@@ -24,6 +24,8 @@ def profiles(
     profile_list_ids: Union[int, Sequence[int]] = 0,
     ratio_min: float = 0.0,
     show_bounds: bool = False,
+    plot_objective_values: bool = False,
+    quality_colors: bool = False,
 ) -> plt.Axes:
     """
     Plot classical 1D profile plot.
@@ -45,7 +47,7 @@ def profiles(
         List of reference points for optimization results, containing at
         least a function value fval.
     colors:
-        List of colors, or single color.
+        List of colors, or single color. Cannot be provided if quality_colors is set to True.
     legends:
         Labels for line plots, one label per result object.
     x_labels:
@@ -56,12 +58,30 @@ def profiles(
         Minimum ratio below which to cut off.
     show_bounds:
         Whether to show, and extend the plot to, the lower and upper bounds.
+    plot_objective_values:
+        Whether to plot the objective function values instead of the
+        likelihood ratio values.
+    quality_colors:
+        If set to True, the profiles are colored according to the types of
+        steps the profiler took. This gives additional information about the
+        profile quality. Red indicates a step for which max_step_size was
+        reduced, blue indicates a step for which min_step_size was increased,
+        and green indicates a step for which the profiler had to resample the
+        parameter vector because the previous two strategies failed. Black
+        indicates a step for which none of the above was necessary. This
+        option is only available if there is only one result and one
+        profile_list_id (one profile per plot).

     Returns
     -------
     ax:
         The plot axes.
     """
+
+    if colors is not None and quality_colors:
+        raise ValueError(
+            "Cannot visualize the profiles with the `quality_colors` of "
+            "profiler_result.color_path and `colors` provided at the same "
+            "time. Please provide only one of them."
+        )
+
     # parse input
     results, profile_list_ids, colors, legends = process_result_list_profiles(
         results, profile_list_ids, colors, legends
     )
@@ -75,11 +95,12 @@ def profiles(
     # loop over results
     for i_result, result in enumerate(results):
         for i_profile_list, profile_list_id in enumerate(profile_list_ids):
-            fvals = handle_inputs(
+            fvals, color_paths = handle_inputs(
                 result,
                 profile_indices=profile_indices,
                 profile_list=profile_list_id,
                 ratio_min=ratio_min,
+                plot_objective_values=plot_objective_values,
             )

             # add x_labels for parameters
@@ -98,17 +119,30 @@ def profiles(
                 # multiple results per axes object
                 color_ind = i_result

+            # If quality_colors is set to True, use the colors provided by
+            # profiler_result.color_path. This is done only if there is one
+            # result and one profile_list_id (i.e., one profile per plot).
+            if (
+                len(results) == 1
+                and len(profile_list_ids) == 1
+                and quality_colors
+            ):
+                color = color_paths
+            else:
+                color = colors[color_ind]
+
             # call lowlevel routine
             ax = profiles_lowlevel(
                 fvals=fvals,
                 ax=ax,
                 size=size,
-                color=colors[color_ind],
+                color=color,
                 legend_text=legends[color_ind],
                 x_labels=x_labels,
                 show_bounds=show_bounds,
                 lb_full=result.problem.lb_full,
                 ub_full=result.problem.ub_full,
+                plot_objective_values=plot_objective_values,
             )

     # parse and apply plotting options
@@ -132,6 +166,7 @@ def profiles_lowlevel(
     show_bounds: bool = False,
     lb_full: Sequence[float] = None,
     ub_full: Sequence[float] = None,
+    plot_objective_values: bool = False,
 ) -> list[plt.Axes]:
     """
     Lowlevel routine for profile plotting.
@@ -147,8 +182,9 @@ def profiles_lowlevel(
     size:
         Figure size (width, height) in inches. Is only applied when no ax
         object is specified.
-    color: RGBA, optional
-        Color for profiles in plot.
+    color: RGBA, list[np.ndarray[RGBA]], optional
+        Color for profiles in plot. If quality_colors is set to True, this is
+        a list with one np.ndarray of RGBA values per profile (one color per profile point).
     legend_text:
         Label for line plots.
     show_bounds:
@@ -157,6 +193,9 @@ def profiles_lowlevel(
         Lower bound.
     ub_full:
         Upper bound.
+    plot_objective_values:
+        Whether to plot the objective function values instead of the
+        likelihood ratio values.

     Returns
     -------
@@ -215,6 +254,12 @@ def profiles_lowlevel(
         # if we have empty profiles and more axes than profiles: skip
         if n_plots != n_fvals and fval is None:
             continue
+        # If we use colors from profiler_result.color_path,
+        # we need to take the color path of each profile
+        if isinstance(color, list) and isinstance(color[i_plot], np.ndarray):
+            color_i = color[i_plot]
+        else:
+            color_i = color

         # handle legend
         if i_plot == 0:
@@ -235,7 +280,7 @@ def profiles_lowlevel(
                 fval,
                 ax[counter],
                 size=size,
-                color=color,
+                color=color_i,
                 legend_text=tmp_legend,
                 show_bounds=show_bounds,
                 lb=lb,
@@ -249,13 +294,10 @@ def profiles_lowlevel(
                 ax[counter].set_xlabel(x_labels[counter])

             if counter % columns == 0:
-                ax[counter].set_ylabel("Log-posterior ratio")
-            else:
-                # fix pyPESTO/pyPESTO/pypesto/visualize/profiles.py:228:
-                # UserWarning: FixedFormatter should only be used
-                # together with FixedLocator. Fix from matplotlib #18848.
-                ax[counter].set_yticks(ax[counter].get_yticks())
-                ax[counter].set_yticklabels(["" for _ in ax[counter].get_yticks()])
+                if plot_objective_values:
+                    ax[counter].set_ylabel("Objective function value")
+                else:
+                    ax[counter].set_ylabel("Log-posterior ratio")

         # increase counter and cleanup legend
         counter += 1
@@ -302,9 +344,17 @@ def profile_lowlevel(
     """
     # parse input
     fvals = np.asarray(fvals)

-    # get colors
-    color = assign_colors([1.0], color)
+    if (
+        color is None
+        or isinstance(color, list)
+        or isinstance(color, tuple)
+        or (isinstance(color, np.ndarray) and not len(color.shape) == 2)
+    ):
+        color = assign_colors([1.0], color)
+        single_color = True
+    else:
+        single_color = False

     # axes
     if ax is None:
@@ -317,7 +367,37 @@ def profile_lowlevel(
     # plot
     if fvals.size != 0:
         ax.xaxis.set_major_locator(MaxNLocator(integer=True))
-        ax.plot(fvals[0, :], fvals[1, :], color=color[0], label=legend_text)
+        xs = fvals[0, :]
+        ratios = fvals[1, :]
+
+        # If we use colors from profiler_result.color_path,
+        # we need to make a mapping from profile points to their colors
+        if not single_color:
+            # Create a mapping from (x, ratio) to color
+            point_to_color = dict(zip(zip(xs, ratios), color))
+        else:
+            point_to_color = None
+
+        # Plot each profile point individually to allow for different colors
+        for i in range(1, len(xs)):
+            point_color = (
+                color[0]
+                if single_color
+                else tuple(point_to_color[(xs[i], ratios[i])])
+            )
+            ax.plot(
+                [xs[i - 1], xs[i]],
+                [ratios[i - 1], ratios[i]],
+                color=color[0] if single_color else (0, 0, 0, 1),
+                linestyle="-",
+            )
+            if not single_color and point_color != (0, 0, 0, 1):
+                ax.plot(xs[i], ratios[i], color=point_color, marker="o")
+            else:
+                ax.plot(xs[i], ratios[i], color=point_color, marker=".")
+
+        # Plot legend text
+        ax.plot([], [], color=color[0], label=legend_text)

     if legend_text is not None:
         ax.legend()
@@ -366,6 +446,7 @@ def handle_inputs(
     profile_indices: Sequence[int],
     profile_list: int,
     ratio_min: float,
+    plot_objective_values: bool,
-) -> list[np.array]:
+) -> tuple[list[np.ndarray], list[np.ndarray]]:
     """
     Retrieve the values of the profiles to be plotted.
@@ -381,6 +462,8 @@ def handle_inputs(
     ratio_min:
         Exclude values where profile likelihood ratio is smaller than
         ratio_min.
+    plot_objective_values:
+        Whether to plot the objective function values instead of the
+        likelihood ratio values.

     Returns
     -------
     """
     # extract ratio values from result
     fvals = []
+    colors = []
     for i_par in range(0, len(result.profile_result.list[profile_list])):
         if (
             i_par in profile_indices
             and result.profile_result.list[profile_list][i_par] is not None
         ):
             xs = result.profile_result.list[profile_list][i_par].x_path[
                 i_par
             ]
             ratios = result.profile_result.list[profile_list][
                 i_par
             ].ratio_path[:]
+            colors_for_par = result.profile_result.list[profile_list][
+                i_par
+            ].color_path

             # constrain
             indices = np.where(ratios > ratio_min)
             xs = xs[indices]
             ratios = ratios[indices]
-
-            fvals_for_par = np.array([xs, ratios])
+            colors_for_par = colors_for_par[indices]
+
+            if plot_objective_values:
+                obj_vals = result.profile_result.list[profile_list][
+                    i_par
+                ].fval_path
+                obj_vals = obj_vals[indices]
+                fvals_for_par = np.array([xs, obj_vals])
+            else:
+                fvals_for_par = np.array([xs, ratios])
         else:
             fvals_for_par = None
+            colors_for_par = None
         fvals.append(fvals_for_par)
+        colors.append(colors_for_par)

-    return fvals
+    return fvals, colors


 def process_result_list_profiles(
diff --git a/test/profile/test_profile.py b/test/profile/test_profile.py
index f422fe8bf..8b2e3cbaa 100644
--- a/test/profile/test_profile.py
+++ b/test/profile/test_profile.py
@@ -68,7 +68,7 @@ def test_default_profiling(self):
             steps = result.profile_result.list[i_run][0]["ratio_path"].size
             if method == "adaptive_step_regression":
                 self.assertTrue(
-                    steps < 20,
+                    steps < 100,
                     "Profiling with regression based "
                     "proposal needed too many steps.",
                 )
@@ -79,7 +79,7 @@ def test_default_profiling(self):
                 )
             elif method == "adaptive_step_order_1":
                 self.assertTrue(
-                    steps < 25,
+                    steps < 100,
                     "Profiling with 1st order based "
                     "proposal needed too many steps.",
                 )
@@ -90,7 +90,7 @@ def test_default_profiling(self):
                 )
             elif method == "adaptive_step_order_0":
                 self.assertTrue(
-                    steps < 100,
+                    steps < 300,
                     "Profiling with 0th order based "
                     "proposal needed too many steps.",
                 )
@@ -479,6 +479,7 @@ def test_gh1165(lb, ub):
         progress_bar=False,
         profile_options=profile.ProfileOptions(
             min_step_size=0.1,
+            max_step_size=1.0,
             delta_ratio_max=0.05,
             default_step_size=0.5,
             ratio_min=0.01,
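
Editor's note (appended after the diff, not part of the patch): the retry logic this diff adds to `walk_along_profile.py` is easier to follow outside the hunk markers. The sketch below mirrors the escalation order of the patch -- halve the effective max step size, then inflate the effective min step size by 1.25 per retry, then resample fresh start points -- but `try_optimize` and `profile_step_with_retries` are hypothetical stand-ins for the optimizer call and the surrounding loop, not pypesto API.

import logging

import numpy as np

logger = logging.getLogger(__name__)


def profile_step_with_retries(
    try_optimize,  # hypothetical callable; returns fval, np.inf on failure
    min_step_size: float,
    max_step_size: float,
    max_tries: int = 10,
):
    """Sketch of the three-stage fallback for a failing profile step.

    Returns the fval and an RGBA quality color matching the patch's
    ``color_path`` convention.
    """
    # Stage 1: repeatedly halve the maximal step size bound.
    max_reduce = 1.0
    while max_step_size * max_reduce >= min_step_size:
        fval = try_optimize(min_factor=1.0, max_factor=max_reduce)
        if np.isfinite(fval):
            # black: plain step; red: max step size was reduced
            return fval, [0, 0, 0, 1] if max_reduce == 1.0 else [1, 0, 0, 1]
        max_reduce *= 0.5
    logger.warning("Max step size reduced below min step size.")

    # Stage 2: repeatedly inflate the minimal step size bound.
    min_increase = 1.25
    while min_step_size * min_increase <= max_step_size:
        fval = try_optimize(min_factor=min_increase, max_factor=1.0)
        if np.isfinite(fval):
            return fval, [0, 0, 1, 1]  # blue: min step size was increased
        min_increase *= 1.25
    logger.warning("Min step size increased above max step size.")

    # Stage 3: resample fresh starting points, as the patch does via
    # problem.startpoint_method, for up to max_tries attempts.
    for _ in range(max_tries):
        fval = try_optimize(min_factor=1.0, max_factor=1.0, resample=True)
        if np.isfinite(fval):
            return fval, [0, 1, 0, 1]  # green: start point was resampled
    raise RuntimeError(
        f"Could not find a finite solution after {max_tries} attempts."
    )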
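
A second note on the numerical core of the change: `adaptive_step` now aims the line search at a two-sided band of objective values instead of the single `next_obj_target`. The helper below reproduces that band computation with plain floats; the function name `next_obj_targets` is made up for illustration, and the defaults are the values set in `ProfileOptions` by this patch.

import numpy as np


def next_obj_targets(
    fval_last: float,
    last_delta_fval: float,
    delta_obj_value: float,
    delta_ratio_max: float = 0.1,
    adaptive_target_scaling_factor: float = 1.5,
) -> tuple[float, float]:
    """Return (low, high) objective targets around the last profile fval.

    The band is widened by the scaled magnitude of the last fval change and
    clipped to +/- 0.5 * delta_obj_value (the distance to the global optimum)
    to avoid overshooting, mirroring the patched adaptive_step.
    """
    width = -np.log(1.0 - delta_ratio_max) + (
        adaptive_target_scaling_factor * abs(last_delta_fval)
    )
    high = fval_last + width
    low = fval_last - width
    if delta_obj_value != 0:
        high = min(high, fval_last + 0.5 * delta_obj_value)
        low = max(low, fval_last - 0.5 * delta_obj_value)
    return low, high


# Last fval 12.0, previous step changed fval by 0.2, 3.0 above the optimum:
low, high = next_obj_targets(12.0, 0.2, 3.0)
print(round(low, 3), round(high, 3))  # 11.595 12.405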
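
Finally, a usage sketch for the renamed option and the new plotting flags. It assumes `problem`, `result`, and `optimizer` already exist from a prior pypesto optimization run (not shown); everything else is API introduced or changed by this patch.

import matplotlib.pyplot as plt

import pypesto.profile as profile
import pypesto.visualize as visualize

# Option renamed/re-defaulted by this patch.
options = profile.ProfileOptions(
    max_step_size=0.1,  # new default (previously 1.0)
    adaptive_target_scaling_factor=1.5,  # replaces magic_factor_obj_value
)

result = profile.parameter_profile(
    problem=problem,  # assumed: pypesto.Problem from a prior run
    result=result,  # assumed: pypesto.Result with optimization results
    optimizer=optimizer,  # assumed: a pypesto optimizer
    next_guess_method="adaptive_step_order_1",  # new default method
    profile_options=options,
    progress_bar=False,
)

# Color each profile point by the kind of step the profiler took:
# black = plain step, red = max_step_size reduced, blue = min_step_size
# increased, green = start point resampled.
visualize.profiles(result, quality_colors=True)

# Or plot raw objective values instead of likelihood ratios.
visualize.profiles(result, plot_objective_values=True)
plt.show()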