diff --git a/popt/update_schemes/enopt.py b/popt/update_schemes/enopt.py
index 9eed6e5..e3fa869 100644
--- a/popt/update_schemes/enopt.py
+++ b/popt/update_schemes/enopt.py
@@ -172,11 +172,13 @@ def calc_update(self):
         improvement = False
         success = False
         resampling_iter = 0
+        self.optimizer.restore_parameters()
 
         while not improvement:  # resampling loop
 
-            # Shrink covariance each time we try resampling
+            # Shrink covariance and step size each time we try resampling
             shrink = self.cov_factor ** resampling_iter
+            self.optimizer.apply_backtracking(np.sqrt(self.cov_factor) ** resampling_iter)
 
             # Calculate gradient
             if self.nesterov:
diff --git a/popt/update_schemes/genopt.py b/popt/update_schemes/genopt.py
index 2762514..ef60691 100644
--- a/popt/update_schemes/genopt.py
+++ b/popt/update_schemes/genopt.py
@@ -138,6 +138,7 @@ def calc_update(self):
         improvement = False
         success = False
         resampling_iter = 0
+        self.optimizer.restore_parameters()
 
         while improvement is False:  # resampling loop
 
diff --git a/popt/update_schemes/smcopt.py b/popt/update_schemes/smcopt.py
index e0ea9fe..981cbcb 100644
--- a/popt/update_schemes/smcopt.py
+++ b/popt/update_schemes/smcopt.py
@@ -126,11 +126,13 @@ def calc_update(self,):
         success = False
         resampling_iter = 0
         inflate = 2 * (self.inflation_factor + self.iteration)
+        self.optimizer.restore_parameters()
 
         while improvement is False:  # resampling loop
 
-            # Shrink covariance each time we try resampling
+            # Shrink covariance and step size each time we try resampling
             shrink = self.cov_factor ** resampling_iter
+            self.optimizer.apply_backtracking(np.sqrt(self.cov_factor) ** resampling_iter)
 
             # Calc sensitivity
             (sens_matrix, self.best_state, best_func_tmp) = self.sens(self.mean_state, inflate,
diff --git a/popt/update_schemes/subroutines/optimizers.py b/popt/update_schemes/subroutines/optimizers.py
index d5b2b72..a5bafe4 100644
--- a/popt/update_schemes/subroutines/optimizers.py
+++ b/popt/update_schemes/subroutines/optimizers.py
@@ -124,12 +124,12 @@ def apply_smc_update(self, control, gradient, **kwargs):
         new_control = (1-alpha) * control + alpha * gradient
         return new_control
 
-    def apply_backtracking(self):
+    def apply_backtracking(self, shrink=0.5):
         """
         Apply backtracking by reducing step size and momentum temporarily.
         """
-        self._step_size = 0.5*self._step_size
-        self._momentum = 0.5*self._momentum
+        self._step_size = shrink*self._step_size
+        self._momentum = shrink*self._momentum
 
     def restore_parameters(self):
         """
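
Note (not part of the patch): a minimal, self-contained Python sketch of the pattern the diff introduces, i.e. calling restore_parameters() once per update and apply_backtracking(shrink) on each resampling attempt. ToyOptimizer and resampling_loop_sketch are illustrative names only; the body of restore_parameters() is truncated in the diff, so resetting step size and momentum to their initial values is an assumption here.

    # Sketch of the resampling-loop backtracking pattern (assumed behaviour,
    # not the popt implementation).
    import numpy as np


    class ToyOptimizer:
        def __init__(self, step_size=1.0, momentum=0.9):
            self._init_step_size = step_size
            self._init_momentum = momentum
            self._step_size = step_size
            self._momentum = momentum

        def apply_backtracking(self, shrink=0.5):
            # Matches the patched signature: scale step size and momentum by `shrink`.
            self._step_size = shrink * self._step_size
            self._momentum = shrink * self._momentum

        def restore_parameters(self):
            # Assumed behaviour: undo backtracking applied in earlier updates.
            self._step_size = self._init_step_size
            self._momentum = self._init_momentum


    def resampling_loop_sketch(optimizer, cov_factor=0.5, max_resampling=3):
        # Restore once per calc_update() call, as in the patch, so backtracking
        # from a previous update does not carry over.
        optimizer.restore_parameters()
        for resampling_iter in range(max_resampling):
            # Covariance shrink is recomputed fresh each attempt; the step size is
            # backtracked in place, so repeated apply_backtracking() calls compound.
            shrink = cov_factor ** resampling_iter
            optimizer.apply_backtracking(np.sqrt(cov_factor) ** resampling_iter)
            print(f"attempt {resampling_iter}: cov shrink {shrink:.3f}, "
                  f"step size {optimizer._step_size:.3f}")


    resampling_loop_sketch(ToyOptimizer())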