"""
Use the scikits.optimization package to define a set of optimizers and
models built on LinearOperators.
"""
import numpy as np
from copy import copy
from scikits.optimization import step, line_search, criterion, optimizer
from scikits.optimization.optimizer import StandardOptimizer
import criterions
import norms


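# Default building blocks for the optimizers defined below: a Polak-Ribiere-
# Polyak conjugate-gradient step, a quadratic-interpolation line search and
# the package's default stopping criterion.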
default_step = step.PRPConjugateGradientStep()
default_line_search = line_search.QuadraticInterpolationSearch(0)
default_stop_criterion = criterion.criterion()

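
# The class and constructor header below are a reconstruction (the class name
# and the keyword defaults are assumptions); the body that follows stores the
# pieces and wires them into a scikits.optimization StandardOptimizer.
class Optimizer(object):

    def __init__(self, criterion, step=default_step,
                 line_search=default_line_search,
                 stop_criterion=default_stop_criterion):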
        self.criterion = criterion
        self.n_variables = criterion.n_variables
        self.step = step
        self.line_search = line_search
        self.stop_criterion = stop_criterion
        self.first_guess()
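        # Wrap everything in a scikits.optimization StandardOptimizer and
        # route its per-iteration history recording to our callback.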
        self.optimizer = StandardOptimizer(function=self.criterion,
                                           step=self.step,
                                           line_search=self.line_search,
                                           criterion=self.stop_criterion,
                                           x0=self.current_solution)
        self.optimizer.recordHistory = self.callback
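
    # Reconstructed method header (the exact signature is an assumption); the
    # optimizer calls this after each iteration to report progress.
    def callback(self, **kwargs):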
        state = self.optimizer.state
        iter_ = state['iteration']
        crit = state['new_value']
        step = state['alpha_step']
        if iter_ == 0:
            print("Iteration \t Step \t Criterion")
        print("\t%i \t %e \t %e" % (iter_, step, crit))
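
    # Reconstructed method header (the x0 keyword and its default are implied
    # by the body below and by the bare self.first_guess() call in __init__).
    def first_guess(self, x0=None):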
        """
        Sets the current_solution attribute to the initial value.
        """
        if x0 is None:
            self.current_solution = np.zeros(self.n_variables)
        else:
            self.current_solution = copy(x0)
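

# A second optimizer, specialised to a quadratic criterion built from a model,
# the data, a list of priors and their hyperparameters.  The class name and
# base class below are assumptions.
class QuadraticOptimizer(Optimizer):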
    def __init__(self, model, data, priors, hypers, **kwargs):
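        # Sketch of the constructor body; the criterion class name and its
        # signature are assumptions.  Build a quadratic criterion exposing the
        # model, priors, hypers and norms attributes read back below, then
        # defer to the base optimizer.
        criterion = criterions.QuadraticCriterion(model, data, priors=priors,
                                                  hypers=hypers)
        Optimizer.__init__(self, criterion, **kwargs)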
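
    # Method name and signature are assumptions; given the optimizer state this
    # returns the step length that exactly minimises the quadratic criterion
    # along the current search direction.
    def get_optimal_step(self, state):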
        d = state['direction']
        g = state['gradient']
        H = state['function'].model
        Ds = state['function'].priors
        hypers = state['function'].hypers
        algo_norms = state['function'].norms

        algo_norms = [n if isinstance(n, norms.Norm2) else norms.Norm2()
                      for n in algo_norms]

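        # For J(x) = ||H x - y||^2 + sum_i h_i ||D_i x||^2, the restriction
        # J(x + a d) is quadratic in a; setting its derivative to zero gives
        #     a = -0.5 * (d . g) / (||H d||^2 + sum_i h_i ||D_i d||^2)
        # with g the gradient of J at x.  Non-l2 norms were replaced by Norm2
        # above so that this closed form applies.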
        a = -.5 * np.dot(d.T, g)
        a /= (algo_norms[0](H * d)
              + np.sum([h * n(D * d) for h, D, n in zip(hypers, Ds, algo_norms)]))
        return a