author | Edoardo Pasca <edo.paskino@gmail.com> | 2019-07-04 21:57:01 +0100
---|---|---
committer | GitHub <noreply@github.com> | 2019-07-04 21:57:01 +0100
commit | 77b6dc865be1a0f6c26cf5d6525ea610d737ef2f (patch) |
tree | 988dc9961a081902c406e88725ca9f71f8315d53 /Wrappers/Python |
parent | 6876bda04cde114642ebce3d6bd577da40fa34e9 (diff) |
download | framework-77b6dc865be1a0f6c26cf5d6525ea610d737ef2f.tar.gz framework-77b6dc865be1a0f6c26cf5d6525ea610d737ef2f.tar.bz2 framework-77b6dc865be1a0f6c26cf5d6525ea610d737ef2f.tar.xz framework-77b6dc865be1a0f6c26cf5d6525ea610d737ef2f.zip |
Revert "Gf algorithm bug fix (#348)" (#349)
This reverts commit 6876bda04cde114642ebce3d6bd577da40fa34e9.
Diffstat (limited to 'Wrappers/Python')

Mode | File | Lines changed
---|---|---
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py | 33
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py | 39
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py | 24
-rw-r--r-- | Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 145
-rw-r--r-- | Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py | 25
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py | 27
-rw-r--r-- | Wrappers/Python/setup.py | 9

7 files changed, 207 insertions, 95 deletions
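
The largest hunk is in PDHG.py: the constructor being restored derives the primal step size `tau` from the dual step size `sigma` and the operator norm whenever `tau` is not supplied. A minimal sketch of that arithmetic in plain Python (the operator norm here is a made-up stand-in value, not a ccpi call):

```python
# Illustrative sketch only (not ccpi code): the step-size rule applied by the
# restored PDHG.__init__ when tau is None.
sigma = 1.0                      # default dual step size in the restored constructor
normK = 12.5                     # stand-in for operator.norm(), i.e. ||K||
tau = 1 / (sigma * normK ** 2)   # primal step size

# The convergence condition quoted in the code is sigma*tau*||K||^2 < 1;
# with this choice the product comes out to exactly 1.
print(tau, sigma * tau * normK ** 2)
```
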
```diff
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
index 82fbb96..de904fb 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
@@ -34,25 +34,27 @@ class CGLS(Algorithm):
     https://web.stanford.edu/group/SOL/software/cgls/
     '''
-
+
+
+
     def __init__(self, **kwargs):
         super(CGLS, self).__init__()
-        x_init = kwargs.get('x_init', None)
-        operator = kwargs.get('operator', None)
-        data = kwargs.get('data', None)
-        tolerance = kwargs.get('tolerance', 1e-6)
-
-        if x_init is not None and operator is not None and data is not None:
-            print(self.__class__.__name__ , "set_up called from creator")
-            self.set_up(x_init=x_init, operator=operator, data=data, tolerance=tolerance)
-
-    def set_up(self, x_init, operator, data, tolerance=1e-6):
+        self.x = kwargs.get('x_init', None)
+        self.operator = kwargs.get('operator', None)
+        self.data = kwargs.get('data', None)
+        self.tolerance = kwargs.get('tolerance', 1e-6)
+        if self.x is not None and self.operator is not None and \
+            self.data is not None:
+            print (self.__class__.__name__ , "set_up called from creator")
+            self.set_up(x_init =kwargs['x_init'],
+                        operator=kwargs['operator'],
+                        data =kwargs['data'])
+
+
+    def set_up(self, x_init, operator , data ):
         self.x = x_init * 0.
-        self.operator = operator
-        self.tolerance = tolerance
-
         self.r = data - self.operator.direct(self.x)
         self.s = self.operator.adjoint(self.r)
@@ -62,7 +64,8 @@ class CGLS(Algorithm):
         ##
         self.norms = self.s.norm()
         ##
-
+
+
         self.gamma = self.norms0**2
         self.normx = self.x.norm()
         self.xmax = self.normx
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index fa1d8d8..9e40c95 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -39,31 +39,40 @@ class FISTA(Algorithm):
         '''initialisation can be done at creation time if all
         proper variables are passed or later with set_up'''
         super(FISTA, self).__init__()
-        f = kwargs.get('f', None)
-        g = kwargs.get('g', ZeroFunction())
-        x_init = kwargs.get('x_init', None)
-
-        if x_init is not None and f is not None:
-            print(self.__class__.__name__ , "set_up called from creator")
-            self.set_up(x_init=x_init, f=f, g=g)
-
-    def set_up(self, x_init, f, g=ZeroFunction()):
+        self.f = kwargs.get('f', None)
+        self.g = kwargs.get('g', ZeroFunction())
+        self.x_init = kwargs.get('x_init',None)
+        self.invL = None
+        self.t_old = 1
+        if self.x_init is not None and \
+            self.f is not None and self.g is not None:
+            print ("FISTA set_up called from creator")
+            self.set_up(self.x_init, self.f, self.g)
 
+    def set_up(self, x_init, f, g, opt=None, **kwargs):
+
+        self.f = f
+        self.g = g
+
+        # algorithmic parameters
+        if opt is None:
+            opt = {'tol': 1e-4}
+
         self.y = x_init.copy()
         self.x_old = x_init.copy()
         self.x = x_init.copy()
-        self.u = x_init.copy()
+        self.u = x_init.copy()
 
-        self.f = f
-        self.g = g
         self.invL = 1/f.L
-        self.t = 1
+
+        self.t_old = 1
         self.update_objective()
         self.configured = True
 
     def update(self):
-        self.t_old = self.t
+
         self.f.gradient(self.y, out=self.u)
         self.u.__imul__( -self.invL )
         self.u.__iadd__( self.y )
@@ -78,7 +87,7 @@ class FISTA(Algorithm):
         self.y.__iadd__( self.x )
 
         self.x_old.fill(self.x)
-
+        self.t_old = self.t
 
     def update_objective(self):
         self.loss.append( self.f(self.x) + self.g(self.x) )
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
index 8c2b693..cbccd73 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
@@ -25,14 +25,18 @@ class GradientDescent(Algorithm):
         '''initialisation can be done at creation time if all
         proper variables are passed or later with set_up'''
         super(GradientDescent, self).__init__()
-
-        x_init = kwargs.get('x_init', None)
-        objective_function = kwargs.get('objective_function', None)
-        rate = kwargs.get('rate', None)
-
-        if x_init is not None and objective_function is not None and rate is not None:
-            print(self.__class__.__name__, "set_up called from creator")
-            self.set_up(x_init=x_init, objective_function=objective_function, rate=rate)
+        self.x = None
+        self.rate = 0
+        self.objective_function = None
+        self.regulariser = None
+        args = ['x_init', 'objective_function', 'rate']
+        for k,v in kwargs.items():
+            if k in args:
+                args.pop(args.index(k))
+        if len(args) == 0:
+            self.set_up(x_init=kwargs['x_init'],
+                        objective_function=kwargs['objective_function'],
+                        rate=kwargs['rate'])
 
     def should_stop(self):
         '''stopping cryterion, currently only based on number of iterations'''
@@ -43,17 +47,14 @@ class GradientDescent(Algorithm):
         self.x = x_init.copy()
         self.objective_function = objective_function
         self.rate = rate
-        self.loss.append(objective_function(x_init))
         self.iteration = 0
-
         try:
             self.memopt = self.objective_function.memopt
         except AttributeError as ae:
             self.memopt = False
         if self.memopt:
             self.x_update = x_init.copy()
-
         self.configured = True
 
     def update(self):
@@ -67,3 +68,4 @@ class GradientDescent(Algorithm):
 
     def update_objective(self):
         self.loss.append(self.objective_function(self.x))
+
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 8f37765..2503fe6 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -24,42 +24,42 @@ from ccpi.optimisation.operators import BlockOperator
 from ccpi.framework import BlockDataContainer
 from ccpi.optimisation.functions import FunctionOperatorComposition
-
 class PDHG(Algorithm):
     '''Primal Dual Hybrid Gradient'''
 
     def __init__(self, **kwargs):
         super(PDHG, self).__init__(max_iteration=kwargs.get('max_iteration',0))
-        f = kwargs.get('f', None)
-        operator = kwargs.get('operator', None)
-        g = kwargs.get('g', None)
-        tau = kwargs.get('tau', None)
-        sigma = kwargs.get('sigma', 1.)
-
-        if f is not None and operator is not None and g is not None:
-            print(self.__class__.__name__ , "set_up called from creator")
-            self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
-
-    def set_up(self, f, g, operator, tau=None, sigma=1.):
-
-        # can't happen with default sigma
-        if sigma is None and tau is None:
-            raise ValueError('Need sigma*tau||K||^2<1')
-
+        self.f = kwargs.get('f', None)
+        self.operator = kwargs.get('operator', None)
+        self.g = kwargs.get('g', None)
+        self.tau = kwargs.get('tau', None)
+        self.sigma = kwargs.get('sigma', 1.)
+
+
+        if self.f is not None and self.operator is not None and \
+            self.g is not None:
+            if self.tau is None:
+                # Compute operator Norm
+                normK = self.operator.norm()
+                # Primal & dual stepsizes
+                self.tau = 1/(self.sigma*normK**2)
+            print ("Calling from creator")
+            self.set_up(self.f,
+                        self.g,
+                        self.operator,
+                        self.tau,
+                        self.sigma)
+
+    def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
         # algorithmic parameters
+        self.operator = operator
         self.f = f
         self.g = g
-        self.operator = operator
-        self.tau = tau
         self.sigma = sigma
-
-        if self.tau is None:
-            # Compute operator Norm
-            normK = self.operator.norm()
-            # Primal & dual stepsizes
-            self.tau = 1 / (self.sigma * normK ** 2)
-
+        self.opt = opt
+        if sigma is None and tau is None:
+            raise ValueError('Need sigma*tau||K||^2<1')
 
         self.x_old = self.operator.domain_geometry().allocate()
         self.x_tmp = self.x_old.copy()
@@ -83,7 +83,7 @@ class PDHG(Algorithm):
         self.y_tmp *= self.sigma
         self.y_tmp += self.y_old
 
-        # self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
+        #self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
         self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)
 
         # Gradient ascent, Primal problem solution
@@ -91,9 +91,10 @@
         self.x_tmp *= -1*self.tau
         self.x_tmp += self.x_old
 
+
         self.g.proximal(self.x_tmp, self.tau, out=self.x)
 
-        # Update
+        #Update
         self.x.subtract(self.x_old, out=self.xbar)
         self.xbar *= self.theta
         self.xbar += self.x
@@ -106,4 +107,90 @@ class PDHG(Algorithm):
         p1 = self.f(self.operator.direct(self.x)) + self.g(self.x)
         d1 = -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(-1*self.operator.adjoint(self.y)))
 
-        self.loss.append([p1, d1, p1-d1])
\ No newline at end of file
+        self.loss.append([p1,d1,p1-d1])
+
+
+
+def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
+
+    # algorithmic parameters
+    if opt is None:
+        opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \
+               'memopt': False}
+
+    if sigma is None and tau is None:
+        raise ValueError('Need sigma*tau||K||^2<1')
+
+    niter = opt['niter'] if 'niter' in opt.keys() else 1000
+    tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
+    memopt = opt['memopt'] if 'memopt' in opt.keys() else False
+    show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False
+    stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False
+
+    x_old = operator.domain_geometry().allocate()
+    y_old = operator.range_geometry().allocate()
+
+    xbar = x_old.copy()
+    x_tmp = x_old.copy()
+    x = x_old.copy()
+
+    y_tmp = y_old.copy()
+    y = y_tmp.copy()
+
+
+    # relaxation parameter
+    theta = 1
+
+    t = time.time()
+
+    primal = []
+    dual = []
+    pdgap = []
+
+
+    for i in range(niter):
+
+
+
+        if memopt:
+            operator.direct(xbar, out = y_tmp)
+            y_tmp *= sigma
+            y_tmp += y_old
+        else:
+            y_tmp = y_old + sigma * operator.direct(xbar)
+
+        f.proximal_conjugate(y_tmp, sigma, out=y)
+
+        if memopt:
+            operator.adjoint(y, out = x_tmp)
+            x_tmp *= -1*tau
+            x_tmp += x_old
+        else:
+            x_tmp = x_old - tau*operator.adjoint(y)
+
+        g.proximal(x_tmp, tau, out=x)
+
+        x.subtract(x_old, out=xbar)
+        xbar *= theta
+        xbar += x
+
+        x_old.fill(x)
+        y_old.fill(y)
+
+        if i%10==0:
+
+            p1 = f(operator.direct(x)) + g(x)
+            d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )
+            primal.append(p1)
+            dual.append(d1)
+            pdgap.append(p1-d1)
+            print(p1, d1, p1-d1)
+
+
+
+    t_end = time.time()
+
+    return x, t_end - t, primal, dual, pdgap
+
+
+
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
index ca5b084..02ca937 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
@@ -31,17 +31,19 @@ class SIRT(Algorithm):
     '''
     def __init__(self, **kwargs):
         super(SIRT, self).__init__()
+        self.x = kwargs.get('x_init', None)
+        self.operator = kwargs.get('operator', None)
+        self.data = kwargs.get('data', None)
+        self.constraint = kwargs.get('constraint', None)
+        if self.x is not None and self.operator is not None and \
+            self.data is not None:
+            print ("Calling from creator")
+            self.set_up(x_init=kwargs['x_init'],
+                        operator=kwargs['operator'],
+                        data=kwargs['data'],
+                        constraint=kwargs['constraint'])
 
-        x_init = kwargs.get('x_init', None)
-        operator = kwargs.get('operator', None)
-        data = kwargs.get('data', None)
-        constraint = kwargs.get('constraint', None)
-
-        if x_init is not None and operator is not None and data is not None:
-            print(self.__class__.__name__, "set_up called from creator")
-            self.set_up(x_init=x_init, operator=operator, data=data, constraint=constraint)
-
-    def set_up(self, x_init, operator, data, constraint=None):
+    def set_up(self, x_init, operator , data, constraint=None ):
         self.x = x_init.copy()
         self.operator = operator
@@ -55,7 +57,6 @@ class SIRT(Algorithm):
         # Set up scaling matrices D and M.
         self.M = 1/self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))
         self.D = 1/self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0))
-
         self.update_objective()
         self.configured = True
@@ -66,7 +67,7 @@ class SIRT(Algorithm):
         self.x += self.relax_par * (self.D*self.operator.adjoint(self.M*self.r))
 
         if self.constraint is not None:
-            self.x = self.constraint.proximal(self.x, None)
+            self.x = self.constraint.proximal(self.x,None)
             # self.constraint.proximal(self.x,None, out=self.x)
 
     def update_objective(self):
diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
index eea300d..204fdc4 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
@@ -36,14 +36,27 @@ class Norm2Sq(Function):
     '''
 
-    def __init__(self, A, b, c=1.0):
+    def __init__(self,A,b,c=1.0,memopt=False):
         super(Norm2Sq, self).__init__()
 
         self.A = A  # Should be an operator, default identity
         self.b = b  # Default zero DataSet?
         self.c = c  # Default 1.
-        self.range_tmp = A.range_geometry().allocate()
-
+        if memopt:
+            try:
+                self.range_tmp = A.range_geometry().allocate()
+                self.domain_tmp = A.domain_geometry().allocate()
+                self.memopt = True
+            except NameError as ne:
+                warnings.warn(str(ne))
+                self.memopt = False
+            except NotImplementedError as nie:
+                print (nie)
+                warnings.warn(str(nie))
+                self.memopt = False
+        else:
+            self.memopt = False
+
         # Compute the Lipschitz parameter from the operator if possible
         # Leave it initialised to None otherwise
         try:
@@ -56,7 +69,7 @@ class Norm2Sq(Function):
     #def grad(self,x):
     #    return self.gradient(x, out=None)
 
-    def __call__(self, x):
+    def __call__(self,x):
         #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
         #if out is None:
         #    return self.c*( ( (self.A.direct(x)-self.b)**2).sum() )
@@ -71,8 +84,8 @@ class Norm2Sq(Function):
         # added for compatibility with SIRF
         return (y.norm()**2) * self.c
 
-    def gradient(self, x, out=None):
-        if out is not None:
+    def gradient(self, x, out = None):
+        if self.memopt:
             #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b )
             self.A.direct(x, out=self.range_tmp)
             self.range_tmp -= self.b
@@ -80,4 +93,4 @@ class Norm2Sq(Function):
             #self.direct_placehold.multiply(2.0*self.c, out=out)
             out *= (self.c * 2.0)
         else:
-            return (2.0*self.c)*self.A.adjoint(self.A.direct(x) - self.b)
+            return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index ea6181e..e680df3 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -37,12 +37,9 @@ setup(
                  'ccpi.processors',
                  'ccpi.contrib','ccpi.contrib.optimisation',
                  'ccpi.contrib.optimisation.algorithms'],
-      data_files = [('share/ccpi', ['data/boat.tiff',
-                                    'data/peppers.tiff',
-                                    'data/camera.png',
-                                    'data/resolution_chart.tiff',
-                                    'data/shapes.png',
-                                    'data/24737_fd_normalised.nxs'])],
+      data_files = [('share/ccpi', ['data/boat.tiff', 'data/peppers.tiff',
+                                    'data/camera.png',
+                                    'data/resolution_chart.tiff', 'data/shapes.png'])],
 
       # Project uses reStructuredText, so ensure that the docutils get
       # installed or upgraded on the target machine
```
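
A side note on the Norm2Sq hunk: both branches of `gradient` compute the same quantity, 2*c*A^T(Ax - b), the gradient of c*||Ax - b||^2. A small NumPy check of that formula against central finite differences (illustrative only; the matrix, vectors and seed are arbitrary choices, not ccpi objects):

```python
import numpy as np

# f(x) = c * ||A x - b||_2^2  ->  grad f(x) = 2 c * A^T (A x - b)
rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3))
b = rng.standard_normal(5)
x = rng.standard_normal(3)
c = 1.0

f = lambda v: c * np.sum((A @ v - b) ** 2)
grad = 2.0 * c * (A.T @ (A @ x - b))

# central finite differences along each coordinate direction
eps = 1e-6
fd = np.array([(f(x + eps * e) - f(x - eps * e)) / (2 * eps) for e in np.eye(3)])
assert np.allclose(grad, fd, atol=1e-5)
print(grad)
```
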