author     epapoutsellis <epapoutsellis@gmail.com>  2019-04-07 17:57:21 +0100
committer  epapoutsellis <epapoutsellis@gmail.com>  2019-04-07 17:57:21 +0100
commit     7114e1b3575e57d7f1f6b2a45edee473d01787a6 (patch)
tree       580bdd412e07c88b74c0ebf8543d1722999f6014 /Wrappers
parent     c5673f753915b88308a32ec8734380c7a5468bb6 (diff)
update L1Norm, add tests
Diffstat (limited to 'Wrappers')
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/L1Norm.py   | 229
-rw-r--r--  Wrappers/Python/ccpi/optimisation/functions/__init__.py |   5
-rw-r--r--  Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py    | 126
3 files changed, 308 insertions, 52 deletions
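
Note: this commit collapses the old SimpleL1Norm/L1Norm pair into a single L1Norm built on the shrinkage (soft-thresholding) operator, adds out= variants of the proximal maps, and brings in a PDHG salt & pepper denoising demo. For orientation, here is a minimal NumPy sketch of the soft-thresholding map prox_tau||.||_1 that the new proximal relies on; the function name is illustrative and not part of the ccpi API.

    import numpy as np

    def soft_threshold(x, tau):
        # prox of tau*||.||_1: shrink each entry towards zero by tau,
        # zeroing anything with magnitude below tau
        return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

    x = np.array([-2.0, -0.3, 0.0, 0.5, 3.0])
    print(soft_threshold(x, 1.0))  # -> [-1. -0.  0.  0.  2.]
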
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
index 5a47edd..163eefa 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
@@ -22,71 +22,202 @@ Created on Wed Mar 6 19:42:34 2019
@author: evangelos
"""
-import numpy as np
-#from ccpi.optimisation.funcs import Function
-from ccpi.optimisation.functions import Function
-from ccpi.framework import DataContainer, ImageData, ImageGeometry
+#import numpy as np
+##from ccpi.optimisation.funcs import Function
+#from ccpi.optimisation.functions import Function
+#from ccpi.framework import DataContainer, ImageData
+#
+#
+############################# L1NORM FUNCTIONS #############################
+#class SimpleL1Norm(Function):
+#
+#    def __init__(self, alpha=1):
+#
+#        super(SimpleL1Norm, self).__init__()
+#        self.alpha = alpha
+#
+#    def __call__(self, x):
+#        return self.alpha * x.abs().sum()
+#
+#    def gradient(self,x):
+#        return ValueError('Not Differentiable')
+#
+#    def convex_conjugate(self,x):
+#        return 0
+#
+#    def proximal(self, x, tau):
+#        ''' Soft Threshold'''
+#        return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
+#
+#    def proximal_conjugate(self, x, tau):
+#        return x.divide((x.abs()/self.alpha).maximum(1.0))
+
+#class L1Norm(SimpleL1Norm):
+#
+#    def __init__(self, alpha=1, **kwargs):
+#
+#        super(L1Norm, self).__init__()
+#        self.alpha = alpha
+#        self.b = kwargs.get('b',None)
+#
+#    def __call__(self, x):
+#
+#        if self.b is None:
+#            return SimpleL1Norm.__call__(self, x)
+#        else:
+#            return SimpleL1Norm.__call__(self, x - self.b)
+#
+#    def gradient(self, x):
+#        return ValueError('Not Differentiable')
+#
+#    def convex_conjugate(self,x):
+#        if self.b is None:
+#            return SimpleL1Norm.convex_conjugate(self, x)
+#        else:
+#            return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
+#
+#    def proximal(self, x, tau):
+#
+#        if self.b is None:
+#            return SimpleL1Norm.proximal(self, x, tau)
+#        else:
+#            return self.b + SimpleL1Norm.proximal(self, x - self.b , tau)
+#
+#    def proximal_conjugate(self, x, tau):
+#
+#        if self.b is None:
+#            return SimpleL1Norm.proximal_conjugate(self, x, tau)
+#        else:
+#            return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
+#
+###############################################################################
+from ccpi.optimisation.functions import Function
+from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
+from ccpi.optimisation.operators import ShrinkageOperator
+
-############################ L1NORM FUNCTIONS #############################
-class SimpleL1Norm(Function):
+class L1Norm(Function):
-    def __init__(self, alpha=1):
+    def __init__(self, **kwargs):
-        super(SimpleL1Norm, self).__init__()
-        self.alpha = alpha
+        super(L1Norm, self).__init__()
+        self.b = kwargs.get('b',None)
     def __call__(self, x):
-        return self.alpha * x.abs().sum()
+
+        y = x
+        if self.b is not None:
+            y = x - self.b
+        return y.abs().sum()
     def gradient(self,x):
-        return ValueError('Not Differentiable')
-
-    def convex_conjugate(self,x):
-        return 0
+        #TODO implement subgradient???
+        return ValueError('Not Differentiable')
-    def proximal(self, x, tau):
-        ''' Soft Threshold'''
-        return x.sign() * (x.abs() - tau * self.alpha).maximum(0)
-
-    def proximal_conjugate(self, x, tau):
-        return x.divide((x.abs()/self.alpha).maximum(1.0))
-
-class L1Norm(SimpleL1Norm):
+    def convex_conjugate(self,x):
+        #TODO implement Indicator infty???
+
+        y = 0
+        if self.b is not None:
+            y = 0 + (self.b * x).sum()
+        return y
-    def __init__(self, alpha=1, **kwargs):
+    def proximal(self, x, tau, out=None):
-        super(L1Norm, self).__init__()
-        self.alpha = alpha
-        self.b = kwargs.get('b',None)
+        # TODO implement shrinkage operator, we will need it later e.g SplitBregman
-    def __call__(self, x):
+        if out is None:
+            if self.b is not None:
+                return self.b + ShrinkageOperator.__call__(self, x - self.b, tau)
+            else:
+                return ShrinkageOperator.__call__(self, x, tau)
+        else:
+            if self.b is not None:
+                out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau))
+            else:
+                out.fill(ShrinkageOperator.__call__(self, x, tau))
+
+    def proximal_conjugate(self, x, tau, out=None):
-        if self.b is None:
-            return SimpleL1Norm.__call__(self, x)
+        if out is None:
+            if self.b is not None:
+                return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))
+            else:
+                return x.divide(x.abs().maximum(1.0))
         else:
-            return SimpleL1Norm.__call__(self, x - self.b)
-
-    def gradient(self, x):
-        return ValueError('Not Differentiable')
+            if self.b is not None:
+                out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)))
+            else:
+                out.fill(x.divide(x.abs().maximum(1.0)) )
-    def convex_conjugate(self,x):
-        if self.b is None:
-            return SimpleL1Norm.convex_conjugate(self, x)
-        else:
-            return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum()
+    def __rmul__(self, scalar):
+        return ScaledFunction(self, scalar)
+
+
+
+if __name__ == '__main__':
+
+    from ccpi.framework import ImageGeometry
+    import numpy
+    N, M = 40,40
+    ig = ImageGeometry(N, M)
+    scalar = 10
+    b = ig.allocate('random_int')
+    u = ig.allocate('random_int')
+
+    f = L1Norm()
+    f_scaled = scalar * L1Norm()
+
+    f_b = L1Norm(b=b)
+    f_scaled_b = scalar * L1Norm(b=b)
+
+    # call
-    def proximal(self, x, tau):
+    a1 = f(u)
+    a2 = f_scaled(u)
+    numpy.testing.assert_equal(scalar * a1, a2)
+
+    a3 = f_b(u)
+    a4 = f_scaled_b(u)
+    numpy.testing.assert_equal(scalar * a3, a4)
+
+    # proximal
+    tau = 0.4
+    b1 = f.proximal(u, tau*scalar)
+    b2 = f_scaled.proximal(u, tau)
-        if self.b is None:
-            return SimpleL1Norm.proximal(self, x, tau)
-        else:
-            return self.b + SimpleL1Norm.proximal(self, x - self.b , tau)
+    numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4)
+
+    b3 = f_b.proximal(u, tau*scalar)
+    b4 = f_scaled_b.proximal(u, tau)
+
+    z1 = b + (u-b).sign() * ((u-b).abs() - tau * scalar).maximum(0)
-    def proximal_conjugate(self, x, tau):
+    numpy.testing.assert_array_almost_equal(b3.as_array(), b4.as_array(), decimal=4)
+#
+#    #proximal conjugate
+#
+    c1 = f_scaled.proximal_conjugate(u, tau)
+    c2 = u.divide((u.abs()/scalar).maximum(1.0))
+
+    numpy.testing.assert_array_almost_equal(c1.as_array(), c2.as_array(), decimal=4)
+
+    c3 = f_scaled_b.proximal_conjugate(u, tau)
+    c4 = (u - tau*b).divide( ((u-tau*b).abs()/scalar).maximum(1.0) )
+
+    numpy.testing.assert_array_almost_equal(c3.as_array(), c4.as_array(), decimal=4)
+
+
+
+
+
+
+
+
-        if self.b is None:
-            return SimpleL1Norm.proximal_conjugate(self, x, tau)
-        else:
-            return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau)
\ No newline at end of file
+
+
+
+
+
\ No newline at end of file
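
Two things worth noting in the new class: gradient returns (rather than raises) a ValueError, exactly as committed, and proximal_conjugate implements the projection onto the unit infinity-norm ball, which is what Moreau's identity predicts from the soft-thresholding prox. A short NumPy check of that identity, independent of the ccpi classes (function names here are illustrative):

    import numpy as np

    def prox_l1(x, tau):
        # prox of tau*||.||_1: soft-thresholding
        return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

    def prox_l1_conjugate(x, sigma):
        # prox of the conjugate (indicator of the unit inf-norm ball):
        # projection onto {|y| <= 1}, independent of sigma
        return x / np.maximum(np.abs(x), 1.0)

    rng = np.random.default_rng(0)
    x = rng.standard_normal(5)
    tau = 0.4
    # Moreau decomposition: x = prox_{tau f}(x) + tau * prox_{f*/tau}(x/tau)
    lhs = prox_l1(x, tau) + tau * prox_l1_conjugate(x / tau, 1.0 / tau)
    np.testing.assert_allclose(lhs, x)
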
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
index 2ed36f5..9dbb505 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -2,11 +2,10 @@
from .Function import Function
from .ZeroFun import ZeroFun
-from .L1Norm import SimpleL1Norm, L1Norm
-#from .L2NormSquared import L2NormSq, SimpleL2NormSq
+from .L1Norm import L1Norm
from .L2NormSquared import L2NormSquared
-from .BlockFunction import BlockFunction
from .ScaledFunction import ScaledFunction
+from .BlockFunction import BlockFunction
from .FunctionOperatorComposition import FunctionOperatorComposition
from .MixedL21Norm import MixedL21Norm
from .IndicatorBox import IndicatorBox
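
Moving ScaledFunction ahead of BlockFunction matches the new L1Norm.__rmul__, which returns a ScaledFunction so that scalar * L1Norm() works. A minimal sketch of that wrapper pattern (illustrative only, not the ccpi implementation); the prox scaling rule it uses, prox_{tau*(c*f)} = prox_{(c*tau)*f}, is the one the new self-test exercises with f.proximal(u, tau*scalar) against f_scaled.proximal(u, tau):

    class ScaledFunction:
        # sketch of the scalar-times-function wrapper: delegate to the
        # wrapped function, folding the scalar into the step size
        def __init__(self, function, scalar):
            self.function = function
            self.scalar = scalar

        def __call__(self, x):
            return self.scalar * self.function(x)

        def proximal(self, x, tau):
            # prox of tau*(scalar*f) equals prox of f at step scalar*tau
            return self.function.proximal(x, self.scalar * tau)
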
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py
new file mode 100644
index 0000000..06df622
--- /dev/null
+++ b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 22 14:53:03 2019
+
+@author: evangelos
+"""
+
+from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG, PDHG_old
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFun, L1Norm, \
+                      MixedL21Norm, FunctionOperatorComposition, BlockFunction
+
+
+from skimage.util import random_noise
+
+
+
+# ############################################################################
+# Create phantom for TV denoising
+
+N = 200
+data = np.zeros((N,N))
+data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
+
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+ag = ig
+
+# Create noisy data. Add salt & pepper noise
+n1 = random_noise(data, mode = 's&p', seed=10)
+noisy_data = ImageData(n1)
+
+plt.imshow(noisy_data.as_array())
+plt.colorbar()
+plt.show()
+
+#%%
+
+# Regularisation Parameter
+alpha = 1000
+
+#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
+method = '1'
+if method == '0':
+
+    # Create operators
+    op1 = Gradient(ig)
+    op2 = Identity(ig, ag)
+
+    # Form Composite Operator
+    operator = BlockOperator(op1, op2, shape=(2,1) )
+
+    #### Create functions
+#    f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \
+#                                L2NormSq(0.5, b = noisy_data) )
+
+    f1 = alpha * MixedL21Norm()
+    f2 = L1Norm(b = noisy_data)
+
+    f = BlockFunction(f1, f2 )
+    g = ZeroFun()
+
+else:
+
+    ###########################################################################
+    #                              No Composite                               #
+    ###########################################################################
+    operator = Gradient(ig)
+    f = alpha * FunctionOperatorComposition(operator, MixedL21Norm())
+    g = 0.5 * L1Norm(b = noisy_data)
+    ###########################################################################
+#%%
+
+# Compute operator Norm
+normK = operator.norm()
+print ("normK", normK)
+# Primal & dual stepsizes
+sigma = 1
+tau = 1/(sigma*normK**2)
+
+opt = {'niter':2000}
+
+res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
+
+plt.figure(figsize=(5,5))
+plt.imshow(res.as_array())
+plt.colorbar()
+plt.show()
+
+#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
+#pdhg.max_iteration = 2000
+#pdhg.update_objective_interval = 10
+#
+#pdhg.run(2000)
+
+
+
+#sol = pdhg.get_output().as_array()
+##sol = result.as_array()
+##
+#fig = plt.figure()
+#plt.subplot(1,2,1)
+#plt.imshow(noisy_data.as_array())
+##plt.colorbar()
+#plt.subplot(1,2,2)
+#plt.imshow(sol)
+##plt.colorbar()
+#plt.show()
+##
+
+##
+#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth')
+#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
+#plt.legend()
+#plt.show()
+
+
+#%%
+#
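
The step sizes above follow the usual PDHG convergence condition sigma * tau * ||K||^2 <= 1, saturated here with sigma = 1 and tau = 1/(sigma * normK**2). The script gets the operator norm from operator.norm(); a self-contained sketch of the same estimate via power iteration, with a dense matrix standing in for the Gradient operator (all names illustrative):

    import numpy as np

    def operator_norm(K, iters=100, seed=0):
        # power iteration on K^T K: x converges to the top right singular
        # vector, and ||K^T K x|| to the largest singular value squared
        rng = np.random.default_rng(seed)
        x = rng.standard_normal(K.shape[1])
        for _ in range(iters):
            x = K.T @ (K @ x)
            x /= np.linalg.norm(x)
        return np.sqrt(np.linalg.norm(K.T @ (K @ x)))

    K = np.random.default_rng(1).standard_normal((20, 10))
    normK = operator_norm(K)
    sigma = 1.0
    tau = 1.0 / (sigma * normK**2)       # saturates sigma*tau*||K||^2 <= 1
    print(normK, np.linalg.norm(K, 2))   # estimate vs exact spectral norm
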