| Mode       | File                                      | Lines changed |
|------------|-------------------------------------------|---------------|
| -rw-r--r-- | Core/regularisers_CPU/LLT_ROF_core.c      | 6             |
| -rw-r--r-- | Core/regularisers_CPU/ROF_TV_core.c       | 14            |
| -rw-r--r-- | Core/regularisers_GPU/LLT_ROF_GPU_core.cu | 6             |
| -rwxr-xr-x | Core/regularisers_GPU/TV_ROF_GPU_core.cu  | 28            |
| -rw-r--r-- | Readme.md                                 | 5             |

5 files changed, 30 insertions, 29 deletions
diff --git a/Core/regularisers_CPU/LLT_ROF_core.c b/Core/regularisers_CPU/LLT_ROF_core.c
index 6dcf018..1584a29 100644
--- a/Core/regularisers_CPU/LLT_ROF_core.c
+++ b/Core/regularisers_CPU/LLT_ROF_core.c
@@ -19,7 +19,7 @@ limitations under the License.
 
 #include "LLT_ROF_core.h"
 
 #define EPS_LLT 0.01
-#define EPS_ROF 1.0e-5
+#define EPS_ROF 1.0e-12
 
 #define MAX(x, y) (((x) > (y)) ? (x) : (y))
 #define MIN(x, y) (((x) < (y)) ? (x) : (y))
@@ -363,7 +363,7 @@ float Update2D_LLT_ROF(float *U0, float *U, float *D1_LLT, float *D2_LLT, float
 			div = dv1 + dv2; /*build Divirgent*/
 
 			/*combine all into one cost function to minimise */
-            U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
+            U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
 		}
 	}
 	return *U;
@@ -400,7 +400,7 @@ float Update3D_LLT_ROF(float *U0, float *U, float *D1_LLT, float *D2_LLT, float
 				div = dv1 + dv2 + dv3; /*build Divirgent*/
 
 				/*combine all into one cost function to minimise */
-				U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
+				U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
 			}
 		}
 	}
diff --git a/Core/regularisers_CPU/ROF_TV_core.c b/Core/regularisers_CPU/ROF_TV_core.c
index e89774f..fb3bc7c 100644
--- a/Core/regularisers_CPU/ROF_TV_core.c
+++ b/Core/regularisers_CPU/ROF_TV_core.c
@@ -19,7 +19,7 @@
 
 #include "ROF_TV_core.h"
 
-#define EPS 1.0e-5
+#define EPS 1.0e-12
 
 #define MAX(x, y) (((x) > (y)) ? (x) : (y))
 #define MIN(x, y) (((x) < (y)) ? (x) : (y))
@@ -148,7 +148,7 @@ float D2_func(float *A, float *D2, int dimX, int dimY, int dimZ)
         for(j=0; j<dimY; j++) {
             for(i=0; i<dimX; i++) {
                 for(k=0; k<dimZ; k++) {
-					index = (dimX*dimY)*k + j*dimX+i;
+                    index = (dimX*dimY)*k + j*dimX+i;
                     /* symmetric boundary conditions (Neuman) */
                     i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                     i2 = i - 1; if (i2 < 0) i2 = i+1;
@@ -179,7 +179,7 @@ float D2_func(float *A, float *D2, int dimX, int dimY, int dimZ)
 #pragma omp parallel for shared (A, D2, dimX, dimY) private(i, j, i1, j1, i2, j2, NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2,index)
         for(j=0; j<dimY; j++) {
             for(i=0; i<dimX; i++) {
-				index = j*dimX+i;
+		index = j*dimX+i;
                 /* symmetric boundary conditions (Neuman) */
                 i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                 i2 = i - 1; if (i2 < 0) i2 = i+1;
@@ -265,8 +265,8 @@ float TV_kernel(float *D1, float *D2, float *D3, float *B, float *A, float lambd
                     dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2];
                     dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i];
-                    B[index] = B[index] + tau*(lambda*(dv1 + dv2 + dv3) - (B[index] - A[index]));
-                }}}
+                    B[index] += tau*(2.0f*lambda*(dv1 + dv2 + dv3) - (B[index] - A[index]));
+                }}}
     }
     else {
 #pragma omp parallel for shared (D1, D2, B, dimX, dimY) private(index, i, j, i1, j1, i2, j2,dv1,dv2)
@@ -281,9 +281,9 @@ float TV_kernel(float *D1, float *D2, float *D3, float *B, float *A, float lambd
                 /* divergence components  */
                 dv1 = D1[index] - D1[j2*dimX + i];
-                dv2 = D2[index] - D2[j*dimX + i2];
+                dv2 = D2[index] - D2[j*dimX + i2];
-                B[index] =  B[index] + tau*(lambda*(dv1 + dv2) - (B[index] - A[index]));
+                B[index] += tau*(2.0f*lambda*(dv1 + dv2) - (B[index] - A[index]));
             }}
     }
     return *B;
diff --git a/Core/regularisers_GPU/LLT_ROF_GPU_core.cu b/Core/regularisers_GPU/LLT_ROF_GPU_core.cu
index 70c9295..0228bf0 100644
--- a/Core/regularisers_GPU/LLT_ROF_GPU_core.cu
+++ b/Core/regularisers_GPU/LLT_ROF_GPU_core.cu
@@ -61,7 +61,7 @@ limitations under the License.
 
 #define EPS_LLT 0.01
-#define EPS_ROF 1.0e-5
+#define EPS_ROF 1.0e-12
 
 #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
@@ -357,7 +357,7 @@ __global__ void Update2D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, floa
 			div = dv1 + dv2; /*build Divirgent*/
 
 			/*combine all into one cost function to minimise */
-            U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
+            U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
 		}
 }
@@ -395,7 +395,7 @@ __global__ void Update3D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, floa
 			div = dv1 + dv2 + dv3; /*build Divirgent*/
 
 			/*combine all into one cost function to minimise */
-            U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
+            U[index] += tau*(2.0f*lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
         }
 }
diff --git a/Core/regularisers_GPU/TV_ROF_GPU_core.cu b/Core/regularisers_GPU/TV_ROF_GPU_core.cu
index 67cdd5c..57de63d 100755
--- a/Core/regularisers_GPU/TV_ROF_GPU_core.cu
+++ b/Core/regularisers_GPU/TV_ROF_GPU_core.cu
@@ -54,7 +54,7 @@ limitations under the License.
 
 #define BLKXSIZE2D 16
 #define BLKYSIZE2D 16
-#define EPS 1.0e-5
+#define EPS 1.0e-12
 
 #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
@@ -91,10 +91,10 @@ __host__ __device__ int sign (float x)
                 NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */
 
                 denom1 = NOMx_1*NOMx_1;
-                denom2 = 0.5f*(sign((float)NOMy_1) + sign((float)NOMy_0))*(MIN(abs((float)NOMy_1),abs((float)NOMy_0)));
+                denom2 = 0.5f*(sign((float)NOMy_1) + sign((float)NOMy_0))*(MIN(abs((float)NOMy_1), abs((float)NOMy_0)));
                 denom2 = denom2*denom2;
                 T1 = sqrt(denom1 + denom2 + EPS);
-                D1[index] = NOMx_1/T1;
+                D1[index] = NOMx_1/T1;
 		}
 	}
@@ -106,7 +106,7 @@ __host__ __device__ int sign (float x)
 		int i = blockDim.x * blockIdx.x + threadIdx.x;
         int j = blockDim.y * blockIdx.y + threadIdx.y;
 
-        int index = i + N*j;
+        int index = i + N*j;
 
         if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) {
@@ -121,10 +121,10 @@ __host__ __device__ int sign (float x)
                 NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */
 
                 denom1 = NOMy_1*NOMy_1;
-                denom2 = 0.5f*(sign((float)NOMx_1) + sign((float)NOMx_0))*(MIN(abs((float)NOMx_1),abs((float)NOMx_0)));
+                denom2 = 0.5f*(sign((float)NOMx_1) + sign((float)NOMx_0))*(MIN(abs((float)NOMx_1), abs((float)NOMx_0)));
                 denom2 = denom2*denom2;
                 T2 = sqrt(denom1 + denom2 + EPS);
-                D2[index] = NOMy_1/T2;
+                D2[index] = NOMy_1/T2;
 		}
 	}
@@ -139,15 +139,15 @@ __host__ __device__ int sign (float x)
         if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) {
-				/* boundary conditions (Neumann reflections) */
-                i2 = i - 1; if (i2 < 0) i2 = i+1;
+				/* boundary conditions (Neumann reflections) */
+                i2 = i - 1; if (i2 < 0) i2 = i+1;
                 j2 = j - 1; if (j2 < 0) j2 = j+1;
 
 				/* divergence components  */
                 dv1 = D1[index] - D1[j2*N + i];
-                dv2 = D2[index] - D2[j*N + i2];
+                dv2 = D2[index] - D2[j*N + i2];
-                Update[index] =  Update[index] + tau*(lambda*(dv1 + dv2) - (Update[index] - Input[index]));
+                Update[index] += tau*(2.0f*lambda*(dv1 + dv2) - (Update[index] - Input[index]));
 		}
 	}
@@ -268,7 +268,7 @@ __host__ __device__ int sign (float x)
                 denom3 = 0.5*(sign(NOMy_1) + sign(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0)));
                 denom3 = denom3*denom3;
                 T3 = sqrt(denom1 + denom2 + denom3 + EPS);
-                D3[index] = NOMz_1/T3;
+                D3[index] = NOMz_1/T3;
 		}
 	}
@@ -297,7 +297,7 @@ __host__ __device__ int sign (float x)
                     dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2];
                     dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i];
-                    Update[index] = Update[index] + tau*(lambda*(dv1 + dv2 + dv3) - (Update[index] - Input[index]));
+                    Update[index] += tau*(2.0f*lambda*(dv1 + dv2 + dv3) - (Update[index] - Input[index]));
 		}
 	}
@@ -321,7 +321,7 @@ extern "C" void TV_ROF_GPU_main(float* Input, float* Output, float lambdaPar, in
         CHECK(cudaMemcpy(d_update,Input,N*M*Z*sizeof(float),cudaMemcpyHostToDevice));
 
       if (Z > 1) {
-			// TV - 3D case
+			// TV - 3D case
             dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
             dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKXSIZE));
@@ -341,7 +341,7 @@ extern "C" void TV_ROF_GPU_main(float* Input, float* Output, float lambdaPar, in
                 CHECK(cudaDeviceSynchronize());
             }
-            CHECK(cudaFree(d_D3));
+            CHECK(cudaFree(d_D3));
         }
         else {
 	    // TV - 2D case
diff --git a/Readme.md b/Readme.md
--- a/Readme.md
+++ b/Readme.md
@@ -89,8 +89,9 @@ conda install ccpi-regulariser -c ccpi -c conda-forge
 
 ### Applications:
 
-* [Regularised FISTA-type iterative reconstruction algorithm for X-ray tomographic reconstruction with highly inaccurate measurements (MATLAB code)](https://github.com/dkazanc/FISTA-tomo)
-* [Joint image reconstruction method with correlative multi-channel prior for X-ray spectral computed tomography](https://github.com/dkazanc/multi-channel-X-ray-CT)
+* [Regularised FISTA iterative reconstruction algorithm for X-ray tomographic reconstruction with highly inaccurate measurements (MATLAB code)](https://github.com/dkazanc/FISTA-tomo)
+* [Regularised ADMM iterative reconstruction algorithm for X-ray tomographic reconstruction (MATLAB code)](https://github.com/dkazanc/ADMM-tomo)
+* [Joint image reconstruction method with correlative multi-channel prior for X-ray spectral computed tomography (MATLAB code)](https://github.com/dkazanc/multi-channel-X-ray-CT)
 
 ### License:
 [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
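
For context, the lines changed above are the explicit (gradient-descent) update of the ROF model, u <- u + tau*(2*lambda*div(grad(u)/|grad(u)|_eps) - (u - u0)); multiplying the divergence term by 2.0f is equivalent to doubling the regularisation weight in the old update, and the smaller EPS (now 1.0e-12) acts essentially as a guard against division by zero in the gradient normalisation. The sketch below is illustrative only and not repository code: the function name rof_explicit_step_2d is made up, and it uses a plain isotropic gradient magnitude instead of the minmod-based denominators in D1_func/D2_func, but the update line mirrors the one introduced in this diff.

/* Illustrative sketch (not part of the repository): one explicit ROF iteration in 2D.
   u  - current estimate, updated in place; u0 - noisy input; both of size dimX*dimY.
   Implements u <- u + tau*(2*lambda*div(grad(u)/|grad(u)|_eps) - (u - u0)). */

#include <math.h>
#include <stdlib.h>

#define EPS 1.0e-12f  /* tiny guard against division by zero, as in the updated sources */

static void rof_explicit_step_2d(float *u, const float *u0,
                                 float lambda, float tau, int dimX, int dimY)
{
    /* p1, p2 hold the components of grad(u)/|grad(u)|_eps (error handling omitted) */
    float *p1 = calloc((size_t)dimX * dimY, sizeof(float));
    float *p2 = calloc((size_t)dimX * dimY, sizeof(float));
    int i, j;

    for (j = 0; j < dimY; j++) {
        for (i = 0; i < dimX; i++) {
            int i1 = (i + 1 < dimX) ? i + 1 : i - 1;  /* Neumann (reflective) boundaries */
            int j1 = (j + 1 < dimY) ? j + 1 : j - 1;
            int index = j * dimX + i;
            float ux = u[j * dimX + i1] - u[index];   /* forward differences */
            float uy = u[j1 * dimX + i] - u[index];
            float T = sqrtf(ux * ux + uy * uy + EPS); /* smoothed gradient magnitude */
            p1[index] = ux / T;
            p2[index] = uy / T;
        }
    }
    for (j = 0; j < dimY; j++) {
        for (i = 0; i < dimX; i++) {
            int i2 = (i > 0) ? i - 1 : i + 1;
            int j2 = (j > 0) ? j - 1 : j + 1;
            int index = j * dimX + i;
            /* backward divergence, then the update with the 2.0f factor from this diff */
            float div = (p1[index] - p1[j * dimX + i2]) + (p2[index] - p2[j2 * dimX + i]);
            u[index] += tau * (2.0f * lambda * div - (u[index] - u0[index]));
        }
    }
    free(p1);
    free(p2);
}

Looping this step with a small time step tau mirrors what TV_kernel does on the CPU and what the kernels launched from TV_ROF_GPU_main do on the GPU in the files touched above.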
