Commit
Showing 26 changed files with 1,565 additions and 0 deletions.
120 changes: 120 additions & 0 deletions
TrainingCodes/DnCNN_TrainingCodes_DagNN_v1.1/Demo_Test_DnCNN_DAG.m
@@ -0,0 +1,120 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% @article{zhang2017beyond,
%   title={Beyond a {Gaussian} denoiser: Residual learning of deep {CNN} for image denoising},
%   author={Zhang, Kai and Zuo, Wangmeng and Chen, Yunjin and Meng, Deyu and Zhang, Lei},
%   journal={IEEE Transactions on Image Processing},
%   year={2017},
%   volume={26},
%   number={7},
%   pages={3142-3155},
% }
%
% by Kai Zhang (1/2018)
% [email protected]
% https://github.com/cszn
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% clear; clc;

%% testing set
addpath(fullfile('utilities'));

folderModel = 'model';
folderTest  = 'testsets';
folderResult= 'results';
imageSets   = {'BSD68','Set12'}; % testing datasets
setTestCur  = imageSets{2};      % current testing dataset

showresult  = 1; % 1 to display and save the denoised results
gpu         = 1; % 1 to run on GPU

noiseSigma  = 25; % noise level

% load model
epoch     = 50;
modelName = 'DnCNN';

% case one: for the model in 'data/model'
% load(fullfile('data',folderModel,[modelName,'-epoch-',num2str(epoch),'.mat']));

% case two: for the model in 'utilities'
load(fullfile('utilities',[modelName,'-epoch-',num2str(epoch),'.mat']));

net = dagnn.DagNN.loadobj(net);

net.removeLayer('loss');
out1 = net.getVarIndex('prediction');
net.vars(out1).precious = 1;

net.mode = 'test';

if gpu
    net.move('gpu');
end

% read images
ext       = {'*.jpg','*.png','*.bmp'};
filePaths = [];
for i = 1 : length(ext)
    filePaths = cat(1,filePaths, dir(fullfile(folderTest,setTestCur,ext{i})));
end

folderResultCur = fullfile(folderResult, [setTestCur,'_',int2str(noiseSigma)]);
if ~isdir(folderResultCur) % isfolder in newer MATLAB releases
    mkdir(folderResultCur)
end

% PSNR and SSIM
PSNRs = zeros(1,length(filePaths));
SSIMs = zeros(1,length(filePaths));

for i = 1 : length(filePaths)

    % read image
    label = imread(fullfile(folderTest,setTestCur,filePaths(i).name));
    [~,nameCur,extCur] = fileparts(filePaths(i).name);
    [~,~,c] = size(label);
    if c == 3
        label = rgb2gray(label);
    end

    % add additive Gaussian noise
    randn('seed',0); % legacy seeding syntax; rng(0) in newer MATLAB
    noise = noiseSigma/255.*randn(size(label));
    input = im2single(label) + single(noise);

    if gpu
        input = gpuArray(input);
    end
    net.eval({'input', input});
    % output (single); gather from GPU and drop singleton dimensions
    output = squeeze(gather(net.vars(out1).value));

    % calculate PSNR and SSIM
    [PSNRCur, SSIMCur] = Cal_PSNRSSIM(label,im2uint8(output),0,0);
    if showresult
        imshow(cat(2,im2uint8(input),im2uint8(label),im2uint8(output)));
        title([filePaths(i).name,' ',num2str(PSNRCur,'%2.2f'),'dB',' ',num2str(SSIMCur,'%2.4f')])
        imwrite(im2uint8(output), fullfile(folderResultCur, [nameCur, '_', int2str(noiseSigma),'_PSNR_',num2str(PSNRCur*100,'%4.0f'), extCur]));
        drawnow;
        % pause()
    end
    PSNRs(i) = PSNRCur;
    SSIMs(i) = SSIMCur;
end

disp([mean(PSNRs),mean(SSIMs)]);
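A quick sanity check on the metrics, sketched here under the assumption that the Image Processing Toolbox is on the path: its built-in psnr/ssim functions should closely agree with Cal_PSNRSSIM on any processed image.

% Sketch: cross-check Cal_PSNRSSIM against the toolbox built-ins using the
% last 'label'/'output' pair left in the workspace by the loop above.
den = im2uint8(output); % denoised result as uint8
fprintf('built-in PSNR: %.2f dB, SSIM: %.4f\n', psnr(den, label), ssim(den, label));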
61 changes: 61 additions & 0 deletions
TrainingCodes/DnCNN_TrainingCodes_DagNN_v1.1/Demo_Train_DnCNN_DAG.m
@@ -0,0 +1,61 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% @article{zhang2017beyond,
%   title={Beyond a {Gaussian} denoiser: Residual learning of deep {CNN} for image denoising},
%   author={Zhang, Kai and Zuo, Wangmeng and Chen, Yunjin and Meng, Deyu and Zhang, Lei},
%   journal={IEEE Transactions on Image Processing},
%   year={2017},
%   volume={26},
%   number={7},
%   pages={3142-3155},
% }
%
% by Kai Zhang (1/2018)
% [email protected]
% https://github.com/cszn
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% The training data is generated by '[imdb] = generatepatches;' in line 126 of 'DnCNN_train_dag.m'.

rng('default')

addpath('utilities');
%-------------------------------------------------------------------------
% Configuration
%-------------------------------------------------------------------------
opts.modelName     = 'DnCNN'; % model name
opts.learningRate  = [logspace(-3,-3,22) logspace(-4,-4,105)]; % adjust as needed
opts.batchSize     = 128;
opts.gpus          = [1];     % GPU index
opts.numSubBatches = 2;

% solver
opts.solver        = 'Adam';  % global solver (per-parameter trainMethod is set in DnCNN_Init)
opts.derOutputs    = {'objective',1};

opts.backPropDepth = Inf;
%-------------------------------------------------------------------------
% Initialize model
%-------------------------------------------------------------------------

net = feval([opts.modelName,'_Init']);

%-------------------------------------------------------------------------
% Train
%-------------------------------------------------------------------------

[net, info] = DnCNN_train_dag(net, ...
    'learningRate',opts.learningRate, ...
    'derOutputs',opts.derOutputs, ...
    'numSubBatches',opts.numSubBatches, ...
    'backPropDepth',opts.backPropDepth, ...
    'solver',opts.solver, ...
    'batchSize', opts.batchSize, ...
    'modelname', opts.modelName, ...
    'gpus',opts.gpus);
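Note that logspace(-3,-3,22) is simply 22 copies of 1e-3, so the schedule above is piecewise constant: 22 epochs at 1e-3 followed by 105 epochs at 1e-4, 127 epochs in total. An equivalent way to write it, shown only as a readability sketch:

% Same 127-epoch schedule without logspace
opts.learningRate = [1e-3*ones(1,22), 1e-4*ones(1,105)];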
213 changes: 213 additions & 0 deletions
TrainingCodes/DnCNN_TrainingCodes_DagNN_v1.1/DnCNN_Init.m
@@ -0,0 +1,213 @@
function net = DnCNN_Init()

% by Kai Zhang (1/2018)
% [email protected]
% https://github.com/cszn

% Create DAGNN object
net = dagnn.DagNN();

% conv + relu
blockNum = 1;
inVar    = 'input';
channel  = 1; % grayscale image
dims     = [3,3,channel,64];
pad      = [1,1];
stride   = [1,1];
lr       = [1,1];
[net, inVar, blockNum] = addConv(net, blockNum, inVar, dims, pad, stride, lr);
[net, inVar, blockNum] = addReLU(net, blockNum, inVar);

% 15 middle blocks of conv + bn + relu
for i = 1:15
    dims   = [3,3,64,64];
    pad    = [1,1];
    stride = [1,1];
    lr     = [1,0];
    [net, inVar, blockNum] = addConv(net, blockNum, inVar, dims, pad, stride, lr);
    n_ch   = dims(4);
    [net, inVar, blockNum] = addBnorm(net, blockNum, inVar, n_ch);
    [net, inVar, blockNum] = addReLU(net, blockNum, inVar);
end

% conv
dims   = [3,3,64,channel];
pad    = [1,1];
stride = [1,1];
lr     = [1,0]; % or [1,1]; it does not influence the results
[net, inVar, blockNum] = addConv(net, blockNum, inVar, dims, pad, stride, lr);

% sum: global skip connection adding the input back (residual learning)
inVar = {inVar,'input'};
[net, inVar, blockNum] = addSum(net, blockNum, inVar);

outputName = 'prediction';
net.renameVar(inVar,outputName)

% loss
net.addLayer('loss', dagnn.Loss('loss','L2'), {'prediction','label'}, {'objective'},{});
net.vars(net.getVarIndex('prediction')).precious = 1;

end

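The stack above contains 17 convolutional layers of 3x3 kernels at stride 1 (first conv+relu, fifteen conv+bnorm+relu blocks, and a final conv). Each such layer widens the receptive field by 2 pixels, which is easy to verify:

% Receptive field of d stacked 3x3, stride-1 conv layers: rf = 2*d + 1
d  = 17;
rf = 2*d + 1;   % = 35, i.e. each output pixel sees a 35x35 input patch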
% Add a Concat layer (not called by DnCNN_Init; kept as a building block)
function [net, inVar, blockNum] = addConcat(net, blockNum, inVar)

outVar   = sprintf('concat%d', blockNum);
layerCur = sprintf('concat%d', blockNum);

block = dagnn.Concat('dim',3);
net.addLayer(layerCur, block, inVar, {outVar},{});

inVar = outVar;
blockNum = blockNum + 1;
end

% Add a loss layer (not called here; the loss is added inline above)
function [net, inVar, blockNum] = addLoss(net, blockNum, inVar)

outVar   = 'objective';
layerCur = sprintf('loss%d', blockNum);

block = dagnn.Loss('loss','L2');
net.addLayer(layerCur, block, inVar, {outVar},{});

inVar = outVar;
blockNum = blockNum + 1;
end

% Add a sum layer
function [net, inVar, blockNum] = addSum(net, blockNum, inVar)

outVar   = sprintf('sum%d', blockNum);
layerCur = sprintf('sum%d', blockNum);

block = dagnn.Sum();
net.addLayer(layerCur, block, inVar, {outVar},{});

inVar = outVar;
blockNum = blockNum + 1;
end

% Add a ReLU layer
function [net, inVar, blockNum] = addReLU(net, blockNum, inVar)

outVar   = sprintf('relu%d', blockNum);
layerCur = sprintf('relu%d', blockNum);

block = dagnn.ReLU('leak',0);
net.addLayer(layerCur, block, {inVar}, {outVar},{});

inVar = outVar;
blockNum = blockNum + 1;
end

% Add a bnorm layer
function [net, inVar, blockNum] = addBnorm(net, blockNum, inVar, n_ch)

trainMethod = 'adam';
outVar   = sprintf('bnorm%d', blockNum);
layerCur = sprintf('bnorm%d', blockNum);

params = {[layerCur '_g'], [layerCur '_b'], [layerCur '_m']}; % gain, bias, moments
net.addLayer(layerCur, dagnn.BatchNorm('numChannels', n_ch), {inVar}, {outVar}, params);

pidx  = net.getParamIndex(params);
b_min = 0.025;
% gain: random init, clipped away from zero (see clipping below)
net.params(pidx(1)).value        = clipping(sqrt(2/(9*n_ch))*randn(n_ch,1,'single'),b_min);
net.params(pidx(1)).learningRate = 1;
net.params(pidx(1)).weightDecay  = 0;
net.params(pidx(1)).trainMethod  = trainMethod;

% bias: zero init
net.params(pidx(2)).value        = zeros(n_ch, 1, 'single');
net.params(pidx(2)).learningRate = 1;
net.params(pidx(2)).weightDecay  = 0;
net.params(pidx(2)).trainMethod  = trainMethod;

% per-channel moments, updated by a moving average rather than the solver
net.params(pidx(3)).value        = [zeros(n_ch,1,'single'), 0.01*ones(n_ch,1,'single')];
net.params(pidx(3)).learningRate = 1;
net.params(pidx(3)).weightDecay  = 0;
net.params(pidx(3)).trainMethod  = 'average';

inVar = outVar;
blockNum = blockNum + 1;
end

% Add a ConvTranspose layer (not called by DnCNN_Init; kept as a building block)
function [net, inVar, blockNum] = addConvt(net, blockNum, inVar, dims, crop, upsample, lr)
opts.cudnnWorkspaceLimit = 1024*1024*1024*2; % 2GB
convOpts = {'CudnnWorkspaceLimit', opts.cudnnWorkspaceLimit};
trainMethod = 'adam';

outVar   = sprintf('convt%d', blockNum);
layerCur = sprintf('convt%d', blockNum);

convBlock = dagnn.ConvTranspose('size', dims, 'crop', crop, 'upsample', upsample, ...
    'hasBias', true, 'opts', convOpts);

net.addLayer(layerCur, convBlock, {inVar}, {outVar}, {[layerCur '_f'], [layerCur '_b']});

f  = net.getParamIndex([layerCur '_f']);
sc = sqrt(2/(dims(1)*dims(2)*dims(4))); % improved Xavier (He) initialization
net.params(f).value        = sc*randn(dims, 'single');
net.params(f).learningRate = lr(1);
net.params(f).weightDecay  = 1;
net.params(f).trainMethod  = trainMethod;

f = net.getParamIndex([layerCur '_b']);
net.params(f).value        = zeros(dims(3), 1, 'single');
net.params(f).learningRate = lr(2);
net.params(f).weightDecay  = 1;
net.params(f).trainMethod  = trainMethod;

inVar = outVar;
blockNum = blockNum + 1;
end

% Add a Conv layer
function [net, inVar, blockNum] = addConv(net, blockNum, inVar, dims, pad, stride, lr)
opts.cudnnWorkspaceLimit = 1024*1024*1024*2; % 2GB
convOpts = {'CudnnWorkspaceLimit', opts.cudnnWorkspaceLimit};
trainMethod = 'adam';

outVar   = sprintf('conv%d', blockNum);
layerCur = sprintf('conv%d', blockNum);

convBlock = dagnn.Conv('size', dims, 'pad', pad, 'stride', stride, ...
    'hasBias', true, 'opts', convOpts);

net.addLayer(layerCur, convBlock, {inVar}, {outVar}, {[layerCur '_f'], [layerCur '_b']});

f  = net.getParamIndex([layerCur '_f']);
sc = sqrt(2/(dims(1)*dims(2)*max(dims(3), dims(4)))); % improved Xavier (He) initialization
net.params(f).value        = sc*randn(dims, 'single');
net.params(f).learningRate = lr(1);
net.params(f).weightDecay  = 1;
net.params(f).trainMethod  = trainMethod;

f = net.getParamIndex([layerCur '_b']);
net.params(f).value        = zeros(dims(4), 1, 'single');
net.params(f).learningRate = lr(2);
net.params(f).weightDecay  = 1;
net.params(f).trainMethod  = trainMethod;

inVar = outVar;
blockNum = blockNum + 1;
end

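Because every helper threads (net, inVar, blockNum) through and returns the updated triple, blocks chain without manual variable naming. A hypothetical sketch of the pattern (these are local functions, so it only runs inside this file or after moving the helpers into their own files):

% Hypothetical: a tiny conv-relu-conv net built with the same helpers
tiny = dagnn.DagNN();
blockNum = 1; inVar = 'input';
[tiny, inVar, blockNum] = addConv(tiny, blockNum, inVar, [3,3,1,8], [1,1], [1,1], [1,1]);
[tiny, inVar, blockNum] = addReLU(tiny, blockNum, inVar);
[tiny, inVar, blockNum] = addConv(tiny, blockNum, inVar, [3,3,8,1], [1,1], [1,1], [1,0]);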
% Clip values in (-b, b) outward to +/-b, so magnitudes stay at least b
function A = clipping(A,b)
A(A>=0&A<b) = b;
A(A<0&A>-b) = -b;
end
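clipping pushes entries with magnitude below b outward to exactly +/-b and leaves everything else alone; addBnorm uses it to keep every batch-norm gain at least b_min = 0.025 in magnitude. For example:

% clipping(single([-0.01, 0.01, 0.5]), 0.025) returns [-0.025, 0.025, 0.5]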