Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

pyTorch upgrade 0.3.1 -> 1.1.0 (incl. trained weights) #21

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 40 additions & 40 deletions ChexnetTrainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def train (pathDirData, pathFileTrain, pathFileVal, nnArchitecture, nnIsTrained,
scheduler = ReduceLROnPlateau(optimizer, factor = 0.1, patience = 5, mode = 'min')

#-------------------- SETTINGS: LOSS
loss = torch.nn.BCELoss(size_average = True)
loss = torch.nn.BCELoss(reduction='mean')

#---- Load checkpoint
if checkpoint != None:
Expand All @@ -90,15 +90,14 @@ def train (pathDirData, pathFileTrain, pathFileVal, nnArchitecture, nnIsTrained,
timestampTime = time.strftime("%H%M%S")
timestampDate = time.strftime("%d%m%Y")
timestampSTART = timestampDate + '-' + timestampTime

ChexnetTrainer.epochTrain (model, dataLoaderTrain, optimizer, scheduler, trMaxEpoch, nnClassCount, loss)
lossVal, losstensor = ChexnetTrainer.epochVal (model, dataLoaderVal, optimizer, scheduler, trMaxEpoch, nnClassCount, loss)

timestampTime = time.strftime("%H%M%S")
timestampDate = time.strftime("%d%m%Y")
timestampEND = timestampDate + '-' + timestampTime

scheduler.step(losstensor.data[0])
scheduler.step(losstensor.item())

if lossVal < lossMIN:
lossMIN = lossVal
Expand Down Expand Up @@ -130,28 +129,27 @@ def epochTrain (model, dataLoader, optimizer, scheduler, epochMax, classCount, l
#--------------------------------------------------------------------------------

def epochVal (model, dataLoader, optimizer, scheduler, epochMax, classCount, loss):

model.eval ()

lossVal = 0
lossValNorm = 0

losstensorMean = 0

for i, (input, target) in enumerate (dataLoader):

target = target.cuda(async=True)

varInput = torch.autograd.Variable(input, volatile=True)
varTarget = torch.autograd.Variable(target, volatile=True)
varOutput = model(varInput)

losstensor = loss(varOutput, varTarget)
losstensorMean += losstensor

lossVal += losstensor.data[0]
lossValNorm += 1

with torch.no_grad():
for i, (input, target) in enumerate (dataLoader):
target = target.cuda(async=True)
varInput = torch.autograd.Variable(input)
varTarget = torch.autograd.Variable(target)
varOutput = model(varInput)
losstensor = loss(varOutput, varTarget)
losstensorMean += losstensor
lossVal += losstensor.item()
lossValNorm += 1
outLoss = lossVal / lossValNorm
losstensorMean = losstensorMean / lossValNorm

Expand Down Expand Up @@ -220,6 +218,8 @@ def test (pathDirData, pathFileTest, pathModel, nnArchitecture, nnClassCount, nn
transformList.append(transforms.TenCrop(transCrop))
transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
transformList.append(transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])))


transformSequence=transforms.Compose(transformList)

datasetTest = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileTest, transform=transformSequence)
Expand All @@ -229,29 +229,29 @@ def test (pathDirData, pathFileTest, pathModel, nnArchitecture, nnClassCount, nn
outPRED = torch.FloatTensor().cuda()

model.eval()

for i, (input, target) in enumerate(dataLoaderTest):

target = target.cuda()
outGT = torch.cat((outGT, target), 0)

bs, n_crops, c, h, w = input.size()
with torch.no_grad():
for i, (input, target) in enumerate(dataLoaderTest):

target = target.cuda()
outGT = torch.cat((outGT, target), 0)

bs, n_crops, c, h, w = input.size()

varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda())

out = model(varInput)
outMean = out.view(bs, n_crops, -1).mean(1)

outPRED = torch.cat((outPRED, outMean.data), 0)

aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
aurocMean = np.array(aurocIndividual).mean()

varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda(), volatile=True)
print ('AUROC mean ', aurocMean)

out = model(varInput)
outMean = out.view(bs, n_crops, -1).mean(1)
for i in range (0, len(aurocIndividual)):
print (CLASS_NAMES[i], ' ', aurocIndividual[i])

outPRED = torch.cat((outPRED, outMean.data), 0)

aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
aurocMean = np.array(aurocIndividual).mean()

print ('AUROC mean ', aurocMean)

for i in range (0, len(aurocIndividual)):
print (CLASS_NAMES[i], ' ', aurocIndividual[i])


return
#--------------------------------------------------------------------------------
Expand Down
2 changes: 1 addition & 1 deletion HeatmapGenerator.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def generate (self, pathImageFile, pathOutputFile, transCrop):

pathInputImage = 'test/00009285_000.png'
pathOutputImage = 'test/heatmap.png'
pathModel = 'models/m-25012018-123527.pth.tar'
pathModel = 'models/m-02082019-004013.pth.tar'

nnArchitecture = 'DENSE-NET-121'
nnClassCount = 14
Expand Down
2 changes: 1 addition & 1 deletion Main.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def runTest():
imgtransResize = 256
imgtransCrop = 224

pathModel = './models/m-25012018-123527.pth.tar'
pathModel = './models/m-02082019-004013.pth.tar'

timestampLaunch = ''

Expand Down
38 changes: 19 additions & 19 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,21 @@ Yet another PyTorch implementation of the [CheXNet](https://arxiv.org/abs/1711.0
frontal chest X-ray images. This implementation is based on the approach presented [here](https://github.com/arnoweng/CheXNet). The ten-crop
technique is used to transform images at the testing stage to achieve better accuracy.

The highest accuracy evaluated with AUROC was 0.8508 (see the model m-25012018-123527 in the models directory).
The highest accuracy evaluated with AUROC was 0.8508 (see the model m-02082019-004013 in the models directory).
The same training (70%), validation (10%) and testing (20%) datasets were used as in [this](https://github.com/arnoweng/CheXNet)
implementation.

![alt text](test/heatmap.png)

## Prerequisites
* Python 3.5.2
* Pytorch
* Python 3.6.8
* Pytorch 1.1.0
* OpenCV (for generating CAMs)

## Usage
* Download the ChestX-ray14 database from [here](https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/37178474737)
* Unpack archives in separate directories (e.g. images_001.tar.gz into images_001)
* Run **python Main.py** to run test using the pre-trained model (m-25012018-123527)
* Run **python Main.py** to run test using the pre-trained model (m-02082019-004013)
* Use the **runTrain()** function in the **Main.py** to train a model from scratch

This implementation makes it possible to conduct experiments with 3 different densenet architectures: densenet-121, densenet-169 and
Expand All @@ -27,24 +27,24 @@ densenet-201.
* To generate CAM of a test file run script HeatmapGenerator

## Results
The highest accuracy 0.8508 was achieved by the model m-25012018-123527 (see the models directory).
The highest accuracy 0.8508 was achieved by the model m-02082019-004013 (see the models directory).

| Pathology | AUROC |
| ------------- |:-------------:|
| Atelectasis | 0.8321 |
| Cardiomegaly | 0.9107 |
| Effusion | 0.8860 |
| Infiltration | 0.7145 |
| Mass | 0.8653 |
| Nodule | 0.8037 |
| Pneumonia | 0.7655 |
| Pneumothorax | 0.8857 |
| Consolidation | 0.8157 |
| Edema | 0.9017 |
| Emphysema | 0.9422 |
| Fibrosis | 0.8523 |
| P.T. | 0.7948 |
| Hernia | 0.9416 |
| Atelectasis | 0.8316 |
| Cardiomegaly | 0.9164 |
| Effusion | 0.8861 |
| Infiltration | 0.7141 |
| Mass | 0.8686 |
| Nodule | 0.8081 |
| Pneumonia | 0.7737 |
| Pneumothorax | 0.8869 |
| Consolidation | 0.8175 |
| Edema | 0.9014 |
| Emphysema | 0.9393 |
| Fibrosis | 0.8521 |
| P.T. | 0.7917 |
| Hernia | 0.9385 |

## Computation time
The training was done using a single Tesla P100 GPU and took approximately 22h.
Expand Down
Binary file not shown.