main.py
# Third-party imports
import numpy as np
import matplotlib.pyplot as plt
import keras as k
import tensorflow as tf
import time
# Project modules
from CANN import CANN
from CANN import subANNs as subANNs
from CANN import Outputs
from CANN import Inputs
from CANN import Helpers as help
#####
## Settings
#####
# Switch to read a previously saved model from disk (note: when using a noisy data set, the data import will create new/different noise)
readModelFromDisk = True
# Switch to select the problem
problem = 1
"""
0: Treloar
1: Generalized Mooney Rivlin
2: Generalized Mooney Rivlin with noise
"""
#####
## General
#####
# Disable eager execution
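# (Presumably needed because parts of the CANN implementation rely on TF1-style graph-mode behaviour.)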
tf.compat.v1.disable_eager_execution()
# Obtain input data and problem specific settings
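# (Interpretation based on the variable names: F holds the deformation data, P11 the measured stress data,
#  'extra' optional additional inputs, and 'ds' a data-set object reused below for prediction and plotting.)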
F, P11, extra, ds, problemName, numTens, numDir, batchSize, epochs, incomp = Inputs.defineProblem(problem)
# Create output folder
outputFolder = Outputs.prepareOutputFolder('output/', problemName)
# Define ANN architecture
#####
## Create model and statistics
#####
# Create model
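# Determine the number of extra input features (columns of 'extra'); zero if none are provided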
if type(extra) is np.ndarray:
    numExtra = extra.shape[1]
else:
    numExtra = 0
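# Assemble the CANN from its sub-networks (judging by the names: a strain-energy (Psi) network,
# a direction network, and a weighting network)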
myCANN = CANN.CANN_wrapper(subANNs.Psi_layers_wrapper(), subANNs.dir_layers_wrapper(), subANNs.w_layers_wrapper(),
                           numTens=numTens, numDir=numDir, numExtra=numExtra, incomp=incomp)
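# The wrapper appears to return two views of the same network: 'model_fit' for compiling/fitting
# and 'model_full' for the summary, graph plot, and error evaluation below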
model_fit, model_full = myCANN()
# Output debugging information
Outputs.showModelSummary(model_full, numDir=numDir, outputFolder=outputFolder)
Outputs.plotModelGraph(model_full, outputFolder, numDir=numDir)
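# When readModelFromDisk is True, training is skipped; in either case the weights stored in outputFolder are loaded further below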
if not readModelFromDisk:
    #####
    ## Compile and fit model
    #####
    # Compile model
    model_fit.compile(
        optimizer = k.optimizers.Adam(learning_rate = 0.001),
        loss = 'mean_squared_error',
        metrics = ['MeanSquaredError']
    )
    # Split data into training and validation
    train_in, train_out, val_in, val_out = help.trainValidationSplit(F, P11, extra, numExtra, trainSize=0.8, outputFolder=outputFolder)
    # Fit model
    startTime = time.time()
    his = model_fit.fit(
        train_in,
        train_out,
        verbose = 1,
        batch_size = batchSize,
        epochs = epochs,
        validation_data = (val_in, val_out),
        callbacks = [tf.keras.callbacks.ModelCheckpoint(
            outputFolder+'modelWeights.h5',
            monitor = 'val_loss',
            verbose = 1,
            save_best_only = True,
            mode = 'min'
        )]
    )
    endTime = time.time()
    print('\n\n\n FITTING TIME: ' + help.timeString(endTime-startTime) + '\n\n')
    # Plot/save loss
    Outputs.plotLoss(his, outputFolder)
    Outputs.saveLoss(his, outputFolder)
# Reload best weights
model_fit  = help.loadWeights(model_fit,  outputFolder)
model_full = help.loadWeights(model_full, outputFolder)
#####
## Visualization of results
#####
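# Evaluate the fitted model on the data set, then save and plot the resulting curves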
ds.predict(model_fit)
Outputs.saveMainCurves(ds, outputFolder)
Outputs.plotMainCurves(ds, outputFolder)
if problem == 1 or problem == 2: # Cannot be done for Treloar, since it's not based on an analytical strain energy
    Outputs.saveAndPlotErrors(ds, model_full, outputFolder, 55, 300, stepSizeI1=0.2, stepSizeI2=1.)