train_ConvAE.py
from __future__ import division, print_function, absolute_import
import tensorflow as tf
from model_convAE import AutoEncoder
from image_utilities import ImageUtils
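
# Trains a convolutional autoencoder (model_convAE.AutoEncoder) on images that are
# resized to img_shape and loaded via image_utilities.ImageUtils, then saves the
# trained session to model/model_convAE.ckpt. Written against the TensorFlow 1.x
# API (tf.Session, tf.train.Saver).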
process_and_save_images = True
flatten_before_encode = True
img_shape = (50,50)
ratio_train_test = 0.8
seed = 100
num_steps = 20000
batch_size = 10
img_train_dir_raw = "train_raw"
img_train_dir_proc = "train_proc"
img_test_dir_raw = "test_raw"
img_test_dir_proc = "test_proc"
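
# Directory convention (inferred from the names above): the *_raw directories hold the
# original images and the *_proc directories hold the resized copies written below.
# Note: ratio_train_test, seed and the test directories are defined here but are not
# used elsewhere in this script.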
IU = ImageUtils()
# Process and save
if process_and_save_images:
    # Training images
    IU.save_raw2resized(raw_dir=img_train_dir_raw,
                        processed_dir=img_train_dir_proc,
                        img_shape=img_shape)
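
# Build the convolutional autoencoder graph. AutoEncoder (from model_convAE) is
# assumed to expose the attributes used below: AE.init (variable initializer),
# AE.optimizer (training op), AE.loss (reconstruction loss) and AE.X (input placeholder).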
AE = AutoEncoder()
with tf.Session() as sess:
    # Run the initializer
    sess.run(AE.init)
    saver = tf.train.Saver()

    # Load the processed training images
    x_data_train, all_train_filenames = IU.load_raw2resizednorm(img_dir=img_train_dir_proc,
                                                                img_shape=img_shape)
    print("x_data_train.shape = {0}".format(x_data_train.shape))

    # Flatten data if necessary
    if flatten_before_encode:
        x_data_train = IU.flatten_img_data(x_data_train)
        print("x_data_train.shape = {0}".format(x_data_train.shape))

    # Training
    for i in range(1, num_steps + 1):
        # Each outer step is one full pass over the training data, taken in minibatches
        training_steps = int((len(x_data_train) / batch_size)) + 1
        for step in range(training_steps):
            X_batch = x_data_train[(step * batch_size):((step + 1) * batch_size)]
            if len(X_batch) == 0:
                break  # skip the empty trailing slice when batch_size divides the data size evenly
            # Run optimization op (backprop) and cost op (to get loss value)
            _, l = sess.run([AE.optimizer, AE.loss], feed_dict={AE.X: X_batch})
        # Display loss per step
        print('Step %i: Minibatch Loss: %f' % (i, l))

    # Save the trained model
    save_path = saver.save(sess, "model/model_convAE.ckpt")
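
# Note: tf.train.Saver.save() does not create missing directories, so the "model/"
# directory should exist before this script runs (e.g. os.makedirs("model", exist_ok=True)).
# To reload the trained weights later (a minimal sketch, assuming the same AutoEncoder
# graph is rebuilt first):
#
#     AE = AutoEncoder()
#     with tf.Session() as sess:
#         tf.train.Saver().restore(sess, "model/model_convAE.ckpt")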