models.py
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.framework import ops
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import pickle as pkl
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import urllib
import os
import tarfile
import skimage
import skimage.io
import skimage.transform


def shared_encoder(x, name='feat_ext', reuse=False):
    """Domain-shared encoder: two conv/pool blocks followed by a 100-unit FC layer."""
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(1e-6),
                activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm):
            net = slim.conv2d(x, 32, [5, 5], scope='conv1_shared_encoder')
            net = slim.max_pool2d(net, [2, 2], scope='pool1_shared_encoder')
            net = slim.conv2d(net, 64, [5, 5], scope='conv2_shared_encoder')
            net = slim.max_pool2d(net, [2, 2], scope='pool2_shared_encoder')
            net = slim.flatten(net, scope='flat_shared_encoder')
            net = slim.fully_connected(net, 100, scope='shared_fc1')
    return net
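
# Usage note (illustrative, not from the original file): the shared encoder is
# intended to be applied to both the source and the target batch with tied
# weights, which is what the `reuse` flag enables, e.g.:
#
#     feat_src = shared_encoder(source_images)               # creates the variables
#     feat_tgt = shared_encoder(target_images, reuse=True)   # reuses the same weights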


# Private target encoder: extracts target-domain-specific features.
def private_target_encoder(x, name='private_target_encoder', reuse=False):
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(1e-6),
                activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm):
            net = slim.conv2d(x, 32, [5, 5], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
            net = slim.conv2d(net, 64, [5, 5], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
            net = slim.flatten(net)
            net = slim.fully_connected(net, 100, scope='private_target_fc1')
    return net


# Private source encoder: extracts source-domain-specific features.
def private_source_encoder(x, name='private_source_encoder', reuse=False):
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(1e-6),
                activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm):
            net = slim.conv2d(x, 32, [5, 5], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.conv2d(net, 64, [5, 5], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.flatten(net)
            net = slim.fully_connected(net, 100, scope='private_source_fc1')
    return net


def shared_decoder(feat, height, width, channels, reuse=False, name='shared_decoder'):
    """Shared decoder: maps a feature vector back to a (height, width, channels)
    image, upsampling with nearest-neighbor resizes between conv layers."""
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(1e-6),
                activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm):
            net = slim.fully_connected(feat, 600, scope='fc1_decoder')
            net = tf.reshape(net, [-1, 10, 10, 6])
            net = slim.conv2d(net, 32, [5, 5], scope='conv1_1_decoder')
            net = tf.image.resize_nearest_neighbor(net, (16, 16))
            net = slim.conv2d(net, 32, [5, 5], scope='conv2_1_decoder')
            net = tf.image.resize_nearest_neighbor(net, (32, 32))
            net = slim.conv2d(net, 32, [5, 5], scope='conv3_2_decoder')
            output_size = [height, width]
            net = tf.image.resize_nearest_neighbor(net, output_size)
            with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]):
                net = slim.conv2d(net, channels, activation_fn=None, scope='conv4_1_decoder')
    return net
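

# ---------------------------------------------------------------------------
# Graph-construction sketch (illustrative only, not part of the original file).
# It wires the shared/private encoders to the shared decoder in the usual
# domain-separation fashion. The 32x32x3 input shape and the element-wise sum
# used to combine the shared and private codes are assumptions made for this
# example; the training scripts may combine them differently.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    source_images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='source_images')
    target_images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='target_images')

    # The shared encoder is applied to both domains with tied weights.
    shared_src = shared_encoder(source_images)
    shared_tgt = shared_encoder(target_images, reuse=True)

    # Each domain has its own private encoder.
    private_src = private_source_encoder(source_images)
    private_tgt = private_target_encoder(target_images)

    # Reconstruct each domain from its combined code; decoder weights are
    # shared across domains via reuse=True on the second call.
    recon_src = shared_decoder(shared_src + private_src, 32, 32, 3)
    recon_tgt = shared_decoder(shared_tgt + private_tgt, 32, 32, 3, reuse=True)

    print(recon_src.get_shape().as_list())  # expected: [None, 32, 32, 3]
    print(recon_tgt.get_shape().as_list())  # expected: [None, 32, 32, 3]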