ops.py
import numpy as np
import tensorflow as tf


def batch_norm(opts, _input, is_train, reuse, scope, scale=True):
    """Batch normalization based on tf.contrib.layers."""
    return tf.contrib.layers.batch_norm(
        _input, center=True, scale=scale,
        epsilon=opts['batch_norm_eps'], decay=opts['batch_norm_decay'],
        is_training=is_train, reuse=reuse, updates_collections=None,
        scope=scope, fused=False)


def linear(opts, input_, output_dim, scope=None, init='normal', reuse=None):
    """Fully connected layer: flattens the input if needed and applies Wx + b."""
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    assert len(shape) > 1, 'Linear layer expects at least a 2d (batch, features) tensor.'
    in_shape = shape[1]
    if len(shape) > 2:
        # Flatten all non-batch dimensions into a single feature axis.
        input_ = tf.reshape(input_, [-1, np.prod(shape[1:])])
        in_shape = np.prod(shape[1:])
    with tf.variable_scope(scope or "lin", reuse=reuse):
        if init == 'normal':
            matrix = tf.get_variable(
                "W", [in_shape, output_dim], tf.float32,
                tf.random_normal_initializer(stddev=stddev))
        else:
            # Identity initialization (requires in_shape == output_dim).
            matrix = tf.get_variable(
                "W", [in_shape, output_dim], tf.float32,
                tf.constant_initializer(np.identity(in_shape)))
        bias = tf.get_variable(
            "b", [output_dim],
            initializer=tf.constant_initializer(bias_start))
    return tf.matmul(input_, matrix) + bias


def conv3d(opts, input_, output_dim, d_d=1, d_h=1, d_w=1, scope=None,
           conv_filters_dim=None, padding='SAME', l2_norm=False):
    """3d convolution with a truncated-normal filter and a bias term."""
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        # A single int in opts means a cubic filter.
        conv_filters_dim = opts['conv_filters_dim']
        k_h = conv_filters_dim
        k_w = k_h
        k_d = k_w
    else:
        k_h = conv_filters_dim[0]
        k_w = conv_filters_dim[1]
        k_d = conv_filters_dim[2]
    assert len(shape) == 5, 'Conv3d works only with 5d tensors.'
    with tf.variable_scope(scope or 'conv3d'):
        # Note: the names follow an (h, w, d) order while tf.nn.conv3d treats the
        # leading filter/stride axes as (depth, height, width); with cubic filters
        # and equal strides the distinction has no effect.
        w = tf.get_variable(
            'filter', [k_h, k_w, k_d, shape[-1], output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        if l2_norm:
            w = tf.nn.l2_normalize(w, 2)
        conv = tf.nn.conv3d(
            input_, w, strides=[1, d_h, d_w, d_d, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_dim],
            initializer=tf.constant_initializer(bias_start))
        conv = tf.nn.bias_add(conv, biases)
    return conv


def deconv3d(opts, input_, output_shape, d_h=2, d_w=2, d_d=2, scope=None,
             conv_filters_dim=None, padding='SAME'):
    """Transposed 3d convolution producing a tensor of shape output_shape."""
    stddev = opts['init_std']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        # A single int in opts means a cubic filter.
        conv_filters_dim = opts['conv_filters_dim']
        k_h = conv_filters_dim
        k_w = k_h
        k_d = k_w
    else:
        k_h = conv_filters_dim[0]
        k_w = conv_filters_dim[1]
        k_d = conv_filters_dim[2]
    assert len(shape) == 5, 'Conv3d_transpose works only with 5d tensors.'
    assert len(output_shape) == 5, 'output_shape should be 5-dimensional.'
    with tf.variable_scope(scope or "deconv3d"):
        w = tf.get_variable(
            'filter', [k_h, k_w, k_d, output_shape[-1], shape[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv3d_transpose(
            input_, w, output_shape=output_shape,
            strides=[1, d_h, d_w, d_d, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_shape[-1]],
            initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
    return deconv


def conv2d(opts, input_, output_dim, d_h=1, d_w=1, scope=None,
           conv_filters_dim=None, padding='VALID', l2_norm=False):
    """2d convolution with a truncated-normal filter and a bias term."""
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        conv_filters_dim = opts['conv_filters_dim']
    # A single int means a square filter.
    k_h = conv_filters_dim
    k_w = k_h
    assert len(shape) == 4, 'Conv2d works only with 4d tensors.'
    with tf.variable_scope(scope or 'conv2d'):
        w = tf.get_variable(
            'filter', [k_h, k_w, shape[-1], output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        if l2_norm:
            w = tf.nn.l2_normalize(w, 2)
        conv = tf.nn.conv2d(
            input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_dim],
            initializer=tf.constant_initializer(bias_start))
        conv = tf.nn.bias_add(conv, biases)
    return conv
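

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: wires
    # conv2d -> batch_norm -> linear in TF1 graph mode to show how the
    # ops above compose. The opts values below are illustrative
    # assumptions, not settings taken from the project's configuration.
    opts = {
        'init_std': 0.02,
        'init_bias': 0.0,
        'conv_filters_dim': 3,
        'batch_norm_eps': 1e-5,
        'batch_norm_decay': 0.9,
    }
    images = tf.placeholder(tf.float32, [None, 28, 28, 1])
    is_train = tf.placeholder(tf.bool, [])
    h = conv2d(opts, images, output_dim=16, scope='conv1')
    h = batch_norm(opts, h, is_train, reuse=None, scope='bn1')
    h = tf.nn.relu(h)
    logits = linear(opts, h, output_dim=10, scope='fc1')
    print(logits.get_shape().as_list())  # expected: [None, 10]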