diff --git a/README.md b/README.md index bd67ccd..4e44b82 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,13 @@ For further explanations of the parameters take a look at the runGan.py file. Note: evaluation (test case 2) currently requires an Nvidia GPU with `CUDA`. `tkinter` is also required and may be installed via the `python3-tk` package. +### Tensorflow 2 +#### For Tensorflow 2 compatibility, you need to install: +```bash +pip install tf_slim +pip install tensorflow_addons +``` + ```bash # Install tensorflow1.8+, pip3 install --ignore-installed --upgrade tensorflow-gpu # or tensorflow diff --git a/lib/ops.py b/lib/ops.py index 9f8510c..73d9484 100644 --- a/lib/ops.py +++ b/lib/ops.py @@ -1,5 +1,6 @@ -import tensorflow as tf -import tensorflow.contrib.slim as slim +# import tensorflow as tf +import tensorflow.compat.v1 as tf +import tf_slim as slim import pdb import keras @@ -8,6 +9,8 @@ import collections from tensorflow.python.ops import summary_op_util +tf.disable_eager_execution() + ### tensorflow functions ###################################################### def preprocess(image): @@ -37,10 +40,10 @@ def conv2_tran(batch_input, kernel=3, output_channel=64, stride=1, use_bias=True with tf.variable_scope(scope): if use_bias: return slim.conv2d_transpose(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NHWC', - activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer()) + activation_fn=None, weights_initializer=tf.keras.initializers.glorot_normal()) else: return slim.conv2d_transpose(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NHWC', - activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer(), + activation_fn=None, weights_initializer=tf.keras.initializers.glorot_normal(), biases_initializer=None) # Define the convolution building block @@ -49,10 +52,10 @@ def conv2(batch_input, kernel=3, output_channel=64, stride=1, use_bias=True, sco with 
tf.variable_scope(scope): if use_bias: return slim.conv2d(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NHWC', - activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer()) + activation_fn=None, weights_initializer=tf.keras.initializers.glorot_normal()) else: return slim.conv2d(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NHWC', - activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer(), + activation_fn=None, weights_initializer=tf.keras.initializers.glorot_normal(), biases_initializer=None) @@ -62,10 +65,10 @@ def conv2_NCHW(batch_input, kernel=3, output_channel=64, stride=1, use_bias=True with tf.variable_scope(scope): if use_bias: return slim.conv2d(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NCWH', - activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer()) + activation_fn=None, weights_initializer=tf.keras.initializers.glorot_normal()) else: return slim.conv2d(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NCWH', - activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer(), + activation_fn=None, weights_initializer=tf.keras.initializers.glorot_normal(), biases_initializer=None) @@ -95,7 +98,7 @@ def maxpool(inputs, scope='maxpool'): # Our dense layer def denselayer(inputs, output_size): # Rachel todo, put it to Model variable_scope - denseLayer = tf.layers.Dense(output_size, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer()) + denseLayer = tf.layers.Dense(output_size, activation=None, kernel_initializer=tf.keras.initializers.glorot_normal()) output = denseLayer.apply(inputs) tf.add_to_collection( name=tf.GraphKeys.MODEL_VARIABLES, value=denseLayer.kernel ) #output = tf.layers.dense(inputs, output_size, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer()) diff --git a/main.py b/main.py index 9d361da..04326d4 
100644 --- a/main.py +++ b/main.py @@ -7,7 +7,9 @@ 3 = INFO, WARNING, and ERROR messages are not printed Disable Logs for now ''' os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' -import tensorflow as tf +import tensorflow.compat.v1 as tf +import tensorflow_addons as tfa +import tf_slim as slim from tensorflow.python.util import deprecation deprecation._PRINT_DEPRECATION_WARNINGS = False import random as rn @@ -18,7 +20,6 @@ rn.seed(12345) tf.set_random_seed(1234) -import tensorflow.contrib.slim as slim import sys, shutil, subprocess from lib.ops import * @@ -26,6 +27,7 @@ from lib.frvsr import generator_F, fnet from lib.Teco import FRVSR, TecoGAN +tf.disable_eager_execution() Flags = tf.app.flags @@ -212,7 +214,7 @@ def testWhileTrain(FLAGS, testno = 0): gen_flow_lr = tf.pad(gen_flow_lr, paddings, "SYMMETRIC") gen_flow = upscale_four(gen_flow_lr*4.0) gen_flow.set_shape( output_shape[:-1]+[2] ) - pre_warp_hi = tf.contrib.image.dense_image_warp(pre_gen, gen_flow) + pre_warp_hi = tfa.image.dense_image_warp(pre_gen, gen_flow) before_ops = tf.assign(pre_warp, pre_warp_hi) print('Finish building the network')