This repository was archived by the owner on Apr 10, 2024. It is now read-only.

Upgrade lucid to use tensorflow v2 APIs and make it work on Colab platform #305

Open
ercaronte wants to merge 38 commits into base: master
38 commits
6493a99
Fixing warning messages with TF 1.15.1, part 1.
ercaronte Jan 15, 2023
49d6ade
Fixing warning messages with TF 1.15.5, part 2
ercaronte Jan 15, 2023
510f8e6
Fixing warning messages with TF 1.15.5, part 3
ercaronte Jan 15, 2023
eed2a92
This branch makes lucid work with tensorflow version 2.0.0. All tox t…
ercaronte Jan 16, 2023
ba50323
lucid compatible with TF 2.1.4.
ercaronte Jan 16, 2023
3ca46bb
Branch compatible with TF 2.5.3
ercaronte Jan 16, 2023
b45a145
Branch compatible with TF 2.11
ercaronte Jan 17, 2023
17f33cf
Cleanup before merge
ercaronte Jan 17, 2023
fee0f54
Fixed notebooks at root level (tutorial, modelzoo, caricature). Few o…
ercaronte Jan 17, 2023
92411cb
Adding missing images and other data that are loaded by lucid noteboo…
ercaronte Jan 18, 2023
7a3a31f
Merge pull request #1 from ercaronte/bugfix/missing-notebook-static-c…
ercaronte Jan 18, 2023
b50a7eb
Merge branch 'master' into feature/tf2compatibility
ercaronte Jan 18, 2023
478583c
Upgraded notebooks in the activation-atlas directory, except one (act…
ercaronte Jan 18, 2023
7572e9a
Upgraded more notebooks.
ercaronte Jan 19, 2023
4cf2a10
Adding missing images and other data that are loaded by lucid noteboo…
ercaronte Jan 19, 2023
16b711d
Updated .gitignore
ercaronte Jan 19, 2023
bc6e3cc
Merge pull request #2 from ercaronte/bugfix/missing-notebook-static-c…
ercaronte Jan 19, 2023
75d8af4
Merge commit 'bc6e3cc02d32085f2b7c15b81f514f6e0453dfa4' into feature/…
ercaronte Jan 19, 2023
a9f6334
Small fixes for the style_transfer_2d notebook
ercaronte Jan 20, 2023
e58fd3d
Fixed issue with create_session tf 2.x compatibility
ercaronte Jan 20, 2023
a5f8e20
Fixed issue with style.py compatibility with TF2
ercaronte Jan 20, 2023
f9544bc
Possible fixes of OpenGL glitches in Colab.
ercaronte Jan 20, 2023
c139e50
Restoring previous code of 3D web visualization in showing.py. Puttin…
ercaronte Jan 23, 2023
219a257
Adding models 3D to the static data folder.
ercaronte Jan 23, 2023
a002e69
Merge branch 'bugfix/missing-notebook-static-content'
ercaronte Jan 23, 2023
e4be079
Merge branch 'master' into feature/tf2compatibility
ercaronte Jan 23, 2023
adda0ae
Reverting some of the previous changes.
ercaronte Jan 23, 2023
a846d9b
Upgraded notebooks in differentiable-parameterizations folder.
ercaronte Jan 23, 2023
6096bd6
Upgraded notebooks in feature-visualization. Fixed a related issue in…
ercaronte Jan 24, 2023
cb85d44
Upgraded notebooks in differentiable-parametrization/appendix
ercaronte Jan 24, 2023
e78122d
Upgraded all notebooks except those running svelte
ercaronte Jan 25, 2023
a4ce3d6
Adding missing image.
ercaronte Jan 25, 2023
9bdef1c
Merge branch 'bugfix/missing-notebook-static-content'
ercaronte Jan 25, 2023
426fb03
Merge branch 'master' into feature/tf2compatibility
ercaronte Jan 25, 2023
b61ef33
Upgraded all notebooks in the building-block directory. Did not recre…
ercaronte Jan 26, 2023
22d78c7
Merge pull request #3 from ercaronte/feature/tf2compatibility
ercaronte Jan 26, 2023
ecb8100
Changed modelzoo notebook to use Tensorboard to show model graph. Depr…
ercaronte Feb 1, 2023
526fb4c
Fixed an issue with casting to float, in case the input vector is alr…
ercaronte Feb 6, 2023
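Most of the diff below follows a single migration pattern: graph-mode APIs that TensorFlow 2.x removed from the top-level `tf` namespace (placeholders, sessions, `GraphDef`, `gfile`, graph utilities) are reached through `tf.compat.v1` instead, so lucid's existing graph-based code keeps running. A minimal, self-contained sketch of that pattern (an illustration, not code taken from this PR):

```python
import tensorflow as tf

# Lucid builds and runs explicit graphs, so eager execution is not used here.
tf.compat.v1.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    # tf.placeholder and tf.Session moved under tf.compat.v1 in TF 2.x.
    x = tf.compat.v1.placeholder(tf.float32, [None, 4], name="input")
    y = tf.reduce_sum(x ** 2, axis=1)

with tf.compat.v1.Session(graph=graph) as sess:
    print(sess.run(y, {x: [[1.0, 2.0, 3.0, 4.0]]}))  # [30.]
```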
2 changes: 1 addition & 1 deletion .editorconfig
@@ -4,7 +4,7 @@ root = true

[*]
indent_style = space
indent_size = 4
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true
end_of_line = lf
8 changes: 6 additions & 2 deletions .gitignore
@@ -5,14 +5,16 @@ dist
.tox
.pytest_cache
*.egg-info
notebooks/.ipynb_checkpoints
.ipynb_checkpoints
/notebooks/logs/
coverage.txt
.coverage
lucidenv
lucid-env
.idea
.vscode
.mypy_cache
.python-version

pip-selfcheck.json

@@ -23,9 +25,11 @@ bin
include
lib
share
venv*/

tests/fixtures/generated_outputs/


lucid/scratch/js/package-lock.json
lucid/scratch/js/yarn.lock
lucid/scratch/js/yarn.lock

2 changes: 1 addition & 1 deletion lucid/__init__.py
@@ -35,4 +35,4 @@
seed = 0

# Set the lucid version - setup.py imports this value!
__version__ = "0.3.10"
__version__ = "0.3.11"
4 changes: 2 additions & 2 deletions lucid/misc/gl/glcontext.py
@@ -37,7 +37,7 @@
'prior importing this module.')
raise

import ctypes
import ctypes.util
from ctypes import pointer
import os

@@ -117,4 +117,4 @@ def create_opengl_context(surface_size=(640, 480)):

egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT,
None)
egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
9 changes: 5 additions & 4 deletions lucid/misc/gradient_override.py
@@ -100,7 +100,7 @@ def _foo_grad_alt(op, grad): ...
override_dict_by_name[op_name] = grad_f
else:
override_dict_by_name[op_name] = register_to_random_name(grad_f)
with tf.get_default_graph().gradient_override_map(override_dict_by_name):
with tf.compat.v1.get_default_graph().gradient_override_map(override_dict_by_name):
yield


@@ -154,7 +154,8 @@ def store_out(out_value):
state["out_value"] = out_value

store_name = "store_" + f.__name__
store = tf.py_func(store_out, [out], (), stateful=True, name=store_name)
store = tf.compat.v1.py_func(store_out, [out], (), stateful=True, name=store_name)
# store = tf.numpy_function(store_out, [out], (), stateful=True, name=store_name) # not yet implemented in TF 2.5

# Next, we create the mock function, with an overriden gradient.
# Note that we need to make sure store gets evaluated before the mock
@@ -167,8 +168,8 @@ def mock_f(*inputs):
with tf.control_dependencies([store]):
with gradient_override_map({"PyFunc": grad_f_name}):
mock_name = "mock_" + f.__name__
mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True,
name=mock_name)
mock_out = tf.compat.v1.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name)
# mock_out = tf.numpy_function(mock_f, inputs, out.dtype, stateful=True, name=mock_name) # not yet implemented in TF 2.5
mock_out.set_shape(out.get_shape())

# Finally, we can return the mock.
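For context on the hunk above: `tf.py_func` no longer exists at the top level in TF 2.x, so the override keeps using the graph-mode `tf.compat.v1.py_func`, which wraps a plain Python callable as a `PyFunc` graph op (the op type that the `gradient_override_map({"PyFunc": ...})` call targets). A rough standalone sketch of that call, independent of lucid:

```python
import numpy as np
import tensorflow as tf

# py_func only exists as a graph-mode op, so build and run an explicit graph.
tf.compat.v1.disable_eager_execution()

def double(x):
    # Runs as ordinary Python/NumPy when the session executes the op.
    return (x * 2.0).astype(np.float32)

inp = tf.compat.v1.placeholder(tf.float32, [3])
out = tf.compat.v1.py_func(double, [inp], tf.float32, stateful=True, name="double")

with tf.compat.v1.Session() as sess:
    print(sess.run(out, {inp: np.array([1.0, 2.0, 3.0], np.float32)}))  # [2. 4. 6.]
```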
2 changes: 1 addition & 1 deletion lucid/misc/io/loading.py
@@ -126,7 +126,7 @@ def _load_text(handle, split=False, encoding="utf-8"):
def _load_graphdef_protobuf(handle, **kwargs):
"""Load GraphDef from a binary proto file."""
# as_graph_def
graph_def = tf.GraphDef.FromString(handle.read())
graph_def = tf.compat.v1.GraphDef.FromString(handle.read())

# check if this is a lucid-saved model
# metadata = modelzoo.util.extract_metadata(graph_def)
2 changes: 1 addition & 1 deletion lucid/misc/io/reading.py
@@ -25,7 +25,7 @@
import logging
from urllib.parse import urlparse
from urllib import request
from tensorflow.io.gfile import GFile
from tensorflow.compat.v1.io.gfile import GFile
import tensorflow as tf
from tempfile import gettempdir
import gc
3 changes: 2 additions & 1 deletion lucid/misc/io/showing.py
@@ -329,7 +329,7 @@ def _strip_consts(graph_def, max_const_size=32):
This is mostly a utility function for graph(), and also originates here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
"""
strip_def = tf.GraphDef()
strip_def = tf.compat.v1.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
@@ -342,6 +342,7 @@ def _strip_consts(graph_def, max_const_size=32):


def graph(graph_def, max_const_size=32):
log.warning("showing.graph is deprecated, replace it with tensorboard and its jupyter magic extensions.")
"""Visualize a TensorFlow graph.

This function was originally found in this notebook (also Apache licensed):
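Since `showing.graph` is deprecated in this PR in favour of TensorBoard, a sketch of the suggested replacement workflow (mirroring the message added to `Model.show_graph` further down; the `logs/train` directory is just an example path):

```python
import tensorflow as tf

# In a notebook, first enable the TensorBoard extension:
#   %load_ext tensorboard
# and after the writer below has run:
#   %tensorboard --logdir logs/train

graph_def = tf.compat.v1.GraphDef()  # illustrative; normally a loaded or frozen model's GraphDef
with tf.compat.v1.Graph().as_default():
    # FileWriter writes an event file whose graph TensorBoard's "Graphs" tab can display.
    tf.compat.v1.summary.FileWriter("logs/train", graph=graph_def).close()
```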
6 changes: 3 additions & 3 deletions lucid/misc/io/writing.py
@@ -25,7 +25,7 @@
import logging
from contextlib import contextmanager
from urllib.parse import urlparse
from tensorflow import gfile
from tensorflow.compat.v1.io import gfile

from lucid.misc.io.scoping import scope_url

@@ -66,14 +66,14 @@ def write_handle(path, mode=None):
path = scope_url(path)

if _supports_make_dirs(path):
gfile.MakeDirs(os.path.dirname(path))
gfile.makedirs(os.path.dirname(path))

if mode is None:
if _supports_binary_writing(path):
mode = "wb"
else:
mode = "w"

handle = gfile.Open(path, mode)
handle = gfile.GFile(path, mode)
yield handle
handle.close()
20 changes: 10 additions & 10 deletions lucid/misc/redirected_relu_grad.py
@@ -92,12 +92,12 @@ def redirected_relu_grad(op, grad):
x = op.inputs[0]

# Compute ReLu gradient
relu_grad = tf.where(x < 0., tf.zeros_like(grad), grad)
relu_grad = tf.compat.v1.where(x < 0., tf.zeros_like(grad), grad)

# Compute redirected gradient: where do we need to zero out incoming gradient
# to prevent input going lower if its already negative
neg_pushing_lower = tf.logical_and(x < 0., grad > 0.)
redirected_grad = tf.where(neg_pushing_lower, tf.zeros_like(grad), grad)
redirected_grad = tf.compat.v1.where(neg_pushing_lower, tf.zeros_like(grad), grad)

# Ensure we have at least a rank 2 tensor, as we expect a batch dimension
assert_op = tf.Assert(tf.greater(tf.rank(relu_grad), 1), [tf.rank(relu_grad)])
@@ -106,12 +106,12 @@ def redirected_relu_grad(op, grad):
batch = tf.shape(relu_grad)[0]
reshaped_relu_grad = tf.reshape(relu_grad, [batch, -1])
relu_grad_mag = tf.norm(reshaped_relu_grad, axis=1)
result_grad = tf.where(relu_grad_mag > 0., relu_grad, redirected_grad)
result_grad = tf.compat.v1.where(relu_grad_mag > 0., relu_grad, redirected_grad)

global_step_t =tf.train.get_or_create_global_step()
global_step_t = tf.compat.v1.train.get_or_create_global_step()
return_relu_grad = tf.greater(global_step_t, tf.constant(16, tf.int64))

return tf.where(return_relu_grad, relu_grad, result_grad)
return tf.compat.v1.where(return_relu_grad, relu_grad, result_grad)


def redirected_relu6_grad(op, grad):
@@ -120,15 +120,15 @@

# Compute ReLu gradient
relu6_cond = tf.logical_or(x < 0., x > 6.)
relu_grad = tf.where(relu6_cond, tf.zeros_like(grad), grad)
relu_grad = tf.compat.v1.where(relu6_cond, tf.zeros_like(grad), grad)

# Compute redirected gradient: where do we need to zero out incoming gradient
# to prevent input going lower if its already negative, or going higher if
# already bigger than 6?
neg_pushing_lower = tf.logical_and(x < 0., grad > 0.)
pos_pushing_higher = tf.logical_and(x > 6., grad < 0.)
dir_filter = tf.logical_or(neg_pushing_lower, pos_pushing_higher)
redirected_grad = tf.where(dir_filter, tf.zeros_like(grad), grad)
redirected_grad = tf.compat.v1.where(dir_filter, tf.zeros_like(grad), grad)

# Ensure we have at least a rank 2 tensor, as we expect a batch dimension
assert_op = tf.Assert(tf.greater(tf.rank(relu_grad), 1), [tf.rank(relu_grad)])
@@ -137,9 +137,9 @@ def redirected_relu6_grad(op, grad):
batch = tf.shape(relu_grad)[0]
reshaped_relu_grad = tf.reshape(relu_grad, [batch, -1])
relu_grad_mag = tf.norm(reshaped_relu_grad, axis=1)
result_grad = tf.where(relu_grad_mag > 0., relu_grad, redirected_grad)
result_grad = tf.compat.v1.where(relu_grad_mag > 0., relu_grad, redirected_grad)

global_step_t = tf.train.get_or_create_global_step()
global_step_t = tf.compat.v1.train.get_or_create_global_step()
return_relu_grad = tf.greater(global_step_t, tf.constant(16, tf.int64))

return tf.where(return_relu_grad, relu_grad, result_grad)
return tf.compat.v1.where(return_relu_grad, relu_grad, result_grad)
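A note on why these calls use `tf.compat.v1.where` rather than the TF 2 `tf.where` (an inference, not stated in the PR): the v1 version lets a rank-1 condition select whole rows of higher-rank tensors, which is exactly how the rank-1 `relu_grad_mag > 0.` condition is used above, whereas the TF 2 version broadcasts the condition element-wise. A small sketch of the difference:

```python
import tensorflow as tf

cond = tf.constant([True, False])          # one entry per batch element
a = tf.constant([[1.0, 1.0], [1.0, 1.0]])
b = tf.constant([[2.0, 2.0], [2.0, 2.0]])

# v1 semantics: a rank-1 condition picks whole rows from a or b.
rows = tf.compat.v1.where(cond, a, b)      # [[1., 1.], [2., 2.]]

# v2 semantics: the condition is broadcast against a and b element-wise,
# so [True, False] selects per column here instead of per row.
elems = tf.where(cond, a, b)               # [[1., 2.], [1., 2.]]

print(rows.numpy(), elems.numpy(), sep="\n")
```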
4 changes: 2 additions & 2 deletions lucid/misc/tfutil.py
@@ -25,7 +25,7 @@ def create_session(target='', timeout_sec=10):
when having multiple python sessions sharing the same GPU.
'''
graph = tf.Graph()
config = tf.ConfigProto()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.operation_timeout_in_ms = int(timeout_sec*1000)
return tf.InteractiveSession(target=target, graph=graph, config=config)
return tf.compat.v1.InteractiveSession(target=target, graph=graph, config=config)
1 change: 1 addition & 0 deletions lucid/modelzoo/other_models/Clip.py
@@ -21,3 +21,4 @@ class Clip_ResNet50_4x(Model):
model_name = "Clip_ResNet50_4x"
image_shape = [288, 288, 3]
model_path = "gs://modelzoo/vision/other_models/Clip_ResNet50_4x.pb"
dataset = None
4 changes: 2 additions & 2 deletions lucid/modelzoo/other_models/InceptionV1.py
@@ -21,7 +21,7 @@

def _populate_inception_bottlenecks(scope):
"""Add Inception bottlenecks and their pre-Relu versions to the graph."""
graph = tf.get_default_graph()
graph = tf.compat.v1.get_default_graph()
for op in graph.get_operations():
if op.name.startswith(scope+'/') and 'Concat' in op.type:
name = op.name.split('/')[1]
@@ -37,7 +37,7 @@ def _populate_inception_bottlenecks(scope):
class InceptionV1(Model):
"""InceptionV1 (or 'GoogLeNet')

This is a (re?)implementation of InceptionV1 from the "Going deeper
This is a (re?)implementation of InceptionV1 from the "Going deeper
with convolutions" paper. Links:
* Official CVPR paper, requires subscription: https://ieeexplore.ieee.org/document/7298594
* Author preprint: https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf
14 changes: 7 additions & 7 deletions lucid/modelzoo/util.py
@@ -55,31 +55,31 @@ def forget_xy(t):
filter) when we only use early parts of it.
"""
shape = (t.shape[0], None, None, t.shape[3])
return tf.placeholder_with_default(t, shape)
return tf.compat.v1.placeholder_with_default(t, shape)


def frozen_default_graph_def(input_node_names, output_node_names):
"""Return frozen and simplified graph_def of default graph."""

sess = tf.get_default_session()
sess = tf.compat.v1.get_default_session()
if sess is None:
raise RuntimeError("Default session not registered.")

input_graph_def = tf.get_default_graph().as_graph_def()
input_graph_def = tf.compat.v1.get_default_graph().as_graph_def()
if len(input_graph_def.node) == 0:
raise RuntimeError("Default graph is empty. Is it possible your model wasn't constructed or is in a different graph?")

pruned_graph = tf.graph_util.remove_training_nodes(
pruned_graph = tf.compat.v1.graph_util.remove_training_nodes(
input_graph_def, protected_nodes=(output_node_names + input_node_names)
)
pruned_graph = tf.graph_util.extract_sub_graph(pruned_graph, output_node_names)
pruned_graph = tf.compat.v1.graph_util.extract_sub_graph(pruned_graph, output_node_names)

# remove explicit device assignments
for node in pruned_graph.node:
node.device = ""

all_variable_names = [v.op.name for v in tf.global_variables()]
output_graph_def = tf.graph_util.convert_variables_to_constants(
all_variable_names = [v.op.name for v in tf.compat.v1.global_variables()]
output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=pruned_graph,
output_node_names=output_node_names,
16 changes: 13 additions & 3 deletions lucid/modelzoo/vision_base.py
@@ -175,7 +175,7 @@ def post_import(self, scope):
def create_input(self, t_input=None, forget_xy_shape=True):
"""Create input tensor."""
if t_input is None:
t_input = tf.placeholder(tf.float32, self.image_shape)
t_input = tf.compat.v1.placeholder(tf.float32, self.image_shape)
t_prep_input = t_input
if len(t_prep_input.shape) == 3:
t_prep_input = tf.expand_dims(t_prep_input, 0)
@@ -189,7 +189,7 @@

def import_graph(self, t_input=None, scope='import', forget_xy_shape=True, input_map=None):
"""Import model GraphDef into the current graph."""
graph = tf.get_default_graph()
graph = tf.compat.v1.get_default_graph()
assert graph.unique_name(scope, False) == scope, (
'Scope "%s" already exists. Provide explicit scope names when '
'importing multiple instances of the model.') % scope
@@ -210,6 +210,16 @@ def T(layer):
return T

def show_graph(self):
log.warning(
'''
show_graph no longer works with TF 2, use tensorboard and its jupyter magic extensions:
Example:
%load_ext tensorboard
model = ...
with tf.compat.v1.Graph().as_default():
tf.compat.v1.summary.FileWriter('logs/train', graph=model.graph_def).close()
%tensorboard --logdir logs/train
''')
if self.graph_def is None:
raise Exception("Model.show_graph(): Must load graph def before showing it.")
showing.graph(self.graph_def)
@@ -231,7 +241,7 @@ def get_layer(self, name):
@staticmethod
def suggest_save_args(graph_def=None):
if graph_def is None:
graph_def = tf.get_default_graph().as_graph_def()
graph_def = tf.compat.v1.get_default_graph().as_graph_def()
gdhelper = model_util.GraphDefHelper(graph_def)
inferred_info = dict.fromkeys(("input_name", "image_shape", "output_names", "image_value_range"))
node_shape = lambda n: [dim.size for dim in n.attr['shape'].shape.dim]
16 changes: 13 additions & 3 deletions lucid/optvis/objectives.py
@@ -184,7 +184,10 @@ def inner(T):
def direction(layer, vec, cossim_pow=0, batch=None):
"""Visualize a direction"""
vec = vec[None, None, None]
vec = vec.astype("float32")
if isinstance(vec, tf.Tensor):
vec = tf.cast(vec, tf.float32)
else:
vec = vec.astype("float32")

@handle_batch(batch)
def inner(T):
@@ -196,7 +199,11 @@ def inner(T):
@wrap_objective(require_format='NHWC')
def direction_neuron(layer_name, vec, x=None, y=None, cossim_pow=0, batch=None):
"""Visualize a single (x, y) position along the given direction"""
vec = vec.astype("float32")
if isinstance(vec, tf.Tensor):
vec = tf.cast(vec, tf.float32)
else:
vec = vec.astype("float32")

@handle_batch(batch)
def inner(T):
layer = T(layer_name)
@@ -209,7 +216,10 @@ def inner(T):
def tensor_direction(layer, vec, cossim_pow=0, batch=None):
"""Visualize a tensor."""
assert len(vec.shape) in [3,4]
vec = vec.astype("float32")
if isinstance(vec, tf.Tensor):
vec = tf.cast(vec, tf.float32)
else:
vec = vec.astype("float32")
if len(vec.shape) == 3:
vec = vec[None]
@handle_batch(batch)
7 changes: 4 additions & 3 deletions lucid/optvis/param/cppn.py
@@ -18,7 +18,8 @@

import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
# from tensorflow.contrib import slim
import tf_slim as slim


def _composite_activation(x, biased=True):
@@ -83,8 +84,8 @@ def cppn(
[slim.conv2d],
kernel_size=[1, 1],
activation_fn=None,
weights_initializer=tf.initializers.variance_scaling(),
biases_initializer=tf.initializers.random_normal(0.0, 0.1),
weights_initializer=tf.compat.v1.initializers.variance_scaling(),
biases_initializer=tf.compat.v1.initializers.random_normal(0.0, 0.1),
):
for i in range(num_layers):
x = slim.conv2d(net, num_hidden_channels)
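`tensorflow.contrib` was removed entirely in TF 2.x, so the slim layers used by `cppn` now come from the separately distributed `tf-slim` package (installable with `pip install tf-slim`) imported under the old alias. A brief sketch of that usage, assuming the package is installed:

```python
import tensorflow as tf
import tf_slim as slim  # pip install tf-slim; replaces tensorflow.contrib.slim

tf.compat.v1.disable_eager_execution()  # slim builds graph-mode variables and ops

images = tf.compat.v1.placeholder(tf.float32, [None, 64, 64, 3])
net = slim.conv2d(images, 8, kernel_size=[1, 1], activation_fn=tf.nn.relu,
                  weights_initializer=tf.compat.v1.initializers.variance_scaling())
print(net.shape)  # (None, 64, 64, 8)
```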
2 changes: 1 addition & 1 deletion lucid/optvis/param/random.py
@@ -34,7 +34,7 @@ def rand_fft_image(shape, sd=None, decay_power=1):
for _ in range(b):
freqs = rfft2d_freqs(h, w)
fh, fw = freqs.shape
spectrum_var = sd * tf.random_normal([2, ch, fh, fw], dtype="float32")
spectrum_var = sd * tf.random.normal([2, ch, fh, fw], dtype="float32")
spectrum = tf.complex(spectrum_var[0], spectrum_var[1])
spertum_scale = 1.0 / np.maximum(freqs, 1.0 / max(h, w)) ** decay_power
# Scale the spectrum by the square-root of the number of pixels