keras-vis: InvalidArgumentError: conv2d_1_input_1:0 is both fed and fetched
Below is the beginning of the normal Maximal Activations example from keras-vis (an MNIST CNN followed by visualize_activation). It gives the error: InvalidArgumentError: conv2d_1_input_1:0 is both fed and fetched.
What could be a solution for this?
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
x_train = x_train[:1000,:,:,:]
y_train = y_train[:1000]
x_test = x_test[:1000,:,:,:]
y_test = y_test[:1000]
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', name='preds'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
x_train shape: (1000, 28, 28, 1)
1000 train samples
1000 test samples
Train on 1000 samples, validate on 1000 samples
Epoch 1/1
1000/1000 [==============================] - 8s 8ms/step - loss: 1.9323 - acc: 0.4100 - val_loss: 1.2884 - val_acc: 0.6450
Test loss: 1.2884319705963134
Test accuracy: 0.645
from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# This is the output node we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-2-a8c2bcc889b0> in <module>()
     17 # This is the output node we want to maximize.
     18 filter_idx = 0
---> 19 img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
     20 plt.imshow(img[..., 0])

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/vis/visualization/activation_maximization.py in visualize_activation(model, layer_idx, filter_indices, wrt_tensor, seed_input, input_range, backprop_modifier, grad_modifier, act_max_weight, lp_norm_weight, tv_weight, **optimizer_params)
    110
    111     return visualize_activation_with_losses(model.input, losses, wrt_tensor,
---> 112                                             seed_input, input_range, **optimizer_params)

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/vis/visualization/activation_maximization.py in visualize_activation_with_losses(input_tensor, losses, wrt_tensor, seed_input, input_range, **optimizer_params)
     41
     42     opt = Optimizer(input_tensor, losses, input_range, wrt_tensor=wrt_tensor)
---> 43     img = opt.minimize(**optimizer_params)[0]
     44
     45     # If range has integer numbers, cast to 'uint8'

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/vis/optimizer.py in minimize(self, seed_input, max_iter, input_modifiers, grad_modifier, callbacks, verbose)
    142
    143             # 0 learning phase for 'test'
---> 144             computed_values = self.compute_fn([seed_input, 0])
    145             losses = computed_values[:len(self.loss_names)]
    146             named_losses = list(zip(self.loss_names, losses))

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2659                 return self._legacy_call(inputs)
   2660
---> 2661             return self._call(inputs)
   2662         else:
   2663             if py_any(is_tensor(x) for x in inputs):

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
   2628                                 feed_symbols,
   2629                                 symbol_vals,
---> 2630                                 session)
   2631         fetched = self._callable_fn(*array_vals)
   2632         return fetched[:len(self.outputs)]

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session)
   2580             callable_opts.target.append(self.updates_op.name)
   2581         # Create callable.
---> 2582         callable_fn = session._make_callable_from_options(callable_opts)
   2583         # Cache parameters corresponding to the generated callable, so that
   2584         # we can detect future mismatches and refresh the callable.

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _make_callable_from_options(self, callable_options)
   1478     """
   1479     self._extend_graph()
---> 1480     return BaseSession._Callable(self, callable_options)
   1481
   1482

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in __init__(self, session, callable_options)
   1439       else:
   1440         self._handle = tf_session.TF_DeprecatedSessionMakeCallable(
---> 1441             session._session, options_ptr, status)
   1442     finally:
   1443       tf_session.TF_DeleteBuffer(options_ptr)

~/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
    517             None, None,
    518             compat.as_text(c_api.TF_Message(self.status.status)),
---> 519             c_api.TF_GetCode(self.status.status))
    520     # Delete the underlying status object from memory otherwise it stays alive
    521     # as there is a reference to status from this from the traceback due to
InvalidArgumentError: conv2d_1_input_1:0 is both fed and fetched.
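A note on where the error comes from, which may help anyone debugging this: with Keras 2.2.x on the TensorFlow 1.x backend, K.function is built on session callables, and those refuse to return a tensor that is also being fed. The keras-vis Optimizer asks for exactly that combination, feeding the model input (the seed image) and fetching the same placeholder back as the optimized image. Here is a minimal sketch that reproduces the same class of error against the model trained above; it is illustrative only, since whether it triggers depends on the Keras/TensorFlow versions in use.
from keras import backend as K

# Feed model.input and also fetch it, mirroring what the unpatched Optimizer
# builds internally (compare compute_fn([seed_input, 0]) in the traceback):
probe = K.function([model.input, K.learning_phase()],
                   [model.input, model.output])

probe([x_test[:1], 0])
# -> InvalidArgumentError: ... is both fed and fetched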
- [x] Check that you are up-to-date with the master branch of keras-vis. You can update with: pip install git+git://github.com/raghakot/keras-vis.git --upgrade --no-deps
- [x] If running on TensorFlow, check that you are up-to-date with the latest version. The installation instructions can be found here.
- [ ] If running on Theano, check that you are up-to-date with the master branch of Theano. You can update with: pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps
- [x] Provide a link to a GitHub Gist of a Python script that can reproduce your issue (or just copy the script here if it is short).
About this issue
- Original URL
- State: closed
- Created 6 years ago
- Comments: 16 (1 by maintainers)
Commits related to this issue
- #119 correction example — committed to keisen/keras-vis by ogis-kubota 6 years ago
- #119 fixed — committed to keisen/keras-vis by keisen 6 years ago
- Fix: InvalidArgumentError: conv2d_1_input_1:0 is both fed and fetched (#120) * #119 fixed * Improve wrt_tensor instance check. Support when wrt_tensor isn't None and input_tensor and wrt_tensor a... — committed to raghakot/keras-vis by keisen 6 years ago
I manually deleted keras-vis and installed it again with
pip install git+https://github.com/raghakot/keras-vis.git --upgrade
and it is all good now 😃 Thank you @wookoouk
This patch is not released to PyPI yet, so you have to install keras-vis directly from the current master branch (as above).
The latest version from git works for me! 👍 Do you know when they plan to make a new release? Thanks.
@phylliskaka I was running into the same issue. This is a workaround: https://stackoverflow.com/a/55418198
@keisen I used your commands:
pip install git+https://github.com/raghakot/keras-vis.git
pip install git+https://github.com/raghakot/keras-vis.git --upgrade
but somehow it keeps installing 0.4.1. Could you share your TensorFlow and Keras versions? Mine are: tensorflow 1.13, keras 2.2.4, keras-vis 0.4.1.
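If pip keeps reporting 0.4.1, note that the version string alone may not tell you which code you have, since the patched master may still carry the same number (that is an assumption on my side, not something stated in this thread). Uninstalling first with pip uninstall keras-vis before reinstalling from git avoids pip deciding the requirement is already satisfied, and you can confirm what you are actually importing from Python; the names below all come from the traceback earlier in this issue.
import inspect
import vis
import vis.optimizer

# Confirms which installed copy of keras-vis is being imported.
print(vis.__file__)

# Shows whether the wrt_tensor handling in Optimizer matches the patched
# master or the old 0.4.1 release, independent of the version string.
print(inspect.getsource(vis.optimizer.Optimizer.__init__))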
@AlexanderZhujiageng, please refer to the diff below.
https://github.com/raghakot/keras-vis/pull/120/files/74f2d086feb9556cbb2f1cfd90d6d4f8c21f520d
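For anyone reading the thread without opening the link: as I understand the change (paraphrasing, not quoting the diff), the fix lives in vis/optimizer.py and amounts to having the Optimizer watch a copy of the tensor rather than the fed placeholder itself, so the backend function no longer fetches what it feeds. A hedged sketch of that idea in isolation; the K.identity wrapping is my reading of it, and the linked diff is the authoritative version.
from keras import backend as K

# Paraphrased idea only -- not the literal contents of the linked diff.
# Before: the watched tensor is the fed input placeholder itself.
wrt_tensor = model.input

# After: watch an identity copy, so the node that gets fetched is distinct
# from the placeholder that gets fed.
wrt_tensor = K.identity(model.input)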