keract: Using an input other than the one provided by the pre-trained model fails.

I have a fine-tuned ResNet50 model whose activations I'm trying to visualize, but I'm facing a few issues when calling the get_activations function. Code and output below:

Building model fn:

def load_and_configure_model(model_name, optimizer, loss, metrics, path):
  """Build a transfer-learning classifier and optionally load fine-tuned weights.

  Takes an ImageNet-pre-trained ResNet50, keeps everything up to the
  'avg_pool' layer as a feature extractor, freezes all of it except the last
  residual block ('conv5_block3'), and stacks a 250-way softmax head on top.

  :param model_name: architecture to build; only 'resnet50' is supported
  :param optimizer: Keras optimizer instance used to compile the model
  :param loss: loss function used to compile the model
  :param metrics: list of metrics used to compile the model
  :param path: optional path to a weights file to load, or None
  :return: the compiled (and optionally weight-loaded) model
  :raises ValueError: if model_name is not a supported architecture
  """
  num_classes = 250
  model = None
  if model_name == 'resnet50':
    base = ResNet50(include_top=True, weights='imagenet')
    transfer_layer = base.get_layer('avg_pool')
    conv_model = Model(inputs=base.input, outputs=transfer_layer.output)
    conv_model.trainable = True
    # Fine-tune only the last residual block; freeze everything else.
    # (The original looped twice — once setting everything False, then
    # immediately overwriting; a single loop is equivalent.)
    for layer in conv_model.layers:
      layer.trainable = 'conv5_block3' in layer.name
    transferred_resnet50 = Sequential()
    transferred_resnet50.add(conv_model)
    transferred_resnet50.add(Dense(num_classes, activation='softmax'))
    transferred_resnet50.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    model = transferred_resnet50
  if model is None:
    # Previously `model` was initialised to 0, so an unknown name crashed
    # later with a confusing AttributeError (0.load_weights); fail fast.
    raise ValueError('Unsupported model_name: {}'.format(model_name))
  if path is not None:
    model.load_weights(path)
  return model

Loading model & img

# Path to the fine-tuned weights stored on Drive.
weight_file = DRIVE_DIR+'ImageCLEF2013PlantTask/Models/ResNet50/crop_and_translate/resnet_50.h5' 
loss = tf.keras.losses.sparse_categorical_crossentropy

metrics = ['accuracy']
optimizer = Adam(lr=2e-6)  # NOTE(review): `lr` is deprecated in newer TF; `learning_rate` is preferred

# Rebuild the transfer-learning model and load the fine-tuned weights.
resnet_50_crop_and_translate = load_and_configure_model('resnet50', optimizer, loss, metrics, weight_file)
resnet_50_crop_and_translate.compile(optimizer=optimizer,loss=loss)

# Load the test image and resize it to the 224x224 input ResNet50 expects.
img = cv2.imread(DRIVE_DIR + DATA_DIR+'/visualization/test_image.jpg',cv2.IMREAD_UNCHANGED)
res = cv2.resize(img, dsize=(224, 224))

# Request activations for every layer; expand_dims adds the batch axis.
activations = keract.get_activations(resnet_50_crop_and_translate, np.expand_dims(res,axis=0), nodes_to_evaluate=None, output_format='full', nested=False, auto_compile=False)

First error (warnings concerning eager execution and other related exceptions also seemed to be thrown):

Run it without eager mode. Paste those commands at the beginning of your script:
> import tensorflow as tf
> tf.compat.v1.disable_eager_execution()
Run it without eager mode. Paste those commands at the beginning of your script:
> import tensorflow as tf
> tf.compat.v1.disable_eager_execution()

ValueError: Tensor Tensor("avg_pool_1/Mean:0", shape=(None, 2048), dtype=float32) is not an element of this graph.

Then when I have also tried:

import keras.backend as K
# Clearing the Keras session resets the default graph, which invalidates the
# tensors held by the already-built model (hence the KeyError below).
K.clear_session()
activations = keract.get_activations(resnet_50_crop_and_translate, np.expand_dims(res,axis=0), nodes_to_evaluate=None, output_format='full', nested=False, auto_compile=False)

Which raised:

---------------------------------------------------------------------------

KeyError                                  Traceback (most recent call last)

<ipython-input-17-cb7cc7958838> in <module>()
----> 1 activations = keract.get_activations(resnet_50_crop_and_translate, np.expand_dims(res,axis=0),layer_names=['conv5_block3_out'], nodes_to_evaluate=None, output_format='full', nested=False, auto_compile=False)

/usr/local/lib/python3.6/dist-packages/keract/keract.py in get_activations(model, x, layer_names, nodes_to_evaluate, output_format, nested, auto_compile)
    319             network_layers = ', '.join([layer.name for layer in model.layers])
    320             raise KeyError('Could not find a layer with name: [{}]. '
--> 321                            'Network layers are [{}]'.format(', '.join(layer_names), network_layers))
    322         else:
    323             raise ValueError('Nodes list is empty. Or maybe the model is empty.')

KeyError: 'Could not find a layer with name: [conv5_block3_out]. Network layers are [model_1, dense_1]'

Full trace from error no. 1 (without specifying layer names):


Run it without eager mode. Paste those commands at the beginning of your script:
> import tensorflow as tf
> tf.compat.v1.disable_eager_execution()
Run it without eager mode. Paste those commands at the beginning of your script:
> import tensorflow as tf
> tf.compat.v1.disable_eager_execution()

---------------------------------------------------------------------------

ValueError                                Traceback (most recent call last)

/usr/local/lib/python3.6/dist-packages/keract/keract.py in _evaluate(model, nodes_to_evaluate, x, y, auto_compile)
    102     try:
--> 103         return eval_fn(model._feed_inputs + model._feed_targets + model._feed_sample_weights)
    104     except Exception:

18 frames

/usr/local/lib/python3.6/dist-packages/keract/keract.py in eval_fn(k_inputs)
     99             print('> tf.compat.v1.disable_eager_execution()')
--> 100             raise e
    101 

/usr/local/lib/python3.6/dist-packages/keract/keract.py in eval_fn(k_inputs)
     90         try:
---> 91             return K.function(k_inputs, nodes_to_evaluate)(model._standardize_user_data(x, y))
     92         except AttributeError:  # one way to avoid forcing non eager mode.

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py in function(inputs, outputs, updates, name, **kwargs)
   4086   return GraphExecutionFunction(
-> 4087       inputs, outputs, updates=updates, name=name, **kwargs)
   4088 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py in __init__(self, inputs, outputs, updates, name, **session_kwargs)
   3809     # Index 0 = total loss or model output for `predict`.
-> 3810     with ops.control_dependencies([self.outputs[0]]):
   3811       updates_ops = []

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in control_dependencies(control_inputs)
   5358   else:
-> 5359     return get_default_graph().control_dependencies(control_inputs)
   5360 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in control_dependencies(self, control_inputs)
   4814         c = c.op
-> 4815       c = self.as_graph_element(c)
   4816       if isinstance(c, Tensor):

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in as_graph_element(self, obj, allow_tensor, allow_operation)
   3725     with self._lock:
-> 3726       return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
   3727 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in _as_graph_element_locked(self, obj, allow_tensor, allow_operation)
   3804       if obj.graph is not self:
-> 3805         raise ValueError("Tensor %s is not an element of this graph." % obj)
   3806       return obj

ValueError: Tensor Tensor("avg_pool_1/Mean:0", shape=(None, 2048), dtype=float32) is not an element of this graph.


During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)

<ipython-input-18-1fc67112db22> in <module>()
----> 1 activations = keract.get_activations(resnet_50_crop_and_translate, np.expand_dims(res,axis=0), nodes_to_evaluate=None, output_format='full', nested=False, auto_compile=False)

/usr/local/lib/python3.6/dist-packages/keract/keract.py in get_activations(model, x, layer_names, nodes_to_evaluate, output_format, nested, auto_compile)
    347 
    348     if len(layer_outputs) > 0:
--> 349         activations = _evaluate(model, layer_outputs.values(), x, y=None, auto_compile=auto_compile)
    350     else:
    351         activations = {}

/usr/local/lib/python3.6/dist-packages/keract/keract.py in _evaluate(model, nodes_to_evaluate, x, y, auto_compile)
    114                         acts.append(n.numpy())
    115             return acts
--> 116         return eval_fn(model._feed_inputs)
    117 
    118 

/usr/local/lib/python3.6/dist-packages/keract/keract.py in eval_fn(k_inputs)
     98             print('> import tensorflow as tf')
     99             print('> tf.compat.v1.disable_eager_execution()')
--> 100             raise e
    101 
    102     try:

/usr/local/lib/python3.6/dist-packages/keract/keract.py in eval_fn(k_inputs)
     89     def eval_fn(k_inputs):
     90         try:
---> 91             return K.function(k_inputs, nodes_to_evaluate)(model._standardize_user_data(x, y))
     92         except AttributeError:  # one way to avoid forcing non eager mode.
     93             if y is None:  # tf 2.3.0 upgrade compatibility.

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py in function(inputs, outputs, updates, name, **kwargs)
   4085         raise ValueError(msg)
   4086   return GraphExecutionFunction(
-> 4087       inputs, outputs, updates=updates, name=name, **kwargs)
   4088 
   4089 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py in __init__(self, inputs, outputs, updates, name, **session_kwargs)
   3808     # dependencies in call.
   3809     # Index 0 = total loss or model output for `predict`.
-> 3810     with ops.control_dependencies([self.outputs[0]]):
   3811       updates_ops = []
   3812       for update in updates:

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in control_dependencies(control_inputs)
   5357     return NullContextmanager()
   5358   else:
-> 5359     return get_default_graph().control_dependencies(control_inputs)
   5360 
   5361 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in control_dependencies(self, control_inputs)
   4813           (hasattr(c, "_handle") and hasattr(c, "op"))):
   4814         c = c.op
-> 4815       c = self.as_graph_element(c)
   4816       if isinstance(c, Tensor):
   4817         c = c.op

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in as_graph_element(self, obj, allow_tensor, allow_operation)
   3724 
   3725     with self._lock:
-> 3726       return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
   3727 
   3728   def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in _as_graph_element_locked(self, obj, allow_tensor, allow_operation)
   3803       # Actually obj is just the object it's referring to.
   3804       if obj.graph is not self:
-> 3805         raise ValueError("Tensor %s is not an element of this graph." % obj)
   3806       return obj
   3807     elif isinstance(obj, Operation) and allow_operation:

ValueError: Tensor Tensor("avg_pool_1/Mean:0", shape=(None, 2048), dtype=float32) is not an element of this graph.

About this issue

  • Original URL
  • State: closed
  • Created 3 years ago
  • Comments: 20 (10 by maintainers)

Most upvoted comments

But that workaround should serve you for now. The trick is to use the default Input tensor provided by the pre-trained model and push extra information (pack num_slices into the first dimension), call the pre-trained model, then un-squeeze the first dimension to recover batch_size and num_slices.

import warnings

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Lambda
from tensorflow.python.keras.applications.inception_v3 import InceptionV3

from utils import print_names_and_shapes

# keract's graph-mode evaluation requires eager execution to be disabled.
tf.compat.v1.disable_eager_execution()
warnings.filterwarnings("ignore")
np.random.seed(40)  # reproducible random input below

from tensorflow.keras.layers import Dense, Dropout, GlobalAveragePooling2D, LSTM, Bidirectional
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam

import keract

import os

# Force CPU execution (hide all CUDA devices).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ''

# Spatial size of each slice, and number of slices packed into the batch axis.
IMAGE_SIZE = 300
N_SLICES = 30


def build_model():
    """Stack a bidirectional-LSTM classifier head on an InceptionV3 backbone.

    The first input dimension packs batch_size * N_SLICES images; after
    global pooling it is un-squeezed back into (batch_size, N_SLICES, 2048)
    so the recurrent head can treat slices as time steps.

    :return: an uncompiled Keras Model named 'm2'
    """
    backbone = InceptionV3(include_top=False)
    features = GlobalAveragePooling2D()(backbone.output)  # (batch*slices, 2048)
    feature_dim = features.shape[-1]
    # (batch_size*num_slices, 2048) -> (batch_size, num_slices, 2048)
    sequences = Lambda(lambda t: K.reshape(t, (-1, N_SLICES, feature_dim)))(features)

    # Recurrent head over the slice sequence: (batch, time_steps, 2048).
    head = Bidirectional(LSTM(1000))(sequences)
    head = Dropout(0.5)(head)
    head = Dense(512, activation='relu')(head)
    head = Dropout(0.3)(head)
    outputs = Dense(3, activation='softmax', name='out')(head)

    return Model(backbone.input, outputs, name='m2')


def compile_model(model, learning_rate=1e-5):
    """Compile *model* with the Adam optimizer and binary cross-entropy loss.

    :param model: built (uncompiled) Keras model
    :param learning_rate: Adam learning rate
    :return: the same model, compiled in place
    """

    optimizer = Adam(lr=learning_rate)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer)
    return model


model = build_model()
model = compile_model(model, learning_rate=1e-4)

# The first dimension contains actually two dimensions:
# - the slices dimension
# - the batch dimension
batch_size = 2
inputs = np.random.uniform(size=(N_SLICES * batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
print(model.predict_on_batch(inputs).shape)
# nested=True groups the inner (pre-trained) model's activations under it.
activations = keract.get_activations(model, inputs, nested=True)
print_names_and_shapes(activations)

From this reference: https://keras.io/api/applications/

You can do it easily like that:

import numpy as np
from tensorflow.python.keras import Model
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.layers import Dense

import keract


def load_and_configure_model(path):
    """Attach a 250-way softmax head to a headless ResNet50.

    :param path: optional weights file; loaded by name, mismatches skipped
    :return: the assembled (uncompiled) Keras Model
    """
    backbone = ResNet50(include_top=False)
    # New head for the 250 target classes.
    head = Dense(250, activation='softmax', name='custom')(backbone.output)
    model = Model(inputs=backbone.input, outputs=head)
    if path is not None:
        model.load_weights(path, by_name=True, skip_mismatch=True)
    return model


resnet_50_crop = load_and_configure_model(path=None)

# random image.
img = np.random.uniform(size=(1, 224, 224, 3))

# (1, 7, 7, 2048).
# too many channels to display here.
activations = keract.get_activations(resnet_50_crop, img, layer_names='conv5_block3_out')
# Keep only the first 64 channels so the figure stays readable.
activations['conv5_block3_out'] = activations['conv5_block3_out'][..., 0:64]
keract.display_activations(activations, fig_size=(4, 4))

image

By the way those are the shapes of each layer output for ResNet50:

input_1 (1, 224, 224, 3)
conv1_pad (1, 230, 230, 3)
conv1_conv (1, 112, 112, 64)
conv1_bn (1, 112, 112, 64)
conv1_relu (1, 112, 112, 64)
pool1_pad (1, 114, 114, 64)
pool1_pool (1, 56, 56, 64)
conv2_block1_1_conv (1, 56, 56, 64)
conv2_block1_1_bn (1, 56, 56, 64)
conv2_block1_1_relu (1, 56, 56, 64)
conv2_block1_2_conv (1, 56, 56, 64)
conv2_block1_2_bn (1, 56, 56, 64)
conv2_block1_2_relu (1, 56, 56, 64)
conv2_block1_0_conv (1, 56, 56, 256)
conv2_block1_3_conv (1, 56, 56, 256)
conv2_block1_0_bn (1, 56, 56, 256)
conv2_block1_3_bn (1, 56, 56, 256)
conv2_block1_add (1, 56, 56, 256)
conv2_block1_out (1, 56, 56, 256)
conv2_block2_1_conv (1, 56, 56, 64)
conv2_block2_1_bn (1, 56, 56, 64)
conv2_block2_1_relu (1, 56, 56, 64)
conv2_block2_2_conv (1, 56, 56, 64)
conv2_block2_2_bn (1, 56, 56, 64)
conv2_block2_2_relu (1, 56, 56, 64)
conv2_block2_3_conv (1, 56, 56, 256)
conv2_block2_3_bn (1, 56, 56, 256)
conv2_block2_add (1, 56, 56, 256)
conv2_block2_out (1, 56, 56, 256)
conv2_block3_1_conv (1, 56, 56, 64)
conv2_block3_1_bn (1, 56, 56, 64)
conv2_block3_1_relu (1, 56, 56, 64)
conv2_block3_2_conv (1, 56, 56, 64)
conv2_block3_2_bn (1, 56, 56, 64)
conv2_block3_2_relu (1, 56, 56, 64)
conv2_block3_3_conv (1, 56, 56, 256)
conv2_block3_3_bn (1, 56, 56, 256)
conv2_block3_add (1, 56, 56, 256)
conv2_block3_out (1, 56, 56, 256)
conv3_block1_1_conv (1, 28, 28, 128)
conv3_block1_1_bn (1, 28, 28, 128)
conv3_block1_1_relu (1, 28, 28, 128)
conv3_block1_2_conv (1, 28, 28, 128)
conv3_block1_2_bn (1, 28, 28, 128)
conv3_block1_2_relu (1, 28, 28, 128)
conv3_block1_0_conv (1, 28, 28, 512)
conv3_block1_3_conv (1, 28, 28, 512)
conv3_block1_0_bn (1, 28, 28, 512)
conv3_block1_3_bn (1, 28, 28, 512)
conv3_block1_add (1, 28, 28, 512)
conv3_block1_out (1, 28, 28, 512)
conv3_block2_1_conv (1, 28, 28, 128)
conv3_block2_1_bn (1, 28, 28, 128)
conv3_block2_1_relu (1, 28, 28, 128)
conv3_block2_2_conv (1, 28, 28, 128)
conv3_block2_2_bn (1, 28, 28, 128)
conv3_block2_2_relu (1, 28, 28, 128)
conv3_block2_3_conv (1, 28, 28, 512)
conv3_block2_3_bn (1, 28, 28, 512)
conv3_block2_add (1, 28, 28, 512)
conv3_block2_out (1, 28, 28, 512)
conv3_block3_1_conv (1, 28, 28, 128)
conv3_block3_1_bn (1, 28, 28, 128)
conv3_block3_1_relu (1, 28, 28, 128)
conv3_block3_2_conv (1, 28, 28, 128)
conv3_block3_2_bn (1, 28, 28, 128)
conv3_block3_2_relu (1, 28, 28, 128)
conv3_block3_3_conv (1, 28, 28, 512)
conv3_block3_3_bn (1, 28, 28, 512)
conv3_block3_add (1, 28, 28, 512)
conv3_block3_out (1, 28, 28, 512)
conv3_block4_1_conv (1, 28, 28, 128)
conv3_block4_1_bn (1, 28, 28, 128)
conv3_block4_1_relu (1, 28, 28, 128)
conv3_block4_2_conv (1, 28, 28, 128)
conv3_block4_2_bn (1, 28, 28, 128)
conv3_block4_2_relu (1, 28, 28, 128)
conv3_block4_3_conv (1, 28, 28, 512)
conv3_block4_3_bn (1, 28, 28, 512)
conv3_block4_add (1, 28, 28, 512)
conv3_block4_out (1, 28, 28, 512)
conv4_block1_1_conv (1, 14, 14, 256)
conv4_block1_1_bn (1, 14, 14, 256)
conv4_block1_1_relu (1, 14, 14, 256)
conv4_block1_2_conv (1, 14, 14, 256)
conv4_block1_2_bn (1, 14, 14, 256)
conv4_block1_2_relu (1, 14, 14, 256)
conv4_block1_0_conv (1, 14, 14, 1024)
conv4_block1_3_conv (1, 14, 14, 1024)
conv4_block1_0_bn (1, 14, 14, 1024)
conv4_block1_3_bn (1, 14, 14, 1024)
conv4_block1_add (1, 14, 14, 1024)
conv4_block1_out (1, 14, 14, 1024)
conv4_block2_1_conv (1, 14, 14, 256)
conv4_block2_1_bn (1, 14, 14, 256)
conv4_block2_1_relu (1, 14, 14, 256)
conv4_block2_2_conv (1, 14, 14, 256)
conv4_block2_2_bn (1, 14, 14, 256)
conv4_block2_2_relu (1, 14, 14, 256)
conv4_block2_3_conv (1, 14, 14, 1024)
conv4_block2_3_bn (1, 14, 14, 1024)
conv4_block2_add (1, 14, 14, 1024)
conv4_block2_out (1, 14, 14, 1024)
conv4_block3_1_conv (1, 14, 14, 256)
conv4_block3_1_bn (1, 14, 14, 256)
conv4_block3_1_relu (1, 14, 14, 256)
conv4_block3_2_conv (1, 14, 14, 256)
conv4_block3_2_bn (1, 14, 14, 256)
conv4_block3_2_relu (1, 14, 14, 256)
conv4_block3_3_conv (1, 14, 14, 1024)
conv4_block3_3_bn (1, 14, 14, 1024)
conv4_block3_add (1, 14, 14, 1024)
conv4_block3_out (1, 14, 14, 1024)
conv4_block4_1_conv (1, 14, 14, 256)
conv4_block4_1_bn (1, 14, 14, 256)
conv4_block4_1_relu (1, 14, 14, 256)
conv4_block4_2_conv (1, 14, 14, 256)
conv4_block4_2_bn (1, 14, 14, 256)
conv4_block4_2_relu (1, 14, 14, 256)
conv4_block4_3_conv (1, 14, 14, 1024)
conv4_block4_3_bn (1, 14, 14, 1024)
conv4_block4_add (1, 14, 14, 1024)
conv4_block4_out (1, 14, 14, 1024)
conv4_block5_1_conv (1, 14, 14, 256)
conv4_block5_1_bn (1, 14, 14, 256)
conv4_block5_1_relu (1, 14, 14, 256)
conv4_block5_2_conv (1, 14, 14, 256)
conv4_block5_2_bn (1, 14, 14, 256)
conv4_block5_2_relu (1, 14, 14, 256)
conv4_block5_3_conv (1, 14, 14, 1024)
conv4_block5_3_bn (1, 14, 14, 1024)
conv4_block5_add (1, 14, 14, 1024)
conv4_block5_out (1, 14, 14, 1024)
conv4_block6_1_conv (1, 14, 14, 256)
conv4_block6_1_bn (1, 14, 14, 256)
conv4_block6_1_relu (1, 14, 14, 256)
conv4_block6_2_conv (1, 14, 14, 256)
conv4_block6_2_bn (1, 14, 14, 256)
conv4_block6_2_relu (1, 14, 14, 256)
conv4_block6_3_conv (1, 14, 14, 1024)
conv4_block6_3_bn (1, 14, 14, 1024)
conv4_block6_add (1, 14, 14, 1024)
conv4_block6_out (1, 14, 14, 1024)
conv5_block1_1_conv (1, 7, 7, 512)
conv5_block1_1_bn (1, 7, 7, 512)
conv5_block1_1_relu (1, 7, 7, 512)
conv5_block1_2_conv (1, 7, 7, 512)
conv5_block1_2_bn (1, 7, 7, 512)
conv5_block1_2_relu (1, 7, 7, 512)
conv5_block1_0_conv (1, 7, 7, 2048)
conv5_block1_3_conv (1, 7, 7, 2048)
conv5_block1_0_bn (1, 7, 7, 2048)
conv5_block1_3_bn (1, 7, 7, 2048)
conv5_block1_add (1, 7, 7, 2048)
conv5_block1_out (1, 7, 7, 2048)
conv5_block2_1_conv (1, 7, 7, 512)
conv5_block2_1_bn (1, 7, 7, 512)
conv5_block2_1_relu (1, 7, 7, 512)
conv5_block2_2_conv (1, 7, 7, 512)
conv5_block2_2_bn (1, 7, 7, 512)
conv5_block2_2_relu (1, 7, 7, 512)
conv5_block2_3_conv (1, 7, 7, 2048)
conv5_block2_3_bn (1, 7, 7, 2048)
conv5_block2_add (1, 7, 7, 2048)
conv5_block2_out (1, 7, 7, 2048)
conv5_block3_1_conv (1, 7, 7, 512)
conv5_block3_1_bn (1, 7, 7, 512)
conv5_block3_1_relu (1, 7, 7, 512)
conv5_block3_2_conv (1, 7, 7, 512)
conv5_block3_2_bn (1, 7, 7, 512)
conv5_block3_2_relu (1, 7, 7, 512)
conv5_block3_3_conv (1, 7, 7, 2048)
conv5_block3_3_bn (1, 7, 7, 2048)
conv5_block3_add (1, 7, 7, 2048)
conv5_block3_out (1, 7, 7, 2048)

Never mind what I said earlier — I realized that I was returning the model before loading the weights.

Fixed by doing:

def load_and_configure_model(model_name, optimizer, loss, metrics, path):
    """Build the transfer-learning ResNet50 classifier and load fine-tuned weights.

    Keeps the pre-trained backbone up to 'avg_pool', freezes everything
    except the last residual block ('conv5_block3'), and adds a 250-way
    softmax head.

    :param model_name: must be 'resnet50'
    :param optimizer: Keras optimizer used to compile the model
    :param loss: loss function used to compile the model
    :param metrics: metrics used to compile the model
    :param path: optional weights file loaded into the assembled model
    :return: the compiled transfer model (backbone + softmax head)
    """
    num_classes = 250
    assert model_name == 'resnet50'
    base = ResNet50(include_top=True, weights='imagenet')
    transfer_layer = base.get_layer('avg_pool')
    backbone = Model(inputs=base.input, outputs=transfer_layer.output)
    backbone.trainable = True
    # Fine-tune only the last residual block; freeze everything else.
    for layer in backbone.layers:
        layer.trainable = 'conv5_block3' in layer.name
    transferred_resnet50 = Sequential()
    transferred_resnet50.add(backbone)
    transferred_resnet50.add(Dense(num_classes, activation='softmax'))
    transferred_resnet50.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    if path is not None:
        transferred_resnet50.load_weights(path)
    # BUG FIX: the original returned the bare backbone (`model`), silently
    # discarding the softmax head and the freshly loaded weights.
    return transferred_resnet50

However, trying to get the activations from the fine-tuned model in fact leads to the same error:

Python Invalid argument: You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,224,224,3]

Great! Keras can sometimes be a pain to use.

@FalsoMoralista great! Ideally your case should be properly handled but it’s quite a pain to handle all the cases.