tensorflow: OperatorNotAllowedInGraphError: using a tf.Tensor as a Python bool is not allowed in Graph execution. Use Eager execution or decorate this function with @tf.function.
def unet(pretrained_weights = None,input_size = (256,256,1)):
inputs = keras.Input(shape = input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = Model(inputs = inputs, outputs = conv10)
def iou(y_pred, y_true):
y_pred = tf.cast((y_pred > 0), dtype=tf.float32)
i = tf.reduce_sum(y_true * y_pred)
u = tf.reduce_sum(y_true + y_pred)
return (i / u).item()if u != 0 else u.item()
model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy',iou])
model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return `model`
model = unet()
When I run the code above, I encounter the following errors:
OperatorNotAllowedInGraphError Traceback (most recent call last)
in
----> 1 model = unet()
in unet(pretrained_weights, input_size)
51 return (i / u).item()if u != 0 else u.item()
52
---> 53 model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy',iou])
54
55 model.summary()
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, distribute, **kwargs)
437 targets=self._targets,
438 skip_target_masks=self._prepare_skip_target_masks(),
--> 439 masks=self._prepare_output_masks())
440
441 # Prepare sample weight modes. List with the same length as model outputs.
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _handle_metrics(self, outputs, targets, skip_target_masks, sample_weights, masks, return_weighted_metrics, return_weighted_and_unweighted_metrics)
2002 metric_results.extend(
2003 self._handle_per_output_metrics(self._per_output_metrics[i],
-> 2004 target, output, output_mask))
2005 if return_weighted_and_unweighted_metrics or return_weighted_metrics:
2006 metric_results.extend(
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _handle_per_output_metrics(self, metrics_dict, y_true, y_pred, mask, weights)
1953 with K.name_scope(metric_name):
1954 metric_result = training_utils.call_metric_function(
-> 1955 metric_fn, y_true, y_pred, weights=weights, mask=mask)
1956 metric_results.append(metric_result)
1957 return metric_results
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py in call_metric_function(metric_fn, y_true, y_pred, weights, mask)
1153
1154 if y_pred is not None:
-> 1155 return metric_fn(y_true, y_pred, sample_weight=weights)
1156 # Mean metric only takes a single value.
1157 return metric_fn(y_true, sample_weight=weights)
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\metrics.py in call(self, *args, **kwargs)
194 from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top
195 return distributed_training_utils.call_replica_local_fn(
--> 196 replica_local_fn, *args, **kwargs)
197
198 @Property
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\distribute\distributed_training_utils.py in call_replica_local_fn(fn, *args, **kwargs)
1133 with strategy.scope():
1134 return strategy.extended.call_for_each_replica(fn, args, kwargs)
-> 1135 return fn(*args, **kwargs)
1136
1137
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\metrics.py in replica_local_fn(*args, **kwargs)
177 def replica_local_fn(*args, **kwargs):
178 """Updates the state of the metric in a replica-local context."""
--> 179 update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
180 with ops.control_dependencies([update_op]):
181 result_t = self.result() # pylint: disable=not-callable
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\utils\metrics_utils.py in decorated(metric_obj, *args, **kwargs)
74
75 with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
---> 76 update_op = update_state_fn(*args, **kwargs)
77 if update_op is not None: # update_op will be None in eager execution.
78 metric_obj.add_update(update_op)
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\metrics.py in update_state(self, y_true, y_pred, sample_weight)
585 y_pred, y_true)
586
--> 587 matches = self._fn(y_true, y_pred, **self._fn_kwargs)
588 return super(MeanMetricWrapper, self).update_state(
589 matches, sample_weight=sample_weight)
in iou(y_pred, y_true)
49 i = tf.reduce_sum(y_true * y_pred)
50 u = tf.reduce_sum(y_true + y_pred)
---> 51 return (i / u).item()if u != 0 else u.item()
52
53 model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy',iou])
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\framework\ops.py in bool(self)
755 TypeError.
756 """
--> 757 self._disallow_bool_casting()
758
759 def nonzero(self):
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\framework\ops.py in _disallow_bool_casting(self)
524 else:
525 # Default: V1-style Graph execution.
--> 526 self._disallow_in_graph_mode("using a tf.Tensor as a Python bool")
527
528 def _disallow_iteration(self):
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\framework\ops.py in _disallow_in_graph_mode(self, task)
513 raise errors.OperatorNotAllowedInGraphError(
514 "{} is not allowed in Graph execution. Use Eager execution or decorate"
--> 515 " this function with @tf.function.".format(task))
516
517 def _disallow_bool_casting(self):
OperatorNotAllowedInGraphError: using a tf.Tensor as a Python bool is not allowed in Graph execution. Use Eager execution or decorate this function with @tf.function.
How can I correct my code so that I can run it successfully?
About this issue
- Original URL
- State: closed
- Created 4 years ago
- Comments: 22 (7 by maintainers)
@amahendrakar That’s not a solution. If it was due to a bug then this bug is again present in tf2.3.0. Can you please elaborate more on the nature of the problem and potential areas to explore to fix it without modifying the tf version. Thanks!
I got same error when i use this code in google colab but i am using SSIM loss please correct it.
OperatorNotAllowedInGraphError: using a
tf.Tensoras a Pythonboolis not allowed in Graph execution. Use Eager execution or decorate this function with @tf.function.@amahendrakar, Thank you! The problem have been resolved.
@mkw18, Could you please provide the TensorFlow version you are using to run the code.
I am able to run the code without any issues with TF v2.2. Please find the gist of it here. Thanks!