
Cannot convert a symbolic Keras input/output to a numpy array... #234

Open
abrarum opened this issue Jan 21, 2021 · 1 comment


abrarum commented Jan 21, 2021

TensorFlow version: 2.4.0

Issue: I tried to reproduce this example code, but it fails with the error below (the same code works with TensorFlow 2.2.0): "TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model."
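
For context, the failing call is the keras-vis saliency example. A minimal sketch of what I am running is below; the model/data setup is paraphrased rather than copied from the example, so treat the stand-in architecture, the placeholder data, and the 'preds' layer name as assumptions:

```python
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Flatten, Dense
from vis.utils import utils
from vis.visualization import visualize_saliency

# Stand-in model and data (assumption: the original example trains an MNIST
# classifier whose final Dense layer is named 'preds').
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(10, activation='softmax', name='preds'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')

x_test = np.random.rand(10, 28, 28).astype('float32')  # placeholder test images
idx, class_idx = 0, 0

layer_idx = utils.find_layer_idx(model, 'preds')

# This is the call that raises the TypeError under TF 2.4.0 (see trace below);
# the same code runs fine under TF 2.2.0.
grads = visualize_saliency(model, layer_idx, filter_indices=class_idx,
                           seed_input=x_test[idx])
# Plot with 'jet' colormap to visualize as a heatmap.
plt.imshow(grads, cmap='jet')
plt.show()
```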

Trace:


TypeError Traceback (most recent call last)
<ipython-input-...> in <module>
15 #print(x_test[idx])
16
---> 17 grads = visualize_saliency(model, layer_idx, filter_indices=class_idx, seed_input=x_test[idx])
18 # Plot with 'jet' colormap to visualize as a heatmap.
19 plt.imshow(grads, cmap='jet')

~/Abrar/miniconda3/lib/python3.8/site-packages/vis/visualization/saliency.py in visualize_saliency(model, layer_idx, filter_indices, seed_input, backprop_modifier, grad_modifier)
124 (ActivationMaximization(model.layers[layer_idx], filter_indices), -1)
125 ]
--> 126 return visualize_saliency_with_losses(model.input, losses, seed_input, grad_modifier)
127
128

~/Abrar/miniconda3/lib/python3.8/site-packages/vis/visualization/saliency.py in visualize_saliency_with_losses(input_tensor, losses, seed_input, grad_modifier)
71 weighted losses.
72 """
---> 73 opt = Optimizer(input_tensor, losses, norm_grads=False)
74 grads = opt.minimize(seed_input=seed_input, max_iter=1, grad_modifier=grad_modifier, verbose=False)[1]
75

~/Abrar/miniconda3/lib/python3.8/site-packages/vis/optimizer.py in __init__(self, input_tensor, losses, input_range, wrt_tensor, norm_grads)
50
51 # Compute gradient of overall with respect to wrt tensor.
---> 52 grads = K.gradients(overall_loss, self.wrt_tensor)[0]
53 if norm_grads:
54 grads = grads / (K.sqrt(K.mean(K.square(grads))) + K.epsilon())

~/Abrar/miniconda3/lib/python3.8/site-packages/keras/backend/tensorflow_backend.py in symbolic_fn_wrapper(*args, **kwargs)
73 if _SYMBOLIC_SCOPE.value:
74 with get_graph().as_default():
---> 75 return func(*args, **kwargs)
76 else:
77 return func(*args, **kwargs)

~/Abrar/miniconda3/lib/python3.8/site-packages/keras/backend/tensorflow_backend.py in gradients(loss, variables)
3023 if _is_tf_1():
3024 return tf.gradients(loss, variables, colocate_gradients_with_ops=True)
-> 3025 return tf.gradients(loss, variables)
3026
3027

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/ops/gradients_impl.py in gradients_v2(ys, xs, grad_ys, name, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients)
313 # pylint: disable=protected-access
314 with ops.get_default_graph()._mutation_lock():
--> 315 return gradients_util._GradientsHelper(
316 ys, xs, grad_ys, name, True, gate_gradients,
317 aggregation_method, stop_gradients,

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/ops/gradients_util.py in _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients, src_graph)
526 # cluster ops for compilation.
527 gradient_uid = ops.get_default_graph().unique_name("uid")
--> 528 ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
529 xs = [
530 x.handle if resource_variable_ops.is_resource_variable(x) else x

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/indexed_slices.py in convert_n_to_tensor_or_indexed_slices(values, dtype, name)
383 value.
384 """
--> 385 return internal_convert_n_to_tensor_or_indexed_slices(
386 values=values, dtype=dtype, name=name, as_ref=False)
387

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/indexed_slices.py in internal_convert_n_to_tensor_or_indexed_slices(values, dtype, name, as_ref)
355 n = None if name is None else "%s_%d" % (name, i)
356 ret.append(
--> 357 internal_convert_to_tensor_or_indexed_slices(
358 value, dtype=dtype, name=n, as_ref=as_ref))
359 return ret

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/indexed_slices.py in internal_convert_to_tensor_or_indexed_slices(value, dtype, name, as_ref)
316 return value
317 else:
--> 318 return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
319
320

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/profiler/trace.py in wrapped(*args, **kwargs)
161 with Trace(trace_name, **trace_kwargs):
162 return func(*args, **kwargs)
--> 163 return func(*args, **kwargs)
164
165 return wrapped

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1538
1539 if ret is None:
-> 1540 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1541
1542 if ret is NotImplemented:

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
337 as_ref=False):
338 _ = as_ref
--> 339 return constant(v, dtype=dtype, name=name)
340
341

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
262 ValueError: if called on a symbolic tensor.
263 """
--> 264 return _constant_impl(value, dtype, shape, name, verify_shape=False,
265 allow_broadcast=True)
266

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
279 tensor_value = attr_value_pb2.AttrValue()
280 tensor_value.tensor.CopyFrom(
--> 281 tensor_util.make_tensor_proto(
282 value, dtype=dtype, shape=shape, verify_shape=verify_shape,
283 allow_broadcast=allow_broadcast))

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape, allow_broadcast)
433
434 if _is_array_like(values):
--> 435 values = np.asarray(values)
436
437 # We first convert value to a numpy array or scalar.

~/Abrar/miniconda3/lib/python3.8/site-packages/numpy/core/_asarray.py in asarray(a, dtype, order)
81
82 """
---> 83 return array(a, dtype, copy=False, order=order)
84
85

~/Abrar/miniconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/keras_tensor.py in __array__(self)
271
272 def __array__(self):
--> 273 raise TypeError(
274 'Cannot convert a symbolic Keras input/output to a numpy array. '
275 'This error may indicate that you're trying to pass a symbolic value '

TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
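
The bottom of the trace points at the cause: in TF 2.4 a Functional model's inputs/outputs are symbolic `KerasTensor` objects, and converting one with `np.asarray` (which `tf.gradients` ends up doing via `make_tensor_proto`) raises this TypeError by design. A standalone illustration, assuming only TF 2.4.x and a trivial Functional model:

```python
import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(4,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)

# In TF 2.4, model.input is a symbolic KerasTensor, not a graph tensor that
# old-style K.gradients() code (like keras-vis's Optimizer) can consume.
print(type(model.input))

# np.asarray() on a KerasTensor hits KerasTensor.__array__, which raises the
# same TypeError shown at the end of the trace above.
try:
    np.asarray(model.input)
except TypeError as e:
    print(e)
```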

@Sciprios

I have been getting the same issue; I have tried upgrading the tensorflow, keras, and keras-vis packages, to no avail.
