@Tahsin-Mayeesha (created August 22, 2019 16:32)
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
~\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in get_attr(self, name)
   2325       with c_api_util.tf_buffer() as buf:
-> 2326         c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
   2327         data = c_api.TF_GetBuffer(buf)

InvalidArgumentError: Operation 'StatefulPartitionedCall' has no attr named '_XlaCompile'.

During handling of the above exception, another exception occurred:
ValueError                                Traceback (most recent call last)
~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _MaybeCompile(scope, op, func, grad_fn)
    343   try:
--> 344     xla_compile = op.get_attr("_XlaCompile")
    345     xla_separate_compiled_gradients = op.get_attr(

~\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in get_attr(self, name)
   2329       # Convert to ValueError for backwards compatibility.
-> 2330       raise ValueError(str(e))
   2331     x = attr_value_pb2.AttrValue()

ValueError: Operation 'StatefulPartitionedCall' has no attr named '_XlaCompile'.

During handling of the above exception, another exception occurred:
InvalidArgumentError                      Traceback (most recent call last)
~\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in get_attr(self, name)
   2325       with c_api_util.tf_buffer() as buf:
-> 2326         c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
   2327         data = c_api.TF_GetBuffer(buf)

InvalidArgumentError: Operation 'lstm/StatefulPartitionedCall' has no attr named '_XlaCompile'.

During handling of the above exception, another exception occurred:
ValueError                                Traceback (most recent call last)
~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _MaybeCompile(scope, op, func, grad_fn)
    343   try:
--> 344     xla_compile = op.get_attr("_XlaCompile")
    345     xla_separate_compiled_gradients = op.get_attr(

~\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in get_attr(self, name)
   2329       # Convert to ValueError for backwards compatibility.
-> 2330       raise ValueError(str(e))
   2331     x = attr_value_pb2.AttrValue()

ValueError: Operation 'lstm/StatefulPartitionedCall' has no attr named '_XlaCompile'.

During handling of the above exception, another exception occurred:
LookupError                               Traceback (most recent call last)
~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients, src_graph)
    618         try:
--> 619           grad_fn = ops.get_gradient_function(op)
    620         except LookupError:

~\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in get_gradient_function(op)
   2461   op_type = op.type
-> 2462   return _gradient_registry.lookup(op_type)
   2463 

~\Anaconda3\lib\site-packages\tensorflow\python\framework\registry.py in lookup(self, name)
     96       raise LookupError(
---> 97           "%s registry has no entry for: %s" % (self._name, name))

LookupError: gradient registry has no entry for: While

During handling of the above exception, another exception occurred:
LookupError                               Traceback (most recent call last)
<ipython-input-8-8f00f4e0d00d> in <module>
      1 labels = [[0,0],[1,0]]
      2 with tf.GradientTape() as tape:
----> 3     predictions = model(sentences)
      4     print(predictions)
      5     loss = loss_object(labels, predictions)

<ipython-input-3-74deeba11307> in __call__(self, sentences)
     21 
     22         #tokens,lookup_ids = self.language_module._tokens_to_lookup_ids(sentences)
---> 23         self.enc_out = self.language_module.return_encoder_output(sentences)
     24         last_h = self.enc_out[:,-1,:]
     25         max_pool_output = self.max_pool_layer(self.enc_out)

~\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
    432           *args, **kwds)
    433       # If we did not create any variables the trace we have is good enough.
--> 434       return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds)  # pylint: disable=protected-access
    435 
    436     def fn_with_cond(*inner_args, **inner_kwds):

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs)
    587     """
    588     return self._call_flat(
--> 589         (t for t in nest.flatten((args, kwargs), expand_composites=True)
    590          if isinstance(t, (ops.Tensor,
    591                            resource_variable_ops.ResourceVariable))))

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args)
    663         tape.should_record(self._captured_inputs)):
    664       if context.executing_eagerly():
--> 665         return self._eager_backprop_call(args)
    666       else:
    667         return self._backprop_call_with_delayed_rewrite(args)

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _eager_backprop_call(self, args)
    879     """
    880     if self._backward_graph_function is None:
--> 881       self._construct_backprop_function()
    882 
    883     ctx = context.context()

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _construct_backprop_function(self)
    832         self._func_graph.inputs,
    833         grad_ys=gradients_wrt_outputs,
--> 834         src_graph=self._func_graph)
    835 
    836     backwards_graph_captures = list(backwards_graph.captures.keys())

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients, src_graph)
    675               # functions.
    676               in_grads = _MaybeCompile(grad_scope, op, func_call,
--> 677                                        lambda: grad_fn(op, *out_grads))
    678             else:
    679               # For function call ops, we add a 'SymbolicGradient'

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _MaybeCompile(scope, op, func, grad_fn)
    347     xla_scope = op.get_attr("_XlaScope").decode()
    348   except ValueError:
--> 349     return grad_fn()  # Exit early
    350 
    351   if not xla_compile:

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in <lambda>()
    675               # functions.
    676               in_grads = _MaybeCompile(grad_scope, op, func_call,
--> 677                                        lambda: grad_fn(op, *out_grads))
    678             else:
    679               # For function call ops, we add a 'SymbolicGradient'

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _registered_grad_fn(op, *doutputs)
    691     @ops.RegisterGradient(self._gradient_name)
    692     def _registered_grad_fn(op, *doutputs):  # pylint: disable=unused-variable
--> 693       return self._grad_fn(op, *doutputs)
    694 
    695   def _grad_fn(self, op, *doutputs):

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _grad_fn(self, op, *doutputs)
    696     """Gradients of this function."""
    697     if self._backward_graph_function is None:
--> 698       self._construct_backprop_function()
    699 
    700     # pylint: disable=protected-access

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _construct_backprop_function(self)
    832         self._func_graph.inputs,
    833         grad_ys=gradients_wrt_outputs,
--> 834         src_graph=self._func_graph)
    835 
    836     backwards_graph_captures = list(backwards_graph.captures.keys())

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients, src_graph)
    675               # functions.
    676               in_grads = _MaybeCompile(grad_scope, op, func_call,
--> 677                                        lambda: grad_fn(op, *out_grads))
    678             else:
    679               # For function call ops, we add a 'SymbolicGradient'

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _MaybeCompile(scope, op, func, grad_fn)
    347     xla_scope = op.get_attr("_XlaScope").decode()
    348   except ValueError:
--> 349     return grad_fn()  # Exit early
    350 
    351   if not xla_compile:

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in <lambda>()
    675               # functions.
    676               in_grads = _MaybeCompile(grad_scope, op, func_call,
--> 677                                        lambda: grad_fn(op, *out_grads))
    678             else:
    679               # For function call ops, we add a 'SymbolicGradient'

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _registered_grad_fn(op, *doutputs)
    691     @ops.RegisterGradient(self._gradient_name)
    692     def _registered_grad_fn(op, *doutputs):  # pylint: disable=unused-variable
--> 693       return self._grad_fn(op, *doutputs)
    694 
    695   def _grad_fn(self, op, *doutputs):

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _grad_fn(self, op, *doutputs)
    696     """Gradients of this function."""
    697     if self._backward_graph_function is None:
--> 698       self._construct_backprop_function()
    699 
    700     # pylint: disable=protected-access

~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _construct_backprop_function(self)
    832         self._func_graph.inputs,
    833         grad_ys=gradients_wrt_outputs,
--> 834         src_graph=self._func_graph)
    835 
    836     backwards_graph_captures = list(backwards_graph.captures.keys())

~\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_util.py in _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients, src_graph)
    633             raise LookupError(
    634                 "No gradient defined for operation '%s' (op type: %s)" %
--> 635                 (op.name, op.type))
    636           if loop_state:
    637             loop_state.EnterGradWhileContext(op, before=False)

LookupError: No gradient defined for operation 'while' (op type: While)
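
What the traceback boils down to: the forward pass goes through the hub language module's return_encoder_output, a separately traced tf.function whose LSTM lowers to a functional While op (visible as 'lstm/StatefulPartitionedCall' above). When tf.GradientTape asks TensorFlow to build the backward function, the gradient registry in this TensorFlow build has no entry for While, hence the final LookupError. The earlier _XlaCompile / _XlaScope errors are red herrings: _MaybeCompile probes those attributes and treats the ValueError as "XLA not in use"; IPython simply displays the whole exception chain.

Two workarounds were commonly reported for this error. First, upgrade TensorFlow: later releases register a gradient for the functional While op (control flow v2). Second, keep the recurrent encoder inside the tape's own trace rather than behind a separately traced function. Below is a minimal sketch of the second option with a plain Keras LSTM standing in for the hub encoder; the layer sizes, optimizer, and input shapes are assumptions for illustration, not taken from the gist.

import tensorflow as tf

# Stand-ins for the hub module's encoder and the classification head.
# A Keras LSTM also lowers to a While loop, but here it is traced together
# with the loss inside one tf.function, so the backward graph is built for
# the whole computation at once.
lstm = tf.keras.layers.LSTM(128, return_sequences=True)  # width assumed
classifier = tf.keras.layers.Dense(2)
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()

@tf.function
def train_step(sentences, labels):
    with tf.GradientTape() as tape:
        enc_out = lstm(sentences)         # (batch, time, units)
        last_h = enc_out[:, -1, :]        # mirrors last_h in the notebook
        predictions = classifier(last_h)
        loss = loss_object(labels, predictions)
    variables = lstm.trainable_variables + classifier.trainable_variables
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(grads, variables))
    return loss

# Toy batch: two already-embedded "sentences", 5 steps of 16 features each
# (the real notebook feeds raw strings through the hub module instead).
sentences = tf.random.normal([2, 5, 16])
labels = tf.constant([[0., 1.], [1., 0.]])
print(train_step(sentences, labels))

If the encoder has to stay behind the module's own tf.function (for example because tokenization lives there), the upgrade route is the one to try first, since no restructuring on the caller's side can add the missing While gradient.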