Created
February 5, 2019 18:17
-
-
Save peterroelants/36ea83ea0aab620eff3e85feb4611dc5 to your computer and use it in GitHub Desktop.
TensorFlow 2.0 cuDNN error
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| 2.0.0-dev20190205 | |
| 2019-02-05 18:09:28.956373: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA | |
| 2019-02-05 18:09:28.959894: I tensorflow/stream_executor/platform/default/dso_loader.cc:161] successfully opened CUDA library libcuda.so.1 locally | |
| 2019-02-05 18:09:29.161041: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1010] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero | |
| 2019-02-05 18:09:29.168088: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1010] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero | |
| 2019-02-05 18:09:29.168686: I tensorflow/compiler/xla/service/service.cc:162] XLA service 0x1a03ef0 executing computations on platform CUDA. Devices: | |
| 2019-02-05 18:09:29.168697: I tensorflow/compiler/xla/service/service.cc:169] StreamExecutor device (0): GeForce RTX 2080, Compute Capability 7.5 | |
| 2019-02-05 18:09:29.168701: I tensorflow/compiler/xla/service/service.cc:169] StreamExecutor device (1): GeForce RTX 2080, Compute Capability 7.5 | |
| 2019-02-05 18:09:29.187695: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3600000000 Hz | |
| 2019-02-05 18:09:29.188082: I tensorflow/compiler/xla/service/service.cc:162] XLA service 0x1923100 executing computations on platform Host. Devices: | |
| 2019-02-05 18:09:29.188095: I tensorflow/compiler/xla/service/service.cc:169] StreamExecutor device (0): <undefined>, <undefined> | |
| 2019-02-05 18:09:29.188398: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1434] Found device 0 with properties: | |
| name: GeForce RTX 2080 major: 7 minor: 5 memoryClockRate(GHz): 1.845 | |
| pciBusID: 0000:01:00.0 | |
| totalMemory: 7.76GiB freeMemory: 6.57GiB | |
| 2019-02-05 18:09:29.188569: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1434] Found device 1 with properties: | |
| name: GeForce RTX 2080 major: 7 minor: 5 memoryClockRate(GHz): 1.845 | |
| pciBusID: 0000:02:00.0 | |
| totalMemory: 7.77GiB freeMemory: 7.62GiB | |
| 2019-02-05 18:09:29.188628: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1513] Adding visible gpu devices: 0, 1 | |
| 2019-02-05 18:09:29.188689: I tensorflow/stream_executor/platform/default/dso_loader.cc:161] successfully opened CUDA library libcudart.so.10.0 locally | |
| 2019-02-05 18:09:29.189750: I tensorflow/core/common_runtime/gpu/gpu_device.cc:985] Device interconnect StreamExecutor with strength 1 edge matrix: | |
| 2019-02-05 18:09:29.189758: I tensorflow/core/common_runtime/gpu/gpu_device.cc:991] 0 1 | |
| 2019-02-05 18:09:29.189762: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1004] 0: N N | |
| 2019-02-05 18:09:29.189765: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1004] 1: N N | |
| 2019-02-05 18:09:29.190099: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1116] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 6395 MB memory) -> physical GPU (device: 0, name: GeForce RTX 2080, pci bus id: 0000:01:00.0, compute capability: 7.5) | |
| 2019-02-05 18:09:29.190301: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1116] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:1 with 7415 MB memory) -> physical GPU (device: 1, name: GeForce RTX 2080, pci bus id: 0000:02:00.0, compute capability: 7.5) | |
| 2019-02-05 18:09:29.508308: I tensorflow/stream_executor/platform/default/dso_loader.cc:161] successfully opened CUDA library libcudnn.so.7 locally | |
| 2019-02-05 18:09:30.028886: E tensorflow/stream_executor/cuda/cuda_dnn.cc:482] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR | |
| 2019-02-05 18:09:30.031260: E tensorflow/stream_executor/cuda/cuda_dnn.cc:482] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR | |
| --------------------------------------------------------------------------- | |
| UnknownError Traceback (most recent call last) | |
| <ipython-input-1-cfb4630bf2f5> in <module> | |
| 10 layers.Conv2D(2, 5, padding='same', activation=tf.nn.relu)]) | |
| 11 | |
| ---> 12 model(data) | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs) | |
| 605 with ops.name_scope(self._name_scope()): | |
| 606 self._maybe_build(inputs) | |
| --> 607 outputs = self.call(inputs, *args, **kwargs) | |
| 608 self._handle_activity_regularization(inputs, outputs) | |
| 609 self._set_mask_metadata(inputs, outputs, previous_mask) | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/sequential.py in call(self, inputs, training, mask) | |
| 232 if not self.built: | |
| 233 self._init_graph_network(self.inputs, self.outputs, name=self.name) | |
| --> 234 return super(Sequential, self).call(inputs, training=training, mask=mask) | |
| 235 | |
| 236 outputs = inputs # handle the corner case where self.layers is empty | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in call(self, inputs, training, mask) | |
| 862 ' implement a `call` method.') | |
| 863 | |
| --> 864 return self._run_internal_graph(inputs, training=training, mask=mask) | |
| 865 | |
| 866 def compute_output_shape(self, input_shape): | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py in _run_internal_graph(self, inputs, training, mask) | |
| 1003 | |
| 1004 # Compute outputs. | |
| -> 1005 output_tensors = layer(computed_tensors, **kwargs) | |
| 1006 | |
| 1007 # Update tensor_dict. | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs) | |
| 605 with ops.name_scope(self._name_scope()): | |
| 606 self._maybe_build(inputs) | |
| --> 607 outputs = self.call(inputs, *args, **kwargs) | |
| 608 self._handle_activity_regularization(inputs, outputs) | |
| 609 self._set_mask_metadata(inputs, outputs, previous_mask) | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/convolutional.py in call(self, inputs) | |
| 194 | |
| 195 def call(self, inputs): | |
| --> 196 outputs = self._convolution_op(inputs, self.kernel) | |
| 197 | |
| 198 if self.use_bias: | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py in __call__(self, inp, filter) | |
| 1011 | |
| 1012 def __call__(self, inp, filter): # pylint: disable=redefined-builtin | |
| -> 1013 return self.conv_op(inp, filter) | |
| 1014 | |
| 1015 | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py in __call__(self, inp, filter) | |
| 630 | |
| 631 def __call__(self, inp, filter): # pylint: disable=redefined-builtin | |
| --> 632 return self.call(inp, filter) | |
| 633 | |
| 634 | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py in __call__(self, inp, filter) | |
| 229 padding=self.padding, | |
| 230 data_format=self.data_format, | |
| --> 231 name=self.name) | |
| 232 | |
| 233 | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py in conv2d(input, filter, strides, padding, use_cudnn_on_gpu, data_format, dilations, name, filters) | |
| 1631 data_format=data_format, | |
| 1632 dilations=dilations, | |
| -> 1633 name=name) | |
| 1634 | |
| 1635 | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_nn_ops.py in conv2d(input, filter, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name) | |
| 1045 input, filter, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, | |
| 1046 padding=padding, explicit_paddings=explicit_paddings, | |
| -> 1047 data_format=data_format, dilations=dilations, name=name, ctx=_ctx) | |
| 1048 except _core._SymbolicException: | |
| 1049 pass # Add nodes to the TensorFlow graph. | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_nn_ops.py in conv2d_eager_fallback(input, filter, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx) | |
| 1144 explicit_paddings, "data_format", data_format, "dilations", dilations) | |
| 1145 _result = _execute.execute(b"Conv2D", 1, inputs=_inputs_flat, attrs=_attrs, | |
| -> 1146 ctx=_ctx, name=name) | |
| 1147 _execute.record_gradient( | |
| 1148 "Conv2D", _inputs_flat, _attrs, _result, name) | |
| /usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name) | |
| 64 else: | |
| 65 message = e.message | |
| ---> 66 six.raise_from(core._status_to_exception(e.code, message), None) | |
| 67 except TypeError as e: | |
| 68 if any(ops._is_keras_symbolic_tensor(x) for x in inputs): | |
| /usr/local/lib/python3.6/dist-packages/six.py in raise_from(value, from_value) | |
| UnknownError: Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above. [Op:Conv2D] |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment