Uncomment all the `deb-src` lines in `/etc/apt/sources.list`, then refresh the package index and pull in radeontop's build dependencies:

```sh
sudo vim /etc/apt/sources.list   # uncomment all deb-src lines
sudo apt-get update
sudo apt-get build-dep radeontop
sudo apt-get install libxcb-dri2-0-dev
```
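With the build dependencies in place, radeontop itself can be built from its source tree and then launched (typically as `sudo radeontop`) to watch GPU utilization in real time.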
This list is meant to be both a quick guide and a reference for further research into these topics. It is essentially a summary of the Computer Science courses you never took or have forgotten, so there is no way it can cover everything in depth. It will also be available as a gist on GitHub for everyone to edit and add to.
```python
import cv2 as cv

def convert(filename):
    # OpenCV loads images in BGR order, so convert BGR -> grayscale.
    img = cv.imread(filename)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    return cv.imwrite(filename, gray)

if __name__ == '__main__':
    filename = input('filename: ')
    if filename:
        convert(filename)
```
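For batch use, the same helper can be looped over a directory; the `images/*.png` pattern below is only an illustrative assumption, not part of the original script:

```python
import glob

# Hypothetical batch conversion using the convert() helper defined above.
for path in glob.glob('images/*.png'):
    convert(path)
```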
This README demonstrates how to install the Nvidia driver, CUDA 9.0, and cuDNN 7.2 on an Ubuntu 18.04 machine with both an iGPU and an Nvidia GPU. With the installation flags shown below, the X server runs on the iGPU so that the discrete GPU (dGPU) stays free for CUDA tasks.
```sh
sudo apt install build-essential libxml2 gcc-multilib dkms gcc-6 g++-6 freeglut3-dev libx11-dev libxmu-dev libxi-dev libglu1-mesa libglu1-mesa-dev
```
Create `/etc/modprobe.d/blacklist-nouveau.conf` to disable nouveau:

```
blacklist nouveau
```
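After blacklisting nouveau, the initramfs usually has to be regenerated (`sudo update-initramfs -u`) and the machine rebooted before installing the Nvidia driver; on some setups an additional `options nouveau modeset=0` line in the same file is also needed.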
```python
import tensorflow as tf

def scaled_dot_product_attention(query, key, value, mask):
  matmul_qk = tf.matmul(query, key, transpose_b=True)
  # Scale by the square root of the key depth.
  depth = tf.cast(tf.shape(key)[-1], tf.float32)
  logits = matmul_qk / tf.math.sqrt(depth)
  # Add the mask to zero out padding tokens.
  if mask is not None:
    logits += (mask * -1e9)
  # Softmax over the keys, then take the weighted sum of the values.
  attention_weights = tf.nn.softmax(logits, axis=-1)
  return tf.matmul(attention_weights, value)
```
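As a quick sanity check, the function can be called on small random tensors; the shapes below are illustrative assumptions, not values from the original code:

```python
import tensorflow as tf

# Toy tensors: batch=1, head=1, 3 positions, depth=4 (illustrative shapes).
q = tf.random.normal((1, 1, 3, 4))
k = tf.random.normal((1, 1, 3, 4))
v = tf.random.normal((1, 1, 3, 4))
out = scaled_dot_product_attention(q, k, v, mask=None)
print(out.shape)  # -> (1, 1, 3, 4)
```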
```python
class MultiHeadAttention(tf.keras.layers.Layer):

  def __init__(self, d_model, num_heads, name="multi_head_attention"):
    super(MultiHeadAttention, self).__init__(name=name)
    self.num_heads = num_heads
    self.d_model = d_model
    # The model dimension must split evenly across the attention heads.
    assert d_model % self.num_heads == 0
    self.depth = d_model // self.num_heads
```
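The snippet stops after `__init__`. A typical completion of this layer, following the standard Transformer recipe the rest of the code uses, projects the inputs, splits them into heads, applies `scaled_dot_product_attention`, and recombines the heads. This is a sketch to be placed inside the class body, not the original code; the projection layers are created lazily in `build` here, whereas the original most likely defines them in `__init__`:

```python
  def build(self, input_shape):
    # Assumed projection layers for query, key, value and the final output.
    self.query_dense = tf.keras.layers.Dense(self.d_model)
    self.key_dense = tf.keras.layers.Dense(self.d_model)
    self.value_dense = tf.keras.layers.Dense(self.d_model)
    self.dense = tf.keras.layers.Dense(self.d_model)

  def split_heads(self, inputs, batch_size):
    # Reshape to (batch, num_heads, seq_len, depth).
    inputs = tf.reshape(inputs, (batch_size, -1, self.num_heads, self.depth))
    return tf.transpose(inputs, perm=[0, 2, 1, 3])

  def call(self, inputs):
    query, key, value, mask = (
        inputs['query'], inputs['key'], inputs['value'], inputs['mask'])
    batch_size = tf.shape(query)[0]
    # Project, split into heads, attend, then merge the heads back together.
    query = self.split_heads(self.query_dense(query), batch_size)
    key = self.split_heads(self.key_dense(key), batch_size)
    value = self.split_heads(self.value_dense(value), batch_size)
    attention = scaled_dot_product_attention(query, key, value, mask)
    attention = tf.transpose(attention, perm=[0, 2, 1, 3])
    concat_attention = tf.reshape(attention, (batch_size, -1, self.d_model))
    return self.dense(concat_attention)
```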
```python
class PositionalEncoding(tf.keras.layers.Layer):

  def __init__(self, position, d_model):
    super(PositionalEncoding, self).__init__()
    self.pos_encoding = self.positional_encoding(position, d_model)

  def get_angles(self, position, i, d_model):
    # Angle rate 1 / 10000^(2i / d_model) for each embedding dimension.
    angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
    return position * angles
```
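`__init__` references a `positional_encoding` method that is cut off in the snippet. A typical implementation, following the usual sine/cosine encoding rather than copied from the original, would look like this (to be placed inside the class body):

```python
  def positional_encoding(self, position, d_model):
    angle_rads = self.get_angles(
        position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],
        i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
        d_model=d_model)
    # Sine on the even indices, cosine on the odd indices of the encoding.
    sines = tf.math.sin(angle_rads[:, 0::2])
    cosines = tf.math.cos(angle_rads[:, 1::2])
    pos_encoding = tf.concat([sines, cosines], axis=-1)[tf.newaxis, ...]
    return tf.cast(pos_encoding, tf.float32)

  def call(self, inputs):
    # Add the fixed positional encodings to the token embeddings.
    return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]
```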
```python
def encoder_layer(units, d_model, num_heads, dropout, name="encoder_layer"):
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")

  # Self-attention: query, key and value all come from the same inputs.
  attention = MultiHeadAttention(
      d_model, num_heads, name="attention")({
          'query': inputs,
          'key': inputs,
          'value': inputs,
          'mask': padding_mask
      })
```
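The snippet ends mid-function. In the usual formulation the surrounding code follows, the layer finishes with dropout, residual connections with layer normalization, and a position-wise feed-forward network; the continuation below is a sketch under that assumption, not the original code:

```python
  # Assumed continuation of encoder_layer.
  attention = tf.keras.layers.Dropout(rate=dropout)(attention)
  attention = tf.keras.layers.LayerNormalization(epsilon=1e-6)(inputs + attention)

  # Position-wise feed-forward network.
  outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)
  outputs = tf.keras.layers.Dense(units=d_model)(outputs)
  outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
  outputs = tf.keras.layers.LayerNormalization(epsilon=1e-6)(attention + outputs)

  return tf.keras.Model(
      inputs=[inputs, padding_mask], outputs=outputs, name=name)
```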
```python
def encoder(vocab_size,
            num_layers,
            units,
            d_model,
            num_heads,
            dropout,
            name="encoder"):
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
```
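Here, too, the snippet is truncated. The usual continuation embeds the tokens, scales them, adds the positional encoding, and stacks `num_layers` encoder layers; the sketch below assumes that structure rather than reproducing the original:

```python
  # Assumed continuation of encoder.
  embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
  embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
  embeddings = PositionalEncoding(vocab_size, d_model)(embeddings)

  outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)

  # Stack num_layers identical encoder layers.
  for i in range(num_layers):
    outputs = encoder_layer(
        units=units,
        d_model=d_model,
        num_heads=num_heads,
        dropout=dropout,
        name="encoder_layer_{}".format(i),
    )([outputs, padding_mask])

  return tf.keras.Model(
      inputs=[inputs, padding_mask], outputs=outputs, name=name)
```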