- Declaring an empty vector and then pushing values
// creates an empty vector
std::vector<int> vec;
// push values into vector
vec.push_back(10);
vec.push_back(20);
vec.push_back(30);
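If the values are known up front, the same contents can also be set in one step with list initialization (a small sketch, assuming C++11 or later):
// equivalent one-step initialization (C++11)
std::vector<int> vec2 = {10, 20, 30};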
class ListNode {
public:
    int val;
    ListNode *next;
    ListNode(int val) {
        this->val = val;
        this->next = nullptr;
    }
};
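A small usage sketch (the main function here is hypothetical, not part of the original notes) that builds and walks a three-node list:
#include <iostream>

int main() {
    // build the list 1 -> 2 -> 3
    ListNode *head = new ListNode(1);
    head->next = new ListNode(2);
    head->next->next = new ListNode(3);
    // traverse and print each value
    for (ListNode *cur = head; cur != nullptr; cur = cur->next)
        std::cout << cur->val << " ";
    std::cout << std::endl;
    // release the nodes
    while (head != nullptr) {
        ListNode *tmp = head;
        head = head->next;
        delete tmp;
    }
    return 0;
}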
#include <iostream>
#include <vector>
#include <stack>
#include <algorithm>
using namespace std;

// print a vector as "[a, b, c]"
template<typename T>
void printVector(vector<T> &vec) {
    cout << "[";
    const size_t sz = vec.size();
    for (size_t i = 0; i < sz; ++i) {
        cout << vec[i];
        if (i + 1 < sz) cout << ", ";
    }
    cout << "]" << endl;
}
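A quick usage sketch (hypothetical main) that prints a vector like the one built in the first snippet:
int main() {
    vector<int> vec = {10, 20, 30};
    printVector(vec);   // prints [10, 20, 30]
    return 0;
}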
The search bounds left and right maintain the invariant that the element to find must reside in the range [left, right].
/* iteration version: T[O(logn)], S[O(1)] */
int binarySearch(vector<int> &arr, int target)
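{
    // minimal sketch of the iterative version described above:
    // keep the candidate range as the closed interval [left, right]
    int left = 0;
    int right = (int)arr.size() - 1;
    while (left <= right) {
        int mid = left + (right - left) / 2;  // avoids overflow of left + right
        if (arr[mid] == target)
            return mid;                       // found
        else if (arr[mid] < target)
            left = mid + 1;                   // target can only be to the right of mid
        else
            right = mid - 1;                  // target can only be to the left of mid
    }
    return -1;                                // not found
}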
# init .vim folder
cd ~
mkdir .vim
cd .vim
touch vimrc
git init .
# soft link
ln -s ~/.vim/vimrc ~/.vimrc
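Once the repository exists, the config can be committed and pushed to a remote; the sketch below uses a placeholder URL (remote name and URL are not from the original notes):
# commit the config and push it to a remote (URL is a placeholder)
git add vimrc
git commit -m "add vimrc"
git remote add origin <your-remote-url>
git push -u origin master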
CUDA 8.0 and higher versions require at least the nvidia-384 driver:
sudo apt-get purge nvidia*
sudo add-apt-repository ppa:graphics-drivers/ppa
sudo apt-get update && sudo apt-get install nvidia-384 nvidia-settings
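After the install (and a reboot), one quick check that the driver is loaded is nvidia-smi, which reports the driver version and the GPUs it can see:
# list the driver version and visible GPUs
nvidia-smi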
You may want to verify that the GPU is working with TensorFlow at runtime. Here are some useful Python code snippets:
## show all devices available ##
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
To make sure the GPU works in computation:
import tensorflow as tf
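# minimal sketch (TF1-style API, matching the device_lib call above):
# pin a small op to the first GPU and log where each op is placed
with tf.device('/gpu:0'):
    a = tf.constant([1.0, 2.0, 3.0], name='a')
    b = tf.constant([4.0, 5.0, 6.0], name='b')
    c = a + b
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print(sess.run(c))  # prints [5. 7. 9.] and logs the device placement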
// I/O
#include <iostream>
#include <fstream>
#include <sstream>
// Containers
#include <vector>
#include <deque>
#include <list>
#include <set>
#include <map>