(C-x means Ctrl+x, M-x means Alt+x.)
The default prefix is C-b. If you (or your muscle memory) prefer C-a, add the following to ~/.tmux.conf:
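The config itself did not survive here; the usual remap (stock tmux commands, a safe assumption given the context) is:

unbind C-b
set -g prefix C-a
bind-key C-a send-prefix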
// create file:
sudo vim /usr/share/applications/intellij.desktop

// add the following
[Desktop Entry]
Version=13.0
Type=Application
Terminal=false
Icon[en_US]=/home/rob/.intellij-13/bin/idea.png
Name[en_US]=IntelliJ
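As pasted, the entry stops before the Exec key, so the launcher would not actually start anything. Assuming the IDE's start script sits next to the icon (an inference from the Icon path above, not something the snippet states), the missing line would be:

# Exec path is an assumption inferred from the Icon path
Exec=/home/rob/.intellij-13/bin/idea.sh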
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example of thread pool
https://docs.python.org/3/library/concurrent.futures.html
https://docs.python.org/3/library/multiprocessing.html
"""
import concurrent.futures as confu
import multiprocessing.pool as mpp
import time
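The gist's body is cut off after the imports. A minimal sketch of an example exercising both imports (the work function and pool sizes are illustrative, not from the original):

def work(x):
    # Simulate a blocking task so the pool has something to overlap
    time.sleep(0.1)
    return x * x

if __name__ == "__main__":
    # concurrent.futures thread pool
    with confu.ThreadPoolExecutor(max_workers=4) as executor:
        print(list(executor.map(work, range(8))))

    # multiprocessing's thread pool, same work
    with mpp.ThreadPool(processes=4) as pool:
        print(pool.map(work, range(8)))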
library(ggplot2)  # qplot() lives in ggplot2; newer cowplot no longer attaches it
library(cowplot)

n <- 1e4
X <- rlogis(n)
Y <- plogis(X)

plot_dens <- function(data, ...) {
  qplot(
    data,
    geom = "histogram",
    alpha = I(1 / 2),
    ...
  )
}
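Given that cowplot is loaded and both X and Y are generated, a plausible use of the helper (my reconstruction, not the original code) is to place the two histograms side by side:

plot_grid(plot_dens(X), plot_dens(Y), labels = c("X", "Y"))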
Note: I'm currently taking a break from this course to focus on my studies so I can finally graduate.
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang, Rutgers University, Email: [email protected]
## Modified by Thomas Wolf, HuggingFace Inc., Email: [email protected]
## Copyright (c) 2017-2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Data Parallel"""
'use strict';
// original: https://gist.github.com/indutny/8d0f5376ee643962a9f0
const BN = require('bn.js');
const elliptic = require('elliptic');
const bcoin = require('bcoin');
const ecdsa = new elliptic.ec('secp256k1');
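The rest of the gist is not shown. A minimal sketch of what this setup supports, using only documented elliptic calls (the message hash is a placeholder, not from the original):

// Generate a secp256k1 key pair and sign/verify a 32-byte digest
const key = ecdsa.genKeyPair();
const msgHash = Buffer.alloc(32, 1); // placeholder digest
const signature = key.sign(msgHash);
console.log(key.verify(msgHash, signature)); // true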
import numpy as np  # required for np.loadtxt below

# Load in embeddings
glove_vectors = '/home/ubuntu/.keras/datasets/glove.6B.100d.txt'
glove = np.loadtxt(glove_vectors, dtype='str', comments=None)

# Extract the vectors and words
vectors = glove[:, 1:].astype('float')
words = glove[:, 0]

# Create lookup of words to vectors
word_lookup = {word: vector for word, vector in zip(words, vectors)}
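A quick sanity check on the lookup is a cosine-similarity query; the word 'king' is an illustrative choice, assuming it is in the GloVe vocabulary:

# Five nearest neighbours of the query word by cosine similarity
query = word_lookup['king']
sims = vectors @ query / (np.linalg.norm(vectors, axis=1) * np.linalg.norm(query))
print(words[np.argsort(-sims)[:5]])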