- ar
creates static libraries.
- ldd
lists the shared libraries on which the object binary is dependent.
- nm
lists the symbols defined in the symbol table of an object file.
- objdump
displays information about object files (file headers, sections, disassembly).
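Typical invocations of these tools look like the following (the file names are placeholders):

# create (or update) a static library from object files
ar rcs libmylib.a foo.o bar.o
# list the shared libraries a binary depends on
ldd ./myprog
# list the symbols in an object file or library
nm foo.o
# disassemble an object file
objdump -d foo.o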
// Avoid resource leaks when exceptions are thrown:
// if an exception occurs after successful memory allocation but
// before the delete statement executes, a memory leak could occur.
void memory_leak()
{
    ClassA * ptr = new ClassA;
    try {
        // ... code that may throw an exception ...
    }
    catch (...) {
        throw;        // the exception propagates; the delete below never runs
    }
    delete ptr;       // skipped if an exception escapes -> memory leak
}
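One fix is to delete the pointer inside the catch handler before re-throwing; the more idiomatic alternative is RAII with a smart pointer. A minimal sketch, assuming the same placeholder ClassA and C++14 for std::make_unique:

#include <memory>

class ClassA { /* placeholder */ };

void no_leak()
{
    // the unique_ptr destructor releases the object on every exit path,
    // including stack unwinding when an exception is thrown
    std::unique_ptr<ClassA> ptr = std::make_unique<ClassA>();
    // ... code that may throw an exception ...
}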
// A functor is any object that can be used with () in the manner of a function.
// This includes pointers to functions and class objects for which the () operator
// (the function call operator) is overloaded.
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
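Continuing the snippet, a minimal class-type functor might look like this (the MultiplyBy name and values are illustrative, not from the original notes):

// a class whose objects are functors: operator() is overloaded
struct MultiplyBy {
    int factor;
    int operator()(int x) const { return x * factor; }
};

int main()
{
    vector<int> v = {1, 2, 3, 4};
    // a functor object is passed to an algorithm just like a function pointer
    transform(v.begin(), v.end(), v.begin(), MultiplyBy{3});
    for (int x : v) cout << x << ' ';   // prints: 3 6 9 12
    cout << '\n';
    return 0;
}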
source /usr/local/bin/virtualenvwrapper.sh   # load the virtualenvwrapper shell functions
mkvirtualenv env1                            # create (and activate) a new virtualenv named env1
ls $WORKON_HOME                              # list all existing virtualenvs
lssitepackages                               # list the active virtualenv's site-packages contents
deactivate                                   # leave the active virtualenv
rmvirtualenv env2                            # delete the virtualenv named env2
<!DOCTYPE html>
<meta charset="utf-8">
<script src="http://d3js.org/d3.v2.min.js?2.9.3"></script>
<style>
.link {
  stroke: #ccc;
}
.node text {
\DeclareMathOperator*{\argmax}{arg\,max} % in your preamble
\DeclareMathOperator*{\argmin}{arg\,min} % in your preamble
\argmax_{...} % in your formula
\argmin_{...} % in your formula
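Put together, a self-contained example might look like this (amsmath provides \DeclareMathOperator*; the formula itself is only illustrative):

\documentclass{article}
\usepackage{amsmath}   % provides \DeclareMathOperator*
\DeclareMathOperator*{\argmax}{arg\,max}
\DeclareMathOperator*{\argmin}{arg\,min}
\begin{document}
% the starred form places the subscript underneath in display math
\[ \hat{\theta} = \argmax_{\theta} \; p(x \mid \theta) \]
\end{document}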
# Dirichlet process Gaussian mixture model
import numpy as np
from scipy.special import gammaln
from scipy.linalg import cholesky
from sliceSample import sliceSample


def multinomialDraw(dist):
    """Return the index of a single draw from the given multinomial distribution."""
    return np.random.multinomial(1, dist).argmax()
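A quick usage sketch (the weights are illustrative): drawing a component index from a vector of mixing proportions.

pi = np.array([0.2, 0.5, 0.3])   # mixing proportions, must sum to 1
k = multinomialDraw(pi)          # index in {0, 1, 2}, drawn with probabilities pi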
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer


def get_vectors(vocab_size=5000):
    newsgroups_train = fetch_20newsgroups(subset='train')
    vectorizer = CountVectorizer(max_df=.9, max_features=vocab_size)
    vecs = vectorizer.fit_transform(newsgroups_train.data)
    # the fitted vocabulary is vocabulary_ (term -> column index);
    # order the terms by column index so terms[i] matches column i of vecs
    vocabulary = vectorizer.vocabulary_
    terms = np.array(sorted(vocabulary, key=vocabulary.get))
    return vecs, terms
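A usage sketch, assuming get_vectors returns the document-term matrix and the index-ordered terms as completed above:

vecs, terms = get_vectors(vocab_size=5000)
print(vecs.shape)   # (n_train_documents, 5000) sparse count matrix
print(terms[:10])   # first ten vocabulary terms, aligned with the matrix columns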
import spark.SparkContext
import SparkContext._

/**
 * A port of [[http://blog.echen.me/2012/02/09/movie-recommendations-and-more-via-mapreduce-and-scalding/]]
 * to Spark.
 * Uses movie ratings data from the MovieLens 100k dataset found at [[http://www.grouplens.org/node/73]]
 */
object MovieSimilarities {
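  // A minimal sketch of loading the ratings with the old Spark API implied by the
  // imports above; the "u.data" path and tab-separated (user, movie, rating, timestamp)
  // layout assume the MovieLens 100k format. Illustrative only, not the original port.
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "MovieSimilarities")
    val ratings = sc.textFile("u.data").map { line =>
      val fields = line.split("\t")
      (fields(0).toInt, fields(1).toInt, fields(2).toDouble)   // (user, movie, rating)
    }
    // group each user's ratings so movie pairs can later be generated per user
    val ratingsByUser = ratings.map(r => (r._1, (r._2, r._3))).groupByKey()
    println("num users: " + ratingsByUser.count())
  }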
; map
(require 'clojure.string)   ; make clojure.string/lower-case available
(map clojure.string/lower-case ["Java" "Imperative" "Weeping" "Clojure"])   ; => ("java" "imperative" "weeping" "clojure")
(map * [1 2 3 4] [5 6 7 8])   ; => (5 12 21 32)
; reduce
(reduce max [0 -3 10 48])   ; => 48
(reduce + 50 [1 2 3 4])   ; => 60
; partial
(def only-strings (partial filter string?))
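A quick usage sketch for the partial example (the input values are illustrative):

(only-strings ["clojure" 42 :keyword "rocks"])   ; => ("clojure" "rocks")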