Each of these commands runs an ad-hoc static HTTP server in your current (or specified) directory, available at http://localhost:8000. Use this power wisely.
$ python -m SimpleHTTPServer 8000
// Word cloud layout by Jason Davies, http://www.jasondavies.com/word-cloud/
// Algorithm due to Jonathan Feinberg, http://static.mrfeinberg.com/bv_ch03.pdf
(function(exports) { | |
function cloud() { | |
var size = [256, 256], | |
text = cloudText, | |
font = cloudFont, | |
fontSize = cloudFontSize, | |
rotate = cloudRotate, | |
padding = cloudPadding, |
Each of these commands runs an ad-hoc static HTTP server in your current (or specified) directory, available at http://localhost:8000. Use this power wisely.
$ python -m SimpleHTTPServer 8000
When the directory structure of your Node.js application (not library!) has some depth, you end up with a lot of annoying relative paths in your require calls like:
const Article = require('../../../../app/models/article');
Such deeply nested relative paths are hard to maintain and hard to read.
/* pure-hidden-xs: at extra-small widths (<= 567px) hide everything that is
   only meant to be visible at larger breakpoints, plus .pure-hidden-xs itself. */
@media screen and (max-width:567px) {
    .pure-visible-sm{display:none}
    .pure-visible-md{display:none}
    .pure-visible-lg{display:none}
    .pure-visible-xl{display:none}
    .pure-hidden-xs{display:none}
}
/* pure-hidden-sm */
@media screen and (min-width:568px) and (max-width:767px) { |
""" | |
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy) | |
BSD License | |
""" | |
import numpy as np | |
# data I/O | |
data = open('input.txt', 'r').read() # should be simple plain text file | |
chars = list(set(data)) | |
data_size, vocab_size = len(data), len(chars) |
// Recursive metafunction/helper for an n-dimensional "multi-vector":
//   type       : std::vector nested n levels deep, with T at the innermost level.
//   gen_vector : builds an instance; index[0] is the extent of the outermost
//                dimension, the remaining n-1 extents are forwarded to the
//                (n-1)-dimensional helper, and every innermost element is
//                initialised to initvalue.
// The recursion terminates at the mvector_tool<T,0> specialization
// (declared elsewhere in this file).
template < class T, int n > struct mvector_tool{
    using type = std::vector< typename mvector_tool<T,n-1>::type >;
    static type gen_vector( std::array<unsigned int, n> index, T initvalue ){
        // Drop the leading extent and recurse on the remaining n-1 extents.
        std::array<unsigned int,n-1> index_next;
        std::copy_n( index.begin()+1, n-1, index_next.begin() );
        return type( index.front(), mvector_tool<T,n-1>::gen_vector( index_next, initvalue ) );
    }
};
template < class T > struct mvector_tool<T,0>{ |
글쓴이: 김정주([email protected])
최근 딥러닝 관련 패키지들은 대부분 CPU와 GPU를 함께 지원하고 있습니다. GPU를 사용하면 보다 빠르게 학습 결과를 낼 수 있지만, GPU를 활용하기 위해서는 NVIDIA계열의 그래픽 카드, 드라이버 S/W 그리고 CUDA의 설치를 필요로 합니다.
이 글에서는 AWS의 GPU 인스턴스와 도커를 활용해 딥러닝 패키지(Caffe)를 편리하게 사용하는 방법을 소개합니다.
#include <iostream> | |
#include <vector> | |
//=====================================================
// V1 : Wrapper class around a vector (has-a vector)
//=====================================================
template < class T , int dim > | |
class MultiVector { | |
public: |