sudo aptitude install git php5-cli nodejs
npm install -g npm
npm install -g maxogden/dat
dat init
dat listen
- Create a Twitter application and generate OAuth keys for your user, then add the app and user credentials to stream.php
composer init && composer require fennb/phirehose:dev-master && composer install
php stream.php | dat import -json
- Install and run dat-editor, then open it to browse the table. (To test the pipeline without real Twitter credentials, see the stand-in sketch below.)
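A minimal stand-in for stream.php, useful for checking the pipe before wiring up credentials. This is a sketch, assuming (as the pipeline above implies) that `dat import -json` reads one JSON object per line from stdin; the filename fake-stream.js and the fields are made up for illustration:

// fake-stream.js — emits one JSON object per line, the shape stream.php
// would produce (fields here are illustrative, not real tweet data)
setInterval(function () {
  console.log(JSON.stringify({
    text: 'hello world',
    created_at: new Date().toISOString()
  }))
}, 1000)

Run it with: node fake-stream.js | dat import -json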
var through = require('through2')
var once = require('once')

var concurrent = 10
var pending = 0

// rr() is assumed to return a writable stream; cap in-flight writes
through(function (data, enc, cb) {
  cb = once(cb)
  pending++
  // keep accepting input while under the concurrency limit
  if (pending < concurrent) cb()
  rr().write(data, function (err) {
    pending--
    if (err) return cb(err)
    cb() // once() makes this a no-op if cb was already called above
  })
})
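The transform above assumes an rr() helper that hands back a writable stream for each chunk. A minimal sketch of one possibility, cycling round-robin over a fixed pool of streams (the file-stream pool is an assumption, not from the original):

var fs = require('fs')

// a fixed pool of writable streams to spread writes across
var writables = [
  fs.createWriteStream('out-0.log'),
  fs.createWriteStream('out-1.log')
]

var i = 0
function rr () {
  var next = writables[i]          // pick the next stream in the cycle
  i = (i + 1) % writables.length
  return next
}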
# A quick function to save a matrix as a PBM (http://en.wikipedia.org/wiki/Netpbm_format)
# image, to visualize *a lot* of missing data pretty quickly (outside of R).
writeMissingPBM <- function(x, file) {
  x[] <- as.integer(is.na(x))  # 1 = missing, 0 = present
  con <- file(file, open="wt")
  writeLines(sprintf("P1\n%d %d", ncol(x), nrow(x)), con)
  write.table(x, file=con, sep=" ", col.names=FALSE, row.names=FALSE, quote=FALSE)
  close(con)
}
Disclaimer: This is an unofficial post by a random person from the community. I am not an official representative of io.js. Want to ask a question? Open an issue on the node-forward discussions repo.
- io.js is a fork of node v0.12 (the next stable version of node.js, currently unreleased)
- io.js will be totally compatible with node.js
- the people who created io.js are node core contributors who have different ideas on how to run the project
- it is not a zero-sum game; many core contributors will help maintain both node.js and io.js
# Hello, and welcome to makefile basics.
#
# You will learn why `make` is so great, and why, despite its "weird" syntax,
# it is actually a highly expressive, efficient, and powerful way to build
# programs.
#
# Once you're done here, go to
# http://www.gnu.org/software/make/manual/make.html
# to learn SOOOO much more.
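#
# A minimal example rule (hello and hello.c are assumed filenames, not from
# the manual): a rule names a target, its prerequisites, and a tab-indented
# recipe. Running `make hello` re-runs the recipe only when hello.c is newer
# than hello.

hello: hello.c
	cc -o hello hello.c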
This is a setup for projects that want to check in only their source files, but have their gh-pages branch automatically updated with some compiled output every time they push.
A file below this one contains the steps for doing this with Travis CI. However, these days I recommend GitHub Actions, for the following reasons:
- It is much easier and requires fewer steps, because you are already authenticated with GitHub, so you don't need to share secret keys across services like you do when coordinating Travis CI and GitHub.
- It is free, with no quotas.
- Anecdotally, builds are much faster with GitHub Actions than with Travis CI, especially in terms of time spent waiting for a builder.
https://www.youtube.com/watch?v=ZACVcJt0oJA#t=7303
Kai Blin: If we containerize all these things, won’t it just encourage worse software development practices? Right now developers still need to consider someone other than themselves installing the software.

Michael Barton: “It’s a good point. Ultimately, though, if I can get a container, and it works, and I know it will work, do you care how well it was […]
#!/usr/bin/env node
var fasta = require('bionode-fasta');
var crypto = require('crypto');

fasta.obj('proteins.fasta').on('data', function (x) {
  var seq = x.seq
    .replace(/\*/g, '') // remove asterisks (stop codons)
    .toUpperCase();     // enforce case
  // hash the normalized sequence to get a stable identifier for it
  // (an assumed continuation, given the crypto require above)
  var hash = crypto.createHash('sha1').update(seq).digest('hex');
  console.log(hash + '\t' + x.id);
});
var atomicQueue = require(...)

var queue = atomicQueue(db, function (data, cb) {
  console.log('got some work', data)
  // do stuff ...
  cb(null, result) // if no error, this work will be removed from the leveldb
})

// write will add this 'work' to the leveldb
queue.write({some: 'world'})
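The db handed to atomicQueue is a leveldb instance. A minimal sketch of creating one, assuming the commonly used level module (the snippet above elides its own require target):

var level = require('level')

// open (or create) a leveldb database on disk; this becomes the `db`
// argument to atomicQueue above
var db = level('./queue-db')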
If you were to give recommendations to your "little brother/sister" on things that they need to do to become a data scientist, what would those things be?
I think the "Data Science Venn Diagram" (http://drewconway.com/zia/2013/3/26/the-data-science-venn-diagram) is a great place to start. You need three things to be a good data scientist:
- Statistical knowledge
- Programming/hacking skills
- Domain expertise