I hereby claim:
- I am trygvea on github.
- I am trygvea (https://keybase.io/trygvea) on keybase.
- I have a public key whose fingerprint is 61DF 261E E636 2555 75C0 0143 EBC0 F71B D423 9905
To claim this, I am signing this object:
function * Numbers () {
  let number = 0;
  while (true) {
    yield ++number;
  }
}

function * take (numberToTake, iterable) {
  const iterator = iterable[Symbol.iterator]();
  // Yield at most numberToTake values, then stop
  for (let i = 0; i < numberToTake; ++i) {
    const { done, value } = iterator.next();
    if (done) return;
    yield value;
  }
}
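A quick usage check (the call below is illustrative, not from the original snippet):

// Spread the lazy take() over the infinite Numbers() generator
console.log([...take(3, Numbers())]);  // => [1, 2, 3]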
import React from 'react'
import ReactDOM from 'react-dom'
import { createStore } from 'redux'
import { Provider, connect } from 'react-redux'

const reducer = (state = {counter: 0}, action = {}) => {
  switch (action.type) {
    case 'INCREMENT':
      return {counter: state.counter + 1}
    default:
      return state
  }
}
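The imports suggest the reducer feeds a connected component. A minimal sketch of that wiring (the Counter component and the 'root' element are assumptions, not from the original):

// Hypothetical presentational component fed by the store
const Counter = ({counter, increment}) =>
  <button onClick={increment}>{counter}</button>

const ConnectedCounter = connect(
  state => ({counter: state.counter}),
  dispatch => ({increment: () => dispatch({type: 'INCREMENT'})})
)(Counter)

ReactDOM.render(
  <Provider store={createStore(reducer)}>
    <ConnectedCounter />
  </Provider>,
  document.getElementById('root')
)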
const curry = f => (...args) =>
  args.length >= f.length
    ? f(...args)
    : (...more) => curry(f)(...args, ...more)

// Test it. If
const f = (a, b, c) => a + b + c
const g = curry(f)
// then the following holds true:
g(1)(2)(3) === f(1, 2, 3)  // => true
g(1, 2)(3) === 6           // => true
g(1)(2, 3) === 6           // => true
// push & pop as pure stateful computations | |
const push = elem => stack => [null, [elem, ...stack]] | |
const pop = ([head, ...tail]) => [head, tail] | |
// ---------------------------------------------------------- | |
// Let's do a simple stack manipulation popping the first two | |
// elements and pushing their product back on to the stack. | |
// The boring way to do it: Note how we manually must lift | |
// the stack through computations. | |
// (compare that to stackManipM below) |
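The body of the boring version is cut off above; it presumably looks something like this sketch (the name stackManip and the exact threading are assumptions):

// Hypothetical sketch: every intermediate stack must be named
// and passed along by hand at each step
const stackManip = stack0 => {
  const [a, stack1] = pop(stack0)
  const [b, stack2] = pop(stack1)
  return push(a * b)(stack2)
}
// stackManip([2, 3, 10])  // => [null, [6, 10]]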
// pureStatefulComputation: (state) => [result, newState]

// push & pop as pure stateful computations
const push = elem => stack => [undefined, [elem, ...stack]]
const pop = ([head, ...tail]) => [head, tail]

// ------------------------------------------------------------
// stackManip, version 1.
// The passing of state through computations is tedious.
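A later version presumably hides that threading behind a bind/chain helper. Here is a sketch under that assumption (bind and stackManipM are illustrative names, not the original code):

// Hypothetical: bind chains two pure stateful computations,
// feeding the first result into a function producing the next one
const bind = (computation, next) => state => {
  const [result, newState] = computation(state)
  return next(result)(newState)
}

// The state now threads itself through the steps
const stackManipM =
  bind(pop, a =>
  bind(pop, b =>
  push(a * b)))
// stackManipM([2, 3, 10])  // => [undefined, [6, 10]]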
# Cut and paste from:
# http://dlib.net/face_recognition.py.html
# https://github.com/ageitgey/face_recognition/blob/master/face_recognition/api.py
# https://medium.com/towards-data-science/facial-recognition-using-deep-learning-a74e9059a150
#
# Install dlib: See https://www.pyimagesearch.com/2017/03/27/how-to-install-dlib/
# Download dlib models: http://dlib.net/files/
import os
import dlib

# Typical model loading, following the dlib example linked above
# (file names are the standard models from dlib.net/files):
detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
face_encoder = dlib.face_recognition_model_v1('dlib_face_recognition_resnet_model_v1.dat')
type Diff<T extends string, U extends string> = ({[P in T]: P} & {[P in U]: never} & {[x: string]: never})[T];
type Minus<T, U> = {[P in Diff<keyof T, keyof U>]: T[P]};

interface Eo { type: string }
interface Meo extends Eo { meoStuff: any }

const isMeo = (eo: Eo): eo is Meo => eo.type === 'MEO type'

const aMethod = (eo: Eo) => {
  if (isMeo(eo)) {
    eo.meoStuff  // the type guard has narrowed eo to Meo here
  }
}
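For illustration (these types are not in the original), Minus subtracts one object type's keys from another's:

// Illustrative only: C keeps the props of A that B does not declare
type A = { x: number, y: number, z: number };
type B = { y: number };
type C = Minus<A, B>;  // { x: number, z: number }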
import {debounce} from 'lodash'; // or whatever

// Debounce a promise-returning function. The lodash debounce is created
// once and shared, so successive calls share one timer; only the last
// call within `wait` invokes fn (earlier callers' promises stay pending).
const debouncePromise = <T>(fn: (...args) => Promise<T>, wait: number, options = {}): ((...args) => Promise<T>) => {
  const debounced = debounce(
    (args, resolve, reject) => fn(...args).then(resolve).catch(reject),
    wait,
    options
  );
  return (...args) =>
    new Promise<T>((resolve, reject) => {
      debounced(args, resolve, reject);
    });
};
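Usage might look like this (the search function and endpoint are made up for illustration):

// Hypothetical: collapse a burst of keystrokes into a single request;
// only the promise from the last call in the burst settles
const search = (q: string): Promise<any> =>
  fetch(`/api/search?q=${q}`).then(res => res.json());
const debouncedSearch = debouncePromise(search, 300);
debouncedSearch('re').then(console.log);     // superseded, stays pending
debouncedSearch('redux').then(console.log);  // resolves with the results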
// Objects with only numeric properties that can be treated as an n-dimensional vector
export type Vector<T> = {
  [P in keyof T]: number
};

// Note: Object.assign mutates obj in place
const vextend = <T>(obj: Vector<T>, key: string, val: number): Vector<T> => Object.assign(obj, { [key]: val });

export const vreduce = <T, R>(obj: Vector<T>, reducer: (agg: R, [key, val]: [string, number]) => R, init: R): R =>
  (Object.entries(obj) as [string, number][]).reduce(reducer, init);
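An illustrative use (not in the original): the squared length of a vector via vreduce:

// Hypothetical helper: sum the squares of all components
const lengthSquared = <T>(v: Vector<T>): number =>
  vreduce(v, (sum, [, val]) => sum + val * val, 0);
// lengthSquared({x: 3, y: 4})  // => 25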