I hereby claim:
- I am achille on github.
- I am achille (https://keybase.io/achille) on keybase.
- I have a public key whose fingerprint is 381B A447 5045 5140 AC05 46D2 A1D0 FF17 221A 89BF
To claim this, I am signing this object:
// Compares MongoDB regular indexes vs. using a multikey index with $elemMatch
/* Regular-index document shape:
   { "field0": 0,
     "field1": 1,
     "field2": 2, ... etc }

   Multikey-index (attribute pattern) document shape:
   { "props": [
       { "field": "field0", "value": 0 },
import cv2
import os

# Call visualize(QTable) to display the Q-table.
# Press space to proceed and q to exit (rendering a frame pauses execution).

def write(image, output_dir="visualize", name=None, is_test_image=False):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if name is None:
// Create a 1 GB collection & enqueue it as work
for (i = 0; i < 1000; i++) { db.foo.insert({ f: ''.pad(1024*1024, true, 'A') }); }
enqueueWork("test.foo")

// Each worker calls dequeue() and works on its own range
work = dequeue("test.foo")

function enqueueWork(ns, splitSizeBytes=320000000) {
    split = db.runCommand({splitVector: ns, keyPattern: {_id: 1},
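// A hedged sketch of how the truncated enqueue/dequeue pair could be completed.
// The "queue" collection, the status field, and findAndModify-based claiming
// are assumptions, not necessarily what the original gist does.
function enqueueWorkSketch(ns, splitSizeBytes) {
    var res = db.runCommand({ splitVector: ns, keyPattern: { _id: 1 },
                              maxChunkSizeBytes: splitSizeBytes });
    var bounds = [MinKey].concat(res.splitKeys.map(function (k) { return k._id; }),
                                 [MaxKey]);
    for (var i = 0; i < bounds.length - 1; i++) {
        db.queue.insert({ ns: ns, min: bounds[i], max: bounds[i + 1], status: "new" });
    }
}

// Each worker atomically claims one unclaimed range.
function dequeueSketch(ns) {
    return db.queue.findAndModify({
        query:  { ns: ns, status: "new" },
        update: { $set: { status: "in_progress", started: new Date() } },
        new:    true
    });
}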
""" | |
Vitter JS (1987) 'An efficient algorithm for sequential | |
random sampling.' ACM T. Math. Softw. 13(1): 58--67. | |
Copied from: https://gist.github.com/ldoddema/bb4ba2d4ad1b948a05e0 | |
""" | |
from math import exp, log | |
import random | |
import numpy as np |
Howdy folks
This is an attempt at standardizing the intro threads, with instructions on how to include a student map in them. Note that the map & question set may be added to existing Piazza threads.
Steps:
Create a Google Map
/* Check for gaps or duplicates in a sharded collection's chunk keyspace (config.chunks) */
function check_keyspace(ns) {
    print("Checking: " + ns);
    str = JSON.stringify;
    forwardCount = 0;
    reverseCount = 0;
    min = db.chunks.find({ns: ns}).sort({min: 1}).limit(1)[0];
    max = db.chunks.find({ns: ns}).sort({min: -1}).limit(1)[0];
    current = min;
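// A hedged sketch of how the truncated walk could continue: follow each chunk's
// max to the next chunk's min and report any break. The function name and the
// "gap or overlap" reporting below are assumptions, not the gist's actual code.
function check_keyspace_sketch(ns) {
    var str = JSON.stringify;
    var first = db.chunks.find({ ns: ns }).sort({ min: 1 }).limit(1)[0];
    var last  = db.chunks.find({ ns: ns }).sort({ min: -1 }).limit(1)[0];
    var current = first, walked = 1;
    while (str(current.max) != str(last.max)) {
        var next = db.chunks.findOne({ ns: ns, min: current.max });
        if (next == null) {
            print("Gap or overlapping chunk after " + str(current.max));
            return;
        }
        current = next;
        walked++;
    }
    print("Keyspace contiguous; chunks walked: " + walked);
}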
/*
 * Auto-tuning delete that allows removal of large amounts of data
 * without impacting performance. Configurable to a target load amount.
 *
 * How it works:
 * TL;DR: Delete a small slice every second; vary the size of each slice
 * based on how long the previous delete took; sleep; repeat.
 *
 * TODO: Modify this to allow deletion based on the ObjectId's timestamp,
 * which is embedded in its first four bytes.
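// A hedged sketch of the loop described above (the gist is truncated here);
// the parameters, the tuning factors, and the 1-second sleep are assumptions,
// not the original implementation.
function autoTuningDeleteSketch(coll, query, targetMillis) {
    var batch = 100;                              // starting slice size
    while (true) {
        var start = new Date();
        var ids = coll.find(query, { _id: 1 }).limit(batch).toArray()
                      .map(function (d) { return d._id; });
        if (ids.length == 0) break;               // nothing left to delete
        coll.remove({ _id: { $in: ids } });
        var elapsed = new Date() - start;
        // Grow the slice when the delete finished early, shrink it when it overshot.
        if (elapsed < targetMillis) { batch = Math.ceil(batch * 1.2); }
        else { batch = Math.max(1, Math.floor(batch * 0.8)); }
        sleep(1000);                              // mongo shell sleep (ms)
    }
}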
import numpy as np
import scipy.optimize as spo

ir = lambda n: int(round(n))

# Constants
s_freq = 500      # server-server heartbeat frequency (ms)
c_freq = 10000    # client-server heartbeat frequency (ms)

# Simulation boundaries
threshold = 300000  # 5 minute max lag/skew (ms)
// compactness() calculates how closely the documents returned by a query are located on disk.
// It compares the total size of the matched documents with the total size of the
// unique pages they reside on.
function compactness(collection, query, limit) {
    // Count the number of own properties on an object
    Object.size = function(o) {
        var size = 0, key;
        for (key in o) { if (o.hasOwnProperty(key)) size++; }
        return size;
    };
    count = 0;
    size = 0;
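// A hedged sketch of how the truncated body might tally unique pages, assuming
// the MMAPv1-era $diskLoc projection exposed by cursor.showDiskLoc() and a 4 KB
// page size; both are assumptions, and the field is unavailable on newer
// storage engines.
function compactnessSketch(collection, query, limit) {
    var pages = {};
    var docBytes = 0;
    collection.find(query).limit(limit).showDiskLoc().forEach(function (doc) {
        docBytes += Object.bsonsize(doc);                  // shell helper: BSON size in bytes
        var page = doc.$diskLoc.file + ":" + Math.floor(doc.$diskLoc.offset / 4096);
        pages[page] = true;                                // de-duplicate pages
    });
    var pageBytes = Object.keys(pages).length * 4096;
    return docBytes / pageBytes;                           // ~1.0 means tightly packed
}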