I'm writing a small Ruby library:
class FizzBuzzEndpoint
  def call(env)
    # Placeholder Rack response triple; the real fizz-buzz logic would go here.
    [200, { "Content-Type" => "text/plain" }, ["FizzBuzz"]]
  end
end
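A minimal sketch of how this endpoint could be mounted, assuming a plain Rack setup; the file name is illustrative:

# config.ru -- start the app with `rackup`
require_relative "fizz_buzz_endpoint"

run FizzBuzzEndpoint.new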
module Models
  module GroupByCaching
    extend ActiveSupport::Concern

    included do
      # const_set puts the constant on the including class; a plain
      # assignment here would land in the concern's lexical scope instead.
      const_set(:CACHE_KEY_PREFIX, "#{name.collectionize}_grouped_by")
    end

    module ClassMethods
      # Instance methods of ClassMethods become class methods on the
      # includer, so no `self.` prefix is needed.
      def fully_approved_query_value
        MultiTenancy.current_version.locales.inject({}) do |result, locale|
          result.merge(locale.to_s => true)
        end
      end
    end
  end
end
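A sketch of what the concern provides to an including model; Product is a hypothetical Mongoid document, and MultiTenancy is assumed to be configured elsewhere:

class Product
  include Mongoid::Document
  include Models::GroupByCaching
end

Product::CACHE_KEY_PREFIX           # => "products_grouped_by"
Product.fully_approved_query_value  # => e.g. { "en" => true, "fr" => true }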
module Export
  module V1
    class GroupSerializer < ActiveModel::Serializer
      attributes :items

      def items
        # Order the group's items by position (Mongoid) before serializing.
        docs = object.items.order_by(:position.asc)
        # special_options is assumed to be defined elsewhere in this serializer.
        ItemsSerializer.new(docs, special_options).serializable_hash
      end
    end
  end
end
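Invoking the serializer directly might look like this; group stands in for any object exposing an #items Mongoid relation:

serializer = Export::V1::GroupSerializer.new(group)
serializer.serializable_hash  # => { items: [ ...position-ordered item hashes... ] }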
import random
import sys
import time
from collections import defaultdict

from pymongo import MongoClient

# pymongo.Connection was removed in PyMongo 3.0; MongoClient replaces it.
connection = MongoClient()
connection.drop_database('test_db')
collection = connection['test_db']['testing']
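# The script is cut off above; the otherwise-unused imports (random, time,
# sys, defaultdict) hint at what follows. A sketch of a timed insert-and-tally
# continuation -- every field name and count here is invented.
start = time.time()
for i in range(10000):
    collection.insert_one({'userid': random.randint(0, 100), 'n': i})
print('inserted 10000 docs in %.2fs' % (time.time() - start), file=sys.stderr)

# Tally documents per userid with the imported defaultdict.
counts = defaultdict(int)
for doc in collection.find():
    counts[doc['userid']] += 1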
// data density example
// shows how storing/sorting data by key can reduce I/O drastically in MongoDB
// diskloc gives the file # and offset of a given document; divide the offset by 512 for the block #
// 2011 kcg

// start clean
db.disktest_noorg.drop();
db.disktest_org.drop();

// create some random data in non-userid-dense form
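// The original script is cut off here; a sketch of the announced step,
// with counts and field names invented. Interleaving many userids means
// any one user's documents end up scattered across disk blocks.
for (var i = 0; i < 100000; i++) {
    db.disktest_noorg.insert({ userid: Math.floor(Math.random() * 1000), x: i });
}

// The organized form inserts the same data grouped by userid, so each
// user's documents land on adjacent blocks and a per-user scan touches
// far fewer blocks.
for (var u = 0; u < 1000; u++) {
    for (var j = 0; j < 100; j++) {
        db.disktest_org.insert({ userid: u, x: j });
    }
}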