I hereby claim:
- I am comerford on github.
- I am comerford (https://keybase.io/comerford) on keybase.
- I have a public key whose fingerprint is A706 6556 2C81 E177 8932 2D88 9454 794E C192 0598
To claim this, I am signing this object:
Fri Apr 20 16:22:27 [rsMgr] replSet can't see a majority, will not try to elect self
Fri Apr 20 16:22:28 [initandlisten] connection accepted from 192.168.2.71:59427 #3990253 (2 connections now open)
Fri Apr 20 16:22:28 [conn3990253] end connection 192.168.2.71:59427 (1 connection now open)
Fri Apr 20 16:22:28 [initandlisten] connection accepted from 192.168.2.72:55076 #3990254 (2 connections now open)
Fri Apr 20 16:22:28 [conn3990254] end connection 192.168.2.72:55076 (1 connection now open)
Fri Apr 20 16:22:30 [initandlisten] connection accepted from 192.168.2.71:59428 #3990255 (2 connections now open)
Fri Apr 20 16:22:30 [conn3990255] end connection 192.168.2.71:59428 (1 connection now open)
Fri Apr 20 16:22:30 [initandlisten] connection accepted from 192.168.2.72:55077 #3990256 (2 connections now open)
Fri Apr 20 16:22:30 [conn3990256] end connection 192.168.2.72:55077 (1 connection now open)
Fri Apr 20 16:22:32 [initandlisten] connection accepted from 192.168.2.71:59429 #3990257 (2 connections now open)
// delete the oplog, create a new empty one
testReplSet:PRIMARY> use local
switched to db local
testReplSet:PRIMARY> db.oplog.rs.drop()
true
testReplSet:PRIMARY> db.createCollection("oplog.rs", {capped:1, size: 2*1024*1024, autoIndexId:false})
{ "ok" : 1 }
// run the command to get the fields
testReplSet:PRIMARY> db.getReplicationInfo()
{
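The output above is cut off; for reference, db.getReplicationInfo() typically reports fields along these lines (the values below are only illustrative):

// illustrative shape of db.getReplicationInfo() output -- values are placeholders
{
    "logSizeMB" : 2,          // configured oplog size, matching the 2*1024*1024 bytes created above
    "usedMB" : 0.01,          // oplog space currently in use
    "timeDiff" : 0,           // seconds between the first and last oplog entries
    "timeDiffHours" : 0,
    "tFirst" : "...",         // time of the first oplog entry
    "tLast" : "...",          // time of the last oplog entry
    "now" : "..."
}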
// kills long running ops in MongoDB (taking seconds as an arg to define "long")
// attempts to be a bit safer than killing all by excluding replication related operations
// and only targeting queries as opposed to commands etc.
killLongRunningOps = function(maxSecsRunning) {
    currOp = db.currentOp();
    for (oper in currOp.inprog) {
        op = currOp.inprog[oper-0];
        if (op.secs_running > maxSecsRunning && op.op == "query" && !op.ns.startsWith("local")) {
            // report the op before killing it, then ask the server to terminate it
            print("Killing opId: " + op.opid + " running for " + op.secs_running + " seconds");
            db.killOp(op.opid);
        }
    }
};
// start a shell from the command line, do not connect to a database
./mongo --nodb
// using that shell start a new cluster, with a 1MB chunk size
cluster = new ShardingTest({shards: 2, chunksize: 1});
// open another shell (previous one will be full of logging and not actually connected to anything)
./mongo --port 30999
// stop the balancer
sh.stopBalancer()
sh.getBalancerState()
// select test DB, enable sharding
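The commands for that last step are not shown above; a minimal sketch, assuming a test.foo collection sharded on _id (the collection name and shard key are placeholders):

sh.enableSharding("test")
sh.shardCollection("test.foo", {_id: 1})
sh.status()   // confirm the database and collection now show as sharded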
// function will take a number of days, a collection name, an index field name, and a boolean as args
// it assumes the index is ObjectID based and creates an ObjectID with a timestamp X days in the past
// Finally, it queries that index/data (controlled by the boolean), loading it into memory
//
// Example - 2 days data, foo collection, _id index, pre-heat index only
// preHeatData(2, "foo", "_id", true)
// Example - 7 days data, foo collection, _id index, pre-heat data also
// preHeatData(7, "foo", "_id", false)
// Example - 2 days data, bar collection, blah index, pre-heat index only
// preHeatData(2, "bar", "blah", true)
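The function body itself is not included above; a minimal sketch of what it might look like, based purely on the comments (the ObjectId construction and the use of itcount() to force the reads are assumptions, only the name and arguments come from the description):

preHeatData = function(days, collName, indexField, indexOnly) {
    // build an ObjectId whose embedded timestamp is "days" days in the past
    // (the first 4 bytes of an ObjectId are seconds since the epoch, in hex)
    var cutoffSecs = Math.floor((Date.now() - days * 24 * 60 * 60 * 1000) / 1000);
    var cutoffId = ObjectId(cutoffSecs.toString(16) + "0000000000000000");
    var query = {};
    query[indexField] = {$gte: cutoffId};
    if (indexOnly) {
        // project only the indexed field so the read can be satisfied from the index
        var proj = {_id: 0};
        proj[indexField] = 1;
        db.getCollection(collName).find(query, proj).itcount();
    } else {
        // fetch the full documents as well, pulling the data into memory too
        db.getCollection(collName).find(query).itcount();
    }
};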
// simple for loop to insert 100k records into the test database
var testDB = db.getSiblingDB("test");
// drop the collection, avoid dupes
testDB.timecheck.drop();
for(var i = 0; i < 100000; i++){
    testDB.timecheck.insert(
        {_id : i}
    )
};
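A quick sanity check once the loop has finished (assuming every insert succeeded):

testDB.timecheck.count()   // should report 100000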
#!/bin/bash
#
# author: Tim "xGhOsTkiLLeRx" Brust
# license: CC BY-NC-SA 4.0
# version: 0.2
# date: 07/10/2014
# description: replace (root) password of squashfs from openELEC
# usage: ./openELEC [password] [device] [hash] [user]
# dependencies: mkpasswd (whois), squashfs-tools
#
// these docs, in 2.6, get bucketed into the 256 bucket (size without header = 240)
// From Object.bsonsize(db.data.findOne()), the size is actually 198 for reference, so add 16 to that for an exact fit
// with that doc size, 80,000 is a nice round number under the 16MiB limit, so will use that for the inner loop
// We are shooting for ~16 GiB of data, without indexes, so do 1,024 iterations (512 from each client)
// This will mean being a little short (~500MiB) in terms of target data size, but keeps things simple
for(var j = 0; j < 512; j++){
    bigDoc = [];
    for(var i = 0; i < 80000; i++){
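The rest of the loop is cut off above; a minimal sketch of how it might continue, assuming a hypothetical document of an ObjectId plus a padding string sized to land near the 198-byte figure mentioned in the comments (the exact padding length would need tuning with Object.bsonsize()):

for(var j = 0; j < 512; j++){
    var bigDoc = [];
    for(var i = 0; i < 80000; i++){
        // hypothetical document shape: _id plus a string padded towards ~198 bytes of BSON
        bigDoc.push({_id: ObjectId(), pad: new Array(160).join("x")});
    }
    // insert the whole 80,000-document batch in one call, then start the next iteration
    db.data.insert(bigDoc);
}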
storage:
  dbPath: "/ssd/db/wt_snappy"
  engine: "wiredTiger"
systemLog:
  destination: file
  path: "/data/wt_snappy/mongodb.log"
processManagement:
  fork: true
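To use it, save the YAML above to a file and point mongod at it; the file path below is just a placeholder:

mongod --config /etc/mongod_wt_snappy.conf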