from south.models import MigrationHistory
MigrationHistory.objects.filter(app_name='polls').delete()
Make sure you delete the migrations folder (at APPNAME/migrations)
./manage.py schemamigration --initial polls
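If the polls tables already exist in the database, the regenerated initial migration is normally fake-applied so South records it without recreating anything; a minimal sketch of that last step, assuming stock South:
# record the regenerated initial migration without running it against the existing tables
./manage.py migrate polls --fake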
set foldmethod=marker
nmap <c-h> <c-w>h<c-w><Bar>
nmap <c-l> <c-w>l<c-w><Bar>
let g:user_zen_expandabbr_key = '<c-e>'
let g:use_zen_complete_tag = 1
" Use Node.js for JavaScript interpretation
" let $JS_CMD='node'
let g:syntastic_quiet_warnings=0
** SIEGE 2.72
** Preparing 200 concurrent users for battle.
The server is now under siege...
Lifting the server siege... done.
Transactions: 8296 hits
Availability: 100.00 %
Elapsed time: 19.54 secs
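For reference, a run like the one above would come from an invocation along these lines; the target URL and duration are placeholders, not taken from the original benchmark:
siege -c 200 -t 20S http://localhost/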
#!/bin/bash
# node.js using PPA (for statsd)
sudo apt-get install python-software-properties
sudo apt-add-repository ppa:chris-lea/node.js
sudo apt-get update
sudo apt-get install nodejs npm
# Install git to get statsd
sudo apt-get install git
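With node and git installed, statsd itself is usually cloned straight from GitHub and started against a small config file; a sketch assuming the etsy/statsd layout of the time (paths are placeholders):
git clone https://github.com/etsy/statsd.git /opt/statsd
cd /opt/statsd
cp exampleConfig.js local.js      # point graphiteHost/graphitePort at your graphite box
node stats.js local.js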
####################################
# BASIC REQUIREMENTS
# http://graphite.wikidot.com/installation
# http://geek.michaelgrace.org/2011/09/how-to-install-graphite-on-ubuntu/
# Last tested & updated 10/13/2011
####################################
sudo apt-get update
sudo apt-get upgrade
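The linked guides continue with Graphite's dependencies; a representative package list for Ubuntu releases of that era, to be treated as an assumption since exact package names vary by version:
sudo apt-get install apache2 libapache2-mod-wsgi python-dev python-pip \
    python-cairo python-django python-django-tagging python-twisted \
    python-memcache python-pysqlite2 python-simplejson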
import boto
from boto.route53.record import ResourceRecordSets
from collections import defaultdict

# Hosted zone to operate on (Route 53 wants the trailing dot on the zone name)
zone_name = 'yourdomain.com.'
zone_id = "ZONEID-HERE"

# Credentials are picked up from the environment / boto config
conn = boto.connect_route53()
zone = conn.get_hosted_zone(zone_id)
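ResourceRecordSets is imported above but not yet used; a minimal sketch of creating a record through it with boto's Route 53 API, where the record name, TTL, and IP are placeholders:
changes = ResourceRecordSets(conn, zone_id)
change = changes.add_change("CREATE", "www." + zone_name, "A", ttl=300)
change.add_value("203.0.113.10")
changes.commit()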
import pyes
import simplejson as json

SOURCE = ['SERVER:9200']
sconn = pyes.ES(SOURCE)

def scroll_gen(index):
    # match everything, in large batches, keeping the scroll cursor alive for 5 minutes
    q = '{"query":{"match_all":{}}, "size": 15000}'
    s = sconn.search_raw(json.loads(q), scroll="5m", indices=index)
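    # A possible continuation of the generator - this assumes the pyes release in use
    # exposes search_scroll for follow-up scroll requests, so verify against your version:
    while True:
        hits = s['hits']['hits']
        if not hits:
            break
        for doc in hits:
            yield doc
        s = sconn.search_scroll(s['_scroll_id'], scroll="5m")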
#!/bin/bash
# Herein we back up our indexes! This script should run at around 6pm or so, after logstash
# rotates to a new ES index and there's no new data coming into the old one. We grab the metadata,
# compress the data files, create a restore script, and push it all up to S3.
TODAY=`date +"%Y.%m.%d"`
INDEXNAME="logstash-$TODAY" # this had better match the index name in ES
INDEXDIR="/usr/local/elasticsearch/data/logstash/nodes/0/indices/"
BACKUPCMD="/usr/local/backupTools/s3cmd --config=/usr/local/backupTools/s3cfg put"
BACKUPDIR="/mnt/es-backups/"
YEARMONTH=`date +"%Y-%m"`
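The variables above would then drive the actual backup; a rough sketch of how they fit together, with the S3 bucket name as a placeholder rather than anything from the original script:
mkdir -p $BACKUPDIR/$YEARMONTH
# compress the now-idle index's data files
tar czf $BACKUPDIR/$YEARMONTH/$INDEXNAME.tar.gz -C $INDEXDIR $INDEXNAME
# push the archive to S3 with the preconfigured s3cmd
$BACKUPCMD $BACKUPDIR/$YEARMONTH/$INDEXNAME.tar.gz s3://your-backup-bucket/$YEARMONTH/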
# TO_FOLDER=/something
# FROM=/your-es-installation
DATE=`date +%Y-%m-%d_%H-%M`
TO=$TO_FOLDER/$DATE/
echo "rsync from $FROM to $TO"
# the first rsync runs can take a while - do not disable flushing yet
rsync -a $FROM $TO
# now disable flushing and do one manual flush
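On the pre-snapshot Elasticsearch releases this trick targets, the flush toggle lives in the index settings API; a sketch assuming ES listens on localhost:9200:
curl -XPUT "http://localhost:9200/_settings" -d '{"index": {"translog.disable_flush": true}}'
curl -XPOST "http://localhost:9200/_flush"
# final rsync pass while no new flushes are happening
rsync -a $FROM $TO
curl -XPUT "http://localhost:9200/_settings" -d '{"index": {"translog.disable_flush": false}}'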
"""
Simple python script to clean up zookeeper consumer offsets after an errant kafka consumer. Only tested on Kafka 0.7.
For details on Kazoo: http://kazoo.readthedocs.org/en/latest/index.html
MIT License.
"""
import sys
from kazoo.client import KazooClient