A "Best of the Best Practices" (BOBP) guide to developing in Python.
- "Build tools for others that you want to be built for you." - Kenneth Reitz
- "Simplicity is always better than functionality." - Pieter Hintjens
| import sys | |
| import numpy as np | |
| import networkx as nx | |
| import matplotlib.pyplot as plt | |
def plot(data,filename,degreetype):
    """Scatter-plot *data* against its index on log-log axes.

    Each value is drawn as a blue circle ('bo') at x = its position in
    the sequence, then both axes are switched to logarithmic scale —
    the usual view for inspecting degree distributions.

    NOTE(review): `filename` and `degreetype` are unused in the visible
    fragment — presumably consumed by truncated save/label code; confirm.
    """
    indices = range(len(data))
    plt.plot(indices, data, 'bo')
    plt.yscale('log')
    plt.xscale('log')
# Initialize the scroll: open a scroll context and fetch the first page
# of results so subsequent es.scroll() calls can page through the rest.
# NOTE(review): `es` is an Elasticsearch client created outside this
# fragment — confirm it is connected before this call runs.
page = es.search(
    index = 'yourIndex',    # placeholder index name — replace with yours
    doc_type = 'yourType',  # placeholder mapping type — replace with yours
    scroll = '2m',          # keep the scroll context alive for 2 minutes between fetches
    search_type = 'scan',   # HACK: 'scan' was removed in Elasticsearch 5.x — verify client/server version
    size = 1000,            # hits returned per scroll page (per shard on old 'scan' searches)
    body = {
        # Your query's body
    })
| import datetime | |
class DayExpression(object):
    """Abstract base for expressions that decide whether a date is covered.

    Concrete subclasses override :meth:`include` with their own
    membership rule; the base implementation only signals the missing
    override.
    """

    def include(self, date):
        """Return whether *date* is covered by this expression.

        Raises:
            NotImplementedError: always — subclasses must override.
        """
        raise NotImplementedError
| class Union(DayExpression): | |
| def __init__(self, expressions): |
| /** | |
| * @fileoverview Pearson correlation score algorithm. | |
| * @author [email protected] (Matt West) | |
| * @license Copyright 2013 Matt West. | |
| * Licensed under MIT (http://opensource.org/licenses/MIT). | |
| */ | |
| /** | |
| * Calculate the Pearson correlation score between two items in a dataset. |
| from collections import defaultdict | |
| from heapq import * | |
| def dijkstra(edges, f, t): | |
| g = defaultdict(list) | |
| for l,r,c in edges: | |
| g[l].append((c,r)) | |
| q, seen, mins = [(0,f,())], set(), {f: 0} | |
| while q: |
#!/bin/bash
# Configuration variables for launching the "hello" Django project under
# Gunicorn (the exec/launch lines presumably follow in the truncated part).
NAME="hello_app"                                  # Name of the application
DJANGODIR=/webapps/hello_django/hello             # Django project directory
SOCKFILE=/webapps/hello_django/run/gunicorn.sock  # we will communicate using this unix socket
USER=hello                                        # the user to run as
GROUP=webapps                                     # the group to run as
NUM_WORKERS=3                                     # how many worker processes should Gunicorn spawn
DJANGO_SETTINGS_MODULE=hello.settings             # which settings file should Django use
DJANGO_WSGI_MODULE=hello.wsgi                     # WSGI module name
| # -*- coding: utf-8 -*- | |
| ''' | |
| ''' | |
| from nltk import sent_tokenize, word_tokenize, pos_tag, ne_chunk | |
| def extract_entities(text): | |
| entities = [] | |
| for sentence in sent_tokenize(text): |