# F is a flat list of Twitter accounts followed by your friends (one entry per
# friend-follow relationship), so F.count(k) is the number of your friends
# already following account k.
counts = dict((k, F.count(k)) for k in set(F))
# Re-order as (count, account) pairs and sort from most to least followed
counts_items = counts.items()
counts_items = [(b, a) for (a, b) in counts_items]
counts_items.sort()
counts_items.reverse()
for i in counts_items:
    print str(i[0]) + ' of your friends are already following ' + i[1]
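# The same tally can be written more compactly with collections.Counter, which
# avoids the quadratic F.count(k) scan. A minimal equivalent sketch, assuming F
# is the same flat list of followed accounts:
from collections import Counter

counts = Counter(F)
# most_common() returns (account, count) pairs sorted from most to least followed
for account, count in counts.most_common():
    print('%d of your friends are already following %s' % (count, account))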
#!/usr/bin/env python
# encoding: utf-8
"""
TwitterEgoBuilder.py
Created by Drew Conway on 2009-02-23.
Copyright (c) 2009. All rights reserved.
The purpose of this script is to generate a
NetworkX DiGraph object based on the snowball
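# As an illustrative sketch of the kind of object the script above builds (not
# the original TwitterEgoBuilder code): a snowball-style ego network can be
# assembled in NetworkX by adding a directed edge for every "user follows friend"
# relationship and expanding outward from the seed. get_friends() is a
# hypothetical helper standing in for the Twitter API call.
import networkx as nx

def build_ego_digraph(seed, get_friends, rounds=2):
    # Breadth-first snowball: start from the seed and expand `rounds` times
    G = nx.DiGraph()
    frontier = [seed]
    for _ in range(rounds):
        next_frontier = []
        for user in frontier:
            for friend in get_friends(user):
                if friend not in G:
                    next_frontier.append(friend)
                G.add_edge(user, friend)  # edge direction: user follows friend
        frontier = next_frontier
    return G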
#!/usr/bin/env python
# encoding: utf-8
"""
exploded_view_3d.py
The purpose of this script is to create an 'exploded view'
of a network in 3D using hierarchical clustering of geodesic
distances in NetworkX and UbiGraph.
This script is intended as an illustrative proof of concept
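# A minimal sketch of the clustering step described above (the UbiGraph 3D
# rendering is omitted): compute all-pairs geodesic distances in NetworkX, then
# feed the condensed distance matrix to SciPy's hierarchical clustering. The
# karate club graph is only a stand-in example network.
import networkx as nx
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform

G = nx.karate_club_graph()
nodes = list(G.nodes())
dist = dict(nx.all_pairs_shortest_path_length(G))

# Symmetric matrix of geodesic distances, condensed for linkage()
D = np.array([[dist[u][v] for v in nodes] for u in nodes])
Z = linkage(squareform(D), method='complete')

# Cut the dendrogram into k clusters; these groups would drive the 'exploded' layout
clusters = fcluster(Z, t=4, criterion='maxclust')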
#!/usr/bin/env python
# encoding: utf-8
"""
Experiments for growing fractal networks
Created by Drew Conway on 2009-05-19.
Copyright (c) 2009. All rights reserved.
"""
import sys
import os
import csv

def get_players(path):
    # Returns a list of player names from the CSV file at `path`
    # (assumes the player name is in the first column and the first row is a header)
    reader = csv.reader(open(path, 'U'), delimiter=',')
    players = []
    row_num = 0
    for row in reader:
        if row_num > 0:
            players.append(row[0])
        row_num += 1
    return players
import urllib2

def get_player_profiles(player_list):
    # Returns a dict of player profile URLs to be used in the next step
    # Dict will hold player profile pages indexed by player name
    player_profile_urls = dict.fromkeys(player_list)
    for n in player_list:
        names = n.split(' ')
        # Search for the player names at NFL.com to get their individual player
        # profiles, which contain the data we ultimately want.
import html5lib
from html5lib import treebuilders

def parse_data(player_urls):
    # Returns a dict of player data parse trees indexed by player name
    # Create a dict indexed by player names
    player_data = dict.fromkeys(player_urls.keys())
    # Download player profile data and parse using html5lib
    for name in player_urls.keys():
        # html5lib integrates the easy-to-use BeautifulSoup parse tree using the
        # treebuilders library (the "beautifulsoup" tree builder is assumed to be
        # available, as in the html5lib versions of this era).
        parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup"))
        player_data[name] = parser.parse(urllib2.urlopen(player_urls[name]))
    return player_data
def write_data(data, path, new_path):
    # Takes the data dict and writes new data to a new file
    reader = csv.reader(open(path, 'U'), delimiter=',')
    writer = csv.writer(open(new_path, "w"))
    row_num = 0
    for row in reader:
        if row_num < 1:
            # Keep the same column headers as before, so we simply
            # re-write the first row.
            writer.writerow(row)
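# Chaining the scraping steps above into one pass; 'players.csv' and
# 'players_updated.csv' are hypothetical file names used only for illustration.
if __name__ == '__main__':
    players = get_players('players.csv')
    profile_urls = get_player_profiles(players)
    player_trees = parse_data(profile_urls)
    write_data(player_trees, 'players.csv', 'players_updated.csv')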
# File-Name: currency_converter.R
# Date: 2009-11-17
# Author: Drew Conway
# Purpose: Convert currency data
# Data Used: vc_invests.csv
# Packages Used: foreign,XML
# Output File: vc_invests_USD.csv
# Data Output:
# Machine: Drew Conway's MacBook
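# A rough Python sketch of the conversion step described above (the R original
# uses the foreign and XML packages). The column names 'amount' and 'currency',
# and the exchange rates, are illustrative assumptions about vc_invests.csv.
import pandas as pd

vc = pd.read_csv('vc_invests.csv')
rates_to_usd = {'EUR': 1.5, 'GBP': 1.65, 'USD': 1.0}  # placeholder rates, illustration only
vc['amount_usd'] = vc['amount'] * vc['currency'].map(rates_to_usd)
vc.to_csv('vc_invests_USD.csv', index=False)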
# File-Name: cpi_oprobit.R
# Date: 2009-11-17
# Author: Drew Conway
# Purpose: Quick ordered probit analysis of the Corruption Perceptions Index 2009
# to check for effect of number of surveys used on CPI scores
# Data Used: corruption_index.csv
# available here: http://www.drewconway.com/zia/wp-content/uploads/2009/11/corruption_index.csv
# Packages Used: foreign,Zelig
# Output File:
# Data Output:
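# A rough Python sketch of the analysis described above (the original script uses
# R's Zelig package for the ordered probit). The column names 'cpi_score' and
# 'surveys_used' are assumptions about corruption_index.csv, and the CPI score is
# binned into ordered categories here because an ordered probit needs a discrete,
# ordered outcome.
import pandas as pd
from statsmodels.miscmodels.ordinal_model import OrderedModel

cpi = pd.read_csv('corruption_index.csv')

# Bin the 0-10 CPI score into ordered quartile categories
cpi['cpi_cat'] = pd.qcut(cpi['cpi_score'], q=4)

# Ordered probit of the binned CPI score on the number of surveys used
model = OrderedModel(cpi['cpi_cat'], cpi[['surveys_used']], distr='probit')
result = model.fit(method='bfgs')
print(result.summary())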