Suppose you have these line-item and snapshot/lookup tables that you want to relate:
Contracts
ID,Type,Date,Amt
1,PO,1/20,$100
1,PO,2/20,$200
1,PO,2/23,$300
2,PO,2/10,$1000
2,PO,2/15,$2000
def partial_soln(solns, coins, target):
    """Extend the memo `solns` in place with every coin combination for `target`.

    `solns` maps an amount to a list of sorted tuples, each tuple a multiset
    of coin values summing to that amount (the caller seeds the base case,
    e.g. solns[0] = [()]). For each coin whose remainder is already solved,
    every known combination for the remainder is grown by that coin.
    Combinations are stored as sorted tuples; duplicates are not removed here.
    """
    solns[target] = []
    for coin in coins:
        remainder = target - coin
        if remainder not in solns:
            continue  # remainder not yet solved — nothing to build on
        for known in solns[remainder]:
            solns[target].append(tuple(sorted((*known, coin))))
def solution(coins, target): |
Suppose you have these line-item and snapshot/lookup tables that you want to relate:
Contracts
ID,Type,Date,Amt
1,PO,1/20,$100
1,PO,2/20,$200
1,PO,2/23,$300
2,PO,2/10,$1000
2,PO,2/15,$2000
# An occasional python interview question I have seen is: | |
# "How would you make this series unique while preserving order?" | |
# The standard code looks like this: | |
def unique2(series):
    """Return a list of the elements of `series` with duplicates removed,
    preserving first-occurrence order.

    Elements must be hashable. Accepts any iterable.

    >>> unique2([1, 2, 1, 3, 2])
    [1, 2, 3]
    """
    result = []
    seen = set()
    for s in series:
        if s not in seen:
            seen.add(s)
            # Bug fix: the original never recorded the element nor returned,
            # so it always produced None.
            result.append(s)
    return result
def cartesian(lol):
    """Lazily yield the cartesian product of a list of lists.

    Each yielded value is a list taking one element from every inner list,
    in the original left-to-right order. The product of an empty list of
    lists is a single empty combination.
    """
    if not lol:
        yield []
        return
    head, tail = lol[0], lol[1:]
    for element in head:
        for combo in cartesian(tail):
            yield [element, *combo]
>>> data = [ |
from itertools import tee | |
from random import shuffle | |
from datetime import datetime | |
def issorted(series):
    """Return True if `series` is in non-decreasing order.

    Accepts any iterable (it is consumed). Empty and single-element
    series are considered sorted.
    """
    s1, s2 = tee(series)
    # Advance the twin iterator so zip pairs each element with its successor.
    # The default argument fixes a bug: bare next() raised StopIteration on
    # an empty series instead of returning True.
    next(s2, None)
    return all(elem1 <= elem2 for elem1, elem2 in zip(s1, s2))
import numpy as np | |
def downsample(data, labels): | |
""" | |
>>> data = np.arange(100) | |
>>> label = np.array([1] * 95 + [0] * 5) | |
>>> print downsample(data, label) | |
""" | |
zero_index = np.array([i for i, val in enumerate(labels) if val == 0]) | |
one_index = np.array([i for i, val in enumerate(labels) if val == 1]) |
""" | |
Code to write data read from a URL to a file | |
Based on an answer on SO: | |
http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python/22721 | |
""" | |
import urllib2 | |
mp3file = urllib2.urlopen("http://www.example.com/songs/mp3.mp3") |
from __future__ import print_function | |
import numpy as np | |
from nltk.corpus import stopwords | |
# from nltk.stem import WordNetLemmatizer | |
from nltk.stem.porter import PorterStemmer | |
from sklearn import metrics | |
from sklearn.cross_validation import train_test_split # , cross_val_score |