I hereby claim:
- I am lizadaly on github.
- I am lizadaly (https://keybase.io/lizadaly) on keybase.
- I have a public key ASBKi45tuqp1aEuDXp8B0EYFUcdlRSGFMlRxb9nNAqm4owo
To claim this, I am signing this object:
from django.db import models

class LibraryItem(models.Model):
    # Member is assumed to be defined elsewhere in the app
    owner = models.ForeignKey(Member, on_delete=models.CASCADE)

class Status(models.Model):
    name = models.CharField(max_length=50)

class ItemStatus(models.Model):
    # Join model linking an item to one of its statuses
    item = models.ForeignKey(LibraryItem, on_delete=models.CASCADE)
    status = models.ForeignKey(Status, on_delete=models.CASCADE)
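
A quick usage sketch (the status name here is hypothetical, not from the original snippet): Django's default reverse lookups let you filter items through the ItemStatus join model.

# Hypothetical query: every item whose current status is named "Checked out"
checked_out = LibraryItem.objects.filter(itemstatus__status__name="Checked out")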
>>> # I'm going to set the variable 'items' first just for this demo
>>> items = 0
>>> if items > 5:
... print "Shipping is free"  # This is going to cause a problem because it's not indented
  File "<stdin>", line 2
    print "Shipping is free"
        ^
IndentationError: expected an indented block
>>> # Oops, I wanted to indent that second line!
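
For completeness, the corrected session: the body of the if just needs to be indented (Python 2 syntax, matching the demo above).

>>> if items > 5:
...     print "Shipping is free"
...
>>> # No output this time: items is 0, so the condition is false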
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
import numpy as np

# Load the pre-classified training data from the category folders
# "training-fanfic/clean/" and "training-fanfic/dirty/"
train = load_files("training-fanfic", encoding="utf-8", load_content=True)
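
A hedged continuation (the pipeline and fit step below are an assumption suggested by the imports, not the original gist's code):

# Sketch: TF-IDF features feeding a linear classifier trained with SGD
pipeline = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("clf", SGDClassifier()),
])
pipeline.fit(train.data, train.target)

# Classify a new passage; target_names maps back to the folder labels
predicted = pipeline.predict(["Some new passage of text"])
print(train.target_names[predicted[0]])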
> db.subjects.getIndexes()
[
    {
        "v" : 1,
        "key" : {
            "_id" : 1
        },
        "name" : "_id_",
        "ns" : "scribe_api_development.subjects"
    },
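
The same check from Python, as a hedged sketch (it assumes a local MongoDB instance holding the scribe_api_development database; pymongo's index_information() returns the equivalent data):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client["scribe_api_development"]

# Print each index name alongside the fields it covers
for name, info in db.subjects.index_information().items():
    print(name, info["key"])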
import nltk
from nltk.corpus import stopwords
from collections import Counter

word_list = []

# Set up a quick lookup table for common words like "the" and "an" so they can be excluded
stops = set(stopwords.words('english'))

# For all 18 novels in the public domain book corpus, extract all their words
for fileid in nltk.corpus.gutenberg.fileids():
    word_list.extend(nltk.corpus.gutenberg.words(fileid))
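
A short follow-on sketch (suggested by the Counter and stopword setup above, not the original gist verbatim): tally the remaining words.

# Count alphanumeric, non-stopword tokens and peek at the ten most common
word_counts = Counter(
    w.lower() for w in word_list if w.isalnum() and w.lower() not in stops
)
print(word_counts.most_common(10))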
import random
from collections import Counter
import nltk

# See https://gist.github.com/lizadaly/7071e0de589883a197433951bc7314c5 for comments on the setup here
word_list = []
for fileid in nltk.corpus.gutenberg.fileids():
    word_list.extend(nltk.corpus.gutenberg.words(fileid))
cleaned_words = [w.lower() for w in word_list if w.isalnum()]
all_bigrams = list(nltk.bigrams(cleaned_words))
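
A hedged sketch of where this could be heading (the random and Counter imports suggest frequency-weighted sampling over the bigrams; this continues the variables defined above and is not the original gist's code):

# Map each word to a Counter of the words that follow it
successors = {}
for w1, w2 in all_bigrams:
    successors.setdefault(w1, Counter())[w2] += 1

def next_word(word):
    """Pick a plausible follower of `word`, weighted by bigram frequency."""
    counts = successors.get(word)
    if not counts:
        return random.choice(cleaned_words)
    candidates, weights = zip(*counts.items())
    return random.choices(candidates, weights=weights, k=1)[0]

print(next_word("the"))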
/* Merge two already-sorted arrays into a single sorted array */
var sortedArrays = function (arr1, arr2) {
    let ans = new Array(arr1.length + arr2.length)
    let i = 0, j = 0, k = 0
    while (i < arr1.length && j < arr2.length) {
        if (arr1[i] < arr2[j]) {
            ans[k] = arr1[i]
            i++
        }
        else {
            ans[k] = arr2[j]
            j++
        }
        k++
    }
    /* Copy whatever remains of the longer array */
    while (i < arr1.length) { ans[k++] = arr1[i++] }
    while (j < arr2.length) { ans[k++] = arr2[j++] }
    return ans
}
/* Version without division */
var findProductsNoDiv = function (arr) {
    let forw = new Array(arr.length)
    let back = new Array(arr.length)
    let res = new Array(arr.length)
    let prod = 1
    /* Go through the array forward and multiply as we go */
    for (var i=0;i<arr.length;i++) {
        forw[i] = prod
        prod *= arr[i]
    }
    /* Then go backward; the answer at i is the product of everything before i times everything after i */
    prod = 1
    for (var i=arr.length-1;i>=0;i--) {
        back[i] = prod
        prod *= arr[i]
        res[i] = forw[i] * back[i]
    }
    return res
}
import spacy

# 'en' was a valid shortcut link in older spaCy releases; current releases
# use the full model name, e.g. spacy.load('en_core_web_sm')
nlp = spacy.load('en')
doc = nlp('Do you have a car or truck')
for token in doc:
    # Print each token alongside its part-of-speech tag
    print(token, token.pos_)
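
A small follow-on sketch (an assumption, not part of the original snippet): filtering the same doc down to its nouns.

# Keep only the tokens tagged as nouns; for this sentence the result
# should be ['car', 'truck'], though exact tags depend on the model version
nouns = [token.text for token in doc if token.pos_ == "NOUN"]
print(nouns)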