import utils
import nltk
from pprint import pprint

def find_document_by_keyword(keyword):
    # Load the corpus as a DataFrame and count the rows that mention the keyword.
    df = utils.data_load()
    count = 0
    for index, row in df.iterrows():
        # Assumed completion: the gist truncates here; match the keyword against any string cell.
        if row.astype(str).str.contains(keyword, case=False).any():
            count += 1
            pprint(row.to_dict())
    return count
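For context, calling the helper might look like this (the keyword is purely illustrative; utils.data_load() must be importable):

# Example call: count and pretty-print every document that mentions "energy".
matches = find_document_by_keyword("energy")
print(matches, "documents matched")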
import json, re, datetime
import pandas as pd
import numpy as np
import nltk
from nltk.stem.snowball import SnowballStemmer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion, Pipeline

# Shared NLP setup: English stopword list and Snowball stemmer
# (nltk.download('stopwords') must have been run once beforehand).
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
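The gist never shows how these imports are tied together. As one plausible sketch only, reusing the stopwords and stemmer defined above (the TextSelector class, the 'text' column name, and the TfidfVectorizer step are assumptions, not part of the gist), a stemming tokenizer can feed a scikit-learn Pipeline like this:

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline

def tokenize_and_stem(text):
    # Lowercase, keep alphabetic word tokens, drop stopwords, then stem.
    # (nltk.download('punkt') is needed once for word_tokenize.)
    tokens = [t for t in nltk.word_tokenize(text.lower()) if t.isalpha()]
    return [stemmer.stem(t) for t in tokens if t not in stopwords]

class TextSelector(BaseEstimator, TransformerMixin):
    # Pick one text column out of a DataFrame so it can feed a vectorizer.
    def __init__(self, column):
        self.column = column
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.column]

pipeline = Pipeline([
    ('select', TextSelector(column='text')),            # assumed column name
    ('tfidf', TfidfVectorizer(tokenizer=tokenize_and_stem)),
])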
import requests

# Upload both microphone recordings in a single multipart POST;
# each file is sent under its own filename as the form field name.
filenames = ['10_1_mic1.wav', '10_1_mic2.wav']
files = dict()
for filename in filenames:
    files[filename] = open(filename, 'rb')
res = requests.post('http://localhost:8080/upload', files=files)
# Close the file handles once the upload has completed.
for f in files.values():
    f.close()
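The gist does not include the service behind http://localhost:8080/upload. As a rough sketch only, assuming a Flask app (Flask, the route, and saving into the working directory are assumptions, not part of the gist), a handler that accepts this multipart upload could look like:

from flask import Flask, request

app = Flask(__name__)

@app.route('/upload', methods=['POST'])
def upload():
    # Each part of the multipart body is keyed by its form field name;
    # the client above uses the original filename as that field name.
    for name, f in request.files.items():
        f.save(name)  # assumption: write each upload into the working directory
    return 'ok'

if __name__ == '__main__':
    app.run(port=8080)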
import numpy as np
import pandas as pd
import nltk
import re, json, os, codecs, mpld3, datetime
from sklearn import feature_extraction
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases

def data_load(year):
    # Collect the records for the requested year (the rest of the function is truncated in the gist).
    data = list()
const express = require('express');
const http = require('http');
const app = express();
const exec = require('child_process').exec;

// Wrap child_process.exec in a Promise so shell commands can be awaited.
function run(command) {
  return new Promise((resolve, reject) => {
    exec(command, (error, stdout, stderr) => {
      console.log('stdout: ' + stdout);
      console.log('stderr: ' + stderr);
      if (error) return reject(error);  // assumed completion: the gist truncates here
      resolve(stdout);
    });
  });
}