#!/usr/bin/env python
# coding: utf8
"""Example of multi-processing with Joblib. Here, we're exporting
part-of-speech-tagged, true-cased, (very roughly) sentence-separated text, with
each "sentence" on a newline, and spaces between tokens. Data is loaded from
the IMDB movie reviews dataset and will be loaded automatically via Thinc's
built-in dataset loader.
Compatible with: spaCy v2.0.0+
"""
package com.test
import org.apache.spark._
import org.apache.spark.SparkContext._
object WordCount {
  def main(args: Array[String]): Unit = {
    // Use the first CLI argument as the Spark master if given, else run locally.
    val master = args.length match {
      case x: Int if x > 0 => args(0)
      case _ => "local"
    }
    // Plausible completion below: count words in the input file (second argument).
    val sc = new SparkContext(master, "WordCount")
    val counts = sc.textFile(args(1)).flatMap(_.split("\\s+")).map((_, 1)).reduceByKey(_ + _)
    counts.collect().foreach(println)
  }
}
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer
# Build several kinds of models with scikit-learn: the same Naive Bayes
# classifier behind three different text vectorizers.
clf_1 = Pipeline([('vect', CountVectorizer()),
                  ('clf', MultinomialNB())])
# alternate_sign=False keeps hashed features non-negative, as MultinomialNB
# requires; it replaces the deprecated non_negative=True option.
clf_2 = Pipeline([('vect', HashingVectorizer(alternate_sign=False)),
                  ('clf', MultinomialNB())])
clf_3 = Pipeline([('vect', TfidfVectorizer()),
                  ('clf', MultinomialNB())])
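# Quick smoke test for the three pipelines above; the toy corpus is
# illustrative only, but the fit/predict interface is standard scikit-learn.
train_texts = ['good movie', 'great film', 'terrible plot', 'awful acting']
train_labels = [1, 1, 0, 0]
for name, clf in [('count', clf_1), ('hashing', clf_2), ('tfidf', clf_3)]:
    clf.fit(train_texts, train_labels)
    print(name, clf.predict(['a great movie', 'an awful film']))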
$(document).ready(function(){
    GetData(1);
});
function GetData(_page){
    var _offset = _page;
    var _data_list = "{{ data_list|join('|') }}";  // injected by the Jinja2 template
    if(_data_list != ''){
        // Plausible completion below; the original snippet ends after `url`.
        $.ajax({
            url : '/getData',
            type : 'POST',
            data : { page : _offset },
            success : function(response){
                console.log(response);
            }
        });
    }
}
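# A hedged sketch of the server side the AJAX call above assumes: a Flask
# view at '/getData' (the route comes from the snippet; the Flask app, the
# 'page' field, and the dummy payload are illustrative assumptions).
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/getData', methods=['POST'])
def get_data():
    page = int(request.form.get('page', 1))
    # Replace with a real lookup; this just pages over dummy items.
    items = ['item-%d' % i for i in range(page * 10, page * 10 + 10)]
    return jsonify(items=items)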
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
public class ListTables {
    public static void main(String[] args) throws Exception {
        Configuration hconf = HBaseConfiguration.create();
        hconf.set("hbase.rootdir", "hbase root path");
        hconf.set("hbase.zookeeper.quorum", "zookeeper quorum address");
        hconf.set("hbase.zookeeper.property.clientPort", "zookeeper port");
        // Print the name of every table in the cluster.
        HBaseAdmin ha = new HBaseAdmin(hconf);
        for (int i = 0; i < ha.listTableNames().length; i++){
            System.out.println(ha.listTableNames()[i]);
        }
        ha.close();
    }
}
import tensorflow as tf
import numpy as np
from cell.lstm import BN_LSTMCell  # custom batch-normalized LSTM cell from the original repo
import sys
class Seq2Seq(object):
    def __init__(self, xseq_len, yseq_len,
                 xvocab_size, yvocab_size,
                 emb_dim, num_layers, ckpt_path,
                 lr=0.0001, epochs=100000, model_name='seq2seq_model'):
        # Store the hyperparameters; graph construction happens elsewhere
        # in the original wrapper.
        self.xseq_len = xseq_len
        self.yseq_len = yseq_len
        self.ckpt_path = ckpt_path
        self.epochs = epochs
        self.model_name = model_name
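# Hedged instantiation sketch; the sizes are illustrative, and the custom
# cell.lstm module from the original repo must be importable.
model = Seq2Seq(xseq_len=20, yseq_len=20,
                xvocab_size=8000, yvocab_size=8000,
                emb_dim=128, num_layers=3, ckpt_path='ckpt/')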
def dual_encoder_model(
        hparams,
        mode,
        context,
        context_len,
        utterance,
        utterance_len,
        targets):
    # Initialize embeddings randomly or with pre-trained vectors if available
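    # A hedged sketch of the step the comment above describes; the hparams
    # fields and the load_glove_vectors helper are illustrative assumptions,
    # not the tutorial's exact API.
    if getattr(hparams, 'glove_path', None):
        initializer = tf.constant_initializer(load_glove_vectors(hparams.glove_path))
    else:
        initializer = tf.random_uniform_initializer(-0.25, 0.25)
    embeddings_W = tf.get_variable(
        'word_embeddings',
        shape=[hparams.vocab_size, hparams.embedding_dim],
        initializer=initializer)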
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
import os
import random
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
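# A minimal, hedged sketch of wiring these imports together (the token env
# var and the echo behavior are illustrative; the API shown is the v12-style
# Updater/Dispatcher interface these imports come from).
def start(update, context):
    update.message.reply_text('Hi!')

def echo(update, context):
    update.message.reply_text(update.message.text)

def main():
    updater = Updater(os.environ['TELEGRAM_TOKEN'], use_context=True)
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('start', start))
    dp.add_handler(MessageHandler(Filters.text, echo))
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()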
import csv
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import matplotlib.pyplot as plt
%matplotlib inline
def load_series(filename, series_idx=1):
    # Read one column of a CSV file and return it z-score normalized
    # (plausible completion; the original snippet ends at the open call).
    try:
        with open(filename) as csvfile:
            csvreader = csv.reader(csvfile)
            data = [float(row[series_idx]) for row in csvreader if len(row) > 0]
        normalized = (data - np.mean(data)) / np.std(data)
        return normalized
    except IOError:
        return None
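# Example usage, assuming a two-column CSV of time-series values (the
# filename is illustrative): load, normalize, and plot the series.
timeseries = load_series('international-airline-passengers.csv')
if timeseries is not None:
    plt.plot(timeseries)
    plt.show()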
import numpy as np
import pickle
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
def unpickle(file):
    # Load one pickled CIFAR-10 batch; under Python 3 the bytes encoding is
    # needed because the batches were written with Python 2.
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    return batch
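# Example usage, assuming the CIFAR-10 python batches are on disk (the path
# is illustrative); keys come back as bytes under Python 3, hence b'data'.
batch = unpickle('cifar-10-batches-py/data_batch_1')
images = batch[b'data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
plt.imshow(images[0])
plt.show()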