# install Composer, then update dependencies while ignoring platform requirements
curl -sS https://getcomposer.org/installer | php
php composer.phar update --ignore-platform-reqs
var list = [
  {name: "1", lastname: "foo1", age: "16"},
  {name: "2", lastname: "foo", age: "13"},
  {name: "3", lastname: "foo1", age: "11"},
  {name: "4", lastname: "foo", age: "11"},
  {name: "5", lastname: "foo1", age: "16"},
  {name: "6", lastname: "foo", age: "16"},
  {name: "7", lastname: "foo1", age: "13"},
  {name: "8", lastname: "foo1", age: "16"},
  {name: "9", lastname: "foo", age: "13"}
];
var Converter = require("csvtojson").core.Converter;
var fs = require("fs");
var csvFileName = "./test.csv";
var fileStream = fs.createReadStream(csvFileName);
// new converter instance; constructResult keeps the full JSON result in memory
var csvConverter = new Converter({constructResult: true});
// "end_parsed" is emitted once parsing has finished
csvConverter.on("end_parsed", function (jsonObj) {
  console.log(jsonObj); // the whole CSV converted to a JSON array
});
// pipe the file into the converter to start parsing
fileStream.pipe(csvConverter);
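With constructResult:true the entire result is held in memory. For large files, the same legacy csvtojson 0.x API also emits a per-row event, which avoids building the full array; a sketch reusing fs and csvFileName from above (the record_parsed event belongs to that old API):

var streamingConverter = new Converter({constructResult: false});
// "record_parsed" fires once per CSV row instead of once at the end
streamingConverter.on("record_parsed", function (resultRow, rawRow, rowIndex) {
  console.log("row " + rowIndex + ":", resultRow);
});
fs.createReadStream(csvFileName).pipe(streamingConverter);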
// first, fetch the first 10 records
select id from user_info limit 10;
// then, using the id of the 10th row as the paging marker, fetch the next 10
select id from user_info where token(id) > token(<last_id_of_the_previous_result>) limit 10;
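The "save the id of the 10th row" step normally happens in application code. A minimal sketch using the Node cassandra-driver (the driver choice and the keyspace name are assumptions; the token() pattern is the one shown in the two queries above):

var cassandra = require("cassandra-driver");
var client = new cassandra.Client({ contactPoints: ["localhost"], keyspace: "my_keyspace" });

// page 1: plain LIMIT query
client.execute("select id from user_info limit 10", function (err, result) {
  if (err) throw err;
  var lastId = result.rows[result.rows.length - 1].id;
  // page 2: restart the scan just past the last id of page 1
  client.execute(
    "select id from user_info where token(id) > token(?) limit 10",
    [lastId],
    { prepare: true },
    function (err2, result2) {
      if (err2) throw err2;
      console.log(result2.rows);
    }
  );
});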
<?php
$mongodb = new Mongo("mongodb://username:password@localhost/database_name");
$database = $mongodb->database_name;
$collection = $database->collection;

$page = isset($_GET['page']) ? (int) $_GET['page'] : 1;
$limit = 12;
$skip = ($page - 1) * $limit;
$next = ($page + 1);
$prev = ($page - 1);

// run the paged query: skip past the previous pages, cap at one page of results
$cursor = $collection->find()->skip($skip)->limit($limit);
[{
  "limit": 25,
  "name": null,
  "type": "/business/consumer_product"
}]

Run the query in the MQL query editor at https://www.freebase.com/query
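The same MQL could also be issued programmatically against Google's (since retired) mqlread endpoint; a Node sketch, where the endpoint URL and response shape reflect the old Google APIs Freebase service and should be treated as assumptions:

var https = require("https");

var query = [{ limit: 25, name: null, type: "/business/consumer_product" }];
var url = "https://www.googleapis.com/freebase/v1/mqlread?query=" +
          encodeURIComponent(JSON.stringify(query));

https.get(url, function (res) {
  var body = "";
  res.on("data", function (chunk) { body += chunk; });
  res.on("end", function () {
    // mqlread wrapped the matches in a "result" array
    console.log(JSON.parse(body).result);
  });
});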
PUT http://localhost:9200/_river/my_twitter_river_filter/_meta
{
  "type" : "twitter",
  "twitter" : {
    "filter" : {
      "tracks" : "walmart"
    }
  }
}
./spark-shell --jars ./spark-cassandra-connector_2.10-1.1.1.jar --driver-class-path $(echo /Users/agaikw1/spark-cassandra-connector/spark-cassandra-connector-java/target/scala-2.10/*.jar | sed 's/ /:/g')

import com.datastax.spark.connector._, org.apache.spark.SparkContext, org.apache.spark.SparkContext._, org.apache.spark.SparkConf

// point the connector at the Cassandra node, then build a local 4-core context
val conf = new SparkConf(true).set("spark.cassandra.connection.host", "localhost")
val sc = new SparkContext("local[4]", "gemc_dev", conf)
val test_spark_rdd = sc.cassandraTable("keyspace_name", "table_name")
// sanity check: pull one row to confirm the connection works
test_spark_rdd.first
drop table time_zone_map;
drop table tweets_raw;
drop table tweets_sentiment;
drop table tweetsbi;
drop table dictionary;
drop view tweets_clean;
drop view tweets_simple;
drop view l1;
drop view l2;
drop view l3;