Here is a list of scopes to use in Sublime Text 2/3 snippets -
ActionScript: source.actionscript.2
AppleScript: source.applescript
ASP: source.asp
Batch File: source.dosbatch
C#: source.cs
C++: source.c++
Clojure: source.clojure
// This can be imported via ./bin/gremlin.sh -i describe.groovy
// A variable 'graph' must be defined with a JanusGraph graph
// Run it as a plugin command ':schema'
// :schema describe
//
import org.janusgraph.graphdb.database.management.MgmtLogType
import org.codehaus.groovy.tools.shell.Groovysh
import org.codehaus.groovy.tools.shell.CommandSupport
import itertools

def chunks(l, n):
    """Yield successive n-sized chunks from the list l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

def chunks_iter(iterable, n):
    """Yield successive n-sized chunks from any iterable, without needing len()."""
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk
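A quick check of both helpers, assuming the two definitions above:

data = list(range(10))
print(list(chunks(data, 3)))            # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
print(list(chunks_iter(range(10), 3)))  # [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]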
import os
import sys

import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
from IPython.core.display import display, HTML
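A minimal sketch to smoke-test this notebook preamble, assuming a reasonably recent seaborn; the column names and values below are made up:

df = pd.DataFrame({"x": np.arange(20), "y": np.random.randn(20).cumsum()})
sns.lineplot(data=df, x="x", y="y")
plt.show()
display(HTML("<b>preamble OK</b>"))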
import org.apache.spark.mllib.random.RandomRDDs
import org.apache.spark.sql.{types => T, functions => F}
import org.apache.spark.sql.Row

import spark.implicits._

// 100 uniform random doubles in a single column "A"
val rdd = RandomRDDs.uniformRDD(spark.sparkContext, 100)
var df = rdd.toDF("A")
// add a unique (monotonically increasing, not necessarily consecutive) id column
df = df.withColumn("id", F.monotonically_increasing_id())
# metadata
GET _cat/indices?v
# node roles: m = master, d = data, i = ingest
GET _cat/nodes?v&s=ip:asc
GET _cat/master?v
GET _cat/shards?v
GET _cat/allocation?v
GET _cluster/health
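The same checks can be scripted; a minimal Python sketch, assuming an unsecured cluster at localhost:9200 (adjust the URL and auth for your setup):

import requests

BASE = "http://localhost:9200"  # assumption: local, unauthenticated cluster

for path in ("_cat/indices?v", "_cat/nodes?v&s=ip:asc", "_cat/shards?v"):
    print(requests.get(f"{BASE}/{path}").text)

print(requests.get(f"{BASE}/_cluster/health").json())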
"""just for fun on 10 fast fin | |
including some random stuff and error to make it more 'human' | |
""" | |
import time | |
import random | |
from selenium.webdriver import Chrome, ChromeOptions | |
WPM = 300 | |
base_interrval = 60 / WPM / 50 # words length + made up for time.sleep(), this should do it |
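# The rest of the original script is not shown; below is a minimal sketch of
# what the typing loop might look like, assuming the driver is already on the
# test page and "#wordinput" is a hypothetical selector for the input box.
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

def type_like_human(driver, words, error_rate=0.03):
    box = driver.find_element(By.CSS_SELECTOR, "#wordinput")  # hypothetical selector
    for word in words:
        for ch in word:
            if random.random() < error_rate:
                box.send_keys("x")              # deliberate typo...
                time.sleep(base_interval * random.uniform(1, 3))
                box.send_keys(Keys.BACKSPACE)   # ...then correct it
            box.send_keys(ch)
            time.sleep(base_interval * random.uniform(0.5, 1.5))
        box.send_keys(" ")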
// read file by line (assumes "bufio", "fmt", "log", "os" are imported)
file, err := os.Open("file.txt")
if err != nil {
	log.Fatal(err)
}
defer file.Close()

scan := bufio.NewScanner(file)
for scan.Scan() {
	line := scan.Text()
	fmt.Println(line)
}
if err := scan.Err(); err != nil {
	log.Fatal(err)
}
from pyspark.sql import SparkSession

# config
INPUT = ""
OUTPUT = ""

def main():
    spark = (
        SparkSession.builder
        .appName("Viet PySpark")
        .config("spark.dynamicAllocation.enabled", "true")
        .getOrCreate()
    )