Skip to content

Instantly share code, notes, and snippets.

View aazout's full-sized avatar
💭
Thinking... 🧠

Albert Azout aazout

💭
Thinking... 🧠
View GitHub Profile
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
#import tf_attached_input
import tf_attached as attached
import tf_attached_input as attached_input
#import math, time
#from datetime import datetime
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#!/bin/bash -x
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
@aazout
aazout / gist:49b79d205dd2c04f14c3
Created June 15, 2015 21:56
Estimator implementation using spark.ml
package com.aol.advertising.execution.ml
import org.apache.spark.ml.classification._
import org.apache.spark.ml.param._
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.mllib.linalg.{Vector, VectorUDT}
import org.apache.spark.sql.types.{DataType, StructType}
import java.io._
import scala.sys.process._
@aazout
aazout / Batch External Training
Created June 12, 2015 18:09
This code snippet shows a method of running a third-party batch trainer using subprocess in Scala on Spark.
package com.aol.advertising.execution
import org.apache.spark.{SparkConf, SparkContext, HashPartitioner}
import scala.sys.process._
import java.io._
import org.apache.hadoop.io._
import org.apache.hadoop.mapred.{FileSplit, TextInputFormat}
import org.apache.spark.rdd.HadoopRDD