ClassNotFoundException when using Mesos
The following standalone application, submitted to a Mesos cluster, fails with a ClassNotFoundException on the flatMap closure:
package spark

import org.apache.spark
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkConf

object SparkExamplesMinimal {

  def asInt(s: String): Option[Int] = {
    try {
      Some(Integer.parseInt(s))
    } catch {
      case _: Throwable => None
    }
  }

  def main(args: Array[String]) {
    // configure Spark environment
    val conf = new SparkConf()
      .setMaster("mesos://mesos10-1:5050/")
      .setAppName("My app")
      .set("spark.executor.uri", "http://hdp8:8001/spark-1.0.0-SNAPSHOT.tar.gz")
    val sc = new spark.SparkContext(conf)

    // simulate "reading from a file" by converting a standard Scala list
    // comprehension to an RDD
    val fileData = for (i <- 1 to 1000000) yield {
      "%d\t%s".format(i, i)
    }
    val file: RDD[String] = sc.parallelize(fileData)
    println("count: " + file.count()) // THIS WORKS AND PRINTS 1000000
    // issue a `flatMap()` on this RDD to convert string rows into
    // integer tuples while skipping failures
    val data: RDD[(Int, Int)] = file.flatMap(row => {
      val intRow = row.split('\t').map(asInt(_)).toList
      intRow match {
        case Some(key) :: Some(value) :: Nil =>
          Some((key, value))
        case _ =>
          None
      }
    })
    println("count: " + data.count()) // THIS GIVES ClassNotFoundException: spark.SparkExamplesMinimal$$anonfun$2
  }
}
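The missing class, spark.SparkExamplesMinimal$$anonfun$2, is the anonymous function passed to flatMap. It is compiled into the application jar, and the Mesos executors, which only download the Spark distribution referenced by spark.executor.uri, apparently never get that jar on their classpath. The earlier file.count() presumably works because it ships no user-defined classes, only Spark's own. One possible workaround, sketched here with a hypothetical jar path, is to list the application jar in the SparkConf so the driver serves it to the executors:

import org.apache.spark.SparkConf

val conf = new SparkConf()
  .setMaster("mesos://mesos10-1:5050/")
  .setAppName("My app")
  .set("spark.executor.uri", "http://hdp8:8001/spark-1.0.0-SNAPSHOT.tar.gz")
  // Hypothetical path: the assembled jar containing SparkExamplesMinimal
  // and its $$anonfun closure classes. setJars() makes the driver serve
  // this jar to every executor before tasks run.
  .setJars(Seq("/path/to/spark-examples-minimal.jar"))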
The very same logic, entered line by line in the Spark shell (where sc is predefined) against the same cluster, works:
import org.apache.spark
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkConf

def asInt(s: String): Option[Int] = {
  try {
    Some(Integer.parseInt(s))
  } catch {
    case _: Throwable => None
  }
}

// simulate "reading from a file" by converting a standard Scala list
// comprehension to an RDD
val fileData = for (i <- 1 to 1000000) yield {
  "%d\t%s".format(i, i)
}
val file: RDD[String] = sc.parallelize(fileData)
println("count: " + file.count()) // THIS WORKS AND PRINTS 1000000
// issue a `flatMap()` on this RDD to convert string rows into
// integer tuples while skipping failures
val data: RDD[(Int, Int)] = file.flatMap(row => {
  val intRow = row.split('\t').map(asInt(_)).toList
  intRow match {
    case Some(key) :: Some(value) :: Nil =>
      Some((key, value))
    case _ =>
      None
  }
})
println("count: " + data.count()) // THIS WORKS AND PRINTS 1000000