This script uses an Amazon EMR Spark cluster with 10 m3.xlarge
instances.
# One-time driver-node setup: install the packages used below and create
# the HDFS output directory that the workers will write frames into.
# NOTE: install.packages() takes package names as character strings —
# the original unquoted `devtools` / `sparklyr` would error with
# "object 'devtools' not found".
install.packages("devtools")
install.packages("sparklyr")
devtools::install_github("tylermorganwall/rayrender")
# Create the HDFS directory that the workers' `hadoop fs -put` targets
# (/user/hadoop/rendering/); -p makes this a no-op if it already exists.
system2("hadoop", args = c("fs", "-mkdir", "-p", "/user/hadoop/rendering"))
library(sparklyr)

# Resource plan for the YARN-managed Spark cluster: many small executors,
# one per frame-rendering task.
# Use [[<- for single-element assignment into the config list; the original
# single-bracket `config["key"] <- value` works only via list-coercion of
# the RHS and is not the idiomatic form.
config <- spark_config()
config[["sparklyr.shell.driver-memory"]] <- "8g"
config[["sparklyr.shell.executor-memory"]] <- "1g"
config[["sparklyr.shell.executor-cores"]] <- 1
# NOTE(review): 80 single-core 1g executors — confirm this matches the
# capacity of the 10 m3.xlarge nodes mentioned at the top of the script.
config[["sparklyr.shell.num-executors"]] <- 80
config[["spark.memory.fraction"]] <- 0.8

sc <- spark_connect(master = "yarn", config = config)
library(rayrender)

# Scene to render: a lambertian ground plane with three orange metal
# spheres placed around the origin.
orange_sphere <- function(...) {
  sphere(material = metal(color = "orange"), ...)
}

scene <- generate_ground(material = lambertian())
scene <- add_object(scene, orange_sphere(z = -2))
scene <- add_object(scene, orange_sphere(z = +2))
scene <- add_object(scene, orange_sphere(x = -2))
# Worker function run by spark_apply(): renders one frame and ships it to
# HDFS. `rows` is this partition's slice of the id column (one frame id,
# since the data frame is repartitioned one row per partition); `ctx` is
# the scene object broadcast to every worker via `context`.
render_frame <- function(rows, ctx) {
  frame_id <- rows$id
  outfile <- sprintf("%04d.png", frame_id)
  # Camera orbits the spheres: id/100 sweeps 0..6.28 radians over the
  # 628 frames, i.e. roughly one full revolution.
  rayrender::render_scene(ctx, width = 1920, height = 1080,
    lookfrom = c(12 * sin(frame_id / 100), 5, 12 * cos(frame_id / 100)),
    filename = outfile)
  # Push the finished frame to HDFS so the driver can fetch it afterwards.
  system2("hadoop", args = c("fs", "-put", outfile, "/user/hadoop/rendering/"))
}

# One Spark task per frame: 628 ids, 628 partitions. collect() just forces
# execution; the useful output is the PNG files written to HDFS.
sdf_len(sc, 628, repartition = 628) %>%
  spark_apply(render_frame, context = scene, columns = list()) %>%
  collect()
# Copy the rendered frames from HDFS to local disk, then stitch them into
# a video. The ffmpeg input pattern must match the zero-padded names the
# renderer wrote with sprintf("%04d.png", ...) — plain %d would look for
# 1.png instead of 0001.png and find nothing.
hadoop fs -get rendering/
ffmpeg -s 1920x1080 -i rendering/%04d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p rendering.mp4
Nice. The rendering speed and quality are of course not impressive compared to what an NVIDIA RTX GPU can do, but I think ray tracing is a good proof of concept / "hello world" for a cluster.