Skip to content

Instantly share code, notes, and snippets.

@sklam
Created August 23, 2013 15:37
Show Gist options
  • Save sklam/6320714 to your computer and use it in GitHub Desktop.
Save sklam/6320714 to your computer and use it in GitHub Desktop.
numbapro async API draft & notes
import importlib

import numpy as np

# 'async' became a reserved keyword in Python 3.7 (PEP 492), so the module
# can no longer be imported with a plain `from numbapro import async`;
# load it dynamically instead and refer to it as `nb_async` below.
# NOTE(review): numbapro is the (discontinued) proprietary package this
# draft targets — this script is an API sketch, not runnable code.
nb_async = importlib.import_module("numbapro.async")

# Create a plan for an asynchronous computation task.
plan = nb_async.Plan()

# Create the data sources as arrays.
# EHT: using a pre-allocated array as source.
a = plan.source(ndim=1)  # fixed ndim but not shape
b = plan.source(ndim=1)

# Add compute tasks: map / reduce / stencil.
c = plan.map(lambda x, y: x + y,
             args=(a, b))
d = plan.reduce(lambda x, y: x + y,
                args=(a,))
e = plan.stencil("x y _",  # a format string to describe the kernel window
                 lambda x, y: x + y,
                 args=(b,))
plan.custom()
# A 2d window would be:
# '''
# a b c
# d e f
# g h i
# '''
# Higher dimension windows will need another way to describe.
# EHT: should be able to remove the sinks and use c, d, e directly in dispatch.

# Add data sinks.
f = plan.sink(c)  # output array size can be inferred
g = plan.sink(d)
h = plan.sink(e)

# Realizing the plan will compile it to a "work" object.
work = plan.realize(target='gpu')

# The work object can be dispatched to run asynchronously.
# Here we allocate the buffers for the data sources/sinks.
# NOTE(review): the original draft wrote `np.array(shape=10)`, which is not
# a valid NumPy call — `np.array` takes an array-like, not a `shape` kwarg.
# `np.empty` allocates an uninitialized buffer of the requested shape,
# which matches the stated intent ("allocate the buffers").
future = work.dispatch(a=np.empty(shape=10),
                       b=np.empty(shape=10),
                       f=np.empty(shape=10),
                       g=np.empty(shape=1),
                       h=np.empty(shape=10))
# Wait for the dispatched work to complete.
future.wait()

#######################
# With Async IO
io = nb_async.IO(work)
# Sources.
# NOTE(review): read_chunk_from_file / write_chunk_to_file are undefined
# placeholders in this sketch — presumably chunk-reader/writer factories.
io.connect(a, read_chunk_from_file())
io.connect(b, read_chunk_from_file())
# Sinks.
io.connect(f, write_chunk_to_file())
io.connect(g, write_chunk_to_file())
io.connect(h, write_chunk_to_file())
# Start the IO asynchronously;
# run until the source files are emptied.
io.start()  # may spawn multiple workers?
# Wait until the task is done.
io.wait()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment