Get the GDAL development libraries:
$ sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable
$ sudo apt-get update
$ sudo apt-get install libgdal-dev
Create and activate a virtual environment:
// Use Gists to store code you would like to remember later on
console.log(window); // log the "window" object to the console (browser-only; `window` is undefined in Node)
# Open a DBI connection to a SQLite database file.
#
# Args:
#   dbpath: path to the SQLite database file on disk.
#
# Returns:
#   A live DBI connection; the caller is responsible for closing it
#   with dbDisconnect() when finished.
get.conn <- function(dbpath) {
  sqlite <- dbDriver("SQLite")
  conn <- dbConnect(sqlite, dbpath)
  return(conn)
}
from requests import post | |
import json | |
class connection(object):
    """Container for the settings used to talk to a remote OpenAgua service.

    The constructor only stores its arguments; no network I/O happens here.
    """

    def __init__(self, url=None, app_name=None, session_id=None):
        self.url = url  # base URL of the service endpoint
        # `or` means an empty-string app_name also falls back to the
        # default — NOTE(review): presumably intended; confirm with callers.
        self.app_name = app_name or 'OpenAgua'
        self.session_id = session_id  # existing session token, if any
const adjacency = (network) => { | |
const n = network.nodes.length; | |
// initialize the array | |
let adj = [new Array(n+1)]; | |
// populate the row headers (sources) & column headers (destinations) with node names | |
let i = 0; | |
let lookup = {}; |
# define the dataset value | |
value = json.dumps(dataframe) | |
# create the dataset | |
dataset = { | |
'type': attr['dtype'], # e.g., "timeseries'; attr is from Hydra database | |
'name': '{} - {} - {} [{}]'.format(network["name"], resource["name"], attr['name'], scenario_name), | |
'unit': attr['unit'], # e.g., "bbl" | |
'dimension': attr['dim'], # e.g., "Volume" | |
'value': value # from above |
process = function(files) { | |
var file = files[0]; | |
var ext = file.name.split('.').pop(); | |
// check if extension is kmz, etc. | |
const reader = new FileReader(); | |
switch (file.type) { | |
case 'application/zip': // zipped shapefile, for example |
if template_id: | |
types = [rt for rt in link.types if rt.template_id == template_id] | |
else: | |
types = link.types[-1] | |
nl = dict( | |
description=link.description, | |
layout=copy.deepcopy(link.layout), | |
node_1_id=n1.id, | |
node_2_id=n2.id, | |
types=types, |
import json
import csv
# codecs.open with 'utf-8-sig' tolerates (and strips) a UTF-8 BOM, which
# the upstream saving routine apparently writes - need to fix upstream.
import codecs

# load the network definition from the exported JSON file
jsonfile = 'MCMA.json'
network = json.load(codecs.open(jsonfile, 'r', 'utf-8-sig'))
# create the array of nodes
# NOTE: this fragment is truncated in the source; node extraction
# continues beyond what is visible here.
col_names = ['id', 'name', 'x', 'y']
import pandas as pd
from math import ceil

# Re-process each region's statvar output into a 10-day file.
# NOTE: this fragment is truncated in the source; the loop body
# continues beyond what is visible here.
for region in ['CherryEleanor', 'DonPedro', 'Hetchy']:
    # define input and output paths
    inpath = 'baserun/{}/statvar.csv'.format(region)
    outpath = 'baserun/{}/10day.csv'.format(region)
    # read in the original csv, using the first column as a DatetimeIndex
    df = pd.read_csv(inpath, index_col=0, parse_dates=True)