Test model (dimensions and mapping):
ostool cfg.ini csvimport --model=model.json --dry-run --raise-on-error --max-lines=1 data.csv
Dry run:
ostool cfg.ini csvimport --model=model.json --dry-run data.csv
jQuery(function ($) { | |
var elem = $('#text-to-annotate'); | |
var account_id = '39fc339cf058bd22176771b3e3036609'; | |
var annotator_store = '/annostore' + '/api'; | |
var userid = ''; | |
var options = {}; | |
options.permissions = {}; | |
options.permissions.user = { | |
'name': '194.104.70.73' | |
}; |
import ckanclient | |
base_location = 'http://localhost:5000/api' | |
api_key = 'tester' | |
client = ckanclient.CkanClient(base_location, api_key) | |
pkg = dict( | |
name='test-ckanext-datapreview', | |
title='Test CKANext Data Preview', | |
resources=[ | |
dict( |
# Get user stories out of a spreadsheet and into mediawiki syntax
#
# Designed for this set here
# https://docs.google.com/spreadsheet/ccc?key=0Aon3JiuouxLUdFhMVEVFWXhxWXRKaU04LUF2ZTVsTVE&hl=en_GB#gid=0
# Pass it a link to the gdocs csv as an argument on the command line
# E.g.
#
# python userstories.py "https://docs.google.com/spreadsheet/pub?hl=en_GB&hl=en_GB&key=0Aon3JiuouxLUdFhMVEVFWXhxWXRKaU04LUF2ZTVsTVE&single=true&gid=0&output=csv"
#
import urllib |
Test model (dimensions and mapping):
ostool cfg.ini csvimport --model=model.json --dry-run --raise-on-error --max-lines=1 data.csv
Dry run:
ostool cfg.ini csvimport --model=model.json --dry-run data.csv
'''This is a test using the real setup with elasticsearch. | |
It requires you to run nginx on port 8088 with config as per | |
https://github.com/okfn/elastic-proxy/blob/master/elasticproxy plus, | |
obviously, elasticsearch on port 9200. | |
''' | |
import json | |
import paste.fixture | |
import paste.proxy |
'''Simple script for creating demo data in CKAN | |
Requires existence of a tester user. You can create this by doing:: | |
paster create-test-data user | |
''' | |
import ckanclient | |
base_location = 'http://localhost:5000/api' | |
api_key = 'tester' |
#!/usr/bin/env python | |
import urlparse | |
import mimetypes | |
import os | |
import ConfigParser | |
import urllib2 | |
import json | |
import csv | |
import time |
import csv | |
import json | |
import geojson | |
fp = 'data/US_Rendition_FOIA.csv' | |
fpout = 'data/US_Rendition_FOIA.geojson.csv' | |
jsonout = 'data/US_Rendition_FOIA.geojson.json' | |
jsondata = [] | |
def convert(): |
<?xml version="1.0" encoding="utf-8"?> | |
<Root xmlns:wb="http://www.worldbank.org"> | |
<data> | |
<record> | |
<field name="Country or Area" key="ARB">Arab World</field> | |
<field name="Item" key="SP.POP.TOTL">Population, total</field> | |
<field name="Year">1960</field> | |
<field name="Value">96388069</field> | |
</record> | |
<record> |
// Parse a summary to extract title, tags, location and start and end | |
parseNoteSummary = function(text) { | |
var result = { | |
title: '', | |
tags: [] | |
}; | |
var ourtext = text; | |
regex = / #([\w-\.]+)/; | |
while(ourtext.search(regex)!=-1) { | |
var out = ourtext.match(regex)[1]; |