Purpose: sharing of smaller datasets - whole cubes or portions of larger cubes.
Package can be a directory or a ZIP file with the following structure:
| # Drill down by two dimensions | |
| result = browser.aggregate(cell, drilldown=["year", "item"]) | |
| # Get presenter | |
| rickshaw = cubes.create_presenter("rickshaw_multi_series") | |
| # Create Rickshaw series. | |
| # You can use color_map to map series keys to colors or you can use color_palette to | |
| # cycle through colors |
| { | |
| "cubes":[ | |
| { | |
| "dimensions": [ | |
| "start_date", | |
| "middle_date", | |
| "end_date" | |
| ], | |
| "joins": [ | |
| {"master":"start_date_id", "detail":"dim_date.id", "alias": "dim_start_date"}, |
| import brewery.stores as stores | |
| filename = "data.csv" | |
| source = stores.CSVDataSource(filename, encoding="latin1", infer_fields=True, | |
| read_header=True) | |
| target = stores.SQLDataTarget(url="sqlite:///data.sqlite", | |
| table="data", create=True, fields=source.fields, | |
| replace=True) |
| from sqlalchemy.sql.expression import Executable, ClauseElement | |
| from sqlalchemy.ext.compiler import compiles | |
class CreateTableAsSelect(Executable, ClauseElement):
    """Custom SQLAlchemy construct for CREATE TABLE ... AS SELECT.

    Holds the target table name and the SELECT statement that defines
    its contents; the actual SQL text is produced by the compiler
    extension registered with ``@compiles`` for this class.
    """

    def __init__(self, table, select):
        # Target table to create.
        self.table = table
        # SELECT statement whose result populates the table.
        self.select = select
| @compiles(CreateTableAsSelect) | |
| def visit_create_table_as_select(element, compiler, **kw): |
| ( | |
| { | |
| on = 1; | |
| replace = "(c)"; | |
| with = "\U00a9"; | |
| }, | |
| { | |
| on = 0; | |
| replace = "(r)"; | |
| with = "\U00ae"; |
| from collections import Counter | |
| from itertools import combinations | |
| def distinct_items(transactions, support=None): | |
| """Returns counted set of distinct items in transactions""" | |
| counter = Counter() | |
| for trans in transactions: | |
| counter.update(trans) | |
| if support is not None: |
| source CUBE_NAME | |
| { | |
| type = xmlpipe2 | |
| xmlpipe_command = slicer-indexer --engine sphinx slicer.ini CUBE_NAME | |
| } | |
| index CUBE_NAME | |
| { | |
| source = CUBE_NAME | |
| path = index/CUBE_NAME |
| k = OperationKernel() | |
| @k.operation("sql", "sql") | |
def append(l, r):
    """Combine two SQL objects into one result stream.

    When both operands live on the same engine they can be appended
    with a single UNION ALL; otherwise ask the kernel to retry the
    operation with the ("sql", "rows") representation pair.
    """
    # Guard clause: cross-engine append cannot be done in SQL alone.
    if l.engine != r.engine:
        raise RetryOperation(["sql", "rows"])
    return "UNION ALL statement"
| @k.operation("sql", "rows") |
| from brewery import data_object, FieldList, open_store | |
| from brewery import kernel as k | |
| import brewery.base.iterator | |
| import brewery.backends.sql | |
| FACT_DATA = [ | |
| [0, 1, 100.0], | |
| [1, 1, 200.0], | |
| [2, 2, 150.0], | |
| [3, 2, 250.0], |