@georgf
Created June 6, 2016 15:31
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import ujson as json\n",
"import datetime as dt\n",
"import os.path\n",
"import boto3 #S3\n",
"import botocore\n",
"import calendar\n",
"\n",
"from os import listdir\n",
"from moztelemetry import get_pings, get_pings_properties, get_one_ping_per_client\n",
"from pprint import pprint\n",
"\n",
"%pylab inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's pick the report we want to generate here."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def snap_to_past_sunday(date):\n",
" \"\"\" Get the closest, previous Sunday since date. \"\"\"\n",
" # We need the weeks starting from \"Sunday\", not from \"Monday\",\n",
" # so account for that using |(today.weekday() + 1) % 7)|\n",
" return date - datetime.timedelta(days=((date.weekday() + 1) % 7))\n",
"\n",
"def snap_to_beginning_of_month(date):\n",
" \"\"\" Get the date for the first day of this month. \"\"\"\n",
" return date.replace(day=1)\n",
"\n",
"def get_last_week_range():\n",
" today = dt.date.today()\n",
" # Get the first day of the past complete week. \n",
" start_of_week = snap_to_past_sunday(today) - datetime.timedelta(weeks=1)\n",
" end_of_week = start_of_week + datetime.timedelta(days=6)\n",
" return (start_of_week, end_of_week)\n",
"\n",
"def get_last_month_range():\n",
" today = dt.date.today()\n",
" # Get the last day for the previous month.\n",
" end_of_last_month = snap_to_beginning_of_month(today) - datetime.timedelta(days=1)\n",
" start_of_last_month = snap_to_beginning_of_month(end_of_last_month)\n",
" return (start_of_last_month, end_of_last_month)"
]
},
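{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick, illustrative sanity check of the date helpers above (not part of the original pipeline; the sample date below is made up)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustration only: exercise the snapping helpers on a fixed, hypothetical date.\n",
"sample = dt.date(2016, 6, 6)  # a Monday\n",
"print snap_to_past_sunday(sample)         # -> 2016-06-05, the preceding Sunday\n",
"print snap_to_beginning_of_month(sample)  # -> 2016-06-01\n",
"print get_last_week_range()               # the last complete Sunday-to-Saturday week\n",
"print get_last_month_range()              # the last complete calendar month"
]
},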
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Fetch the core pings"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"First thing, pick a submission range. Either last week or last month."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def fetch_deduped_pings(sub_range):\n",
" core_pings = get_pings(sc,\n",
" app=\"Fennec\",\n",
" doc_type=\"core\",\n",
" source_version=\"*\",\n",
" submission_date=sub_range,\n",
" fraction=fraction)\n",
"\n",
" # We don't need the whole ping. Just get the props we want.\n",
" subset = get_pings_properties(core_pings, [\"clientId\",\n",
" \"osversion\",\n",
" \"os\",\n",
" \"profileDate\",\n",
" \"meta/submissionDate\",\n",
" \"meta/geoCountry\",\n",
" \"meta/appUpdateChannel\",\n",
" \"meta/Timestamp\",\n",
" \"meta/documentId\"\n",
" ])\n",
"\n",
" # iOS also submits \"core\" pings, filter for Android only.\n",
" android = subset.filter(lambda p: p.get(\"os\", \"\") == \"Android\")\n",
"\n",
" # We can (sadly) have duplicated pings. Apply deduping.\n",
" return android.map(lambda p: (p[\"meta/documentId\"], p))\\\n",
" .reduceByKey(lambda a, b: a)\\\n",
" .map(lambda t: t[1])"
]
},
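{
"cell_type": "markdown",
"metadata": {},
"source": [
"The dedupe step above keeps exactly one ping per `meta/documentId`. A minimal sketch of the same pattern on synthetic data (illustration only; it assumes the notebook's SparkContext `sc` and uses made-up pings):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustration only: the documentId-based dedupe pattern from fetch_deduped_pings,\n",
"# applied to a tiny synthetic RDD. The pings below are made up.\n",
"fake_pings = sc.parallelize([\n",
"    {\"meta/documentId\": \"a\", \"clientId\": \"c1\"},\n",
"    {\"meta/documentId\": \"a\", \"clientId\": \"c1\"},  # duplicate submission\n",
"    {\"meta/documentId\": \"b\", \"clientId\": \"c2\"},\n",
"])\n",
"deduped_demo = fake_pings.map(lambda p: (p[\"meta/documentId\"], p))\\\n",
"                         .reduceByKey(lambda a, b: a)\\\n",
"                         .map(lambda t: t[1])\n",
"print deduped_demo.count()  # -> 2"
]
},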
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Aggregate the data\n",
"\n",
"Fields documentation:\n",
"* *os_version* - core pings contain the API level, we need to output the codename+version\n",
"* *geo* - the country code from the country originating the pings. We're only interested in some countries, other countries are grouped as \"Other\"\n",
"* *channel* - the product channel\n",
"* *date* - the first day of the aggregated week/month\n",
"* *actives* - the number of clients that were active that day. It checked 'org.mozilla.appSessions' before, can we simply count the number of core pings submitted on that day (dedupe by client id!)?\n",
"* *new_records* - profile creation date == submission date\n",
" * This could not hold due to broken clocks, temporary loss of network, ...\n",
"* *d1* - how many clients were active at least once the day after the profile creation date?\n",
"* *d7* - how many clients were active at least once on the 7th day following profile creation?\n",
"* *d30* - how many clients were active at least once on the 30th day following profile creation?\n",
"* *hours* - session duration in hours. Currently 0.\n",
"* *google, yahoo, bing, other* - Currently 0\n",
"\n",
"The d1/d7/d30 metrics stricly imply that the user was seen on the day, not the days before (e.g. d7 means the user was seen on profile creation date + 7, not within the [profile creation date, profile creation date + 7] window)."
]
},
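{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the strict day-based semantics concrete, here is the arithmetic on a hypothetical profile (illustration only; the dates are made up)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustration only: the retention arithmetic described above, with made-up dates.\n",
"# profileDate in core pings is expressed in days since the Unix epoch.\n",
"profile_epoch = (dt.datetime(2016, 5, 1) - dt.datetime(1970, 1, 1)).days\n",
"sub_epoch = (dt.datetime(2016, 5, 8) - dt.datetime(1970, 1, 1)).days\n",
"print sub_epoch - profile_epoch  # -> 7, so this client counts towards d7\n",
"# A ping submitted on 2016-05-07 or 2016-05-09 would not count towards d7 at all,\n",
"# and one submitted on 2016-05-01 itself would count towards new_records instead."
]
},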
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"COUNTRIES_OF_INTEREST = set(['US','CA','BR','MX','FR','ES','IT','PL','TR','RU','DE','IN','ID','CN','JP','GB'])\n",
"\n",
"def get_country(original_country):\n",
" return original_country if original_country in COUNTRIES_OF_INTEREST else 'Other'\n",
"\n",
"def android_api_level_to_version(api_level):\n",
" \"\"\"\n",
" The core ping stores the API level, but we need to display the OS version/codename.\n",
" We can map API Level -> Codename, the related information is available there:\n",
" https://source.android.com/source/build-numbers.html\n",
" \"\"\"\n",
" API_MAP = {\n",
" '8': 'Froyo (2.2 - 2.2.3)',\n",
" '9': 'Gingerbread (2.3 - 2.3.7)',\n",
" '10': 'Gingerbread (2.3 - 2.3.7)',\n",
" '11': 'Honeycomb (3.0 - 3.2.6)',\n",
" '12': 'Honeycomb (3.0 - 3.2.6)',\n",
" '13': 'Honeycomb (3.0 - 3.2.6)',\n",
" '14': 'Ice Cream Sandwich (4.0 - 4.0.4)',\n",
" '15': 'Ice Cream Sandwich (4.0 - 4.0.4)',\n",
" '16': 'Jelly Bean (4.1 - 4.3.x)',\n",
" '17': 'Jelly Bean (4.1 - 4.3.x)',\n",
" '18': 'Jelly Bean (4.1 - 4.3.x)',\n",
" '19': 'KitKat (4.4 - 4.4.4)',\n",
" '21': 'Lollipop (5.0 - 5.1)',\n",
" '22': 'Lollipop (5.0 - 5.1)',\n",
" '23': 'Marshmallow (6.0)',\n",
" }\n",
" return API_MAP.get(api_level, 'Other')"
]
},
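{
"cell_type": "markdown",
"metadata": {},
"source": [
"A couple of illustrative lookups: API levels not present in the map (e.g. anything newer than 23) fall back to 'Other'."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustration only: sample lookups in the API level -> codename map.\n",
"print android_api_level_to_version('19')  # -> 'KitKat (4.4 - 4.4.4)'\n",
"print android_api_level_to_version('23')  # -> 'Marshmallow (6.0)'\n",
"print android_api_level_to_version('42')  # unknown level -> 'Other'"
]
},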
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def parse_to_unix_days(s):\n",
" \"\"\" Converts YYYYMMDD to days since unix epoch \"\"\"\n",
" return (dt.datetime.strptime(s, \"%Y%m%d\") - dt.datetime(1970,1,1)).days\n",
"\n",
"def get_file_name(report_type, suffix=\"\"):\n",
" return \"fennec-v4-\" + report_type + suffix + \".csv\"\n",
"\n",
"def safe_increment(p, key):\n",
" \"\"\" Safely increments p[key]. \"\"\"\n",
" p[key] = p.get(key, 0) + 1\n",
"\n",
"def is_new_profile(submission_epoch, profile_epoch):\n",
" \"\"\"\n",
" Determines if this is a new profile by checking if the submission date\n",
" equals the profile creation date.\n",
" \"\"\"\n",
" return submission_epoch == profile_epoch\n",
"\n",
"def get_key(p, segments, report_type):\n",
" \"\"\" Build a key-tuple with the dimensions we want to aggregate. \"\"\"\n",
" dims = []\n",
" \n",
" # Translate the API version to a name.\n",
" if 'osversion' in segments:\n",
" dims.append(android_api_level_to_version(p.get('osversion')))\n",
" else:\n",
" dims.append('all')\n",
"\n",
" # Only get some of the countries.\n",
" if 'meta/geoCountry' in segments:\n",
" dims.append(get_country(p.get('meta/geoCountry')))\n",
" else:\n",
" dims.append('all')\n",
"\n",
" # Only get some of the countries.\n",
" if 'meta/appUpdateChannel' in segments:\n",
" dims.append(p.get('meta/appUpdateChannel'))\n",
" else:\n",
" dims.append('all')\n",
" \n",
" # Append the date, at last. In the weekly mode, that's the first day of the\n",
" # submission week. In the monthly mode, that's the first day of the month.\n",
" submission_date = dt.datetime.strptime(p.get(\"meta/submissionDate\"), \"%Y%m%d\")\n",
" date_string = \"\"\n",
" if report_type == \"monthly\":\n",
" date_string = snap_to_beginning_of_month(submission_date).strftime(\"%Y%m01\")\n",
" else:\n",
" date_string = snap_to_past_sunday(submission_date).strftime(\"%Y%m%d\")\n",
"\n",
" dims.append(date_string)\n",
" \n",
" return tuple(dims)\n",
"\n",
"def run_query(pings, segments, report_type):\n",
" \"\"\"\n",
" Aggregate the pings over the dimensions in \"segments\". We start by generating a key for each\n",
" ping by chaining the values of the dimensions of interest. If we don't care about a particular\n",
" dimension, its value is set to \"all\".\n",
" All the pings belonging to a key are aggregated together.\n",
" \"\"\"\n",
"\n",
" # Segment the data by indexing them by dimensions.\n",
" segmented_pings = pings.map(lambda p: (get_key(p, segments, report_type), p))\n",
" \n",
" # We require the profile age to measure the retention. Filter out those pings that don't have it.\n",
" filtered = segmented_pings.filter(lambda p: p[1].get(\"profileDate\", None) != None)\n",
" \n",
" # print \"Got {} pings after filtering invalid profileDate(s)\".format(filtered.count())\n",
" \n",
" # For metrics like d1, d7, d30, and new_records we need only one core ping per client, per day.\n",
" # Generate a new RDD containing only one ping per client, for each day, within the segment:\n",
" # Step 1 - Append the client id and submission date to the index key\n",
" # Step 2 - ReduceByKey so that we get only one ping per day per client\n",
" # Step 3 - Strip off the client id/submission date from the index key\n",
" one_per_day = filtered.map(lambda p: ((p[0], p[1].get(\"clientId\"), p[1].get(\"meta/submissionDate\")), p[1]))\\\n",
" .reduceByKey(lambda a, b: a)\\\n",
" .map(lambda p: (p[0][0], p[1]))\n",
" \n",
" # Compute the aggregated counts.\n",
" def retention_seq(acc, v):\n",
" if not acc:\n",
" acc = {}\n",
" \n",
" # **** IMPORTANT NOTICE ****\n",
" #\n",
" # Please note that retention *WILL* be broken by broken client clocks. This is most\n",
" # certainly affected by clock skew. Once we'll have clock skew data from the clients\n",
" # we might consider revisiting this to adjust the profileDate accordingly.\n",
" # Another option would be to fetch, in some way, the first ping ever submitted by the client,\n",
" # but that would not be practical due to the data retention policies (luckily).\n",
"\n",
" submission_epoch = parse_to_unix_days(v['meta/submissionDate'])\n",
" \n",
" # Check if this ping is on a new profile. If so, increments \"new_records\".\n",
" if is_new_profile(submission_epoch, v['profileDate']):\n",
" safe_increment(acc, 'new_records')\n",
" \n",
" # Evaluate the d1, d7, d30 retention metrics. First, get the delta between\n",
" # the submission date and the profile creation date.\n",
" days_after_creation = submission_epoch - v['profileDate']\n",
"\n",
" # Is the user still engaged after 1 day (d1)?\n",
" if days_after_creation == 1:\n",
" safe_increment(acc, 'd1')\n",
" # And after 7 days (d7)?\n",
" elif days_after_creation == 7:\n",
" safe_increment(acc, 'd7')\n",
" # And after 30 days (d30)?\n",
" elif days_after_creation == 30:\n",
" safe_increment(acc, 'd30')\n",
"\n",
" return acc\n",
"\n",
" def cmb(v1, v2):\n",
" # Combine the counts from the two partial dictionaries. Hacky?\n",
" return { k: v1.get(k, 0) + v2.get(k, 0) for k in set(v1) | set(v2) }\n",
"\n",
" retention_defaults = {\n",
" 'new_records': 0,\n",
" 'actives': 0,\n",
" 'd1': 0,\n",
" 'd7': 0,\n",
" 'd30': 0,\n",
" }\n",
" aggregated_retention = one_per_day.aggregateByKey(retention_defaults, retention_seq, cmb)\n",
"\n",
" # For each segment, count how many active clients:\n",
" def count_actives(acc, v):\n",
" acc[\"actives\"] = acc[\"actives\"] + 1\n",
" return acc\n",
"\n",
" # We aggregate the active user count in an object to ease joining\n",
" actives_per_segment = segmented_pings.map(lambda r: ((r[0], r[1].get(\"clientId\")), 1))\\\n",
" .reduceByKey(lambda x,y: x)\\\n",
" .map(lambda r: (r[0][0], 1))\\\n",
" .aggregateByKey({\"actives\":0}, count_actives, cmb)\n",
" \n",
" # Join the RDDs.\n",
" merged = aggregated_retention.join(actives_per_segment).mapValues(lambda r: cmb(r[0], r[1]))\n",
"\n",
" return merged"
]
},
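{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, this is the shape of the aggregation key produced by `get_key` for a hypothetical ping (the values below are made up, not real data)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustration only: a made-up ping and the keys it would be aggregated under.\n",
"sample_ping = {\n",
"    \"osversion\": \"19\",\n",
"    \"meta/geoCountry\": \"US\",\n",
"    \"meta/appUpdateChannel\": \"release\",\n",
"    \"meta/submissionDate\": \"20160530\",\n",
"}\n",
"print get_key(sample_ping, ['osversion', 'meta/geoCountry', 'meta/appUpdateChannel'], \"weekly\")\n",
"# -> ('KitKat (4.4 - 4.4.4)', 'US', 'release', '20160529')\n",
"print get_key(sample_ping, ['meta/geoCountry'], \"monthly\")\n",
"# -> ('all', 'US', 'all', '20160501')"
]
},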
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To build a single CSV file, we execute a series of queries and then serialize the output."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def run_queries(report_type, start_date=None, end_date=None):\n",
" \"\"\"\n",
" This function has 3 operating modes:\n",
" 1. \"weekly\", which aggregates the data from the last full week and appends the results to the weekly CSV;\n",
" 2. \"monthly\", aggregates the last full month and appends the results to the monthly CSV;\n",
" 3. \"backfill\", given a start and end date, performs weekly or monthly aggregation over that period and\n",
" appends to the CSV file.\n",
" \"\"\"\n",
" # Each entry represents a different query over a set of dimensions of interest.\n",
" QUERIES = [\n",
" ['osversion', 'meta/geoCountry', 'meta/appUpdateChannel'],\n",
" ['osversion', 'meta/appUpdateChannel'],\n",
" ['osversion', 'meta/geoCountry'],\n",
" ['meta/geoCountry', 'meta/appUpdateChannel'],\n",
" ['osversion'],\n",
" ['meta/geoCountry'],\n",
" ['meta/appUpdateChannel'],\n",
" []\n",
" ]\n",
"\n",
" # Check start_date and end_date for validity. If invalid, set them for last week/month\n",
" date_range = get_last_month_range() if report_type is \"monthly\" else get_last_week_range()\n",
" if start_date != None and end_date != None:\n",
" sd = snap_to_past_sunday(start_date) if report_type is \"weekly\"\\\n",
" else snap_to_beginning_of_month(start_date)\n",
" date_range = (sd, end_date)\n",
" \n",
" delta = date_range[1] - date_range[0]\n",
" print \"Running summary analysis type {} over {} days for {} to {}\"\\\n",
" .format(report_type, delta.days, *date_range)\n",
"\n",
" # Split the submission period in chunks, so we don't run out of resources while aggregating.\n",
" date_chunks = []\n",
" chunk_start = date_range[0]\n",
"\n",
" print \"Breaking date range into chunks:\"\n",
" while chunk_start < date_range[1]:\n",
" # Compute the end of this time chunk.\n",
" chunk_end = None\n",
" if report_type == \"monthly\":\n",
" chunk_end = chunk_start.replace(day=calendar.monthrange(chunk_start.year, chunk_start.month)[1])\n",
" else:\n",
" chunk_end = chunk_start + dt.timedelta(days=6)\n",
" \n",
" print \"* {} to {}\".format(chunk_start.strftime(\"%Y%m%d\"), chunk_end.strftime(\"%Y%m%d\"))\n",
" date_chunks.append((chunk_start, chunk_end))\n",
" \n",
" # Move on to the next chunk, just add one day to either last month or week.\n",
" chunk_start = chunk_end + dt.timedelta(days=1)\n",
"\n",
" # The results will be in a dict of the form {(os, geo, channel, date): dict, ...}.\n",
" results = {}\n",
" \n",
" for chunk_start,chunk_end in date_chunks:\n",
" # Fetch the pings we need.\n",
" submissions_range = (chunk_start.strftime(\"%Y%m%d\"), chunk_end.strftime(\"%Y%m%d\"))\n",
" print \"\\nFetching pings for {} to {}\".format(*submissions_range)\n",
" deduped = fetch_deduped_pings(submissions_range)\n",
" #print \"Fetched {} pings\".format(deduped.count())\n",
"\n",
" chunk_results = {}\n",
" for query in QUERIES:\n",
" print \" * Running query over dimensions: %s\" % \", \".join(query) \n",
" query_result = dict(run_query(deduped, query, report_type).collect())\n",
" #pprint(query_result)\n",
" # Append this RDD to the results for this chunk.\n",
" chunk_results.update(query_result)\n",
"\n",
" # Serialize intermediate results to file, so we don't start from scratch if the batch fails.\n",
" # We append the week/month at the end of the file name.\n",
" serialize_results(chunk_results, report_type, submissions_range[0])\n",
" \n",
" # Append this chunk results to the whole RDD. We assume the keys DO NOT collide.\n",
" results.update(chunk_results)\n",
" \n",
" return results"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### CSV and S3 utility functions."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Some utility functions to read/write from the S3 store."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"S3_ANALYSIS_BUCKET = \"net-mozaws-prod-us-west-2-pipeline-analysis\"\n",
"S3_ANALYSIS_BASE_PATH = \"aplacitelli/\"\n",
"S3_DASHBOARD_BUCKET = \"net-mozaws-prod-metrics-data\"\n",
"\n",
"def fetch_previous_state(report_type):\n",
" \"\"\"\n",
" To prevent ACL issues and prevent files from disappearing from the dashboard bucket,\n",
" we fetch and stage the canonical state from the analysis bucket, only sending the updated\n",
" state to the dashboard bucket.\n",
" \"\"\"\n",
" file_name = get_file_name(report_type)\n",
"\n",
" # Fetch the CSV\n",
" client = boto3.client('s3', 'us-west-2')\n",
" transfer = boto3.s3.transfer.S3Transfer(client)\n",
" key_path = \"{}{}\".format(S3_ANALYSIS_BASE_PATH, 'fennec-dashboard/' + file_name)\n",
" \n",
" try:\n",
" transfer.download_file(S3_ANALYSIS_BUCKET, key_path, file_name)\n",
" except botocore.exceptions.ClientError as e:\n",
" # If the file wasn't there, that's ok. Otherwise, abort!\n",
" if e.response['Error']['Code'] != \"404\":\n",
" raise e\n",
" else:\n",
" print \"Did not find an existing file at '{}'\".format(key_path)\n",
"\n",
"def store_new_state(report_type):\n",
" \"\"\"\n",
" To prevent ACL issues and prevent files from disappearing from the dashboard bucket,\n",
" we fetch and stage the canonical state from the analysis bucket, only sending the updated\n",
" state to the dashboard bucket.\n",
" \"\"\"\n",
" file_name = get_file_name(report_type)\n",
"\n",
" client = boto3.client('s3', 'us-west-2')\n",
" transfer = boto3.s3.transfer.S3Transfer(client)\n",
" \n",
" # Update the state in the analysis bucket.\n",
" analysis_key_path = \"{}{}\".format(S3_ANALYSIS_BASE_PATH, 'fennec-dashboard/' + file_name)\n",
" transfer.upload_file(file_name, S3_ANALYSIS_BUCKET, analysis_key_path)\n",
" \n",
" # Update the state in the analysis bucket.\n",
" transfer.upload_file(file_name, S3_DASHBOARD_BUCKET, 'fennec-dashboard/' + file_name,\n",
" extra_args={'ACL': 'bucket-owner-full-control'})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Utility functions to map our data to CSV and then save it to file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def to_csv(r):\n",
" # The key itself is a tuple containing the following data:\n",
" # (os, geo, channel, date)\n",
" data_from_key = r[0]\n",
" formatted_date = dt.datetime.strptime(data_from_key[3], \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n",
" return \",\".join([\n",
" data_from_key[0], # os\n",
" data_from_key[1], # geo\n",
" data_from_key[2], # channel\n",
" formatted_date,\n",
" str(r[1]['actives']),\n",
" str(r[1]['new_records']),\n",
" str(r[1]['d1']),\n",
" str(r[1]['d7']),\n",
" str(r[1]['d30']),\n",
" \"0\", #str(r[1]['hours']),\n",
" \"0\", #str(r[1]['google']),\n",
" \"0\", #str(r[1]['yahoo']),\n",
" \"0\", #str(r[1]['bing']),\n",
" \"0\", #str(r[1]['other']),\n",
" ])\n",
" \n",
"def serialize_results(results, report_type, file_suffix=\"\"):\n",
" file_name = get_file_name(report_type, file_suffix)\n",
" skip_csv_header = False\n",
" \n",
" # If the file is already there, append the new data, but don't print the header again.\n",
" if os.path.exists(file_name):\n",
" print(\"Omitting the CSV header\")\n",
" skip_csv_header = True \n",
"\n",
" csv_lines = map(to_csv, results.iteritems())\n",
" print(\"Writing %i new entries\" % len(csv_lines))\n",
" with open(file_name, \"a\") as csv_file:\n",
" # The file didn't exist before this call, print the header.\n",
" if not skip_csv_header:\n",
" header = \"os_version,geo,channel,date,actives,new_records,d1,d7,d30,hours,google,yahoo,bing,other\"\n",
" csv_file.write(header.encode('utf8') + \"\\n\")\n",
" \n",
" # Finally append the data lines.\n",
" for r in csv_lines:\n",
" #print \" ... writing csv line: \", r\n",
" csv_file.write(r.encode('utf8') + \"\\n\")\n",
" "
]
},
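{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick check, this is the CSV line produced for a hypothetical aggregated entry (all numbers below are made up)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustration only: map one made-up aggregated entry to its CSV line.\n",
"sample_entry = (('KitKat (4.4 - 4.4.4)', 'US', 'release', '20160529'),\n",
"                {'actives': 10, 'new_records': 3, 'd1': 2, 'd7': 1, 'd30': 0})\n",
"print to_csv(sample_entry)\n",
"# -> KitKat (4.4 - 4.4.4),US,release,2016-05-29,10,3,2,1,0,0,0,0,0,0"
]
},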
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Execute our script"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def get_mode_from_filename():\n",
" \"\"\"\n",
" This is hackery-hacky, until bug 1258685 lands: if there is a file named 'summarize_csv_monthly.ipynb',\n",
" we select the monthly operating mode. Otherwise, we stay with weekly.\n",
" \"\"\"\n",
" return \"monthly\" if os.path.exists('summarize_csv_monthly.ipynb') else \"weekly\"\n",
"\n",
"operating_mode = get_mode_from_filename() # either \"weekly\" or \"monthly\"\n",
"fraction = 1.0\n",
"\n",
"if operating_mode not in [\"weekly\", \"monthly\"]:\n",
" raise ValueError(\"Unknown operating mode: %s \" % operating_mode)\n",
"\n",
"start_date = None # Only use this when backfilling, e.g. dt.datetime(2016,3,19)\n",
"end_date = None # Only use this when backfilling, e.g. dt.datetime(2016,3,19)\n",
"\n",
"# Run the query and compute the results.\n",
"print \"... Running queries\"\n",
"result = run_queries(operating_mode, start_date, end_date)\n",
"# Fetch the previous CSV file from S3.\n",
"print \"... Fetching previous state\"\n",
"fetch_previous_state(operating_mode)\n",
"# Updates it.\n",
"print \"... Serializing results\"\n",
"serialize_results(result, operating_mode)\n",
"# Stores the updated one back to S3\n",
"print \"... Storing new state\"\n",
"store_new_state(operating_mode)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}