calcium+visual_stimuli=nwb
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### build dataset object from lims ID for the session"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "lims_id = 672584839"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/local1/miniconda2/envs/ophys_nwb/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "from visual_behavior_ophys.dataset.visual_behavior_scientifica_dataset import VisualBehaviorScientificaDataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sync\n",
      "calculating monitor delay\n",
      "Visual frames detected in sync: 215800\n",
      "2P frames detected in sync: 111682\n",
      "180313\n",
      "visual frames in pkl file: 215800\n",
      "stim type is images\n",
      "length of traces: 111680\n",
      "number of segmented cells: 130\n",
      "dropped 2P frames detected\n"
     ]
    }
   ],
   "source": [
    "dataset = VisualBehaviorScientificaDataset(lims_id)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## this is the data we need to get into NWB\n",
    "\n",
    "❌ - incomplete\n",
    "✔️ - complete\n",
    "\n",
    "- ❌ metadata\n",
    "- ❌ alignment\n",
    "  - ❌ Body camera\n",
    "  - ❌ Eye tracking\n",
    "  - ❌ Stimulus\n",
    "  - ❌ 2P vsync\n",
    "- ❌ acquisition\n",
    "  - ❌ body camera\n",
    "  - ❌ eye camera\n",
    "  - ❌ wheel encoder\n",
    "  - ❌ lick events\n",
    "  - ❌ calcium movie\n",
    "- ❌ stimulus\n",
    "  - ❌ visual stimulus events\n",
    "  - ❌ reward events\n",
    "  - ❌ trials\n",
    "- ❌ processing/ophys\n",
    "  - ❌ motion correction\n",
    "  - ✔️ image segmentation\n",
    "    - ✔️ ROIs\n",
    "    - ❌ ROI metadata?\n",
    "  - ✔️ dFF\n",
    "  - ❌ fluorescence\n",
    "- ❌ processing/eye\n",
    "  - ❌ pupil location\n",
    "  - ❌ pupil size\n",
    "- ❌ processing/running\n",
    "  - ❌ wheel velocity"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## let's load data using the dataset object"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "length of traces: 111680\n",
      "number of segmented cells: 130\n",
      "dropped 2P frames detected\n"
     ]
    }
   ],
   "source": [
    "# get ROI & dF/F data\n",
    "\n",
    "roi_metrics = dataset.get_roi_metrics()\n",
    "\n",
    "imaging_rate = '{:0.1f}Hz'.format(dataset.metadata['ophys_frame_rate'])\n",
    "\n",
    "roi_masks = dataset.roi_dict\n",
    "max_projection = dataset.max_projection\n",
    "\n",
    "dFF, dFF_t = dataset.get_dff_traces()\n",
    "\n",
    "cell_ids = dataset.get_cell_specimen_id_for_index(range(dFF.shape[0]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# first, we create the nwbfile object"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "NA = 'THIS REQUIRED ATTRIBUTE INTENTIONALLY LEFT BLANK.'\n",
    "\n",
    "from pynwb import NWBFile\n",
    "nwbfile = NWBFile(\n",
    "    source = NA, \n",
    "    session_description = NA, \n",
    "    identifier = str(lims_id), \n",
    "    session_start_time = dataset.lims_data['experiment_date'][0],\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## next, we create nwb objects for ophys assets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "two_p_device = 'scientifica'\n",
    "\n",
    "from pynwb import ophys\n",
    "optical_channel = ophys.OpticalChannel(\n",
    "    name = 'Optical Channel',\n",
    "    source = NA,\n",
    "    description = NA,\n",
    "    emission_lambda = NA,\n",
    ")\n",
    "\n",
    "imaging_plane = nwbfile.create_imaging_plane(\n",
    "    name='test_imaging_plane',\n",
    "    source=NA,\n",
    "    optical_channel=optical_channel,\n",
    "    description=NA,\n",
    "    device=NA,\n",
    "    excitation_lambda=NA, \n",
    "    imaging_rate=NA, \n",
    "    indicator=NA, \n",
    "    location=NA,\n",
    "    manifold=[], \n",
    "    conversion=1.0, \n",
    "    unit=NA, \n",
    "    reference_frame=NA,\n",
    ")\n",
    "\n",
    "from pynwb import image\n",
    "max_proj = image.ImageSeries(\n",
    "    name = 'Maximum Projection Image',\n",
    "    source = NA,\n",
    "    data = max_projection,\n",
    "    unit = NA,\n",
    "    format = 'raw',\n",
    "    timestamps = [0.0], # <- This is required, so let's make it at the beginning of the experiment\n",
    ")\n",
    "nwbfile.add_acquisition(max_proj)\n",
    "\n",
    "ophys_module = nwbfile.create_processing_module(\n",
    "    name=\"ophys\",\n",
    "    description=\"calcium responses\",\n",
    "    source=\"Allen Brain Observatory: Visual Behavior\",\n",
    ")\n",
    "\n",
    "from pynwb.ophys import ImageSegmentation\n",
    "img_seg = ImageSegmentation(\n",
    "    'image segmentation container'\n",
    ")\n",
    "ophys_module.add_data_interface(img_seg)\n",
    "\n",
    "plane_segmentation = img_seg.create_plane_segmentation(\n",
    "    source=NA,\n",
    "    description=NA,\n",
    "    imaging_plane=imaging_plane, \n",
    "    name='plane_0', \n",
    "    reference_images=max_proj,\n",
    ")\n",
    "\n",
    "\n",
    "for cell_id in cell_ids: # <- must iterate like this because order matters\n",
    "    img_mask = roi_masks[cell_id]\n",
    "    \n",
    "    plane_segmentation.add_roi(\n",
    "        pixel_mask=[], \n",
    "        image_mask=img_mask,\n",
    "    )\n",
    "    \n",
    "    \n",
    "from pynwb.ophys import DfOverF\n",
    "\n",
    "dff_container = DfOverF('container of fluorescence data')\n",
    "ophys_module.add_data_interface(dff_container)\n",
    "\n",
    "# get an ROI table region i.e. a subset of ROIs to create a RoiResponseSeries from\n",
    "rt_region = plane_segmentation.create_roi_table_region(\n",
    "    range(len(cell_ids)), # <- this should be names, in order\n",
    "    NA,\n",
    ")\n",
    "\n",
    "dFF_series = dff_container.create_roi_response_series(\n",
    "    name='df_over_f', \n",
    "    source=NA,\n",
    "    data=dFF, \n",
    "    unit='NA', \n",
    "    rois = rt_region,\n",
    "    timestamps=dFF_t,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## next, let's load the record of visual stimuli from the dataset object"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "visual_stimulus = dataset.flashes[['time','image_category','image_name']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/local1/miniconda2/envs/ophys_nwb/lib/python2.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    }
   ],
   "source": [
    "visual_stimulus['duration'] = 0.25"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>time</th>\n",
       "      <th>image_category</th>\n",
       "      <th>image_name</th>\n",
       "      <th>duration</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>flash_index</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>57.56506</td>\n",
       "      <td>im045</td>\n",
       "      <td>im045</td>\n",
       "      <td>0.25</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>58.31548</td>\n",
       "      <td>im045</td>\n",
       "      <td>im045</td>\n",
       "      <td>0.25</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>59.06612</td>\n",
       "      <td>im045</td>\n",
       "      <td>im045</td>\n",
       "      <td>0.25</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>59.81668</td>\n",
       "      <td>im045</td>\n",
       "      <td>im045</td>\n",
       "      <td>0.25</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>60.56731</td>\n",
       "      <td>im045</td>\n",
       "      <td>im045</td>\n",
       "      <td>0.25</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                 time image_category image_name  duration\n",
       "flash_index                                              \n",
       "0            57.56506          im045      im045      0.25\n",
       "1            58.31548          im045      im045      0.25\n",
       "2            59.06612          im045      im045      0.25\n",
       "3            59.81668          im045      im045      0.25\n",
       "4            60.56731          im045      im045      0.25"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "visual_stimulus.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "images = dataset.pkl['image_dict']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(8, 918, 1174)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "image_stack = []\n",
    "image_lookup = {}\n",
    "\n",
    "ii = 0\n",
    "for cat, cat_images in images.iteritems():\n",
    "    for name, img in cat_images.iteritems():\n",
    "        image_stack.append(img)\n",
    "        image_lookup[name] = ii \n",
    "        ii += 1\n",
    "\n",
    "image_stack = np.stack(image_stack)\n",
    "\n",
    "image_stack.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## now, we'll create the NWB objects for the visual stimulus record"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "visual_stimulus_images = image.ImageSeries(\n",
    "    name = 'natural_images',\n",
    "    source = NA,\n",
    "    data = image_stack,\n",
    "    unit = NA,\n",
    "    format = 'raw',\n",
    "    timestamps = [0.0], # <- This is required, so let's make it at the beginning of the experiment\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<pynwb.image.ImageSeries at 0x7ff6d08e5dd0>"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "nwbfile.add_stimulus_template(visual_stimulus_images)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([4, 6, 1, 7, 3, 2, 0, 5])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "visual_stimulus['image_name'].map(image_lookup).unique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "image_index = image.IndexSeries(\n",
    "    name='natural_images_timeseries',\n",
    "    source=NA,\n",
    "    data=visual_stimulus['image_name'].map(image_lookup).values,\n",
    "    unit=NA,\n",
    "    indexed_timeseries=visual_stimulus_images,\n",
    "    timestamps=visual_stimulus['time'].values,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<pynwb.image.IndexSeries at 0x7ff6d3f072d0>"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "nwbfile.add_stimulus(image_index)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## finally, we'll save the file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ophys_672584839.nwb\n"
     ]
    }
   ],
   "source": [
    "from pynwb import NWBHDF5IO\n",
    "# write data\n",
    "\n",
    "filename = 'ophys_{}.nwb'.format(lims_id)\n",
    "print filename\n",
    "with NWBHDF5IO(filename, 'w') as io:\n",
    "    io.write(nwbfile)"
   ]
  }
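,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## optional sanity check: read the file back\n",
    "\n",
    "a minimal sketch added for illustration (this cell was not part of the original run), assuming the same pynwb version used above: opening the file with `NWBHDF5IO` in read mode and calling `io.read()` should return the `NWBFile` we just wrote."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# sketch (not executed here): reopen the file we just wrote and confirm it round-trips\n",
    "with NWBHDF5IO(filename, 'r') as io:\n",
    "    nwbfile_in = io.read()\n",
    "    print nwbfile_in.identifier"
   ]
  }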
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "ophys_nwb",
   "language": "python",
   "name": "ophys_nwb"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}