Created December 6, 2017 22:23
Adapter class that wraps ephys experiments in a BrainObservatoryDataSet-like object
# Assumed imports: h5py/numpy/pandas for the data handling below, and allensdk's
# stimulus_info module for the LOCALLY_SPARSE_NOISE_4DEG constant (referred to as
# both `si` and `stimulus_info` in the original).
import collections

import h5py
import numpy as np
import pandas as pd
from allensdk.brain_observatory import stimulus_info as si


class EphysBrainObservatoryAdapter(object):
    """Expose an ephys NWB file through a BrainObservatoryDataSet-like interface."""

    # cortex_probe_dict (probe name -> list of unit ids) is expected to be defined
    # elsewhere in the surrounding analysis code.
    def __init__(self, nwb_file_name, analysis_dir, probe_unit_dict=cortex_probe_dict):
        self.convert_inner_unit_to_unit = {}
        self.convert_unit_to_inner_unit = {}
        self.probe_unit_id_dict = {}
        self.spike_data_dict = {}

        f = h5py.File(nwb_file_name, 'r')
        self.stim_table_data = f['stimulus/presentation/locally_sparse_noise_4deg/timestamps'][()]
        self.stim_table_image = f['stimulus/presentation/locally_sparse_noise_4deg/data'][()][:, 0]

        # Build lookups between (probe, unit) pairs, the unit keys used inside the
        # NWB UnitTimes group, and a flat integer index used for the fake calcium traces.
        index = 0
        for probe, unit_list in probe_unit_dict.items():
            unit_reference_list = f['processing/%s/unit_list' % probe][()].tolist()
            for unit in unit_list:
                lookup_index = unit_reference_list.index(unit)
                key = f['processing/%s/UnitTimes/unit_list' % probe][()][int(lookup_index)]
                if isinstance(key, bytes):
                    key = key.decode()
                if key not in ['noise']:
                    self.spike_data_dict[probe, unit] = f['processing/%s/UnitTimes/%s/times' % (probe, key)][()]
                    self.convert_inner_unit_to_unit[(probe, int(key))] = unit
                    self.convert_unit_to_inner_unit[probe, unit] = int(key)
                    self.probe_unit_id_dict[probe, unit] = index
                    self.probe_unit_id_dict[index] = probe, unit
                    index += 1
        f.close()

        # Synthetic dF/F traces are attached later via set_calcium(); an earlier
        # version of this class pre-loaded MLspike dF/F output from analysis_dir.
        self.fake_calcium_data_dict = {}
        self.timestamps = None

    def set_calcium(self, probe, unit, calcium):
        """Attach a synthetic calcium (dF/F) trace to a unit, keyed by its flat index."""
        if self.timestamps is not None:
            assert len(self.timestamps) == len(calcium)
        index = self.probe_unit_id_dict[probe, unit]
        self.fake_calcium_data_dict[index] = calcium
        # Assume a 30 Hz sampling rate for the fake calcium traces.
        self.timestamps = np.arange(0, len(self.fake_calcium_data_dict[index])) * 1. / 30

    def get_stimulus_table(self, stimulus):
        assert stimulus == si.LOCALLY_SPARSE_NOISE_4DEG
        # Convert presentation start/end times (seconds) to 30 Hz frame indices.
        tmp = np.round(self.stim_table_data[:, :2] / (1. / 30))
        stimulus_table_dict = collections.defaultdict(list)
        stimulus_table_dict['frame'] = self.stim_table_image.astype(int)
        stimulus_table_dict['start'] = tmp[:, 0].astype(int)
        stimulus_table_dict['end'] = tmp[:, 1].astype(int)
        return pd.DataFrame(stimulus_table_dict)

    def get_dff_traces(self):
        # One row per unit that has had a calcium trace attached, in ascending index order.
        dff_data = np.zeros((len(self.fake_calcium_data_dict), len(self.timestamps)))
        for row, (index, val) in enumerate(sorted(self.fake_calcium_data_dict.items())):
            dff_data[row] = val
        return self.timestamps, dff_data

    def get_stimulus_template(self, stimulus):
        assert stimulus == si.LOCALLY_SPARSE_NOISE_4DEG
        return np.load('/data/mat/nicholasc/brain_observatory_analysis/ephys_receptive_field/data/stimulus_template_lsn.npy')

    @property
    def unit_list(self):
        # Flat unit indices with calcium attached, matching the row order of get_dff_traces().
        return sorted(self.fake_calcium_data_dict.keys())
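A minimal usage sketch, assuming the imports above and a locally available NWB file with the layout the adapter expects; the file name, probe dictionary, and dF/F trace below are hypothetical placeholders, not part of the gist:

# All names and values here are made-up placeholders for illustration.
probe_dict = {'probeC': [12, 15, 27]}  # probe name -> ephys unit ids of interest
adapter = EphysBrainObservatoryAdapter('experiment.nwb', '/tmp/analysis',
                                       probe_unit_dict=probe_dict)

# Attach a synthetic dF/F trace for one unit (e.g. from an MLspike-style forward model).
fake_dff = np.random.rand(9000)  # 5 minutes at the assumed 30 Hz
adapter.set_calcium('probeC', 12, fake_dff)

timestamps, dff = adapter.get_dff_traces()  # (n_timepoints,), (n_units, n_timepoints)
stim_table = adapter.get_stimulus_table(si.LOCALLY_SPARSE_NOISE_4DEG)
template = adapter.get_stimulus_template(si.LOCALLY_SPARSE_NOISE_4DEG)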