nicain / verify_get_dprime_pipeline.py
Created June 26, 2019 20:33
Acceptance Criteria C1 of the get_dprime function in AllenSDK
import pandas as pd

# AllenSDK session access and the d-prime pipeline under test
from allensdk.internal.api.behavior_ophys_api import BehaviorOphysLimsApi
from allensdk.brain_observatory.behavior.behavior_ophys_session import BehaviorOphysSession
from allensdk.brain_observatory.behavior.dprime import get_dprime_pipeline, get_hit_rate, get_false_alarm_rate

# visual_behavior translators and response-rate utilities
from visual_behavior.translator.core import create_extended_dataframe
from visual_behavior.translator import foraging2
from visual_behavior.translator.foraging2 import data_to_change_detection_core
from visual_behavior.utilities import get_response_rates
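A minimal sketch of the signal-detection d-prime calculation that an acceptance check like this exercises, assuming the standard definition d' = Z(hit rate) - Z(false-alarm rate); the helper below is illustrative, not the AllenSDK API:

from scipy.stats import norm

def dprime_from_rates(hit_rate, false_alarm_rate):
    # Standard signal-detection d-prime: difference of z-scored rates.
    # Rates of exactly 0 or 1 give infinite z-scores; real pipelines clip them first.
    return norm.ppf(hit_rate) - norm.ppf(false_alarm_rate)

# Example: 80% hits vs. 20% false alarms gives d' of about 1.68
print(dprime_from_rates(0.8, 0.2))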
nicain / tiny_logging.py
Last active July 26, 2019 17:02
Tiny Logger Cheat Sheet Example
import logging

logger_name = 'TheName'
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
logger.addHandler(ch)
logger.propagate = False  # keep messages from also propagating to the root logger
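Illustrative usage once the handler is attached (the message text is made up):

logger.info('processing started')
# prints something like: 2019-07-26 17:02:00,123 - TheName - INFO - processing started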
import h5py
import numpy as np
from allensdk.brain_observatory.sync_dataset import Dataset as SyncDataset # on internal branch as of 04/03/2019
# ophys_experiment_id: 789359614
dff_filepath = '/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/ophys_experiment_789359614/789359614_dff.h5'
sync_filepath = '/allen/programs/braintv/production/visualbehavior/prod0/specimen_756577249/ophys_session_789220000/789220000_sync.h5'
with h5py.File(dff_filepath, 'r') as raw_file:
    dff_traces = raw_file['data'][()]  # dataset key assumed; dff.h5 files typically store the traces under 'data'
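A hedged sketch of how the sync file might be paired with the dff traces, assuming the Dataset API exposes line_labels and get_rising_edges and that the 2-photon vsync line is labelled '2p_vsync' (line names vary between rigs, and the internal-branch API may differ):

sync_dataset = SyncDataset(sync_filepath)
print(sync_dataset.line_labels)  # inspect which sync lines were recorded
frame_times = sync_dataset.get_rising_edges('2p_vsync', units='seconds')  # line label assumed
print(len(frame_times), dff_traces.shape)  # frame count should line up with the dff trace length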
nicain / mcc_example.py
Last active February 21, 2019 23:40
MouseConnectivityCache (MCC) example
from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache
from allensdk.api.queries.mouse_connectivity_api import MouseConnectivityApi
mcc_2017 = MouseConnectivityCache(resolution=25, ccf_version=MouseConnectivityApi.CCF_2017)
data = mcc_2017.get_template_volume()
print(data[0].shape)  # get_template_volume returns (volume, metadata); print the volume shape
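A possible follow-on with the same cache, sketched from the public MouseConnectivityCache API (the structure chosen here is only an example):

annotation, _ = mcc_2017.get_annotation_volume()  # CCF 2017 annotation volume at 25 um
structure_tree = mcc_2017.get_structure_tree()
visp = structure_tree.get_structures_by_acronym(['VISp'])[0]
print(annotation.shape, visp['id'], visp['name'])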
nicain / example.py
Last active October 17, 2018 23:02
Dataframe Browser Client-side API
import dill
import requests

# Fetch the pickled cursor object exposed by the dataframe-browser service
x = requests.get('http://nicholasc-ubuntu:5001/cursor/')
c = dill.loads(x.content)

# Drive the browser through the cursor: set the layout, open a CSV, and render it
c.cell_width('90%')
c.open(filename='/home/nicholasc/projects/dataframe-browser/data/example.csv')
c.display()
nicain / epoch_hello_world.py
Created October 1, 2018 22:37
EpochTable Hello World
from pynwb import NWBFile, NWBHDF5IO, TimeSeries
from pynwb.epoch import Epochs
import os
import pandas as pd
import datetime
nwbfile = NWBFile(
    source='Data source',
    session_description='test',
    identifier='test',
    session_start_time=datetime.datetime.now(),  # final required argument assumed; the gist preview is truncated here
)
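A hedged sketch of where the hello-world presumably goes next, written against the current pynwb epoch API (add_epoch and epochs.to_dataframe as in recent releases; the 2018 pynwb version this gist targets used a different epoch interface):

nwbfile.add_epoch(start_time=0.0, stop_time=1.0, tags=['hello'])
print(nwbfile.epochs.to_dataframe())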
nicain / generate_nwb.py
Last active October 1, 2018 18:15
NWB Boilerplate
from pynwb import NWBFile, NWBHDF5IO, TimeSeries
import os
import pandas as pd
from visual_behavior.translator.foraging2 import data_to_change_detection_core
import datetime
def test_visbeh_nwb(tmpdir_factory):
    data_dir = str(tmpdir_factory.mktemp("data"))
    save_file_name = os.path.join(data_dir, 'visbeh_test.nwb')
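The preview cuts off here; a plausible (assumed) continuation is the usual round-trip boilerplate, writing the file with NWBHDF5IO and reading it back:

    # assumed continuation of test_visbeh_nwb, not shown in the gist
    nwbfile = NWBFile(source='Data source', session_description='test', identifier='test',
                      session_start_time=datetime.datetime.now())
    with NWBHDF5IO(save_file_name, mode='w') as io:
        io.write(nwbfile)
    with NWBHDF5IO(save_file_name, mode='r') as io:
        assert io.read().identifier == 'test'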
nicain / one.py
Created September 4, 2018 21:17
Get the single element of a length-1 collection
def one(x, exception=Exception):
    """Return the single element of x, raising `exception` unless len(x) == 1."""
    if len(x) != 1:
        raise exception
    if isinstance(x, set):
        return list(x)[0]  # sets are not indexable, so materialize first
    else:
        return x[0]
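Illustrative usage:

assert one([42]) == 42
assert one({'a'}) == 'a'
one([1, 2], exception=ValueError)  # raises ValueError because len(x) != 1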
nicain / add_frame_dir.py
Last active August 30, 2018 18:03
Minimal working example of a monkey-patch to save frames from the display buffer
import collections

def add_frame_dir(sweep_stim, output_dir='.'):
    image_dict = {}
    timing_dict = collections.defaultdict(list)
    for stimulus in sweep_stim.stimuli:
        old_update = stimulus.update
        # Bind old_update/stimulus as defaults so each wrapper keeps its own stimulus
        # rather than the loop's last one (late-binding closure pitfall).
        def new_update(frame, old_update=old_update, stimulus=stimulus):
            old_update(frame)
            assert frame == stimulus.current_frame
nicain / behavior_session_regimen_stage_df.py
Last active August 23, 2018 21:50
Mapping between behavior_session_id and regimen/stage
import pandas as pd
from mtrain_api.utils import get_df
stages_df = get_df('stages').rename(columns={'id':'stage_id','name':'stage_name'}).drop(['script', 'script_md5', 'states'], axis=1)
states_df = get_df('states').rename(columns={'id':'state_id'})
subjects_df = get_df('subjects').rename(columns={'id':'subject_id'})  # keep the subject id under its own name; the state id is pulled out below
subjects_df['state_id'] = subjects_df['state'].map(lambda x:x['id'])
subjects_df.drop(['state'], axis=1, inplace=True)
stages_states_df = pd.merge(stages_df, states_df, on='stage_id')
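The preview ends here; a hedged sketch of the join the stated mapping implies, using only the columns built above (further tables such as behavior sessions or regimens are not shown in the gist):

subject_stage_df = pd.merge(subjects_df, stages_states_df, on='state_id')
print(subject_stage_df[['subject_id', 'stage_id', 'stage_name']].head())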