diff --git a/actions/longitudinal-comparisons-speed/attributes.yaml b/actions/longitudinal-comparisons-speed/attributes.yaml new file mode 100644 index 000000000..0a7822fee --- /dev/null +++ b/actions/longitudinal-comparisons-speed/attributes.yaml @@ -0,0 +1,4 @@ +registered: '2020-01-09T09:32:17' +data: + notebook: 20_longitudinal_comparisons_speed.ipynb + html: 20_longitudinal_comparisons_speed.html diff --git a/actions/longitudinal-comparisons-speed/data/20_longitudinal_comparisons_speed.html b/actions/longitudinal-comparisons-speed/data/20_longitudinal_comparisons_speed.html new file mode 100644 index 000000000..5268ebbe9 --- /dev/null +++ b/actions/longitudinal-comparisons-speed/data/20_longitudinal_comparisons_speed.html @@ -0,0 +1,16592 @@ + + +
+ +%load_ext autoreload
+%autoreload 2
+
import os
+import pathlib
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import colors
+import seaborn as sns
+import re
+import shutil
+import pandas as pd
+import scipy.stats
+
+import exdir
+import expipe
+from distutils.dir_util import copy_tree
+import septum_mec
+import spatial_maps as sp
+import head_direction.head as head
+import septum_mec.analysis.data_processing as dp
+import septum_mec.analysis.registration
+from septum_mec.analysis.plotting import violinplot, despine
+from spatial_maps.fields import find_peaks, calculate_field_centers, separate_fields_by_laplace
+from spike_statistics.core import permutation_resampling
+
+import speed_cells.speed as spd
+from septum_mec.analysis.plotting import plot_bootstrap_timeseries
+
+from tqdm.notebook import tqdm_notebook as tqdm
+tqdm.pandas()
+
project_path = dp.project_path()
+project = expipe.get_project(project_path)
+actions = project.actions
+
+output_path = pathlib.Path("output") / "longitudinal-comparisons-speed"
+(output_path / "statistics").mkdir(exist_ok=True, parents=True)
+(output_path / "figures").mkdir(exist_ok=True, parents=True)
+
statistics_action = actions['calculate-statistics']
+identification_action = actions['identify-neurons']
+sessions = pd.read_csv(identification_action.data_path('sessions'))
+units = pd.read_csv(identification_action.data_path('units'))
+session_units = pd.merge(sessions, units, on='action')
+statistics_results = pd.read_csv(statistics_action.data_path('results'))
+statistics = pd.merge(session_units, statistics_results, how='left')
+statistics.head()
+
statistics['unit_day'] = statistics.apply(lambda x: str(x.unit_idnum) + '_' + x.action.split('-')[1], axis=1)
+
stim_response_action = actions['stimulus-response']
+stim_response_results = pd.read_csv(stim_response_action.data_path('results'))
+stim_response_results = stim_response_results.drop('unit_id', axis=1)
+
statistics = pd.merge(statistics, stim_response_results, how='left')
+
print('N cells:',statistics.shape[0])
+
shuffling = actions['shuffling']
+quantiles_95 = pd.read_csv(shuffling.data_path('quantiles_95'))
+quantiles_95.head()
+
action_columns = ['action', 'channel_group', 'unit_name']
+data = pd.merge(statistics, quantiles_95, on=action_columns, suffixes=("", "_threshold"))
+
+data['specificity'] = np.log10(data['in_field_mean_rate'] / data['out_field_mean_rate'])
+
+data.head()
+
waveform_action = actions['waveform-analysis']
+waveform_results = pd.read_csv(waveform_action.data_path('results')).drop('template', axis=1)
+waveform_results = waveform_results.drop('unit_id', axis=1)
+
+data = data.merge(waveform_results, how='left')
+data.bs = data.bs.astype(bool)
+
data.groupby('stimulated').count()['action']
+
data.loc[data.eval('t_i_peak == t_i_peak and not bs'), 'ns_inhibited'] = True
+data.ns_inhibited.fillna(False, inplace=True)
+
+data.loc[data.eval('t_i_peak != t_i_peak and not bs'), 'ns_not_inhibited'] = True
+data.ns_not_inhibited.fillna(False, inplace=True)
+
# make baseline for inhibited vs not inhibited
+data.loc[data.unit_id.isin(data.query('ns_inhibited').unit_id.values), 'ns_inhibited'] = True
+data.loc[data.unit_id.isin(data.query('ns_not_inhibited').unit_id.values), 'ns_not_inhibited'] = True
+
query = (
+ 'gridness > gridness_threshold and '
+ 'information_rate > information_rate_threshold and '
+ 'gridness > .2 and '
+ 'average_rate < 25'
+)
+sessions_above_threshold = data.query(query)
+print("Number of sessions above threshold", len(sessions_above_threshold))
+print("Number of animals", len(sessions_above_threshold.groupby(['entity'])))
+
once_a_gridcell = statistics[statistics.unit_day.isin(sessions_above_threshold.unit_day.values)]
+
print("Number of gridcells", once_a_gridcell.unit_idnum.nunique())
+print("Number of gridcell recordings", len(once_a_gridcell))
+print("Number of animals", len(once_a_gridcell.groupby(['entity'])))
+
# Flag every recording belonging to a unit that met the grid-cell criteria on
# at least one day. Membership is by `unit_day`, matching how
# `once_a_gridcell` was selected above.
# BUG FIX: the previous `data.isin(once_a_gridcell)` performs an element-wise
# DataFrame comparison and does not yield a per-row boolean flag.
data.loc[:, 'gridcell'] = data.unit_day.isin(once_a_gridcell.unit_day)
+
data.ns_inhibited.sum()
+
query_baseline_i = 'baseline and Hz11'
+query_stimulated_11 = 'stimulated and frequency==11 and stim_location=="ms"'
+
+query_baseline_ii = 'baseline and Hz30'
+query_stimulated_30 = 'stimulated and frequency==30 and stim_location=="ms"'
+
+print("Number of gridcells in baseline i sessions",
+ len(data.query('gridcell and ' + f'{query_baseline_i}')))
+print("Number of gridcells in stimulated 11Hz ms sessions",
+ len(data.query('gridcell and ' + f'{query_stimulated_11}')))
+
+print("Number of gridcells in baseline ii sessions",
+ len(data.query('gridcell and ' + f'{query_baseline_ii}')))
+print("Number of gridcells in stimulated 30Hz ms sessions",
+ len(data.query('gridcell and ' + f'{query_stimulated_30}')))
+print()
+print("Number of NSi in baseline i sessions",
+ len(data.query('ns_inhibited and ' + f'{query_baseline_i}')))
+print("Number of NSi in stimulated 11Hz ms sessions",
+ len(data.query('ns_inhibited and ' + f'{query_stimulated_11}')))
+
+print("Number of NSi in baseline ii sessions",
+ len(data.query('ns_inhibited and ' + f'{query_baseline_ii}')))
+print("Number of NSi in stimulated 30Hz ms sessions",
+ len(data.query('ns_inhibited and ' + f'{query_stimulated_30}')))
+
max_speed = .5 # m/s only used for speed score
+min_speed = 0.02 # m/s only used for speed score
+position_sampling_rate = 100 # for interpolation
+position_low_pass_frequency = 6 # for low pass filtering of position
+
+box_size = [1.0, 1.0]
+bin_size = 0.02
+smoothing_low = 0.03
+smoothing_high = 0.06
+
+speed_binsize = 0.02
+
+stim_mask = True
+# baseline_duration = 600
+baseline_duration = None
+
data_loader = dp.Data(
+ position_sampling_rate=position_sampling_rate,
+ position_low_pass_frequency=position_low_pass_frequency,
+ box_size=box_size, bin_size=bin_size,
+ stim_mask=stim_mask, baseline_duration=baseline_duration
+)
+
def compute_mask(search, t1, t2, z1, z2, z3):
    """Return indices of `search` lying in (t1+z1, t1+z2] or (t1+z3, t2].

    Uses right-sided binary search, so samples exactly equal to a window
    edge fall to the left of it.
    """
    windows = [(t1 + z1, t1 + z2), (t1 + z3, t2)]
    collected = []
    for lo, hi in windows:
        start, stop = np.searchsorted(search, [lo, hi], side='right')
        collected.extend(range(start, stop))
    return collected
+
+
def load_speed(action_id, channel_id, unit_name, z1, z2, z3, split=False, stim_action=None):
    """Load running speed and spike times for one unit, with a stim-window mask.

    Parameters
    ----------
    action_id : str
        Session to load tracking and spikes from.
    channel_id, unit_name
        Identify the unit within the session.
    z1, z2, z3 : float
        Offsets (seconds) relative to each stimulation pulse onset `t1`;
        tracking samples falling in (t1+z1, t1+z2] or (t1+z3, t2] (where t2
        is the next pulse) are collected into the mask windows.
    split : bool
        If True, split the session at its temporal midpoint and return both
        halves separately.
    stim_action : str, optional
        Session to take the stimulation schedule from; defaults to
        `action_id` (used to apply a stimulated session's pulse times to a
        baseline session).
    """
    # Tracking: positions, timestamps and instantaneous speed ('v').
    x, y, t, speed = map(data_loader.tracking(action_id).get, ['x', 'y', 't', 'v'])

    spike_times = data_loader.spike_train(action_id, channel_id, unit_name)
    # Keep only spikes inside the tracked time range.
    spike_times = spike_times[(spike_times > min(t)) & (spike_times < max(t))]

    stim_action = stim_action if stim_action is not None else action_id
    stim_times = data_loader.stim_times(stim_action)

    if stim_times is not None:
        idxs = []
        stim_times = np.array(stim_times)
        # For each inter-pulse interval [t1, t2), collect the tracking-sample
        # indices inside the two z-windows.
        for t1, t2 in zip(stim_times, stim_times[1:]):
            idx = compute_mask(np.array(t), t1, t2, z1, z2, z3)
            idxs.extend(idx)

        idxs = np.sort(np.unique(idxs))
        # NOTE(review): mask is True OUTSIDE the collected windows here, but
        # all-False when there is no stimulation — confirm this matches the
        # mask convention expected by spd.speed_correlation.
        mask = ~np.in1d(np.arange(len(t)), idxs)
    else:
        mask = np.zeros_like(t).astype(bool)

    if split:
        # Split at half of the last timestamp, not half of the sample count.
        t_split = t[-1] / 2
        mask_speed = t < t_split
        speed1 = speed[mask_speed]
        speed2 = speed[~mask_speed]
        t1 = t[mask_speed]
        t2 = t[~mask_speed]
        mask1 = mask[mask_speed]
        mask2 = mask[~mask_speed]

        spike_mask = spike_times < t_split
        spike_times1 = spike_times[spike_mask]
        spike_times2 = spike_times[~spike_mask]

        return speed1, speed2, t1, t2, spike_times1, spike_times2, mask1, mask2

    return speed, t, spike_times, mask
+
z1, zg2, zi2, z3 = 0, 5e-3, 3e-3, 15e-3
+gridcell_id_map = {}
+gridcell_speed = [[], [], [], [], []]
+gridcell_data = data.query('gridcell')
+
+nsi_id_map = {}
+nsi_speed = [[], [], [], [], []]
+nsi_data = data.query('ns_inhibited')
+
+
+n_iter = gridcell_data.unit_id.unique().shape[0] + nsi_data.unit_id.unique().shape[0]
+pbar = tqdm(total=n_iter)
+
+for nid, unit_sessions in gridcell_data.groupby('unit_id'):
+ base_i = unit_sessions.query("baseline and Hz11")
+ base_ii = unit_sessions.query("baseline and Hz30")
+ stim_i = unit_sessions.query("frequency==11")
+ stim_ii = unit_sessions.query("frequency==30")
+ dfs = [(base_i, base_i), (base_i, base_ii), (base_i, stim_i), (base_ii, stim_ii), (base_i, stim_ii)]
+ sample = [False, False, True, True, True]
+ for i, pair in enumerate(dfs):
+ same_frame = pair[0].equals(pair[1])
+ for (_, row_1), (_, row_2) in zip(pair[0].iterrows(), pair[1].iterrows()):
+ if same_frame:
+ assert row_1.equals(row_2)
+ speed1, speed2, t1, t2, spike_times1, spike_times2, mask1, mask2 = load_speed(
+ row_1['action'], row_1['channel_group'], row_1['unit_name'],
+ z1, zg2, z3, split=True)
+ else:
+ assert not row_1.equals(row_2)
+ stim_action = row_2['action'] if sample[i] else None
+ speed1, t1, spike_times1, mask1 = load_speed(
+ row_1['action'], row_1['channel_group'], row_1['unit_name'],
+ z1, zg2, z3, split=False, stim_action=stim_action)
+ speed2, t2, spike_times2, mask2 = load_speed(
+ row_2['action'], row_2['channel_group'], row_2['unit_name'],
+ z1, zg2, z3, split=False)
+
+ speed_score1 = spd.speed_correlation(
+ speed1, t1, spike_times1, return_data=False, mask=mask1)
+ speed_score2 = spd.speed_correlation(
+ speed2, t2, spike_times2, return_data=False, mask=mask2)
+
+ gridcell_speed[i].append((speed_score1, speed_score2))
+
+ assert row_1.unit_id == row_2.unit_id
+ uid = row_2.unit_id
+ idnum = row_1.unit_idnum
+ gridcell_id_map[uid] = idnum
+ pbar.update()
+
+for nid, unit_sessions in nsi_data.groupby('unit_id'):
+ base_i = unit_sessions.query("baseline and Hz11")
+ base_ii = unit_sessions.query("baseline and Hz30")
+ stim_i = unit_sessions.query("frequency==11")
+ stim_ii = unit_sessions.query("frequency==30")
+ dfs = [(base_i, base_i), (base_i, base_ii), (base_i, stim_i), (base_ii, stim_ii), (base_i, stim_ii)]
+ sample = [False, False, True, True, True]
+ for i, pair in enumerate(dfs):
+ same_frame = pair[0].equals(pair[1])
+ for (_, row_1), (_, row_2) in zip(pair[0].iterrows(), pair[1].iterrows()):
+ if same_frame:
+ assert row_1.equals(row_2)
+ speed1, speed2, t1, t2, spike_times1, spike_times2, mask1, mask2 = load_speed(
+ row_1['action'], row_1['channel_group'], row_1['unit_name'],
+ z1, zi2, z3, split=True)
+ else:
+ assert not row_1.equals(row_2)
+ stim_action = row_2['action'] if sample[i] else None
+ speed1, t1, spike_times1, mask1 = load_speed(
+ row_1['action'], row_1['channel_group'], row_1['unit_name'],
+ z1, zi2, z3, split=False, stim_action=stim_action)
+ speed2, t2, spike_times2, mask2 = load_speed(
+ row_2['action'], row_2['channel_group'], row_2['unit_name'],
+ z1, zi2, z3, split=False)
+
+ speed_score1 = spd.speed_correlation(
+ speed1, t1, spike_times1, return_data=False, mask=mask1)
+ speed_score2 = spd.speed_correlation(
+ speed2, t2, spike_times2, return_data=False, mask=mask2)
+
+ nsi_speed[i].append((speed_score1, speed_score2))
+
+ assert row_1.unit_id == row_2.unit_id
+ uid = row_2.unit_id
+ idnum = row_1.unit_idnum
+ nsi_id_map[uid] = idnum
+ pbar.update()
+
+pbar.close()
+
def session_id(row):
    """Map a session row to its ordinal position within a recording day.

    Returns
    -------
    int
        0: baseline I, 1: stimulated I, 2: baseline II, 3: stimulated II.

    Raises
    ------
    ValueError
        If the row matches none of the four session types.
    """
    if row.baseline and row.i:
        return 0
    if row.stimulated and row.i:
        return 1
    if row.baseline and row.ii:
        return 2
    if row.stimulated and row.ii:
        return 3
    # BUG FIX: the original raised ValueError('what'), which gives no clue
    # about the offending row when it surfaces from data.apply().
    raise ValueError(
        f'session is neither baseline nor stimulated i/ii: {row!r}')
+
+data['session_id'] = data.apply(session_id, axis=1)
+
plt.rc('axes', titlesize=12)
+plt.rcParams.update({
+ 'font.size': 12,
+ 'figure.figsize': (4, 3),
+ 'figure.dpi': 150
+})
+
from septum_mec.analysis.plotting import plot_uncertainty
+
+for unit_id, id_num in gridcell_id_map.items():
+ sessions = data.query(f'gridcell and unit_id=="{unit_id}"')
+ n_action = sessions.date.nunique()
+ fig, axs = plt.subplots(n_action, 4, sharey=True, sharex=True, figsize=(8, n_action*4))
+ despine()
+ fig.suptitle(f'Neuron {id_num}')
+ if n_action == 1:
+ axs = [axs]
+ waxs = None
+ for ax, (date, rows) in zip(axs, sessions.groupby('date')):
+ entity = rows.iloc[0].entity
+ ax[0].set_ylabel(f'{entity}-{date}')
+ for _, row in rows.iterrows():
+ idx = row.session_id
+
+ speed, t, spike_times, mask = load_speed(
+ row['action'], row['channel_group'], row['unit_name'],
+ z1, zg2, z3, split=False)
+
+ speed_score, inst_speed, rate, times = spd.speed_correlation(
+ speed, t, spike_times, return_data=True, mask=mask)
+
+ inst_speed = inst_speed[~inst_speed.mask]
+ rate = rate[~rate.mask]
+ times = times[~times.mask]
+
+ speed_bins = np.arange(min_speed, max_speed + speed_binsize, speed_binsize)
+ ia = np.digitize(inst_speed, bins=speed_bins, right=True)
+ rates = []
+
+ for i in range(len(speed_bins)):
+ rates.append(rate[ia==i])
+ ax[idx].set_title(f'{speed_score:.3f}')
+# plot_uncertainty(speed_bins, rates, ax=ax[idx], normalize_values=True)
+ plot_bootstrap_timeseries(speed_bins, rates, ax=ax[idx], normalize_values=True)
+# rr = [rr for r in rates for rr in r]
+# aspect = (np.nanmax(rr) - np.nanmin(rr)) / (max_speed - min_speed)
+ for a in ax:
+ a.set_aspect('auto')
+
+ plt.tight_layout()
+ fig.savefig(
+ output_path / 'figures' / f'gridcell_neuron_{id_num}_speed_map.png',
+ bbox_inches='tight', transparent=True)
+ fig.savefig(
+ output_path / 'figures' / f'gridcell_neuron_{id_num}_speed_map.svg',
+ bbox_inches='tight', transparent=True)
+
for unit_id, id_num in nsi_id_map.items():
+ sessions = data.query(f'ns_inhibited and unit_id=="{unit_id}"')
+ n_action = sessions.date.nunique()
+ fig, axs = plt.subplots(n_action, 4, sharey=True, sharex=True, figsize=(8, n_action*4))
+ despine()
+ fig.suptitle(f'Neuron {id_num}')
+ if n_action == 1:
+ axs = [axs]
+ waxs = None
+ for ax, (date, rows) in zip(axs, sessions.groupby('date')):
+ entity = rows.iloc[0].entity
+ ax[0].set_ylabel(f'{entity}-{date}')
+ for _, row in rows.iterrows():
+ idx = row.session_id
+
+ speed, t, spike_times, mask = load_speed(
+ row['action'], row['channel_group'], row['unit_name'],
+ z1, zi2, z3, split=False)
+
+ speed_score, inst_speed, rate, times = spd.speed_correlation(
+ speed, t, spike_times, return_data=True, mask=mask)
+
+ inst_speed = inst_speed[~inst_speed.mask]
+ rate = rate[~rate.mask]
+ times = times[~times.mask]
+
+ speed_bins = np.arange(min_speed, max_speed + speed_binsize, speed_binsize)
+ ia = np.digitize(inst_speed, bins=speed_bins, right=True)
+ rates = []
+
+ for i in range(len(speed_bins)):
+ rates.append(rate[ia==i])
+
+ ax[idx].set_title(f'{speed_score:.3f}')
+ plot_bootstrap_timeseries(speed_bins, rates, ax=ax[idx], normalize_values=True)
+# rr = [rr for r in rates for rr in r]
+# aspect = (max_speed - min_speed) / (np.nanmax(rr) - np.nanmin(rr))
+ for a in ax:
+ a.set_aspect('auto')
+ plt.tight_layout()
+ fig.savefig(
+ output_path / 'figures' / f'nsi_neuron_{id_num}_speed_map.png',
+ bbox_inches='tight', transparent=True)
+ fig.savefig(
+ output_path / 'figures' / f'nsi_neuron_{id_num}_speed_map.svg',
+ bbox_inches='tight', transparent=True)
+
import speed_cells.speed as spd
from septum_mec.analysis.plotting import plot_bootstrap_timeseries

# Pooled distribution of running speeds for each of the four session types
# (baseline I, 11 Hz, baseline II, 30 Hz), over all grid-cell recordings.
speed_dist = [[], [], [], []]
speed_bins = np.arange(min_speed, 1 + speed_binsize, speed_binsize)
# BUG FIX: `results_id_map` is never defined in this notebook (NameError);
# the id map built for grid cells above is `gridcell_id_map`.
for unit_id, id_num in gridcell_id_map.items():
    sessions = once_a_gridcell.query(f'unit_id=="{unit_id}"')

    for date, rows in sessions.groupby('date'):
        entity = rows.iloc[0].entity
        for _, row in rows.iterrows():
            action_id = row['action']
            channel_id = row['channel_group']
            unit_name = row['unit_name']
            # NOTE(review): `session_id` was added as a column of `data`, not
            # of `statistics`/`once_a_gridcell` — confirm it exists on these
            # rows before relying on this cell.
            idx = row.session_id
            x, y, t, speed = map(data_loader.tracking(action_id).get, ['x', 'y', 't', 'v'])
            hist, _ = np.histogram(speed, bins=speed_bins, density=True)
            speed_dist[idx].append(hist)
+
+
plt.rc('axes', titlesize=12)
+plt.rcParams.update({
+ 'font.size': 12,
+ 'figure.figsize': (2.5, 2),
+ 'figure.dpi': 150
+})
+colors = ['#1b9e77','#d95f02','#7570b3','#e7298a']
+labels = ['Baseline I', '11 Hz', 'Baseline II', '30 Hz']
+fig = plt.figure()
+for i in range(len(speed_dist)):
+ plt.plot(
+ speed_bins[:-1], np.cumsum(np.array(speed_dist[i]).mean(0))*speed_binsize,
+ c=colors[i], label=labels[i])
+plt.legend(bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
+despine()
+plt.xlabel('Running speed (m/s)')
+fig.savefig(output_path / 'figures' / 'running_speed.png', bbox_inches='tight', transparent=True)
+fig.savefig(output_path / 'figures' / 'running_speed.svg', bbox_inches='tight')
+
labels = [
+ 'Baseline I vs baseline I',
+ 'Baseline I vs baseline II',
+ 'Baseline I vs stim I',
+ 'Baseline II vs stim II',
+ 'Baseline I vs stim II'
+]
+
+
def swarm_violin(data, ax=None, clip=None):
    """Violin + strip plot of score distributions with Mann-Whitney stars.

    Parameters
    ----------
    data : sequence of array-likes
        One group per tick position (five ticks assumed); group 0 is the
        reference that every other group is tested against.
    ax : matplotlib Axes, optional
        Created via plt.subplots() if not given.
    clip : float, optional
        If given, fixes the y-limits to (0, clip) and places the significance
        labels at y == clip; otherwise the label height is derived from the
        data range.
    """
    if ax is None:
        fig, ax = plt.subplots()

    ticks = [0,1,2,3,4]

    violins = ax.violinplot(
        data, ticks, showmedians=True, showextrema=False, points=1000, bw_method=.3)

    # White, thick median/extrema lines drawn on top of everything else.
    for category in ['cbars', 'cmins', 'cmaxes', 'cmedians']:
        if category in violins:
            violins[category].set_color(['w', 'w'])
            violins[category].set_linewidth(2.0)
            violins[category].set_zorder(10000)

    # Semi-transparent gray violin bodies.
    for pc in violins['bodies']:
        pc.set_facecolor('gray')
#         pc.set_edgecolor(c)
        pc.set_alpha(0.4)

    # Individual observations as black dots over the violins.
    sns.stripplot(data=data, size=4, ax=ax, color='k')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    # Vertical placement of the significance labels: slightly above the
    # largest joint range of group 0 and any comparison group.
    y = -np.inf
    if clip is None:
        for val in data[1:]:
            data_max = np.max([max(data[0]), max(val)])
            data_min = np.min([min(data[0]), min(val)])
            y_ = data_max * 1.05 + 0.025 * (data_max - data_min)
            if y_ > y:
                y = y_
    else:
        y = clip
        ax.set_ylim(0, clip)

    # Two-sided Mann-Whitney U test of each group against group 0, annotated
    # with the conventional star notation at that group's tick.
    x = 1
    for val in data[1:]:
        Uvalue, pvalue = scipy.stats.mannwhitneyu(data[0], val, alternative='two-sided')
        # significance
        if pvalue < 0.0001:
            significance = "****"
        elif pvalue < 0.001:
            significance = "***"
        elif pvalue < 0.01:
            significance = "**"
        elif pvalue < 0.05:
            significance = "*"
        else:
            significance = "ns"

        ax.text(x, y, significance, ha='center', va='bottom')
        x += 1
+
pairwise_gridcell = [[], [], [], [], []]
+for i, pairs in enumerate(gridcell_speed):
+ for j, pair in enumerate(pairs):
+ pairwise_gridcell[i].append(np.diff(pair))
+
+pairwise_nsi = [[], [], [], [], []]
+for i, pairs in enumerate(nsi_speed):
+ for j, pair in enumerate(pairs):
+ pairwise_nsi[i].append(np.diff(pair))
+
plt.rc('axes', titlesize=12)
+plt.rcParams.update({
+ 'font.size': 12,
+ 'figure.figsize': (4, 2),
+ 'figure.dpi': 150
+})
+
fig, axs = plt.subplots(2, 1, sharex=True)
+
+swarm_violin(pairwise_gridcell, ax=axs[0])
+axs[0].set_ylabel('Grid cells')
+
+swarm_violin(pairwise_nsi, ax=axs[1])
+axs[1].set_ylabel('NSi cells')
+
+
+plt.xticks([0,1,2,3,4], labels, rotation=-45, ha='center')
+# plt.tight_layout()
+fig.savefig(output_path / 'figures' / 'violins_swarm.png', bbox_inches='tight')
+fig.savefig(output_path / 'figures' / 'violins_swarm.svg', bbox_inches='tight')
+
action = project.require_action("longitudinal-comparisons-speed")
+
copy_tree(output_path, str(action.data_path()))
+
septum_mec.analysis.registration.store_notebook(action, "20_longitudinal_comparisons_speed.ipynb")
+
+
\n", + " | action | \n", + "baseline | \n", + "entity | \n", + "frequency | \n", + "i | \n", + "ii | \n", + "session | \n", + "stim_location | \n", + "stimulated | \n", + "tag | \n", + "... | \n", + "burst_event_ratio | \n", + "bursty_spike_ratio | \n", + "gridness | \n", + "border_score | \n", + "information_rate | \n", + "information_specificity | \n", + "head_mean_ang | \n", + "head_mean_vec_len | \n", + "spacing | \n", + "orientation | \n", + "
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "0.398230 | \n", + "0.678064 | \n", + "-0.466923 | \n", + "0.029328 | \n", + "1.009215 | \n", + "0.317256 | \n", + "5.438033 | \n", + "0.040874 | \n", + "0.628784 | \n", + "20.224859 | \n", + "
1 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "0.138014 | \n", + "0.263173 | \n", + "-0.666792 | \n", + "0.308146 | \n", + "0.192524 | \n", + "0.033447 | \n", + "1.951740 | \n", + "0.017289 | \n", + "0.789388 | \n", + "27.897271 | \n", + "
2 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "0.373986 | \n", + "0.659259 | \n", + "-0.572566 | \n", + "0.143252 | \n", + "4.745836 | \n", + "0.393704 | \n", + "4.439721 | \n", + "0.124731 | \n", + "0.555402 | \n", + "28.810794 | \n", + "
3 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "0.087413 | \n", + "0.179245 | \n", + "-0.437492 | \n", + "0.268948 | \n", + "0.157394 | \n", + "0.073553 | \n", + "6.215195 | \n", + "0.101911 | \n", + "0.492250 | \n", + "9.462322 | \n", + "
4 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "0.248771 | \n", + "0.463596 | \n", + "-0.085938 | \n", + "0.218744 | \n", + "0.519153 | \n", + "0.032683 | \n", + "1.531481 | \n", + "0.053810 | \n", + "0.559905 | \n", + "0.000000 | \n", + "
5 rows × 39 columns
\n", + "\n", + " | border_score | \n", + "gridness | \n", + "head_mean_ang | \n", + "head_mean_vec_len | \n", + "information_rate | \n", + "speed_score | \n", + "action | \n", + "channel_group | \n", + "unit_name | \n", + "
---|---|---|---|---|---|---|---|---|---|
0 | \n", + "0.348023 | \n", + "0.275109 | \n", + "3.012689 | \n", + "0.086792 | \n", + "0.707197 | \n", + "0.149071 | \n", + "1833-010719-1 | \n", + "0.0 | \n", + "127.0 | \n", + "
1 | \n", + "0.362380 | \n", + "0.166475 | \n", + "3.133138 | \n", + "0.037271 | \n", + "0.482486 | \n", + "0.132212 | \n", + "1833-010719-1 | \n", + "0.0 | \n", + "161.0 | \n", + "
2 | \n", + "0.367498 | \n", + "0.266865 | \n", + "5.586395 | \n", + "0.182843 | \n", + "0.271188 | \n", + "0.062821 | \n", + "1833-010719-1 | \n", + "0.0 | \n", + "191.0 | \n", + "
3 | \n", + "0.331942 | \n", + "0.312155 | \n", + "5.955767 | \n", + "0.090786 | \n", + "0.354018 | \n", + "0.052009 | \n", + "1833-010719-1 | \n", + "0.0 | \n", + "223.0 | \n", + "
4 | \n", + "0.325842 | \n", + "0.180495 | \n", + "5.262721 | \n", + "0.103584 | \n", + "0.210427 | \n", + "0.094041 | \n", + "1833-010719-1 | \n", + "0.0 | \n", + "225.0 | \n", + "
\n", + " | action | \n", + "baseline | \n", + "entity | \n", + "frequency | \n", + "i | \n", + "ii | \n", + "session | \n", + "stim_location | \n", + "stimulated | \n", + "tag | \n", + "... | \n", + "p_e_peak | \n", + "t_i_peak | \n", + "p_i_peak | \n", + "border_score_threshold | \n", + "gridness_threshold | \n", + "head_mean_ang_threshold | \n", + "head_mean_vec_len_threshold | \n", + "information_rate_threshold | \n", + "speed_score_threshold | \n", + "specificity | \n", + "
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "0.332548 | \n", + "0.229073 | \n", + "6.029431 | \n", + "0.205362 | \n", + "1.115825 | \n", + "0.066736 | \n", + "0.451741 | \n", + "
1 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "0.354830 | \n", + "0.089333 | \n", + "6.120055 | \n", + "0.073566 | \n", + "0.223237 | \n", + "0.052594 | \n", + "0.098517 | \n", + "
2 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "0.264610 | \n", + "-0.121081 | \n", + "5.759406 | \n", + "0.150827 | \n", + "4.964984 | \n", + "0.027120 | \n", + "0.400770 | \n", + "
3 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "0.344280 | \n", + "0.215829 | \n", + "6.033364 | \n", + "0.110495 | \n", + "0.239996 | \n", + "0.054074 | \n", + "0.269461 | \n", + "
4 | \n", + "1849-060319-3 | \n", + "True | \n", + "1849 | \n", + "NaN | \n", + "False | \n", + "True | \n", + "3 | \n", + "NaN | \n", + "False | \n", + "baseline ii | \n", + "... | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "0.342799 | \n", + "0.218967 | \n", + "5.768170 | \n", + "0.054762 | \n", + "0.524990 | \n", + "0.144702 | \n", + "0.133410 | \n", + "
5 rows × 51 columns
\n", + "