In [1]:
%load_ext autoreload
%autoreload 2
In [167]:
import os
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import seaborn as sns
import re
import shutil
import pandas as pd
import scipy.stats

import exdir
import expipe
from distutils.dir_util import copy_tree
import septum_mec
import spatial_maps as sp
import head_direction.head as head
import septum_mec.analysis.data_processing as dp
import septum_mec.analysis.registration
from septum_mec.analysis.plotting import violinplot, despine
from spatial_maps.fields import (
    find_peaks, calculate_field_centers, separate_fields_by_laplace, 
    map_pass_to_unit_circle, calculate_field_centers, distance_to_edge_function, 
    compute_crossings, which_field)
from phase_precession import cl_corr
from spike_statistics.core import permutation_resampling
import matplotlib.mlab as mlab
import scipy.signal as ss
from scipy.interpolate import interp1d
from septum_mec.analysis.plotting import regplot
from skimage import measure
from tqdm.notebook import tqdm_notebook as tqdm
tqdm.pandas()

from scipy.stats import wilcoxon
In [ ]:
 
In [3]:
# %matplotlib notebook
%matplotlib inline
In [4]:
project_path = dp.project_path()
project = expipe.get_project(project_path)
actions = project.actions

output_path = pathlib.Path("output") / "spikes-in-field"
(output_path / "statistics").mkdir(exist_ok=True, parents=True)
(output_path / "figures").mkdir(exist_ok=True, parents=True)

Load cell statistics and shuffling quantiles

In [5]:
statistics_action = actions['calculate-statistics']
identification_action = actions['identify-neurons']
sessions = pd.read_csv(identification_action.data_path('sessions'))
units = pd.read_csv(identification_action.data_path('units'))
session_units = pd.merge(sessions, units, on='action')
statistics_results = pd.read_csv(statistics_action.data_path('results'))
statistics = pd.merge(session_units, statistics_results, how='left')
statistics.head()
Out[5]:
action baseline entity frequency i ii session stim_location stimulated tag ... burst_event_ratio bursty_spike_ratio gridness border_score information_rate information_specificity head_mean_ang head_mean_vec_len spacing orientation
0 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... 0.398230 0.678064 -0.466923 0.029328 1.009215 0.317256 5.438033 0.040874 0.628784 20.224859
1 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... 0.138014 0.263173 -0.666792 0.308146 0.192524 0.033447 1.951740 0.017289 0.789388 27.897271
2 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... 0.373986 0.659259 -0.572566 0.143252 4.745836 0.393704 4.439721 0.124731 0.555402 28.810794
3 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... 0.087413 0.179245 -0.437492 0.268948 0.157394 0.073553 6.215195 0.101911 0.492250 9.462322
4 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... 0.248771 0.463596 -0.085938 0.218744 0.519153 0.032683 1.531481 0.053810 0.559905 0.000000

5 rows × 39 columns

In [6]:
statistics['unit_day'] = statistics.apply(lambda x: str(x.unit_idnum) + '_' + x.action.split('-')[1], axis=1)
In [7]:
stim_response_action = actions['stimulus-response']
stim_response_results = pd.read_csv(stim_response_action.data_path('results'))
In [8]:
statistics = pd.merge(statistics, stim_response_results, how='left')
In [9]:
print('N cells:',statistics.shape[0])
N cells: 1284
In [10]:
shuffling = actions['shuffling']
quantiles_95 = pd.read_csv(shuffling.data_path('quantiles_95'))
quantiles_95.head()
Out[10]:
border_score gridness head_mean_ang head_mean_vec_len information_rate speed_score action channel_group unit_name
0 0.348023 0.275109 3.012689 0.086792 0.707197 0.149071 1833-010719-1 0.0 127.0
1 0.362380 0.166475 3.133138 0.037271 0.482486 0.132212 1833-010719-1 0.0 161.0
2 0.367498 0.266865 5.586395 0.182843 0.271188 0.062821 1833-010719-1 0.0 191.0
3 0.331942 0.312155 5.955767 0.090786 0.354018 0.052009 1833-010719-1 0.0 223.0
4 0.325842 0.180495 5.262721 0.103584 0.210427 0.094041 1833-010719-1 0.0 225.0
In [11]:
action_columns = ['action', 'channel_group', 'unit_name']
data = pd.merge(statistics, quantiles_95, on=action_columns, suffixes=("", "_threshold"))

data['specificity'] = np.log10(data['in_field_mean_rate'] / data['out_field_mean_rate'])

data.head()
Out[11]:
action baseline entity frequency i ii session stim_location stimulated tag ... p_e_peak t_i_peak p_i_peak border_score_threshold gridness_threshold head_mean_ang_threshold head_mean_vec_len_threshold information_rate_threshold speed_score_threshold specificity
0 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... NaN NaN NaN 0.332548 0.229073 6.029431 0.205362 1.115825 0.066736 0.451741
1 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... NaN NaN NaN 0.354830 0.089333 6.120055 0.073566 0.223237 0.052594 0.098517
2 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... NaN NaN NaN 0.264610 -0.121081 5.759406 0.150827 4.964984 0.027120 0.400770
3 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... NaN NaN NaN 0.344280 0.215829 6.033364 0.110495 0.239996 0.054074 0.269461
4 1849-060319-3 True 1849 NaN False True 3 NaN False baseline ii ... NaN NaN NaN 0.342799 0.218967 5.768170 0.054762 0.524990 0.144702 0.133410

5 rows × 51 columns

Statistics about all cell-sessions

In [12]:
data.groupby('stimulated').count()['action']
Out[12]:
stimulated
False    624
True     660
Name: action, dtype: int64

Find all cells with gridness above threshold

In [13]:
# Grid-cell criterion: gridness and information rate must exceed the
# per-unit 95%-shuffling thresholds, plus two absolute cutoffs — a gridness
# floor of 0.2 and a mean-rate ceiling of 25 Hz (presumably to exclude
# putative fast-spiking interneurons — confirm).
query = (
    'gridness > gridness_threshold and '
    'information_rate > information_rate_threshold and '
    'gridness > .2 and '
    'average_rate < 25'
)
sessions_above_threshold = data.query(query)
print("Number of sessions above threshold", len(sessions_above_threshold))
print("Number of animals", len(sessions_above_threshold.groupby(['entity'])))
Number of sessions above threshold 194
Number of animals 4

select neurons that have been characterized as a grid cell on the same day

In [193]:
once_a_gridcell = statistics[statistics.unit_day.isin(sessions_above_threshold.unit_day.values)]
In [172]:
once_a_gridcell = statistics[statistics.unit_day.isin(
    sessions_above_threshold.query('baseline and Hz11').unit_day.values)]
In [194]:
print("Number of gridcells", once_a_gridcell.unit_idnum.nunique())
print("Number of gridcell recordings", len(once_a_gridcell))
print("Number of animals", len(once_a_gridcell.groupby(['entity'])))
Number of gridcells 139
Number of gridcell recordings 231
Number of animals 4

Divide the grid-cell recordings into stimulated and non-stimulated (baseline) sessions

In [195]:
baseline_i = once_a_gridcell.query('baseline and Hz11')
stimulated_11 = once_a_gridcell.query('stimulated and frequency==11 and stim_location=="ms"')

baseline_ii = once_a_gridcell.query('baseline and Hz30')
stimulated_30 = once_a_gridcell.query('stimulated and frequency==30 and stim_location=="ms"')

print("Number of gridcells in baseline i sessions", len(baseline_i))
print("Number of gridcells in stimulated 11Hz ms sessions", len(stimulated_11))

print("Number of gridcells in baseline ii sessions", len(baseline_ii))
print("Number of gridcells in stimulated 30Hz ms sessions", len(stimulated_30))
Number of gridcells in baseline i sessions 66
Number of gridcells in stimulated 11Hz ms sessions 61
Number of gridcells in baseline ii sessions 56
Number of gridcells in stimulated 30Hz ms sessions 40
In [196]:
baseline_ids = baseline_i.unit_day.unique()
In [197]:
baseline_ids
Out[197]:
array(['30_260619', '31_260619', '32_260619', '78_260619', '79_260619',
       '150_260619', '205_260619', '243_260619', '263_260619',
       '265_260619', '45_010719', '46_010719', '47_010719', '49_010719',
       '96_010719', '118_010719', '121_010719', '185_010719',
       '186_010719', '106_050619', '168_050619', '231_050619',
       '232_050619', '233_050619', '379_150319', '609_120619',
       '658_120619', '615_290519', '616_290519', '666_290519',
       '667_290519', '179_290519', '214_290519', '278_290519',
       '279_290519', '317_290519', '613_200619', '661_200619',
       '361_010319', '362_010319', '168_120619', '233_120619',
       '243_120619', '851_060319', '357_220319', '358_220319',
       '359_220319', '332_060319', '338_060319', '655_060619',
       '715_110319', '8_020719', '56_020719', '57_020719', '58_020719',
       '129_020719', '130_020719', '132_020719', '23_200619',
       '174_200619', '250_200619', '251_200619', '252_200619',
       '253_200619', '304_200619', '932_280219'], dtype=object)
In [198]:
stimulated_11_sub = stimulated_11[stimulated_11.unit_day.isin(baseline_ids)]
In [199]:
baseline_ids_11 = stimulated_11_sub.unit_day.unique()
In [200]:
baseline_i_sub = baseline_i[baseline_i.unit_day.isin(baseline_ids_11)]

Plotting

In [201]:
# Analysis parameters shared by the data loader and the plotting below.
max_speed = .5 # m/s only used for speed score
min_speed = 0.02 # m/s only used for speed score
position_sampling_rate = 100 # Hz, for interpolation of tracking
position_low_pass_frequency = 6 # Hz, for low pass filtering of position

box_size = [1.0, 1.0]  # arena extent; presumably meters — confirm
bin_size = 0.02  # rate-map bin size, same unit as box_size
smoothing_low = 0.03  # rate-map smoothing widths (used elsewhere)
smoothing_high = 0.06

speed_binsize = 0.02

stim_mask = True  # passed to dp.Data; presumably masks stimulation periods — confirm
baseline_duration = 600  # passed to dp.Data; presumably seconds of baseline — confirm
In [202]:
data_loader = dp.Data(
    position_sampling_rate=position_sampling_rate, 
    position_low_pass_frequency=position_low_pass_frequency,
    box_size=box_size, bin_size=bin_size, 
    stim_mask=stim_mask, baseline_duration=baseline_duration
)
In [203]:
def find_grid_fields(rate_map, sigma=3, seed=2.5, min_field_area=9.0):
    """Detect grid fields in a rate map, keeping only fields found by two methods.

    Candidate fields are first labeled by dilation-based separation
    (``sp.fields.separate_fields_by_dilation``); fields smaller than
    ``min_field_area`` bins are discarded, and only fields whose label is also
    hit by a peak from the Ismakov distance-based method are kept.

    Parameters
    ----------
    rate_map : 2-D ndarray
        Smoothed firing-rate map.
    sigma, seed : float
        Parameters forwarded to ``separate_fields_by_dilation``.
    min_field_area : float
        Minimum field size in map bins (default 9.0, the original cutoff).

    Returns
    -------
    ndarray of int
        Labeled field image, same shape as ``rate_map``; 0 marks background.
    """
    # explicit import: the notebook's top-level cell only imports
    # scipy.stats/scipy.signal, so scipy.ndimage was previously reached
    # via the deprecated scipy.ndimage.measurements side-effect namespace
    import scipy.ndimage

    # find candidate fields with the laplace/dilation method
    fields_laplace = sp.fields.separate_fields_by_dilation(rate_map, sigma=sigma, seed=seed)
    fields = fields_laplace.copy()  # to be cleaned by the Ismakov method below

    # drop fields smaller than min_field_area bins
    fields_areas = scipy.ndimage.sum(
        np.ones_like(fields), fields, index=np.arange(fields.max() + 1))
    fields_area = fields_areas[fields]
    fields[fields_area < min_field_area] = 0

    # find field peaks with the Ismakov (distance-based) method
    fields_ismakov, radius = sp.separate_fields_by_distance(rate_map)

    # a field is approved if an Ismakov peak falls inside it
    approved_fields = []
    for point in fields_ismakov:
        field_id = fields[tuple(point)]
        approved_fields.append(field_id)

    # remove fields not found by both methods
    for field_id in np.arange(1, fields.max() + 1):
        if field_id not in approved_fields:
            fields[fields == field_id] = 0

    return fields
In [204]:
def get_data(row):
    """Load everything needed to analyze one unit's spikes against its fields.

    Parameters
    ----------
    row : namedtuple/Series
        Must carry ``action``, ``channel_group`` and ``unit_name``.

    Returns
    -------
    (spikes, pos_x, pos_y, pos_t, rate_map, fields, stim_times)
        ``spikes`` is restricted to the tracked time interval;
        ``stim_times`` is an array, or None if the session was not stimulated.
    """
    tracking = data_loader.tracking(row.action)
    pos_x, pos_y, pos_t, pos_speed = (tracking.get(key) for key in ['x', 'y', 't', 'v'])

    stim_times = data_loader.stim_times(row.action)
    if stim_times is not None:
        stim_times = np.array(stim_times)

    rate_map = data_loader.rate_map(row.action, row.channel_group, row.unit_name, smoothing=0.04)

    # keep only spikes that fall inside the tracked time interval, so
    # position interpolation downstream never extrapolates
    spike_train = np.array(data_loader.spike_train(row.action, row.channel_group, row.unit_name))
    spikes = spike_train[(spike_train > pos_t.min()) & (spike_train < pos_t.max())]

    fields = find_grid_fields(rate_map)

    return spikes, pos_x, pos_y, pos_t, rate_map, fields, stim_times
In [205]:
def compute_field_spikes(row, plot=False, z1=5e-3, z2=11e-3, surrogate_fields=None):
    """Classify a unit's spikes as inside/outside its grid fields.

    Parameters
    ----------
    row : namedtuple/Series
        Identifies one unit (``action``, ``channel_group``, ``unit_name``;
        ``unit_idnum`` and ``gridness`` are also read when plot=True).
    plot : bool
        If True, draw field mask with classified spikes, rate map, and
        trajectory in a 1x3 figure.
    z1, z2 : float
        Window (seconds) after each stimulation pulse from which
        "stim spikes" are collected; defaults 5-11 ms.
    surrogate_fields : ndarray or None
        If given, replaces the fields detected from this unit's own rate map
        (e.g. to score one session's spikes against another session's fields).

    Returns
    -------
    (fields, in_field_indices, stim_in_field_indices)
        ``in_field_indices``: boolean array over all spikes, True if in a field.
        ``stim_in_field_indices``: boolean array over stim-window spikes;
        NOTE(review): remains an empty *list* when the session has no stim
        times, so callers using ``.mean()`` on it must guard for that case.
    """
    spikes, pos_x, pos_y, pos_t, rate_map, fields, stim_times = get_data(row)
    if surrogate_fields is not None:
        fields = surrogate_fields
#     if field_num is not None:
#         fields = np.where(fields == field_num, fields, 0)

    if plot:
        fig, axs = plt.subplots(1, 3, figsize=(16,9))
        axs[1].set_title(f'{row.action} {row.channel_group} {row.unit_idnum}, G={row.gridness:.3f}')
        dot_size = 10

    # interpolators mapping a spike time to the animal's x/y position
    sx, sy = interp1d(pos_t, pos_x), interp1d(pos_t, pos_y)

    stim_spikes = []
    stim_in_field_indices = []
    if stim_times is not None:
        # collect spikes in the (z1, z2] window after each stimulation pulse
        for t in stim_times:
            idx = np.searchsorted(spikes, [t + z1, t + z2], side='right')
            tmp_spikes = spikes[idx[0]: idx[1]].tolist()
            stim_spikes.extend(tmp_spikes)
        stim_spikes_x = sx(stim_spikes)
        stim_spikes_y = sy(stim_spikes)
        stim_in_field_indices = which_field(stim_spikes_x, stim_spikes_y, fields, box_size).astype(bool)

#         stim_ids_ = []
#         stim_spikes_ = []
#         for i, t in enumerate(stim_times):
#             idx = np.searchsorted(spikes, [t, t + 30e-3], side='right')
#             tmp_spikes = (spikes[idx[0]: idx[1]] - t).tolist()
#             stim_ids_.extend([i] * len(tmp_spikes))
#             stim_spikes_.extend(tmp_spikes)

#         plt.scatter(stim_spikes_, stim_ids_, s=1)
#         plt.axvspan(z1, z2, color='r', alpha=.3)

    # classify every spike of the session against the field map
    spikes_x = sx(spikes)
    spikes_y = sy(spikes)
    in_field_indices = which_field(spikes_x, spikes_y, fields, box_size).astype(bool)

    if plot:
        # left panel: field mask with in-field (red), out-of-field (blue)
        # and stim-window (orange) spikes overlaid
        axs[0].imshow(
            fields.T.astype(bool), extent=[0, box_size[0], 0, box_size[1]], 
            origin='lower', cmap=plt.cm.Greys, zorder=0)
        axs[0].scatter(
            spikes_x[in_field_indices], spikes_y[in_field_indices], 
            s=dot_size, color='r', zorder=1)
        axs[0].scatter(
            spikes_x[~in_field_indices], spikes_y[~in_field_indices], 
            s=dot_size, color='b', zorder=1)
        if stim_times is not None:
            axs[0].scatter(
                stim_spikes_x, stim_spikes_y,
                s=dot_size, color='orange', zorder=1)
        # Display the image and plot all contours found
        contours = measure.find_contours(fields, 0.0)
        axs[1].imshow(rate_map.T, extent=[0, box_size[0], 0, box_size[1]], origin='lower')

        # right panel: full trajectory with all spike positions
        axs[2].plot(pos_x, pos_y, color='k', alpha=.2, zorder=0)
        axs[2].scatter(
            interp1d(pos_t, pos_x)(spikes), interp1d(pos_t, pos_y)(spikes), 
            s=dot_size, zorder=1)

        # field outlines on the rate-map and trajectory panels
        for ax in axs.ravel()[1:]:
            for n, contour in enumerate(contours):
                ax.plot(
                    contour[:, 0] * bin_size, contour[:, 1] * bin_size, 
                    lw=4, color='y', zorder=3)

        for ax in axs.ravel():
            ax.axis('image')
            ax.set_xticks([])
            ax.set_yticks([])
    return fields, in_field_indices, stim_in_field_indices
In [342]:
def plot_stim_field_spikes(row, t1=0, t2=30, z1_base=0, z2_base=5, z1_stim=5, z2_stim=11, colors=['k','r']):
    """Raster + spatial plot of spikes around stimulation pulses for one unit.

    Left panel: per-pulse raster of spike times in the (t1, t2] ms window
    after each pulse, with the reference window (z1_base, z2_base] and the
    response window (z1_stim, z2_stim] highlighted in ``colors``, plus a
    rescaled KDE of pulse-relative spike times.
    Right panel: spatial positions of the highlighted spikes over the
    trajectory, with grid-field contours.

    Note: all window parameters here are in *milliseconds* (spike/position
    times are scaled by 1000 below), unlike compute_field_spikes which uses
    seconds.
    NOTE(review): the ``colors`` parameter shadows ``matplotlib.colors``
    imported at the top of the notebook, and a mutable default list is used;
    both are harmless here since the list is never mutated.
    """
    spikes, pos_x, pos_y, pos_t, rate_map, fields, stim_times = get_data(row)
    # convert seconds -> milliseconds
    spikes = np.array(spikes) * 1000
    pos_t = np.array(pos_t) * 1000
    stim_times = np.array(stim_times) * 1000
    
    fig, axs = plt.subplots(1, 2)
    dot_size = 2
        
    # interpolators mapping a spike time (ms) to x/y position
    sx, sy = interp1d(pos_t, pos_x), interp1d(pos_t, pos_y)
    
    stim_spikes_base = []        # absolute times (for position lookup)
    stim_spikes_base_plot = []   # pulse-relative times (raster x-values)
    stim_ids_base = []           # pulse index per spike (raster y-values)
    stim_spikes_stim = []
    stim_spikes_stim_plot = []
    stim_ids_stim = []
    stim_ids_all = []
    stim_spikes_all = []
    for i, t in enumerate(stim_times):
        # reference window after this pulse
        idx = np.searchsorted(spikes, [t + z1_base, t + z2_base], side='right')
        tmp_spikes = spikes[idx[0]: idx[1]] - t
        stim_ids_base.extend([i] * len(tmp_spikes))
        stim_spikes_base_plot.extend(tmp_spikes)
        stim_spikes_base.extend(spikes[idx[0]: idx[1]].tolist())
        
        # response window after this pulse
        idx = np.searchsorted(spikes, [t + z1_stim, t + z2_stim], side='right')
        tmp_spikes = spikes[idx[0]: idx[1]] - t
        stim_ids_stim.extend([i] * len(tmp_spikes))
        stim_spikes_stim_plot.extend(tmp_spikes)
        stim_spikes_stim.extend(spikes[idx[0]: idx[1]].tolist())
        
        # full display window
        idx = np.searchsorted(spikes, [t + t1, t + t2], side='right')
        tmp_spikes = (spikes[idx[0]: idx[1]] - t).tolist()
        stim_ids_all.extend([i] * len(tmp_spikes))
        stim_spikes_all.extend(tmp_spikes)
        
        
    stim_spikes_base_x = sx(stim_spikes_base)
    stim_spikes_base_y = sy(stim_spikes_base)
#     stim_in_field_indices_base = which_field(
#         stim_spikes_base_x, stim_spikes_base_y, fields, box_size).astype(bool)
    
    stim_spikes_stim_x = sx(stim_spikes_stim)
    stim_spikes_stim_y = sy(stim_spikes_stim)
#     stim_in_field_indices_stim = which_field(
#         stim_spikes_stim_x, stim_spikes_stim_y, fields, box_size).astype(bool)


    axs[0].scatter(stim_spikes_all, stim_ids_all, s=dot_size, color='k', alpha=.5)
    axs[0].scatter(stim_spikes_base_plot, stim_ids_base, s=dot_size, color=colors[0], alpha=.8)
    axs[0].scatter(stim_spikes_stim_plot, stim_ids_stim, s=dot_size, color=colors[1], alpha=.8)
    
    # KDE of pulse-relative spike times, min-max rescaled to the raster height
    times = np.arange(t1, t2, .1)
    from scipy.stats import gaussian_kde
    kernel = gaussian_kde(stim_spikes_all, 0.1)
    pdf = kernel(times)
    m = max(stim_ids_all)
    pdf = (pdf - pdf.min()) / (pdf - pdf.min()).max() * m
    axs[0].plot(times, pdf, 'k', lw=1)
    axs[0].set_xlim(t1, t2)
#     ax.plot(0, len(trials) * 1.1, ls='none', marker='v', color='k', markersize=5)
#     axs[0].axvspan(0, 5, color='#43a2ca', alpha=.3, zorder=-5)

    contours = measure.find_contours(fields, 0.0)

    axs[1].scatter(
        stim_spikes_base_x, stim_spikes_base_y,
        s=dot_size, color=colors[0], zorder=1, alpha=.8)
    
    axs[1].scatter(
        stim_spikes_stim_x, stim_spikes_stim_y,
        s=dot_size, color=colors[1], zorder=1, alpha=.8)

    axs[1].plot(pos_x, pos_y, color='k', alpha=.2, zorder=0)

    # field outlines in spatial coordinates (contours are in bin units)
    for n, contour in enumerate(contours):
        axs[1].plot(
            contour[:, 0] * bin_size, contour[:, 1] * bin_size, 
            lw=1, color='k', zorder=3)
    axs[0].set_aspect((t2 - t1) / len(stim_times))
    axs[1].axis('image')
    axs[1].set_xticks([])
    axs[1].set_yticks([])
    despine(axs[0])
    despine(axs[1], left=True, bottom=True)
In [307]:
 compute_field_spikes(baseline_i.sort_values('gridness', ascending=False).iloc[18], plot=True)
Out[307]:
(array([[9, 9, 9, ..., 6, 6, 6],
        [9, 9, 9, ..., 6, 6, 6],
        [0, 0, 0, ..., 6, 6, 6],
        ...,
        [0, 0, 0, ..., 0, 0, 0],
        [0, 0, 0, ..., 0, 0, 0],
        [0, 0, 0, ..., 0, 0, 0]], dtype=int32),
 array([False, False, False, ..., False, False, False]),
 [])
In [207]:
iter_base = baseline_i_sub.sort_values('unit_day', ascending=False).itertuples()
iter_stim = stimulated_11_sub.sort_values('unit_day', ascending=False).itertuples()
for row_base, row_stim in zip(iter_base, iter_stim):
    fields,_,_ = compute_field_spikes(row_base, plot=True)
    compute_field_spikes(row_stim, plot=True)#, surrogate_fields=fields)
/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/ipykernel_launcher.py:9: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  if __name__ == '__main__':
In [186]:
iter_base = baseline_i_sub.sort_values('unit_day', ascending=False).itertuples()
iter_stim = stimulated_11_sub.sort_values('unit_day', ascending=False).itertuples()
for row_base, row_stim in zip(iter_base, iter_stim):
    fields,_,_ = compute_field_spikes(row_base, plot=True)
    compute_field_spikes(row_stim, plot=True)#, surrogate_fields=fields)
/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/ipykernel_launcher.py:9: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  if __name__ == '__main__':

Analysis within stimulated sessions: reference window vs. stimulus-response window

In [343]:
plt.rc('axes', titlesize=12)
plt.rcParams.update({
    'font.size': 12, 
    'figure.figsize': (5, 2), 
    'figure.dpi': 150
})

plot_stim_field_spikes(
    stimulated_11.sort_values('gridness', ascending=False).iloc[18],
    colors=['#2166ac', '#b2182b']#['#1b9e77','#d95f02']
)
fig = plt.gcf()
figname = 'stim_field_spikes_example'
fig.savefig(
    output_path / 'figures' / f'{figname}.png', 
    bbox_inches='tight', transparent=True)
fig.savefig(
    output_path / 'figures' / f'{figname}.svg', 
    bbox_inches='tight', transparent=True)
In [254]:
# Within each 11 Hz stimulated session, compare spikes in the early window
# after each pulse (0-5 ms, "base") with those in the response window
# (5-11 ms, "stim").
results_stim_stim_11 = []
z1_stim=5e-3  # s after pulse: start of response window
z2_stim=11e-3  # s after pulse: end of response window
z1_base=0  # s after pulse: start of reference window
z2_base=5e-3  # s after pulse: end of reference window
for row_stim in stimulated_11.itertuples():
    _, _, base_in_field = compute_field_spikes(
        row_stim, z1=z1_base, z2=z2_base)
    _, _, stim_in_field = compute_field_spikes(
        row_stim, z1=z1_stim, z2=z2_stim)
    
    # NOTE(review): 1 - mean(in-field) is the fraction of spikes *outside*
    # a field, despite the '..._in_field' key names — confirm intended.
    results_stim_stim_11.append({
        'base_in_field': 1 - base_in_field.mean(),
        'stim_in_field': 1 - stim_in_field.mean()
    })
#     break
In [257]:
results_stim_stim_11 = pd.DataFrame(results_stim_stim_11)
results_stim_stim_11
Out[257]:
base_in_field stim_in_field
0 0.615038 0.678867
1 0.620690 0.727273
2 0.662100 0.739712
3 0.437500 0.699267
4 0.728571 0.741722
... ... ...
56 0.283505 0.316288
57 0.596752 0.638956
58 0.311111 0.359195
59 0.558739 0.601518
60 0.552529 0.629842

61 rows × 2 columns

In [255]:
# Same comparison as for the 11 Hz sessions, applied to 30 Hz sessions.
# NOTE(review): this cell is a copy-paste of the 11 Hz cell; consider
# factoring the loop into a function taking the session frame.
results_stim_stim_30 = []
z1_stim=5e-3  # s after pulse: start of response window
z2_stim=11e-3  # s after pulse: end of response window
z1_base=0  # s after pulse: start of reference window
z2_base=5e-3  # s after pulse: end of reference window
for row_stim in stimulated_30.itertuples():
    _, _, base_in_field = compute_field_spikes(
        row_stim, z1=z1_base, z2=z2_base)
    _, _, stim_in_field = compute_field_spikes(
        row_stim, z1=z1_stim, z2=z2_stim)
    
    # NOTE(review): 1 - mean(in-field) is the fraction of spikes *outside*
    # a field, despite the '..._in_field' key names — confirm intended.
    results_stim_stim_30.append({
        'base_in_field': 1 - base_in_field.mean(),
        'stim_in_field': 1 - stim_in_field.mean()
    })
#     break
In [258]:
results_stim_stim_30 = pd.DataFrame(results_stim_stim_30)
results_stim_stim_30
Out[258]:
base_in_field stim_in_field
0 0.444444 0.400000
1 0.220588 0.321023
2 0.511504 0.579270
3 0.640244 0.636975
4 0.370968 0.378876
5 0.687144 0.675090
6 0.509735 0.528025
7 0.535645 0.536465
8 0.391549 0.433108
9 0.524590 0.655920
10 0.568627 0.543882
11 0.574074 0.648464
12 0.522109 0.571429
13 0.555556 0.599616
14 0.188235 0.310062
15 0.325301 0.510780
16 0.685393 0.736201
17 0.565046 0.621489
18 0.457364 0.553879
19 0.485961 0.522303
20 0.402010 0.466478
21 0.329670 0.273570
22 0.455128 0.426690
23 0.421053 0.421569
24 0.484375 0.461538
25 0.736328 0.723063
26 0.602041 0.472362
27 0.570312 0.699342
28 0.292683 0.357735
29 0.517162 0.671468
30 0.187500 0.328277
31 0.562500 0.560521
32 0.540453 0.692541
33 0.612903 0.649031
34 0.537879 0.569832
35 0.525164 0.579110
36 0.317073 0.383289
37 0.354167 0.363636
38 0.458333 0.446855
39 0.526316 0.660650
In [336]:
results_stim_stim_all = pd.concat([results_stim_stim_11, results_stim_stim_30])
In [339]:
plt.rc('axes', titlesize=12)
plt.rcParams.update({
    'font.size': 12, 
    'figure.figsize': (1.7, 3), 
    'figure.dpi': 150
})

fig = plt.figure()
violinplot(
    results_stim_stim_all.base_in_field, 
    results_stim_stim_all.stim_in_field, 
    colors=None,
    test='wilcoxon'
)
figname = 'stim_field_spikes_combined'
fig.savefig(
    output_path / 'figures' / f'{figname}.png', 
    bbox_inches='tight', transparent=True)
fig.savefig(
    output_path / 'figures' / f'{figname}.svg', 
    bbox_inches='tight', transparent=True)

# 11
fig = plt.figure()
violinplot(
    results_stim_stim_11.base_in_field, 
    results_stim_stim_11.stim_in_field, 
    colors=['#1b9e77','#d95f02'],
    test='wilcoxon'
)
figname = 'stim_field_spikes_11'
fig.savefig(
    output_path / 'figures' / f'{figname}.png', 
    bbox_inches='tight', transparent=True)
fig.savefig(
    output_path / 'figures' / f'{figname}.svg', 
    bbox_inches='tight', transparent=True)
    
# 30
fig = plt.figure()
violinplot(
    results_stim_stim_30.base_in_field, 
    results_stim_stim_30.stim_in_field, 
    colors=['#7570b3', '#e7298a'],
    test='wilcoxon'
)
figname = 'stim_field_spikes_30'
fig.savefig(
    output_path / 'figures' / f'{figname}.png', 
    bbox_inches='tight', transparent=True)
fig.savefig(
    output_path / 'figures' / f'{figname}.svg', 
    bbox_inches='tight', transparent=True)
    
In [115]:
 

Is the fraction higher in the reference window than in the response window?

In [247]:
wilcoxon(results_stim_stim_11.base_in_field - results_stim_stim_11.stim_in_field)
Out[247]:
WilcoxonResult(statistic=35.0, pvalue=6.155288245645252e-11)
In [248]:
wilcoxon(results_stim_stim_11.base_in_field - results_stim_stim_11.stim_in_field, alternative='less')
Out[248]:
WilcoxonResult(statistic=35.0, pvalue=3.077644122822626e-11)
In [ ]:
 
In [249]:
(results_stim_stim_11.base_in_field - results_stim_stim_11.stim_in_field).hist()
Out[249]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fbf3ae7fb70>
In [251]:
(results_stim_stim_11.base_in_field - results_stim_stim_11.stim_in_field).median()
Out[251]:
-0.0628016121809225

Analysis: baseline sessions vs. 11 Hz stimulated sessions

In [188]:
iter_base = baseline_i_sub.sort_values('unit_day', ascending=False).itertuples()
iter_stim = stimulated_11_sub.sort_values('unit_day', ascending=False).itertuples()
results = []
z1=5e-3
z2=11e-3
# z1=0
# z2=5e-3
for row_base, row_stim in zip(iter_base, iter_stim):
    base_fields, base_in_field, _ = compute_field_spikes(
        row_base, z1=z1, z2=z2)
    stim_fields, stim_in_field, stim_stim_in_field = compute_field_spikes(
        row_stim, z1=z1, z2=z2)
    results.append({
        'base_in_field': base_in_field.mean(),
        'stim_in_field': stim_in_field.mean(), 
        'stim_stim_in_field': stim_stim_in_field.mean(),
        'gridness_base': row_base.gridness,
        'gridness_stim': row_stim.gridness,
        'action_base': row_base.action,
        'action_stim': row_stim.action
    })
#     break
In [189]:
results = pd.DataFrame(results)
results
Out[189]:
base_in_field stim_in_field stim_stim_in_field gridness_base gridness_stim action_base action_stim
0 0.414166 0.428529 0.361044 0.339934 -0.045053 1833-010719-1 1833-010719-2
1 0.404667 0.477324 0.419231 0.401503 -0.179613 1833-260619-1 1833-260619-2
2 0.280399 0.384092 0.321133 0.557819 0.917221 1839-060619-1 1839-060619-3
3 0.815385 0.745888 0.657431 0.532037 0.397104 1833-010719-1 1833-010719-2
4 0.481623 0.399033 0.300733 0.282903 0.001879 1834-220319-1 1834-220319-2
5 0.734889 0.681776 0.611208 0.776775 0.869214 1833-260619-1 1833-260619-2
6 0.280964 0.449841 0.416287 0.317565 0.344708 1833-200619-1 1833-200619-2
7 0.739768 0.729892 0.676768 1.101447 0.976654 1833-200619-1 1833-200619-2
8 0.599141 0.549244 0.487360 0.215937 -0.201297 1833-260619-1 1833-260619-2
9 0.826862 0.732639 0.663934 0.855641 0.554912 1833-120619-1 1833-120619-2
10 0.477416 0.392704 0.317717 0.298222 -0.087079 1833-260619-1 1833-260619-2
11 0.560374 0.543610 0.511349 0.977155 -0.058795 1833-020719-1 1833-020719-2
12 0.858028 0.624060 0.640805 0.941494 0.342802 1833-010719-1 1833-010719-2
13 0.767867 0.687664 0.667808 0.487981 0.199693 1833-050619-1 1833-050619-2
In [115]:
 

more in baseline than response

In [190]:
wilcoxon(results.base_in_field - results.stim_stim_in_field)
Out[190]:
WilcoxonResult(statistic=12.0, pvalue=0.011007912955186742)
In [191]:
wilcoxon(results.base_in_field - results.stim_stim_in_field, alternative='greater')
Out[191]:
WilcoxonResult(statistic=93.0, pvalue=0.005503956477593371)
In [192]:
(results.base_in_field - results.stim_stim_in_field).hist()
Out[192]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fbf3a9ee780>
In [153]:
(results.base_in_field - results.stim_stim_in_field).median()
Out[153]:
0.09670222033391301

Is the fraction higher in baseline than over the whole stimulated session?

In [150]:
wilcoxon(results.base_in_field - results.stim_in_field)
Out[150]:
WilcoxonResult(statistic=96.0, pvalue=0.32207523283525596)
In [151]:
wilcoxon(results.base_in_field - results.stim_in_field, alternative='greater')
Out[151]:
WilcoxonResult(statistic=157.0, pvalue=0.16103761641762798)
In [152]:
(results.base_in_field - results.stim_in_field).hist()
Out[152]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fbf40cfb710>

Greater than chance level (0.5)?

In [143]:
wilcoxon(results.base_in_field - 0.5, alternative='greater') # most fall within the field
Out[143]:
WilcoxonResult(statistic=160.0, pvalue=0.13838602372323838)
In [144]:
(results.base_in_field - 0.5).hist()
Out[144]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fbf3bb414a8>
In [145]:
wilcoxon(results.stim_stim_in_field - 0.5, alternative='greater') # most fall within the field
Out[145]:
WilcoxonResult(statistic=101.0, pvalue=0.7961290925328811)
In [146]:
(results.stim_stim_in_field - 0.5).hist()
Out[146]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fbf3aa22c88>
In [139]:
plt.rc('axes', titlesize=12)
plt.rcParams.update({
    'font.size': 12, 
    'figure.figsize': (3.5, 3), 
    'figure.dpi': 150
})
In [142]:
fig, ax = plt.subplots(1,1)
sc = ax.scatter(
    results.base_in_field, results.stim_in_field,
    c=results.gridness_base
#     c=results.gridness_stim
)
ax.plot([0, 1], [0,1], 'k--')
plt.xlabel('Baseline percentage in field')
plt.ylabel('11 Hz percentage in field')
cb = plt.colorbar(mappable=sc, cax=None, ax=ax)
cb.ax.yaxis.set_ticks_position('right')
cb.set_label('Baseline gridness')
In [64]:
baseline_i.merge(stimulated_11, on='unit_day', suffixes=['_base', '_stim'])
Out[64]:
action_base baseline_base entity_base frequency_base i_base ii_base session_base stim_location_base stimulated_base tag_base ... information_rate_stim information_specificity_stim head_mean_ang_stim head_mean_vec_len_stim spacing_stim orientation_stim t_e_peak_stim p_e_peak_stim t_i_peak_stim p_i_peak_stim
0 1833-260619-1 True 1833 NaN True False 1 NaN False baseline i ... 1.066774 0.302404 5.539069 0.103552 0.342142 10.007980 NaN NaN NaN NaN
1 1833-260619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.553675 0.106860 5.284125 0.151640 0.361430 14.743563 NaN NaN NaN NaN
2 1833-260619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.914806 0.085579 5.650586 0.102010 0.477803 51.952957 NaN NaN NaN NaN
3 1833-260619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.681461 0.154506 2.601550 0.015570 0.343784 0.000000 NaN NaN NaN NaN
4 1833-010719-1 True 1833 NaN True False 1 NaN False baseline i ... 1.021932 0.349152 1.196446 0.137136 0.344403 13.240520 NaN NaN NaN NaN
5 1833-010719-1 True 1833 NaN True False 1 NaN False baseline i ... 0.735341 0.043588 3.418123 0.055051 0.405348 46.468801 NaN NaN NaN NaN
6 1833-010719-1 True 1833 NaN True False 1 NaN False baseline i ... 0.857698 0.269769 2.340344 0.137602 0.351311 13.240520 NaN NaN NaN NaN
7 1833-050619-1 True 1833 NaN True False 1 NaN False baseline i ... 1.442780 0.585854 2.321438 0.220139 0.387257 5.440332 NaN NaN NaN NaN
8 1833-050619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.862973 0.183897 0.069331 0.091981 0.366146 9.462322 NaN NaN NaN NaN
9 1833-050619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.873561 0.103053 3.668189 0.069408 0.434762 70.559965 NaN NaN NaN NaN
10 1833-050619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.678798 0.054802 3.726041 0.014519 0.355665 6.009006 NaN NaN NaN NaN
11 1834-010319-1 True 1834 NaN True False 1 NaN False baseline i ... 0.858656 1.359164 4.808760 0.175971 0.547521 39.093859 NaN NaN NaN NaN
12 1833-120619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.802834 0.424748 5.035940 0.132587 0.384357 8.972627 NaN NaN NaN NaN
13 1833-120619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.928796 0.151146 3.473257 0.042733 0.392784 11.309932 NaN NaN NaN NaN
14 1834-220319-1 True 1834 NaN True False 1 NaN False baseline i ... 1.174019 0.059398 4.815305 0.031014 0.590135 0.000000 NaN NaN NaN NaN
15 1834-220319-1 True 1834 NaN True False 1 NaN False baseline i ... 1.918954 0.627402 4.241643 0.074006 0.707647 29.054604 NaN NaN NaN NaN
16 1839-060619-1 True 1839 NaN True False 1 NaN False baseline i ... 0.897083 0.060246 0.891795 0.030190 0.491483 48.576334 NaN NaN NaN NaN
17 1849-110319-1 True 1849 NaN True False 1 NaN False baseline i ... 0.398943 0.146648 0.222076 0.102539 0.643598 26.565051 NaN NaN NaN NaN
18 1833-020719-1 True 1833 NaN True False 1 NaN False baseline i ... 0.876495 0.115399 1.012156 0.003392 0.356249 12.528808 NaN NaN NaN NaN
19 1833-020719-1 True 1833 NaN True False 1 NaN False baseline i ... 1.429178 0.107937 5.369824 0.095342 0.344101 9.462322 NaN NaN NaN NaN
20 1833-200619-1 True 1833 NaN True False 1 NaN False baseline i ... 1.192882 0.427188 0.039146 0.199068 0.390419 6.009006 NaN NaN NaN NaN
21 1833-200619-1 True 1833 NaN True False 1 NaN False baseline i ... 0.411013 0.037815 5.107094 0.029690 0.374918 6.009006 NaN NaN NaN NaN

22 rows × 87 columns

In [ ]: