diff --git a/actions/stimulus-spike-lfp-response/data/10-calculate-stimulus-spike-lfp-response.html b/actions/stimulus-spike-lfp-response/data/10-calculate-stimulus-spike-lfp-response.html index 10a5f0674..1c3723fcf 100644 --- a/actions/stimulus-spike-lfp-response/data/10-calculate-stimulus-spike-lfp-response.html +++ b/actions/stimulus-spike-lfp-response/data/10-calculate-stimulus-spike-lfp-response.html @@ -13115,7 +13115,7 @@ div#notebook {
%load_ext autoreload
@@ -13129,7 +13129,7 @@ div#notebook {
import matplotlib.pyplot as plt
@@ -13140,6 +13140,8 @@ div#notebook {
import expipe
import os
import pathlib
+import scipy
+import scipy.signal
import numpy as np
import exdir
import pandas as pd
@@ -13162,28 +13164,10 @@ div#notebook {
data_loader = dp.Data()
@@ -13198,7 +13182,7 @@ div#notebook {
output = pathlib.Path('output/stimulus-spike-lfp-response')
@@ -13212,7 +13196,7 @@ div#notebook {
identify_neurons = actions['identify-neurons']
@@ -13227,7 +13211,7 @@ div#notebook {
def get_lim(action_id):
@@ -13258,7 +13242,7 @@ div#notebook {
def signaltonoise(a, axis=0, ddof=0):
@@ -13266,16 +13250,6 @@ div#notebook {
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m / sd)
-
-
-def compute_clean_lfp(anas, width=500, threshold=2):
- anas = np.array(anas)
-
- for ch in range(anas.shape[1]):
- idxs, = np.where(abs(anas[:, ch]) > threshold)
- for idx in idxs:
- anas[idx-width:idx+width, ch] = 0 # TODO AR model prediction
- return anas
def compute_energy(p, f, f1, f2):
@@ -13303,7 +13277,7 @@ div#notebook {
def find_theta_peak(p, f, f1, f2):
@@ -13324,7 +13298,7 @@ div#notebook {
def compute_half_width(p, f, m_p, m_f):
@@ -13354,7 +13328,36 @@ div#notebook {
# p = np.load('debug_p.npy')
+# f = np.load('debug_f.npy')
+# compute_half_width(p, f, 0.01038941, 30.30187709636872)
+
# plt.plot(f, p)
+# plt.xlim(29.9,30.6)
+
def compute_stim_peak(p, f, s_f):
@@ -13367,81 +13370,16 @@ div#notebook {
def compute_spike_lfp(anas, sptr, t_start, t_stop, NFFT):
-
- t_start = t_start * pq.s if t_start is not None else 0 * pq.s
- sampling_rate = anas.sampling_rate
- units = anas.units
- if t_start is not None and t_stop is not None:
- t_stop = t_stop * pq.s
- mask = (anas.times > t_start) & (anas.times < t_stop)
- anas = np.array(anas)[mask,:]
-
- cleaned_anas = zscore(compute_clean_lfp(anas))
-
- cleaned_anas = neo.AnalogSignal(
- signal=cleaned_anas * units, sampling_rate=sampling_rate, t_start=t_start
- )
-
- sptr = neo.SpikeTrain(
- sptr.times[(sptr.times > t_start) & (sptr.times < cleaned_anas.times[-1])],
- t_start=t_start, t_stop=cleaned_anas.times[-1]
- )
-
- sigs, freqs = el.sta.spike_field_coherence(cleaned_anas, sptr, **{'nperseg': NFFT})
- return sigs, freqs, cleaned_anas
-
# action_id_0 = '1833-200619-1'
-# action_id_1 = '1833-200619-2'
-# lfp_0 = data_loader.lfp(action_id_0, 6)
-# lfp_1 = data_loader.lfp(action_id_1, 6)
-
-# lim_0 = get_lim(action_id_0)
-# lim_1 = get_lim(action_id_1)
-
-# sptrs_0 = data_loader.spike_trains(action_id_0, 6)
-# sptrs_1 = data_loader.spike_trains(action_id_1, 6)
-
-# coher_0, freq_0, clean_lfp_0 = compute_spike_lfp(lfp_0, sptrs_0[163], *lim_0, 4096)
-# coher_1, freq_1, clean_lfp_1 = compute_spike_lfp(lfp_1, sptrs_1[28], *lim_1, 4096)
-
-# best_channel_0 = np.argmax(signaltonoise(clean_lfp_0))
-# best_channel_1 = np.argmax(signaltonoise(clean_lfp_1))
-
-# plt.plot(freq_0, coher_0[:,best_channel_0])
-# plt.plot(freq_1, coher_1[:,best_channel_1])
-# plt.xlim(0,20)
-
NFFT = 4096*2
-theta_band_f1, theta_band_f2 = 6, 10
def compute_spike_lfp_coherence(anas, sptr, NFFT):
    """Spike-field coherence between an analog signal and a spike train.

    Thin wrapper around elephant's ``spike_field_coherence`` using Welch
    segments of length ``NFFT``.

    Parameters
    ----------
    anas : neo.AnalogSignal — presumably single channel here; confirm against callers.
    sptr : neo.SpikeTrain
    NFFT : int, Welch segment length passed as ``nperseg``.

    Returns
    -------
    (coherence, frequencies) as returned by elephant.
    """
    # pass nperseg directly instead of through an anonymous **{} dict
    sigs, freqs = el.sta.spike_field_coherence(anas, sptr, nperseg=NFFT)
    return sigs, freqs
def process(row):
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    Returns the ``(b, a)`` transfer-function coefficients of an
    ``order``-th order filter passing ``lowcut``..``highcut`` Hz for a
    signal sampled at ``fs`` Hz.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return scipy.signal.butter(order, band, btype='band')
+
+
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Zero-phase band-pass filter ``data`` between lowcut and highcut Hz.

    Uses ``filtfilt`` (forward-backward filtering) so the output has no
    phase lag relative to the input.
    """
    coeffs = butter_bandpass(lowcut, highcut, fs, order=order)
    return scipy.signal.filtfilt(*coeffs, data)
+
def compute_spike_phase_func(lfp, times, return_degrees=False):
    """Return an interpolator mapping time to instantaneous LFP phase.

    The phase is the angle of the analytic signal (Hilbert transform) of
    ``lfp``; radians by default, degrees when ``return_degrees`` is true.
    NOTE(review): this definition is shadowed by a later one in the file.
    """
    analytic = hilbert(lfp)
    phase = np.angle(analytic)
    if return_degrees:
        phase = phase * 180 / np.pi
    return interp1d(times, phase)
+
+
def vonmises_kde(data, kappa=100, n_bins=100):
    """Kernel-density estimate of circular data using von Mises kernels.

    Parameters
    ----------
    data : array-like of angles in radians.
    kappa : concentration of the von Mises kernel (larger = narrower).
    n_bins : number of evaluation points on [-pi, pi].

    Returns
    -------
    bins : evaluation angles on [-pi, pi].
    kde : density at ``bins``, normalized to integrate to 1.
    """
    from scipy.special import i0
    data = np.asarray(data)  # accept plain sequences as well as arrays
    # single evaluation grid (the original built the same linspace twice
    # as `bins` and `x`)
    bins = np.linspace(-np.pi, np.pi, n_bins)
    # sum of von Mises kernels centred at each data point
    kde = np.exp(kappa * np.cos(bins[:, None] - data[None, :])).sum(1)
    kde /= 2 * np.pi * i0(kappa)
    kde /= np.trapz(kde, x=bins)
    return bins, kde
+
+
def spike_phase_score(phase_bins, density):
    """Circular mean angle and resultant vector length of a phase density.

    ``density`` weights each angle in ``phase_bins``; the resultant
    vector length is a 0-1 measure of how concentrated the phase
    distribution is (phase locking strength).

    Returns (mean angle in radians, resultant vector length).
    """
    # removed unused `import math` from the original
    import pycircstat as pc
    ang = pc.mean(phase_bins, w=density)
    vec_len = pc.resultant_vector_length(phase_bins, w=density)
    # ci_lim = pc.mean_ci_limits(head_angle_bins, w=rate)
    return ang, vec_len
+
+
+
def compute_clean_lfp(anas, width=500, threshold=2):
    """Zero out stimulus artefacts in a 1-D LFP trace.

    Samples whose absolute value exceeds ``threshold`` are treated as
    artefacts; ``width`` samples on each side of every artefact sample
    are set to zero.

    Returns
    -------
    anas : cleaned copy of the signal (input is not modified).
    idxs : indices of the detected artefact samples.
    """
    anas = np.array(anas)
    idxs, = np.where(abs(anas) > threshold)
    for idx in idxs:
        # clamp the window start at 0: a negative start made the slice
        # empty in the original, leaving artefacts near the beginning
        # of the recording untouched
        start = max(idx - width, 0)
        anas[start:idx + width] = 0  # TODO AR model prediction
    return anas, idxs
+
+
def compute_clean_spikes(spikes, idxs, times, width=500):
    """Remove spikes that fall inside LFP artefact windows.

    For each artefact index in ``idxs`` the sample window
    [idx - width, idx + width] (clamped to the valid range of ``times``)
    is converted to a time interval, and any spike inside it is
    discarded.  Spikes occurring after the last LFP sample are dropped
    as well.

    Returns the filtered spike-time array.
    """
    n = len(times)
    for idx in idxs:
        # clamp both ends: a negative start index would wrap around and
        # pick a time near the END of the recording, silently producing
        # an empty removal window for artefacts near the start
        start = max(idx - width, 0)
        stop = min(idx + width, n - 1)
        t0 = times[start]
        t1 = times[stop]
        mask = (spikes > t0) & (spikes < t1)
        spikes = spikes[~mask]
    # spikes can be recorded after the LFP recording ended
    spikes = spikes[spikes <= times[-1]]
    return spikes
+
+
def prepare_spike_lfp(anas, sptr, t_start, t_stop):
    """Trim, channel-select, and artefact-clean an LFP with its spike train.

    Restricts ``anas`` (a neo.AnalogSignal with channels in columns) and
    ``sptr`` (a neo.SpikeTrain) to the interval [t_start, t_stop] (given
    in seconds), picks the channel with the highest signal-to-noise
    ratio, zeroes LFP artefacts, and discards spikes inside the artefact
    windows.

    Returns (cleaned AnalogSignal, trimmed SpikeTrain, best channel index).
    """

    # t_start=None is treated as 0 s; trimming only happens when BOTH
    # t_start and t_stop are given
    t_start = t_start * pq.s if t_start is not None else 0 * pq.s
    sampling_rate = anas.sampling_rate
    units = anas.units
    times = anas.times
    if t_start is not None and t_stop is not None:
        t_stop = t_stop * pq.s
        mask = (times > t_start) & (times < t_stop)
        anas = np.array(anas)[mask,:]
        times = times[mask]

    # channel with the largest mean/std ratio; see signaltonoise()
    best_channel = np.argmax(signaltonoise(anas))
#     best_channel = np.random.choice(anas.shape[1])

    # zero out artefact windows on the selected channel; idxs are the
    # artefact sample indices, reused below to clean the spike train
    cleaned_anas, idxs = compute_clean_lfp(anas[:, best_channel])

    cleaned_anas = neo.AnalogSignal(
        signal=cleaned_anas * units, sampling_rate=sampling_rate, t_start=t_start
    )

    spike_units = sptr.units
    spike_times = sptr.times
    # drop spikes falling inside the artefact windows found above
    spike_times = compute_clean_spikes(spike_times, idxs, times)

    # rebuild the spike train restricted to the trimmed LFP time range
    sptr = neo.SpikeTrain(
        spike_times[(spike_times > t_start) & (spike_times < times[-1])], units=spike_units,
        t_start=t_start, t_stop=times[-1]
    )

    return cleaned_anas, sptr, best_channel
+
def compute_spike_phase_func(lfp, times, return_degrees=False):
    """Build an interpolator from time to instantaneous LFP phase.

    The analytic signal is computed with a Hilbert transform padded to a
    fast FFT length (then truncated back) for speed.  Phase is in
    radians, or degrees when ``return_degrees`` is true.
    """
    from scipy.fftpack import next_fast_len
    n_samples = len(lfp)
    analytic = scipy.signal.hilbert(lfp, next_fast_len(n_samples))
    phase = np.angle(analytic[:n_samples], deg=return_degrees)
    return interp1d(times, phase)
+
+
def compute_spike_phase(lfp, spikes, flim=(6, 10)):
    """Instantaneous phase of the band-passed LFP at each spike time.

    Parameters
    ----------
    lfp : neo.AnalogSignal, single channel (magnitude is raveled).
    spikes : spike times in seconds; spikes outside the LFP time range
        are discarded first.
    flim : (low, high) band-pass edges in Hz.  Default changed from a
        mutable list to an equivalent tuple (mutable default arguments
        are shared across calls).

    Returns
    -------
    (phase at each kept spike, band-pass filtered LFP array).
    """
    sample_rate = lfp.sampling_rate.magnitude

    # sometimes the position is recorded after LFP recording is ended
    times = np.arange(lfp.shape[0]) / sample_rate

    spikes = np.array(spikes)
    spikes = spikes[(spikes > times.min()) & (spikes < times.max())]

    filtered_lfp = butter_bandpass_filter(
        lfp.magnitude.ravel(), *flim, fs=sample_rate, order=3)

    spike_phase_func = compute_spike_phase_func(filtered_lfp, times)

    return spike_phase_func(spikes), filtered_lfp
+
# plt.figure(figsize=(16,9))
+# # lfp = data_loader.lfp('1833-200619-2', 6)
+# lfp = data_loader.lfp('1834-220319-3', 6)
+# # lfp = data_loader.lfp('1849-010319-4', 6)
+# times = np.arange(lfp.shape[0]) / lfp.sampling_rate.magnitude
+# clean_lfp, _ = compute_clean_lfp(lfp.magnitude[:, 0], threshold=2)
+# plt.plot(times,lfp[:,0])
+# plt.plot(times,clean_lfp)
+
+# plt.figure(figsize=(16,9))
+# plt.psd(lfp[:,0].ravel(), Fs=1000, NFFT=10000)
+# plt.psd(clean_lfp, Fs=1000, NFFT=10000)
+# plt.xlim(0,100)
+
# plt.figure(figsize=(16,9))
+
+# plt.plot(times,lfp[:,0])
+# # plt.plot(clean_lfp*100)
+# plt.plot(times[:-1], np.diff(lfp[:,0].magnitude.ravel()))
+# plt.xlim(64.5,65.5)
+# # plt.ylim(-250,250)
+
# # action_id_0, channel_0, unit_0 = '1833-200619-1', 6, 163
+# # action_id_1, channel_1, unit_1 = '1833-200619-2', 6, 28
+# action_id_0, channel_0, unit_0 = '1834-220319-3', 2, 46
+# action_id_1, channel_1, unit_1 = '1834-220319-4', 2, 60
+# lfp_0 = data_loader.lfp(action_id_0, channel_0)
+# lfp_1 = data_loader.lfp(action_id_1, channel_1)
+
+# sample_rate_0 = lfp_0.sampling_rate
+# sample_rate_1 = lfp_1.sampling_rate
+
+# lim_0 = get_lim(action_id_0)
+# lim_1 = get_lim(action_id_1)
+
+# sptrs_0 = data_loader.spike_trains(action_id_0, channel_0)
+
+# sptrs_1 = data_loader.spike_trains(action_id_1, channel_1)
+
+# cleaned_lfps_0, sptr_0, best_channel_0 = prepare_spike_lfp(lfp_0, sptrs_0[unit_0], *lim_0)
+
+# cleaned_lfps_1, sptr_1, best_channel_1 = prepare_spike_lfp(lfp_1, sptrs_1[unit_1], *lim_1)
+
+# coher_0, freq_0 = compute_spike_lfp_coherence(cleaned_lfps_0, sptr_0, 4096)
+
+# coher_1, freq_1 = compute_spike_lfp_coherence(cleaned_lfps_1, sptr_1, 4096)
+
+# spike_phase_0, filtered_lfp_0 = compute_spike_phase(cleaned_lfps_0, sptrs_0[unit_0], flim=[6,10])
+
+# spike_phase_1, filtered_lfp_1 = compute_spike_phase(cleaned_lfps_1, sptrs_1[unit_1], flim=[6,10])
+
+# spike_phase_1_stim, filtered_lfp_1_stim = compute_spike_phase(cleaned_lfps_1, sptrs_1[unit_1], flim=[10.5,11.5])
+
+# plt.figure()
+# plt.plot(freq_0, coher_0.ravel())
+# plt.plot(freq_1, coher_1.ravel())
+# plt.xlim(0,20)
+
+# plt.figure()
+# bins_0, kde_0 = vonmises_kde(spike_phase_0, 100)
+# ang_0, vec_len_0 = spike_phase_score(bins_0, kde_0)
+# plt.polar(bins_0, kde_0, color='b')
+# plt.polar([ang_0, ang_0], [0, vec_len_0], color='b')
+
+# bins_1, kde_1 = vonmises_kde(spike_phase_1, 100)
+# ang_1, vec_len_1 = spike_phase_score(bins_1, kde_1)
+# plt.polar(bins_1, kde_1, color='r')
+# plt.polar([ang_1, ang_1], [0, vec_len_1], color='r')
+
+# bins_1_stim, kde_1_stim = vonmises_kde(spike_phase_1_stim, 100)
+# ang_1_stim, vec_len_1_stim = spike_phase_score(bins_1_stim, kde_1_stim)
+# plt.polar(bins_1_stim, kde_1_stim, color='k')
+# plt.polar([ang_1_stim, ang_1_stim], [0, vec_len_1_stim], color='k')
+
+
# TODO fix artefact stuff from phase precession
+
NFFT = 8192
+theta_band_f1, theta_band_f2 = 6, 10
+
coherence_data, freqency_data = {}, {}
+theta_kde_data, theta_bins_data = {}, {}
+stim_kde_data, stim_bins_data = {}, {}
+
+def process(row):
action_id = row['action']
channel_group = row['channel_group']
unit_name = row['unit_name']
+ name = f'{action_id}_{channel_group}_{unit_name}'
+
lfp = data_loader.lfp(action_id, channel_group) # TODO consider choosing strongest stim response
sptr = data_loader.spike_train(action_id, channel_group, unit_name)
lim = get_lim(action_id)
-
- p_xys, freq, clean_lfp = compute_spike_lfp(lfp, sptr, *lim, NFFT=NFFT)
- snls = signaltonoise(clean_lfp)
- best_channel = np.argmax(snls)
- snl = snls[best_channel]
- p_xy = p_xys[:,best_channel].magnitude
+ cleaned_lfp, sptr, best_channel = prepare_spike_lfp(lfp, sptr, *lim)
+
+ p_xys, freq = compute_spike_lfp_coherence(cleaned_lfp, sptr, NFFT=NFFT)
+
+ p_xy = p_xys.magnitude.ravel()
freq = freq.magnitude
theta_f, theta_p_max = find_theta_peak(p_xy, freq, theta_band_f1, theta_band_f2)
@@ -13483,6 +13725,12 @@ div#notebook {
theta_half_energy = compute_energy(p_xy, freq, theta_half_f1, theta_half_f2) # theta band 6 - 10 Hz
+ theta_spike_phase, _ = compute_spike_phase(cleaned_lfp, sptr, flim=[theta_band_f1, theta_band_f2])
+ theta_bins, theta_kde = vonmises_kde(theta_spike_phase)
+ theta_ang, theta_vec_len = spike_phase_score(theta_bins, theta_kde)
+ theta_kde_data.update({name: theta_kde})
+ theta_bins_data.update({name: theta_bins})
+
# stim
stim_freq = compute_stim_freq(action_id)
@@ -13494,8 +13742,19 @@ div#notebook {
stim_energy = compute_energy(p_xy, freq, stim_half_f1, stim_half_f2)
+ if np.isnan(stim_freq):
+ stim_spike_phase, stim_bins, stim_kde, stim_ang, stim_vec_len = [np.nan] * 5
+ else:
+ stim_spike_phase, _ = compute_spike_phase(cleaned_lfp, sptr, flim=[stim_freq - .5, stim_freq + .5])
+ stim_bins, stim_kde = vonmises_kde(stim_spike_phase)
+ stim_ang, stim_vec_len = spike_phase_score(stim_bins, stim_kde)
+ stim_kde_data.update({name: stim_kde})
+ stim_bins_data.update({name: stim_bins})
+
+ coherence_data.update({name: p_xy})
+ freqency_data.update({name: freq})
+
result = pd.Series({
- 'signal_to_noise': snl,
'best_channel': best_channel,
'theta_freq': theta_f,
'theta_peak': theta_p_max,
@@ -13504,12 +13763,16 @@ div#notebook {
'theta_half_f2': theta_half_f2,
'theta_half_width': theta_half_width,
'theta_half_energy': theta_half_energy,
+ 'theta_ang': theta_ang,
+ 'theta_vec_len': theta_vec_len,
'stim_freq': stim_freq,
'stim_p_max': stim_p_max,
'stim_half_f1': stim_half_f1,
'stim_half_f2': stim_half_f2,
'stim_half_width': stim_half_width,
- 'stim_energy': stim_energy
+ 'stim_energy': stim_energy,
+ 'stim_ang': stim_ang,
+ 'stim_vec_len': stim_vec_len
})
return result
results = units.merge(
@@ -13547,13 +13810,13 @@ div#notebook {
-
+
@@ -13565,8 +13828,9 @@ var element = $('#fdf01411-110c-4f09-8a74-86966ed09bad');
coher, freqs = {}, {}
-for i, row in tqdm(units.iterrows(), total=len(units)):
- action_id = row['action']
- channel_group = row['channel_group']
- unit_name = row['unit_name']
+# coher, freqs = {}, {}
+# for i, row in tqdm(units.iterrows(), total=len(units)):
+# action_id = row['action']
+# channel_group = row['channel_group']
+# unit_name = row['unit_name']
- name = f'{action_id}_{channel_group}_{unit_name}'
+# name = f'{action_id}_{channel_group}_{unit_name}'
- lfp = data_loader.lfp(action_id, channel_group) # TODO consider choosing strongest stim response
+# lfp = data_loader.lfp(action_id, channel_group) # TODO consider choosing strongest stim response
- sptr = data_loader.spike_train(action_id, channel_group, unit_name)
+# sptr = data_loader.spike_train(action_id, channel_group, unit_name)
- lim = get_lim(action_id)
+# lim = get_lim(action_id)
- p_xys, freq, clean_lfp = compute_spike_lfp(lfp, sptr, *lim, NFFT=NFFT)
+# p_xys, freq, clean_lfp = compute_spike_lfp(lfp, sptr, *lim, NFFT=NFFT)
- snls = signaltonoise(clean_lfp)
- best_channel = np.argmax(snls)
- snl = snls[best_channel]
- p_xy = p_xys[:,best_channel].magnitude
- freq = freq.magnitude
+# snls = signaltonoise(clean_lfp)
+# best_channel = np.argmax(snls)
+# snl = snls[best_channel]
+# p_xy = p_xys[:,best_channel].magnitude
+# freq = freq.magnitude
- coher.update({name: p_xy})
- freqs.update({name: freq})
+# coher.update({name: p_xy})
+# freqs.update({name: freq})
# Persist per-unit coherence spectra and phase KDEs for downstream analysis.
# The stale `coher.to_feather(...)` line was removed: `coher` is a plain
# dict (no .to_feather) and was superseded by `coherence_data`.
pd.DataFrame(coherence_data).to_feather(output / 'data' / 'coherence.feather')
# NOTE: the dict is (mis)spelled `freqency_data` where it is defined;
# `frequency_data` here was an unresolved name (NameError) — use the
# defined name so the save actually runs.
pd.DataFrame(freqency_data).to_feather(output / 'data' / 'freqs.feather')
pd.DataFrame(theta_kde_data).to_feather(output / 'data' / 'theta_kde.feather')
pd.DataFrame(theta_bins_data).to_feather(output / 'data' / 'theta_bins.feather')
pd.DataFrame(stim_kde_data).to_feather(output / 'data' / 'stim_kde.feather')
pd.DataFrame(stim_bins_data).to_feather(output / 'data' / 'stim_bins.feather')
%matplotlib inline
-plt.rc('axes', titlesize=12)
-plt.rcParams.update({
- 'font.size': 12,
- 'figure.figsize': (6, 4),
- 'figure.dpi': 150
-})
+#################################################
-output_path = pathlib.Path("output") / "stimulus-spike-lfp-response"
-(output_path / "statistics").mkdir(exist_ok=True, parents=True)
-(output_path / "figures").mkdir(exist_ok=True, parents=True)
-output_path.mkdir(exist_ok=True)
+lfp_location = ''
+# lfp_location = '-other-tetrode'
+# lfp_location = '-other-drive'
+
+##################################################
data_loader = dp.Data()
-actions = data_loader.actions
-project = data_loader.project
+%matplotlib inline
+plt.rc('axes', titlesize=12)
+plt.rcParams.update({
+ 'font.size': 12,
+ 'figure.figsize': (6, 4),
+ 'figure.dpi': 150
+})
+
+output_path = pathlib.Path("output") / ("stimulus-spike-lfp-response" + lfp_location)
+(output_path / "statistics").mkdir(exist_ok=True, parents=True)
+(output_path / "figures").mkdir(exist_ok=True, parents=True)
+output_path.mkdir(exist_ok=True)
identification_action = actions['identify-neurons']
-sessions = pd.read_csv(identification_action.data_path('sessions'))
-units = pd.read_csv(identification_action.data_path('units'))
-session_units = pd.merge(sessions, units, on='action')
+project_path = dp.project_path()
+project = expipe.get_project(project_path)
+actions = project.actions
lfp_action = actions['stimulus-spike-lfp-response']
-lfp_results = pd.read_csv(lfp_action.data_path('results'))
-
# lfp_results has old unit id's but correct on (action, unit_name, channel_group)
-lfp_results = lfp_results.drop('unit_id', axis=1)
-
stim_action = actions['stimulus-response']
-stim_results = pd.read_csv(stim_action.data_path('results'))
-
# lfp_results has old unit id's but correct on (action, unit_name, channel_group)
-stim_results = stim_results.drop('unit_id', axis=1)
-
statistics_action = actions['calculate-statistics']
-shuffling = actions['shuffling']
-
-statistics_results = pd.read_csv(statistics_action.data_path('results'))
-statistics_results = session_units.merge(statistics_results, how='left')
-quantiles_95 = pd.read_csv(shuffling.data_path('quantiles_95'))
-action_columns = ['action', 'channel_group', 'unit_name']
-data = pd.merge(statistics_results, quantiles_95, on=action_columns, suffixes=("", "_threshold"))
-
data['unit_day'] = data.apply(lambda x: str(x.unit_idnum) + '_' + x.action.split('-')[1], axis=1)
-
data = data.merge(lfp_results, how='left')
-
data = data.merge(stim_results, how='left')
-
waveform_action = actions['waveform-analysis']
-waveform_results = pd.read_csv(waveform_action.data_path('results')).drop('template', axis=1)
-
data = data.merge(waveform_results, how='left')
-
colors = ['#1b9e77','#d95f02','#7570b3','#e7298a']
-labels = ['Baseline I', '11 Hz', 'Baseline II', '30 Hz']
-queries = ['baseline and Hz11', 'frequency==11', 'baseline and Hz30', 'frequency==30']
-
data.bs = data.bs.astype(bool)
-
data.loc[data.eval('t_i_peak == t_i_peak and not bs'), 'ns_inhibited'] = True
-data.ns_inhibited.fillna(False, inplace=True)
-
-data.loc[data.eval('t_i_peak != t_i_peak and not bs'), 'ns_not_inhibited'] = True
-data.ns_not_inhibited.fillna(False, inplace=True)
-
# make baseline for inhibited vs not inhibited
-data.loc[data.unit_id.isin(data.query('ns_inhibited').unit_id.values), 'ns_inhibited'] = True
-data.loc[data.unit_id.isin(data.query('ns_not_inhibited').unit_id.values), 'ns_not_inhibited'] = True
-
query = (
- 'gridness > gridness_threshold and '
- 'information_rate > information_rate_threshold and '
- 'gridness > .2 and '
- 'average_rate < 25'
-)
-sessions_above_threshold = data.query(query)
-print("Number of sessions above threshold", len(sessions_above_threshold))
-print("Number of animals", len(sessions_above_threshold.groupby(['entity'])))
+data, labels, colors, queries = load_data_frames()
gridcell_sessions = data[data.unit_day.isin(sessions_above_threshold.unit_day.values)]
-print("Number of gridcells", gridcell_sessions.unit_idnum.nunique())
-print("Number of gridcell recordings", len(gridcell_sessions))
-print("Number of animals", len(gridcell_sessions.groupby(['entity'])))
-
gridcell_sessions
-
data.loc[:,'gridcell'] = np.nan
-data['gridcell'] = data.isin(gridcell_sessions)
-
-data.loc[data.eval('not gridcell and bs'), 'bs_not_gridcell'] = True
-data.bs_not_gridcell.fillna(False, inplace=True)
+lfp_action = actions['stimulus-spike-lfp-response' + lfp_location]
+lfp_results = pd.read_csv(lfp_action.data_path('results'))
data.query('baseline and Hz11 and gridcell').head()
+# lfp_results has old unit id's but correct on (action, unit_name, channel_group)
+lfp_results = lfp_results.drop('unit_id', axis=1)
density = True
-cumulative = True
-histtype = 'step'
-lw = 2
-bins = {
- 'theta_energy': np.arange(0, 2.1, .03),
- 'theta_peak': np.arange(0, 1, .03),
- 'theta_freq': np.arange(4, 12, .1),
- 'theta_half_width': np.arange(0, 5, .1)
-}
-xlabel = {
- 'theta_energy': 'Theta coherence energy',
- 'theta_peak': 'Theta peak coherence',
- 'theta_freq': '(Hz)',
- 'theta_half_width': '(Hz)',
-}
-results = {}
-for cell_type in ['gridcell', 'ns_inhibited', 'ns_not_inhibited']:
- results[cell_type] = {}
- for key in bins:
- results[cell_type][key] = pd.DataFrame()
- fig = plt.figure(figsize=(3.5,2.2))
- plt.suptitle(key + ' ' + cell_type)
- legend_lines = []
- for color, query, label in zip(colors, queries, labels):
- values = data.query(query + ' and ' + cell_type)[key]
- results[cell_type][key] = pd.concat([
- results[cell_type][key],
- values.rename(label).reset_index(drop=True)], axis=1)
- values.hist(
- bins=bins[key], density=density, cumulative=cumulative, lw=lw,
- histtype=histtype, color=color)
- legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, lw=lw, label=label))
- plt.xlabel(xlabel[key])
- plt.legend(
- handles=legend_lines,
- bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
- plt.tight_layout()
- plt.grid(False)
- plt.xlim(-0.05, bins[key].max() - bins[key].max()*0.02)
- despine()
- figname = f'spike-lfp-coherence-histogram-{key}-{cell_type}'.replace(' ', '-')
- fig.savefig(
- output_path / 'figures' / f'{figname}.png',
- bbox_inches='tight', transparent=True)
- fig.savefig(
- output_path / 'figures' / f'{figname}.svg',
- bbox_inches='tight', transparent=True)
+data = data.merge(lfp_results, how='left')
data['stim_strength'] = data.stim_p_max / data.theta_peak
@@ -14340,49 +13326,255 @@ Number of animals 4
density = True
-cumulative = True
-histtype = 'step'
-lw = 2
-bins = {
- 'stim_energy': np.arange(0, .4, .01),
- 'stim_half_width': np.arange(0, 1.5, .01),
- 'stim_p_max': np.arange(0, 1, .01),
- 'stim_strength': np.arange(0, 50, 1)
+keys = [
+ 'theta_energy',
+ 'theta_peak',
+ 'theta_freq',
+ 'theta_half_width',
+ 'theta_vec_len',
+ 'theta_ang',
+ 'stim_energy',
+ 'stim_half_width',
+ 'stim_p_max',
+ 'stim_strength',
+ 'stim_vec_len',
+ 'stim_ang'
+]
+
+
+
results, labels = make_paired_tables(data, keys)
+
results['gridcell']['theta_peak']
+
xlabel = {
+ 'theta_energy': 'Theta coherence energy',
+ 'theta_peak': 'Theta peak coherence',
+ 'theta_freq': '(Hz)',
+ 'theta_half_width': '(Hz)',
+ 'theta_vec_len': 'a.u.',
+ 'theta_ang': 'rad'
}
-xlabel = {
- 'stim_energy': 'Coherence energy',
- 'stim_half_width': '(Hz)',
- 'stim_p_max': 'Peak coherence',
- 'stim_strength': 'Ratio',
-}
-# key = 'theta_energy'
-# key = 'theta_peak'
for cell_type in ['gridcell', 'ns_inhibited', 'ns_not_inhibited']:
- for key in bins:
- results[cell_type][key] = pd.DataFrame()
- fig = plt.figure(figsize=(3.2,2.2))
+ for key in xlabel:
+ fig = plt.figure(figsize=(3.7,2.2))
plt.suptitle(key + ' ' + cell_type)
legend_lines = []
- for color, query, label in zip(colors[1::2], queries[1::2], labels[1::2]):
- values = data.query(query + ' and ' + cell_type)[key]
- results[cell_type][key] = pd.concat([
- results[cell_type][key],
- values.rename(label).reset_index(drop=True)], axis=1)
- values.hist(
- bins=bins[key], density=density, cumulative=cumulative, lw=lw,
- histtype=histtype, color=color)
- legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, lw=lw, label=label))
+ for color, label in zip(colors, labels):
+ legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, label=label))
+ sns.kdeplot(data=results[cell_type][key].loc[:,labels], cumulative=True, legend=False, palette=colors, common_norm=False)
plt.xlabel(xlabel[key])
plt.legend(
handles=legend_lines,
bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
plt.tight_layout()
plt.grid(False)
- plt.xlim(-0.05, bins[key].max() - bins[key].max()*0.02)
despine()
figname = f'spike-lfp-coherence-histogram-{key}-{cell_type}'.replace(' ', '-')
fig.savefig(
@@ -14409,7 +13601,7 @@ Number of animals 4
@@ -14424,7 +13616,7 @@ Number of animals 4
@@ -14439,7 +13631,7 @@ Number of animals 4
@@ -14454,7 +13646,7 @@ Number of animals 4
@@ -14469,7 +13661,7 @@ Number of animals 4
@@ -14484,7 +13676,7 @@ Number of animals 4
@@ -14499,7 +13691,7 @@ Number of animals 4
@@ -14514,7 +13706,7 @@ Number of animals 4
@@ -14529,7 +13721,7 @@ Number of animals 4
@@ -14544,7 +13736,7 @@ Number of animals 4
@@ -14559,7 +13751,7 @@ Number of animals 4
@@ -14574,7 +13766,418 @@ Number of animals 4
+
+
xlabel = {
+ 'stim_energy': 'Coherence energy',
+ 'stim_half_width': '(Hz)',
+ 'stim_p_max': 'Peak coherence',
+ 'stim_strength': 'Ratio',
+ 'stim_vec_len': 'a.u.',
+ 'stim_ang': 'rad'
+}
+# key = 'theta_energy'
+# key = 'theta_peak'
+for cell_type in ['gridcell', 'ns_inhibited', 'ns_not_inhibited']:
+ for key in xlabel:
+ fig = plt.figure(figsize=(3.3,2.2))
+ plt.suptitle(key + ' ' + cell_type)
+ legend_lines = []
+ for color, label in zip(colors[1::2], labels[1::2]):
+ legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, label=label))
+ sns.kdeplot(data=results[cell_type][key].loc[:,labels[1::2]], cumulative=True, legend=False, palette=colors[1::2], common_norm=False)
+ plt.xlabel(xlabel[key])
+ plt.legend(
+ handles=legend_lines,
+ bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
+ plt.tight_layout()
+ plt.grid(False)
+ despine()
+ figname = f'spike-lfp-coherence-histogram-{key}-{cell_type}'.replace(' ', '-')
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.png',
+ bbox_inches='tight', transparent=True)
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.svg',
+ bbox_inches='tight', transparent=True)
+
def summarize(data):
- return "{:.2f} ± {:.2f} ({})".format(data.mean(), data.sem(), sum(~np.isnan(data)))
-
-
-def MWU(df, keys):
- '''
- Mann Whitney U
- '''
- Uvalue, pvalue = scipy.stats.mannwhitneyu(
- df[keys[0]].dropna(),
- df[keys[1]].dropna(),
- alternative='two-sided')
-
- return "{:.2f}, {:.3f}".format(Uvalue, pvalue)
-
-
-def PRS(df, keys):
- '''
- Permutation ReSampling
- '''
- pvalue, observed_diff, diffs = permutation_resampling(
- df[keys[0]].dropna(),
- df[keys[1]].dropna(), statistic=np.median)
-
- return "{:.2f}, {:.3f}".format(observed_diff, pvalue)
-
-
-def rename(name):
- return name.replace("_field", "-field").replace("_", " ").capitalize()
+from septum_mec.analysis.statistics import VonMisesKDE
for paradigm in ['stim', 'theta']:
+ key = paradigm + '_vec_len'
+ for cell_type in ['gridcell', 'ns_inhibited', 'ns_not_inhibited']:
+ fig = plt.figure(figsize=(3.2,2.2))
+ plt.suptitle(key + ' ' + cell_type)
+ legend_lines = []
+ for color, query, label in zip(colors, queries, labels):
+ data_query = data.query(query + ' and ' + cell_type)
+ values = data_query[key].values
+ angles = data_query[paradigm + '_ang'].values
+ kde = VonMisesKDE(angles, weights=values, kappa=5)
+ bins = np.linspace(-np.pi, np.pi, 100)
+ plt.polar(bins, kde.evaluate(bins), color=color, lw=2)
+ plt.polar(angles, values, color=color, lw=1, ls='none', marker='.', markersize=2)
+# values.hist(
+# bins=bins[key], density=density, cumulative=cumulative, lw=lw,
+# histtype=histtype, color=color)
+ legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, lw=2, label=label))
+ plt.legend(
+ handles=legend_lines,
+ bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
+ plt.tight_layout()
+# plt.grid(False)
+ figname = f'spike-lfp-polar-plot-{paradigm}-{cell_type}'.replace(' ', '-')
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.png',
+ bbox_inches='tight', transparent=True)
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.svg',
+ bbox_inches='tight', transparent=True)
+
stats = {}
-for cell_type in results:
- stat = pd.DataFrame()
-
- for key, df in results[cell_type].items():
- Key = rename(key)
- stat[Key] = df.agg(summarize)
- stat[Key] = df.agg(summarize)
-
- for i, c1 in enumerate(df.columns):
- for c2 in df.columns[i+1:]:
- stat.loc[f'MWU {c1} {c2}', Key] = MWU(df, [c1, c2])
- stat.loc[f'PRS {c1} {c2}', Key] = PRS(df, [c1, c2])
-
- stats[cell_type] = stat
+for cell_type, result in results.items():
+ stats[cell_type], _ = make_statistics_table(result, labels)
for cell_type, cell_results in results.items():
+ for key, result in cell_results.items():
+ result.to_latex(output_path / "statistics" / f"values_{cell_type}_{key}.tex")
+ result.to_csv(output_path / "statistics" / f"values_{cell_type}_{key}.csv")
+
nsi_vs_nsni = {}
+for key in keys:
+ df = pd.DataFrame()
+ dfs = [results[k][key].loc[:, ['entity', 'unit_idnum', 'Baseline I']].rename({'Baseline I': k}, axis=1) for k in ['ns_inhibited', 'ns_not_inhibited']]
+ df = pd.merge(*dfs, on=['entity', 'unit_idnum'], how='outer')
+ nsi_vs_nsni[key] = df
+
nsi_vs_nsni.keys()
+
nsi_vs_nsni['theta_energy']
+
from septum_mec.analysis.statistics import LMM
+
LMM(nsi_vs_nsni['theta_energy'], 'ns_inhibited', 'ns_not_inhibited', 'theta_energy')
+
stat, stat_vals = make_statistics_table(nsi_vs_nsni, ['ns_inhibited', 'ns_not_inhibited'], wilcoxon_test=False)
+
stat
+
stat.to_latex(output_path / "statistics" / f"statistics_nsi_vs_nsni.tex")
+stat.to_csv(output_path / "statistics" / f"statistics_nsi_vs_nsni.csv")
+
action = project.require_action("stimulus-spike-lfp-response")
+action = project.require_action("stimulus-spike-lfp-response" + lfp_location)
\n", + " | entity | \n", + "unit_idnum | \n", + "channel_group | \n", + "date | \n", + "Baseline I | \n", + "11 Hz | \n", + "Baseline II | \n", + "30 Hz | \n", + "
---|---|---|---|---|---|---|---|---|
51 | \n", + "1833 | \n", + "8 | \n", + "0 | \n", + "20719 | \n", + "0.244278 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
85 | \n", + "1833 | \n", + "13 | \n", + "0 | \n", + "20719 | \n", + "NaN | \n", + "0.039465 | \n", + "0.279382 | \n", + "0.054633 | \n", + "
86 | \n", + "1833 | \n", + "14 | \n", + "0 | \n", + "20719 | \n", + "NaN | \n", + "0.022268 | \n", + "0.448472 | \n", + "NaN | \n", + "
58 | \n", + "1833 | \n", + "23 | \n", + "0 | \n", + "200619 | \n", + "0.280865 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
127 | \n", + "1833 | \n", + "26 | \n", + "0 | \n", + "200619 | \n", + "NaN | \n", + "NaN | \n", + "0.181703 | \n", + "0.038459 | \n", + "
... | \n", + "... | \n", + "... | \n", + "... | \n", + "... | \n", + "... | \n", + "... | \n", + "... | \n", + "... | \n", + "
139 | \n", + "1849 | \n", + "835 | \n", + "4 | \n", + "150319 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "0.692552 | \n", + "
43 | \n", + "1849 | \n", + "851 | \n", + "5 | \n", + "60319 | \n", + "0.662908 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
65 | \n", + "1849 | \n", + "932 | \n", + "7 | \n", + "280219 | \n", + "0.050995 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
74 | \n", + "1849 | \n", + "937 | \n", + "7 | \n", + "280219 | \n", + "NaN | \n", + "0.131422 | \n", + "NaN | \n", + "NaN | \n", + "
105 | \n", + "1849 | \n", + "939 | \n", + "7 | \n", + "280219 | \n", + "NaN | \n", + "NaN | \n", + "0.472020 | \n", + "NaN | \n", + "
137 rows × 8 columns
\n", + "\n", - " | action | \n", - "baseline | \n", - "entity | \n", - "frequency | \n", - "i | \n", - "ii | \n", - "session | \n", - "stim_location | \n", - "stimulated | \n", - "tag | \n", - "... | \n", - "t_i_peak | \n", - "p_i_peak | \n", - "half_width | \n", - "peak_to_trough | \n", - "average_firing_rate | \n", - "bs | \n", - "bs_stim | \n", - "bs_ctrl | \n", - "ns_inhibited | \n", - "ns_not_inhibited | \n", - "
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14 | \n", - "1839-120619-4 | \n", - "False | \n", - "1839 | \n", - "30.0 | \n", - "False | \n", - "True | \n", - "4 | \n", - "ms | \n", - "True | \n", - "stim ii | \n", - "... | \n", - "0.0087 | \n", - "0.000055 | \n", - "0.259757 | \n", - "0.362390 | \n", - "0.180529 | \n", - "False | \n", - "0.0 | \n", - "NaN | \n", - "True | \n", - "False | \n", - "
21 | \n", - "1839-120619-4 | \n", - "False | \n", - "1839 | \n", - "30.0 | \n", - "False | \n", - "True | \n", - "4 | \n", - "ms | \n", - "True | \n", - "stim ii | \n", - "... | \n", - "0.0008 | \n", - "0.000880 | \n", - "0.242524 | \n", - "0.534827 | \n", - "2.265039 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
29 | \n", - "1839-120619-4 | \n", - "False | \n", - "1839 | \n", - "30.0 | \n", - "False | \n", - "True | \n", - "4 | \n", - "ms | \n", - "True | \n", - "stim ii | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.279806 | \n", - "0.598967 | \n", - "10.924422 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
30 | \n", - "1839-120619-4 | \n", - "False | \n", - "1839 | \n", - "30.0 | \n", - "False | \n", - "True | \n", - "4 | \n", - "ms | \n", - "True | \n", - "stim ii | \n", - "... | \n", - "0.0005 | \n", - "0.002365 | \n", - "0.265158 | \n", - "0.581451 | \n", - "3.984881 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
31 | \n", - "1839-120619-4 | \n", - "False | \n", - "1839 | \n", - "30.0 | \n", - "False | \n", - "True | \n", - "4 | \n", - "ms | \n", - "True | \n", - "stim ii | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.246920 | \n", - "0.570844 | \n", - "3.497452 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "... | \n", - "
1263 | \n", - "1833-010719-2 | \n", - "False | \n", - "1833 | \n", - "11.0 | \n", - "True | \n", - "False | \n", - "2 | \n", - "ms | \n", - "True | \n", - "stim i | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.280033 | \n", - "0.560729 | \n", - "4.760330 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
1264 | \n", - "1833-010719-2 | \n", - "False | \n", - "1833 | \n", - "11.0 | \n", - "True | \n", - "False | \n", - "2 | \n", - "ms | \n", - "True | \n", - "stim i | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.281934 | \n", - "0.627089 | \n", - "15.890929 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
1268 | \n", - "1833-010719-2 | \n", - "False | \n", - "1833 | \n", - "11.0 | \n", - "True | \n", - "False | \n", - "2 | \n", - "ms | \n", - "True | \n", - "stim i | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.266512 | \n", - "0.594033 | \n", - "2.704037 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
1271 | \n", - "1833-010719-2 | \n", - "False | \n", - "1833 | \n", - "11.0 | \n", - "True | \n", - "False | \n", - "2 | \n", - "ms | \n", - "True | \n", - "stim i | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.223261 | \n", - "0.592553 | \n", - "9.658453 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
1275 | \n", - "1833-010719-2 | \n", - "False | \n", - "1833 | \n", - "11.0 | \n", - "True | \n", - "False | \n", - "2 | \n", - "ms | \n", - "True | \n", - "stim i | \n", - "... | \n", - "NaN | \n", - "NaN | \n", - "0.257098 | \n", - "0.545188 | \n", - "5.292658 | \n", - "True | \n", - "1.0 | \n", - "NaN | \n", - "False | \n", - "False | \n", - "
231 rows × 73 columns
\n", - "\n", - " | action | \n", - "baseline | \n", - "entity | \n", - "frequency | \n", - "i | \n", - "ii | \n", - "session | \n", - "stim_location | \n", - "stimulated | \n", - "tag | \n", - "... | \n", - "half_width | \n", - "peak_to_trough | \n", - "average_firing_rate | \n", - "bs | \n", - "bs_stim | \n", - "bs_ctrl | \n", - "ns_inhibited | \n", - "ns_not_inhibited | \n", - "gridcell | \n", - "bs_not_gridcell | \n", - "
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33 | \n", - "1833-260619-1 | \n", - "True | \n", - "1833 | \n", - "NaN | \n", - "True | \n", - "False | \n", - "1 | \n", - "NaN | \n", - "False | \n", - "baseline i | \n", - "... | \n", - "0.272875 | \n", - "0.602667 | \n", - "5.945508 | \n", - "True | \n", - "NaN | \n", - "1.0 | \n", - "False | \n", - "False | \n", - "True | \n", - "False | \n", - "
34 | \n", - "1833-260619-1 | \n", - "True | \n", - "1833 | \n", - "NaN | \n", - "True | \n", - "False | \n", - "1 | \n", - "NaN | \n", - "False | \n", - "baseline i | \n", - "... | \n", - "0.226452 | \n", - "0.274814 | \n", - "2.860048 | \n", - "False | \n", - "NaN | \n", - "0.0 | \n", - "False | \n", - "True | \n", - "True | \n", - "False | \n", - "
35 | \n", - "1833-260619-1 | \n", - "True | \n", - "1833 | \n", - "NaN | \n", - "True | \n", - "False | \n", - "1 | \n", - "NaN | \n", - "False | \n", - "baseline i | \n", - "... | \n", - "0.247266 | \n", - "0.570104 | \n", - "3.365674 | \n", - "True | \n", - "NaN | \n", - "1.0 | \n", - "False | \n", - "False | \n", - "True | \n", - "False | \n", - "
39 | \n", - "1833-260619-1 | \n", - "True | \n", - "1833 | \n", - "NaN | \n", - "True | \n", - "False | \n", - "1 | \n", - "NaN | \n", - "False | \n", - "baseline i | \n", - "... | \n", - "0.284542 | \n", - "0.644111 | \n", - "17.471520 | \n", - "True | \n", - "NaN | \n", - "1.0 | \n", - "False | \n", - "False | \n", - "True | \n", - "False | \n", - "
40 | \n", - "1833-260619-1 | \n", - "True | \n", - "1833 | \n", - "NaN | \n", - "True | \n", - "False | \n", - "1 | \n", - "NaN | \n", - "False | \n", - "baseline i | \n", - "... | \n", - "0.259920 | \n", - "0.581698 | \n", - "5.891739 | \n", - "True | \n", - "NaN | \n", - "1.0 | \n", - "False | \n", - "False | \n", - "True | \n", - "False | \n", - "
5 rows × 75 columns
\n", - "