diff --git a/actions/stimulus-lfp-response-no-zscore/attributes.yaml b/actions/stimulus-lfp-response-no-zscore/attributes.yaml
new file mode 100644
index 000000000..acae8117c
--- /dev/null
+++ b/actions/stimulus-lfp-response-no-zscore/attributes.yaml
@@ -0,0 +1,5 @@
+registered: '2020-11-28T11:18:33'
+data:
+  results: results.csv
+  notebook: 10-calculate-stimulus-lfp-response.ipynb
+  html: 10-calculate-stimulus-lfp-response.html
diff --git a/actions/stimulus-lfp-response-no-zscore/data/10-calculate-stimulus-lfp-response.html b/actions/stimulus-lfp-response-no-zscore/data/10-calculate-stimulus-lfp-response.html
new file mode 100644
index 000000000..d50d0b7cf
--- /dev/null
+++ b/actions/stimulus-lfp-response-no-zscore/data/10-calculate-stimulus-lfp-response.html
@@ -0,0 +1,13727 @@
+
+
+
+%load_ext autoreload
+%autoreload 2
+
+import matplotlib.pyplot as plt
+%matplotlib inline
+import spatial_maps as sp
+import septum_mec.analysis.data_processing as dp
+import septum_mec.analysis.registration
+import expipe
+import os
+import pathlib
+import numpy as np
+import exdir
+import pandas as pd
+import optogenetics as og
+import quantities as pq
+import shutil
+from distutils.dir_util import copy_tree
+import scipy
+import scipy.signal as ss
+
+from scipy.signal import find_peaks
+from scipy.interpolate import interp1d
+from matplotlib import mlab
+
+from tqdm.notebook import tqdm_notebook as tqdm
+tqdm.pandas()
+
+data_loader = dp.Data()
+actions = data_loader.actions
+project = data_loader.project
+
+#############################
+
+perform_zscore = False
+
+if not perform_zscore:
+ zscore_str = "-no-zscore"
+else:
+ zscore_str = ""
+
+#################################
+
+output = pathlib.Path('output/stimulus-lfp-response' + zscore_str)
+(output / 'data').mkdir(parents=True, exist_ok=True)
+
+identify_neurons = actions['identify-neurons']
+sessions = pd.read_csv(identify_neurons.data_path('sessions'))
+
+channel_groups = []
+for i, row in sessions.iterrows():
+ for ch in range(8):
+ row['channel_group'] = ch
+ channel_groups.append(row.to_dict())
+
+channel_groups = pd.DataFrame(channel_groups)
+
+def get_lim(action_id):
+ stim_times = data_loader.stim_times(action_id)
+ if stim_times is None:
+ return [0, np.inf]
+ stim_times = np.array(stim_times)
+ return [stim_times.min(), stim_times.max()]
+
+def get_mask(lfp, lim):
+ return (lfp.times >= lim[0]) & (lfp.times <= lim[1])
+
+def zscore(a):
+ return (a - a.mean()) / a.std()
+
+def compute_stim_freq(action_id):
+ stim_times = data_loader.stim_times(action_id)
+ if stim_times is None:
+ return np.nan
+ stim_times = np.array(stim_times)
+ return 1 / np.mean(np.diff(stim_times))
+
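+# Per-channel mean/SD ratio; channels with zero SD map to 0, so flat (dead)
+# channels cannot win the argmax used for channel selection in `process` below.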
+def signaltonoise(a, axis=0, ddof=0):
+ a = np.asanyarray(a)
+ m = a.mean(axis)
+ sd = a.std(axis=axis, ddof=ddof)
+ return np.where(sd == 0, 0, m / sd)
+
+
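+# Zero out +/- `width` samples around amplitude artefacts exceeding `threshold`
+# (`width` in samples, `threshold` in the units of `anas`) before the SNR and
+# PSD estimates.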
+def select_and_clean(anas, width=500, threshold=2):
+ anas = np.array(anas)
+
+ for ch in range(anas.shape[1]):
+ idxs, = np.where(abs(anas[:, ch]) > threshold)
+ for idx in idxs:
+            # clamp the lower bound at 0 so a negative index does not wrap around
+            anas[max(idx - width, 0):idx + width, ch] = 0  # TODO AR model prediction
+ return anas
+
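+# Band energy as a rectangular-rule integral of the PSD between f1 and f2,
+# assuming a uniformly spaced frequency grid (df taken from the first bin).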
+def compute_energy(p, f, f1, f2):
+ if np.isnan(f1):
+ return np.nan
+ mask = (f > f1) & (f < f2)
+ df = f[1] - f[0]
+ return np.sum(p[mask]) * df
+
+
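+# Absolute and relative band power via Simpson's rule (`simps` is spelled
+# `simpson` in newer SciPy); relative power is the band-to-total ratio.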
+def compute_band_power(p, f, f1, f2):
+ if np.isnan(f1) or np.all(np.isnan(p)):
+ return [np.nan] * 2
+ from scipy.integrate import simps
+ dx = f[1] - f[0]
+ mask = (f > f1) & (f < f2)
+ # Compute the absolute power by approximating the area under the curve
+ band_power = simps(p[mask], dx=dx)
+ total_power = simps(p, dx=dx)
+ rel_power = band_power / total_power
+ return band_power, rel_power
+
+def find_theta_peak(p, f, f1, f2):
+ if np.all(np.isnan(p)):
+ return np.nan, np.nan
+ mask = (f > f1) & (f < f2)
+ p_m = p[mask]
+ f_m = f[mask]
+    peaks, _ = find_peaks(p_m)
+    if len(peaks) == 0:  # no local maximum in the band
+        return np.nan, np.nan
+    idx = np.argmax(p_m[peaks])
+    return f_m[peaks[idx]], p_m[peaks[idx]]
+
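+# Peak half width: subtract a baseline estimated from 1 Hz flanks just outside
+# `band`, then locate the two frequencies where the baseline-corrected power
+# crosses half the peak value, interpolating linearly between grid points.
+# Example: a peak 4 above baseline at 8 Hz gives a half-power level of 2; the
+# crossings f1 and f2 on either side of 8 Hz yield a width of f2 - f1.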
+def compute_half_width(power, freq, max_power, max_frequency, band, band_width=1):
+ if np.isnan(max_power):
+ return [np.nan] * 3
+
+ # estimate baseline power
+ low_baseline_mask = (freq > band[0] - band_width) & (freq < band[0])
+ high_baseline_mask = (freq > band[1]) & (freq < band[1] + band_width)
+ baseline = np.mean(np.concatenate([power[low_baseline_mask], power[high_baseline_mask]]))
+ p = power - baseline
+ m_p = max_power - baseline
+ m_f = max_frequency
+ f = freq
+
+ # estimate half width
+ m_p_half = m_p / 2
+ half_p = p - m_p_half
+ idx_f = np.where(f <= m_f)[0].max()
+ idxs_p1, = np.where(np.diff(half_p[:idx_f + 1] > 0) == 1)
+ if len(idxs_p1) == 0:
+ return [np.nan] * 3
+ m1 = idxs_p1.max()
+ idxs_p2, = np.where(np.diff(half_p[idx_f:] > 0) == 1)
+ if len(idxs_p2) == 0:
+ return [np.nan] * 3
+ m2 = idxs_p2.min() + idx_f
+# assert p[m1] < m_p_half < p[m1+1], (p[m1], m_p_half, p[m1+1])
+# assert p[m2] > m_p_half > p[m2+1], (p[m2], m_p_half, p[m2+1])
+
+ f1 = interp1d([half_p[m1], half_p[m1 + 1]], [f[m1], f[m1 + 1]])(0)
+ f2 = interp1d([half_p[m2], half_p[m2 + 1]], [f[m2], f[m2 + 1]])(0)
+ return f1, f2, m_p_half + baseline
+
+def compute_stim_peak(p, f, s_f):
+ if np.isnan(s_f):
+ return np.nan
+ return interp1d(f, p)(s_f)
+
+
+def compute_relative_peak(power, freq, max_power, band, band_width=1):
+ # estimate baseline power
+ low_baseline_mask = (freq > band[0] - band_width) & (freq < band[0])
+ high_baseline_mask = (freq > band[1]) & (freq < band[1] + band_width)
+ baseline = np.mean(np.concatenate([power[low_baseline_mask], power[high_baseline_mask]]))
+ return (max_power - baseline) / abs(baseline)
+
+theta_band_f1, theta_band_f2 = 6, 10
+
psd_data, freq_data = {}, {}
+
+def process(row, perform_zscore):
+ action_id = row['action']
+ channel_group = row['channel_group']
+ name = f'{action_id}_{channel_group}'
+ lfp = data_loader.lfp(action_id, channel_group)
+ clean_lfp = select_and_clean(lfp)
+ snls = signaltonoise(clean_lfp)
+ best_channel = np.argmax(snls)
+ snl = snls[best_channel]
+
+ lim = get_lim(action_id)
+
+ mask = get_mask(lfp, lim)
+ if perform_zscore:
+ signal = zscore(clean_lfp[mask, best_channel].ravel())
+ else:
+ signal = clean_lfp[mask, best_channel].ravel()
+
+ window = int(6 * lfp.sampling_rate.magnitude)
+
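+    # 6 s Welch segments give ~1/6 Hz spectral resolution; padding nfft to the
+    # next fast FFT length speeds the transform and refines the frequency grid.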
+# p_xx, freq = mlab.psd(signal, Fs=lfp.sampling_rate.magnitude, NFFT=NFFT)
+ freq, p_xx = ss.welch(signal, fs=lfp.sampling_rate.magnitude, nperseg=window, nfft=scipy.fftpack.next_fast_len(window))
+# p_xx = 10 * np.log10(p_xx)
+
+ theta_f, theta_p_max = find_theta_peak(p_xx, freq, theta_band_f1, theta_band_f2)
+
+    theta_bandpower, theta_relpower = compute_band_power(p_xx, freq, theta_band_f1, theta_band_f2)
+
+    # theta_energy is required downstream (stim_strength, histograms, statistics)
+    theta_energy = compute_energy(p_xx, freq, theta_band_f1, theta_band_f2)
+
+ theta_relpeak = compute_relative_peak(p_xx, freq, theta_p_max, [theta_band_f1, theta_band_f2])
+
+ theta_half_f1, theta_half_f2, theta_half_power = compute_half_width(p_xx, freq, theta_p_max, theta_f, [theta_band_f1, theta_band_f2])
+
+ theta_half_width = theta_half_f2 - theta_half_f1
+
+ psd_data.update({name: p_xx})
+ freq_data.update({name: freq})
+
+
+ # stim
+
+ stim_freq = compute_stim_freq(action_id)
+
+ stim_p_max = compute_stim_peak(p_xx, freq, stim_freq)
+
+ stim_half_f1, stim_half_f2, stim_half_power = compute_half_width(p_xx, freq, stim_p_max, stim_freq, [stim_freq - 1, stim_freq + 1])
+
+ stim_half_width = stim_half_f2 - stim_half_f1
+
+    stim_bandpower, stim_relpower = compute_band_power(p_xx, freq, stim_freq - 1, stim_freq + 1)
+
+    # stim_energy mirrors theta_energy for a +/- 1 Hz band around the stimulus frequency
+    stim_energy = compute_energy(p_xx, freq, stim_freq - 1, stim_freq + 1)
+
+ stim_relpeak = compute_relative_peak(p_xx, freq, stim_p_max, [stim_freq - 1, stim_freq + 1])
+
+ result = pd.Series({
+ 'signal_to_noise': snl,
+ 'best_channel': best_channel,
+ 'theta_freq': theta_f,
+ 'theta_peak': theta_p_max,
+        'theta_energy': theta_energy,
+        'theta_bandpower': theta_bandpower,
+ 'theta_relpower': theta_relpower,
+ 'theta_relpeak': theta_relpeak,
+ 'theta_half_f1': theta_half_f1,
+ 'theta_half_f2': theta_half_f2,
+ 'theta_half_width': theta_half_width,
+ 'stim_freq': stim_freq,
+ 'stim_p_max': stim_p_max,
+ 'stim_half_f1': stim_half_f1,
+ 'stim_half_f2': stim_half_f2,
+ 'stim_half_width': stim_half_width,
+        'stim_energy': stim_energy,
+        'stim_bandpower': stim_bandpower,
+ 'stim_relpower': stim_relpower,
+ 'stim_relpeak': stim_relpeak,
+ })
+ return result
+
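+# progress_apply maps `process` over every (session, channel group) row and the
+# index-aligned merge attaches the returned Series column-wise onto channel_groups.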
+results = channel_groups.merge(
+ channel_groups.progress_apply(process, perform_zscore=perform_zscore, axis=1),
+ left_index=True, right_index=True)
+
+pd.DataFrame(psd_data).to_feather(output / 'data' / 'psd.feather')
+pd.DataFrame(freq_data).to_feather(output / 'data' / 'freqs.feather')
+
+action = project.require_action("stimulus-lfp-response" + zscore_str)
+
+action.modules['parameters'] = {
+    'window': 6,  # Welch segment length, in seconds
+ 'theta_band_f1': theta_band_f1,
+ 'theta_band_f2': theta_band_f2
+}
+
+action.data['results'] = 'results.csv'
+results.to_csv(action.data_path('results'), index=False)
+
+copy_tree(output, str(action.data_path()))
+
+septum_mec.analysis.registration.store_notebook(action, "10-calculate-stimulus-lfp-response.ipynb")
+
+
+%load_ext autoreload
+%autoreload 2
+
+import os
+import expipe
+import pathlib
+import numpy as np
+import spatial_maps.stats as stats
+import septum_mec
+import septum_mec.analysis.data_processing as dp
+import septum_mec.analysis.registration
+import head_direction.head as head
+import spatial_maps as sp
+import speed_cells.speed as spd
+import re
+import joblib
+import multiprocessing
+import shutil
+import psutil
+import pandas as pd
+import matplotlib.pyplot as plt
+import matplotlib
+from distutils.dir_util import copy_tree
+from neo import SpikeTrain
+import scipy
+import seaborn as sns
+from tqdm.notebook import tqdm_notebook as tqdm
+tqdm.pandas()
+
+from spike_statistics.core import permutation_resampling_test
+
+from spikewaveform.core import calculate_waveform_features_from_template, cluster_waveform_features
+
+from septum_mec.analysis.plotting import violinplot, despine
+
+#############################
+
+perform_zscore = False
+
+if not perform_zscore:
+ zscore_str = "-no-zscore"
+else:
+ zscore_str = ""
+
+#################################
+
+%matplotlib inline
+plt.rc('axes', titlesize=12)
+plt.rcParams.update({
+ 'font.size': 12,
+ 'figure.figsize': (6, 4),
+ 'figure.dpi': 150
+})
+
+output_path = pathlib.Path("output") / ("stimulus-lfp-response" + zscore_str)
+(output_path / "statistics").mkdir(exist_ok=True, parents=True)
+(output_path / "figures").mkdir(exist_ok=True, parents=True)
+output_path.mkdir(exist_ok=True)
+
+data_loader = dp.Data()
+actions = data_loader.actions
+project = data_loader.project
+
+identify_neurons = actions['identify-neurons']
+sessions = pd.read_csv(identify_neurons.data_path('sessions'))
+
+lfp_action = actions['stimulus-lfp-response' + zscore_str]
+lfp_results = pd.read_csv(lfp_action.data_path('results'))
+
+lfp_results = pd.merge(sessions, lfp_results, how='left')
+
+lfp_results = lfp_results.query('stim_location!="mecl" and stim_location!="mecr"')
+
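+# Channel groups 0-3 and 4-7 correspond to the two hemispheres (hence
+# lfp_results_hemisphere below), so each action is split into two "sides".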
+def action_group(row):
+ a = int(row.channel_group in [0,1,2,3])
+ return f'{row.action}-{a}'
+lfp_results['action_side_a'] = lfp_results.apply(action_group, axis=1)
+
+lfp_results['stim_strength'] = lfp_results['stim_p_max'] / lfp_results['theta_energy']
+
+# lfp_results_hemisphere = lfp_results.sort_values(
+# by=['action_side_a', 'stim_strength', 'signal_to_noise'], ascending=[True, False, False]
+lfp_results_hemisphere = lfp_results.sort_values(
+ by=['action_side_a', 'channel_group'], ascending=[True, False]
+).drop_duplicates(subset='action_side_a', keep='first')
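+# Keep a single channel group per hemisphere (the highest-numbered one after
+# the descending sort); the commented-out variant instead picked the group with
+# the strongest stimulus response and SNR.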
+lfp_results_hemisphere.loc[:,['action_side_a','channel_group', 'signal_to_noise', 'stim_strength']].head()
+
+colors = ['#1b9e77','#d95f02','#7570b3','#e7298a']
+labels = ['Baseline I', '11 Hz', 'Baseline II', '30 Hz']
+# Hz11 means that the baseline session was indeed before an 11 Hz session
+queries = ['baseline and i and Hz11', 'frequency==11', 'baseline and ii and Hz30', 'frequency==30']
+
+# prepare pairwise comparison: same animal same side same date different sessions
+
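+# action_side_a has the form '<entity>-<date>-<session>-<side>'; dropping the
+# session field (index 2) keys rows from the same animal, date and hemisphere,
+# allowing baseline and stimulation sessions to be paired.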
+def make_entity_date_side(row):
+ s = row.action_side_a.split('-')
+ del s[2]
+ return '-'.join(s)
+
+lfp_results_hemisphere['entity_date_side'] = lfp_results_hemisphere.apply(make_entity_date_side, axis=1)
+
+density = True
+cumulative = True
+histtype = 'step'
+lw = 2
+if perform_zscore:
+ bins = {
+ 'theta_energy': np.arange(0, .7, .03),
+ 'theta_peak': np.arange(0, .7, .03),
+ 'theta_freq': np.arange(4, 10, .5),
+ 'theta_half_width': np.arange(0, 15, .5)
+ }
+else:
+ bins = {
+ 'theta_energy': np.arange(0, .008, .0003),
+ 'theta_peak': np.arange(0, .007, .0003),
+ 'theta_freq': np.arange(4, 12, .5),
+ 'theta_half_width': np.arange(0, 15, .5)
+ }
+xlabel = {
+ 'theta_energy': 'Theta energy (dB)',
+ 'theta_peak': 'Peak PSD (dB/Hz)',
+ 'theta_freq': '(Hz)',
+ 'theta_half_width': '(Hz)',
+}
+# key = 'theta_energy'
+# key = 'theta_peak'
+results = {}
+for key in bins:
+ results[key] = list()
+ fig = plt.figure(figsize=(3.5,2))
+ plt.suptitle(key)
+ legend_lines = []
+ for color, query, label in zip(colors, queries, labels):
+ values = lfp_results_hemisphere.query(query).loc[:,['entity_date_side', key]]
+ results[key].append(values.rename({key: label}, axis=1))
+ values[key].hist(
+ bins=bins[key], density=density, cumulative=cumulative, lw=lw,
+ histtype=histtype, color=color)
+ legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, lw=lw, label=label))
+
+ plt.legend(
+ handles=legend_lines,
+ bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
+ plt.tight_layout()
+ plt.grid(False)
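+    # pull the right limit in by 2.5% so the cumulative step histogram's final
+    # drop back to zero is not drawn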
+ plt.xlim(right=bins[key].max() - bins[key].max()*0.025)
+ despine()
+ plt.xlabel(xlabel[key])
+ figname = f'lfp-psd-histogram-{key}'
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.png',
+ bbox_inches='tight', transparent=True)
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.svg',
+ bbox_inches='tight', transparent=True)
+
+density = True
+cumulative = True
+histtype = 'step'
+lw = 2
+if perform_zscore:
+ bins = {
+ 'stim_energy': np.arange(0, .7, .01),
+ 'stim_half_width': np.arange(0, 10, .5),
+ 'stim_p_max': np.arange(0, 4, .01),
+ 'stim_strength': np.arange(0, 160, 1)
+ }
+else:
+ bins = {
+ 'stim_energy': np.arange(0, .008, .0001),
+ 'stim_half_width': np.arange(0, 0.5, .001),
+ 'stim_p_max': np.arange(0, .06, .0001),
+ 'stim_strength': np.arange(0, 160, 1)
+ }
+xlabel = {
+ 'stim_energy': 'Energy (dB)',
+ 'stim_half_width': '(Hz)',
+ 'stim_p_max': 'Peak PSD (dB/Hz)',
+ 'stim_strength': 'Ratio',
+}
+# key = 'theta_energy'
+# key = 'theta_peak'
+for key in bins:
+ results[key] = list()
+ fig = plt.figure(figsize=(3.2,2))
+ plt.suptitle(key)
+ legend_lines = []
+ for color, query, label in zip(colors[1::2], queries[1::2], labels[1::2]):
+ values = lfp_results_hemisphere.query(query).loc[:,['entity_date_side', key]]
+ results[key].append(values.rename({key: label}, axis=1))
+ values[key].hist(
+ bins=bins[key], density=density, cumulative=cumulative, lw=lw,
+ histtype=histtype, color=color)
+ legend_lines.append(matplotlib.lines.Line2D([0], [0], color=color, lw=lw, label=label))
+
+ plt.legend(
+ handles=legend_lines,
+ bbox_to_anchor=(1.04,1), borderaxespad=0, frameon=False)
+ plt.tight_layout()
+ plt.grid(False)
+ plt.xlim(right=bins[key].max() - bins[key].max()*0.025)
+ despine()
+ plt.xlabel(xlabel[key])
+ figname = f'lfp-psd-histogram-{key}'
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.png',
+ bbox_inches='tight', transparent=True)
+ fig.savefig(
+ output_path / 'figures' / f'{figname}.svg',
+ bbox_inches='tight', transparent=True)
+
+from functools import reduce
+
+for key, val in results.items():
+ df = reduce(lambda left,right: pd.merge(left, right, on='entity_date_side', how='outer'), val)
+ results[key] = df.drop('entity_date_side',axis=1)
+
+def summarize(data):
+ return "{:.1e} ± {:.1e} ({})".format(data.mean(), data.sem(), sum(~np.isnan(data)))
+
+
+def MWU(df, keys):
+ '''
+ Mann Whitney U
+ '''
+ Uvalue, pvalue = scipy.stats.mannwhitneyu(
+ df[keys[0]].dropna(),
+ df[keys[1]].dropna(),
+ alternative='two-sided')
+
+ return "{:.2f}, {:.3f}".format(Uvalue, pvalue)
+
+
+def PRS(df, keys):
+ '''
+ Permutation ReSampling
+ '''
+    pvalue, observed_diff, diffs = permutation_resampling_test(
+ df[keys[0]].dropna(),
+ df[keys[1]].dropna(), statistic=np.median)
+
+ return "{:.2f}, {:.3f}".format(observed_diff, pvalue)
+
+
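+# Paired Wilcoxon signed-rank test on rows where both conditions are present;
+# formatted as "statistic, p-value, (number of pairs)".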
+def wilcoxon(df, keys):
+ dff = df.loc[:,[keys[0], keys[1]]].dropna()
+ statistic, pvalue = scipy.stats.wilcoxon(
+ dff[keys[0]],
+ dff[keys[1]],
+ alternative='two-sided')
+
+ return "{:.2f}, {:.3f}, ({})".format(statistic, pvalue, len(dff))
+
+
+def paired_t(df, keys):
+ dff = df.loc[:,[keys[0], keys[1]]].dropna()
+ statistic, pvalue = scipy.stats.ttest_rel(
+ dff[keys[0]],
+ dff[keys[1]])
+
+ return "{:.2f}, {:.3f}".format(statistic, pvalue)
+
+
+def normality(df, key):
+ statistic, pvalue = scipy.stats.normaltest(
+ df[key].dropna())
+
+ return "{:.2f}, {:.3f}".format(statistic, pvalue)
+
+
+def shapiro(df, key):
+ statistic, pvalue = scipy.stats.shapiro(
+ df[key].dropna())
+
+ return "{:.2f}, {:.3f}".format(statistic, pvalue)
+
+def rename(name):
+ return name.replace("_field", "-field").replace("_", " ").capitalize()
+
+stat = pd.DataFrame()
+
+for key, df in results.items():
+ Key = rename(key)
+    stat[Key] = df.agg(summarize)
+
+ for i, c1 in enumerate(df.columns):
+ stat.loc[f'Normality {c1}', Key] = normality(df, c1)
+# stat.loc[f'Shapiro {c1}', Key] = shapiro(df, c1)
+ for c2 in df.columns[i+1:]:
+# stat.loc[f'MWU {c1} {c2}', Key] = MWU(df, [c1, c2])
+# stat.loc[f'PRS {c1} {c2}', Key] = PRS(df, [c1, c2])
+ stat.loc[f'Wilcoxon {c1} {c2}', Key] = wilcoxon(df, [c1, c2])
+# stat.loc[f'Paired T {c1} {c2}', Key] = paired_t(df, [c1, c2])
+
+stat.sort_index()
+
+stat.to_latex(output_path / "statistics" / "statistics.tex")
+stat.to_csv(output_path / "statistics" / "statistics.csv")
+
+psd = pd.read_feather(output_path / 'data' / 'psd.feather')
+freqs = pd.read_feather(output_path / 'data' / 'freqs.feather')
+
+from septum_mec.analysis.plotting import plot_bootstrap_timeseries
+
+freq = freqs.T.iloc[0].values
+
+mask = (freq < 49)
+
+fig, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(5,2))
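+# repeating each axis overlays a baseline with its stimulation condition:
+# [ax0, ax0, ax1, ax1] zipped against (Baseline I, 11 Hz, Baseline II, 30 Hz)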
+axs = axs.repeat(2)
+for i, (ax, query) in enumerate(zip(axs.ravel(), queries)):
+ selection = [
+ f'{r.action}_{r.channel_group}'
+ for i, r in lfp_results_hemisphere.query(query).iterrows()]
+ values = psd.loc[mask, selection].to_numpy()
+ values = 10 * np.log10(values)
+ plot_bootstrap_timeseries(freq[mask], values, ax=ax, lw=1, label=labels[i], color=colors[i])
+# ax.set_title(titles[i])
+    ax.set_xlabel('Frequency (Hz)')
+ ax.legend(frameon=False)
+axs[0].set_ylabel('PSD (dB/Hz)')
+# axs[0].set_ylim(-31, 1)
+despine()
+
+figname = 'lfp-psd'
+fig.savefig(
+ output_path / 'figures' / f'{figname}.png',
+ bbox_inches='tight', transparent=True)
+fig.savefig(
+ output_path / 'figures' / f'{figname}.svg',
+ bbox_inches='tight', transparent=True)
+
+action = project.require_action("stimulus-lfp-response" + zscore_str)
+
+copy_tree(output_path, str(action.data_path()))
+
+septum_mec.analysis.registration.store_notebook(action, "20_stimulus-lfp-response.ipynb")
+
+
\n", + " | action_side_a | \n", + "channel_group | \n", + "signal_to_noise | \n", + "stim_strength | \n", + "
---|---|---|---|---|
71 | \n", + "1833-010719-1-0 | \n", + "7 | \n", + "0.001902 | \n", + "NaN | \n", + "
67 | \n", + "1833-010719-1-1 | \n", + "3 | \n", + "0.003522 | \n", + "NaN | \n", + "
695 | \n", + "1833-010719-2-0 | \n", + "7 | \n", + "0.004280 | \n", + "2.260538 | \n", + "
691 | \n", + "1833-010719-2-1 | \n", + "3 | \n", + "0.003974 | \n", + "6.446883 | \n", + "
583 | \n", + "1833-020719-1-0 | \n", + "7 | \n", + "-0.002942 | \n", + "NaN | \n", + "
\n", + " | Theta energy | \n", + "Theta peak | \n", + "Theta freq | \n", + "Theta half width | \n", + "Stim energy | \n", + "Stim half width | \n", + "Stim p max | \n", + "Stim strength | \n", + "
---|---|---|---|---|---|---|---|---|
11 Hz | \n", + "9.1e-04 ± 9.0e-05 (44) | \n", + "3.9e-04 ± 5.1e-05 (44) | \n", + "7.5e+00 ± 1.5e-01 (44) | \n", + "5.5e+00 ± 5.6e-01 (42) | \n", + "4.3e-04 ± 4.7e-05 (44) | \n", + "1.8e-01 ± 1.3e-02 (44) | \n", + "3.9e-03 ± 4.5e-04 (44) | \n", + "5.8e+00 ± 7.3e-01 (44) | \n", + "
30 Hz | \n", + "5.7e-04 ± 7.0e-05 (34) | \n", + "2.5e-04 ± 4.3e-05 (34) | \n", + "7.7e+00 ± 2.1e-01 (34) | \n", + "6.7e+00 ± 8.4e-01 (29) | \n", + "1.0e-03 ± 2.1e-04 (34) | \n", + "1.5e-01 ± 1.6e-03 (34) | \n", + "1.0e-02 ± 2.0e-03 (34) | \n", + "2.2e+01 ± 4.5e+00 (34) | \n", + "
Baseline I | \n", + "2.3e-03 ± 2.2e-04 (46) | \n", + "1.8e-03 ± 1.9e-04 (46) | \n", + "7.7e+00 ± 6.3e-02 (46) | \n", + "1.2e+00 ± 2.0e-01 (45) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Baseline II | \n", + "2.3e-03 ± 2.4e-04 (32) | \n", + "1.8e-03 ± 2.3e-04 (32) | \n", + "8.1e+00 ± 4.7e-02 (32) | \n", + "9.1e-01 ± 3.9e-02 (31) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Normality 11 Hz | \n", + "27.15, 0.000 | \n", + "57.06, 0.000 | \n", + "1.61, 0.447 | \n", + "11.89, 0.003 | \n", + "16.86, 0.000 | \n", + "82.50, 0.000 | \n", + "14.96, 0.001 | \n", + "15.95, 0.000 | \n", + "
Normality 30 Hz | \n", + "30.23, 0.000 | \n", + "43.50, 0.000 | \n", + "4.85, 0.088 | \n", + "5.64, 0.060 | \n", + "19.73, 0.000 | \n", + "9.69, 0.008 | \n", + "19.72, 0.000 | \n", + "25.38, 0.000 | \n", + "
Normality Baseline I | \n", + "41.33, 0.000 | \n", + "32.43, 0.000 | \n", + "25.20, 0.000 | \n", + "74.91, 0.000 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Normality Baseline II | \n", + "13.17, 0.001 | \n", + "20.78, 0.000 | \n", + "0.96, 0.618 | \n", + "13.33, 0.001 | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Wilcoxon 11 Hz 30 Hz | \n", + "118.00, 0.006, (32) | \n", + "134.00, 0.015, (32) | \n", + "164.00, 0.247, (32) | \n", + "143.00, 0.269, (27) | \n", + "173.00, 0.089, (32) | \n", + "25.00, 0.000, (32) | \n", + "157.00, 0.045, (32) | \n", + "105.00, 0.003, (32) | \n", + "
Wilcoxon 11 Hz Baseline II | \n", + "11.00, 0.000, (32) | \n", + "5.00, 0.000, (32) | \n", + "60.00, 0.000, (32) | \n", + "3.00, 0.000, (30) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Wilcoxon Baseline I 11 Hz | \n", + "42.00, 0.000, (44) | \n", + "16.00, 0.000, (44) | \n", + "329.50, 0.190, (44) | \n", + "14.00, 0.000, (41) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Wilcoxon Baseline I 30 Hz | \n", + "12.00, 0.000, (32) | \n", + "5.00, 0.000, (32) | \n", + "221.50, 0.427, (32) | \n", + "8.00, 0.000, (27) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Wilcoxon Baseline I Baseline II | \n", + "264.00, 1.000, (32) | \n", + "256.00, 0.881, (32) | \n", + "0.00, 0.000, (32) | \n", + "203.00, 0.544, (30) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "
Wilcoxon Baseline II 30 Hz | \n", + "17.00, 0.000, (32) | \n", + "9.00, 0.000, (32) | \n", + "129.50, 0.020, (32) | \n", + "11.00, 0.000, (27) | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "NaN | \n", + "