In [1]:
%load_ext autoreload
%autoreload 2
In [2]:
import matplotlib.pyplot as plt
%matplotlib inline
import spatial_maps as sp
import septum_mec.analysis.data_processing as dp
import septum_mec.analysis.registration
import expipe
import os
import pathlib
import numpy as np
import exdir
import pandas as pd
import optogenetics as og
import quantities as pq
import shutil
from distutils.dir_util import copy_tree

from scipy.signal import find_peaks
from scipy.interpolate import interp1d
from matplotlib import mlab

from tqdm import tqdm_notebook as tqdm
tqdm.pandas()  # register progress_apply on pandas objects
10:26:07 [I] klustakwik KlustaKwik2 version 0.2.6
In [3]:
data_loader = dp.Data()
actions = data_loader.actions
project = data_loader.project
In [4]:
output = pathlib.Path('output/stimulus-lfp-response')
(output / 'data').mkdir(parents=True, exist_ok=True)
In [5]:
identify_neurons = actions['identify-neurons']
sessions = pd.read_csv(identify_neurons.data_path('sessions'))
In [6]:
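# Expand each session into one row per channel group (0-7), so the
# downstream analysis runs once per session/channel-group pair.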
channel_groups = []
for i, row in sessions.iterrows():
    for ch in range(8):
        row['channel_group'] = ch
        channel_groups.append(row.to_dict())
In [7]:
channel_groups = pd.DataFrame(channel_groups)
In [8]:
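# Helpers: get_lim returns the [first, last] stimulus time (or [0, inf] for
# unstimulated sessions), get_mask restricts an LFP signal to that window,
# zscore standardises the signal, and compute_stim_freq is the inverse of the
# mean inter-pulse interval.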
def get_lim(action_id):
    stim_times = data_loader.stim_times(action_id)
    if stim_times is None:
        return [0, np.inf]
    stim_times = np.array(stim_times)
    return [stim_times.min(), stim_times.max()]

def get_mask(lfp, lim):
    return (lfp.times >= lim[0]) & (lfp.times <= lim[1])

def zscore(a):
    return (a - a.mean()) / a.std()

def compute_stim_freq(action_id):
    stim_times = data_loader.stim_times(action_id)
    if stim_times is None:
        return np.nan
    stim_times = np.array(stim_times)
    return 1 / np.mean(np.diff(stim_times))
In [9]:
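# signaltonoise computes the per-channel mean / std ratio (the function removed
# from scipy.stats); select_and_clean zeroes a window of +-width samples around
# any sample whose absolute value exceeds the threshold.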
def signaltonoise(a, axis=0, ddof=0):
    a = np.asanyarray(a)
    m = a.mean(axis)
    sd = a.std(axis=axis, ddof=ddof)
    return np.where(sd == 0, 0, m / sd)


def select_and_clean(anas, width=500, threshold=2):
    anas = np.array(anas)

    for ch in range(anas.shape[1]):
        idxs, = np.where(abs(anas[:, ch]) > threshold)
        for idx in idxs:
            # clamp the window start so artifacts near the signal onset
            # do not produce a negative (wrapping) slice index
            anas[max(idx - width, 0):idx + width, ch] = 0  # TODO AR model prediction
    return anas
In [10]:
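# Band energy: rectangle-rule integral of the PSD between f1 and f2
# (assumes the uniformly spaced frequency axis returned by mlab.psd).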
def compute_energy(p, f, f1, f2):
    if np.isnan(f1):
        return np.nan
    mask = (f > f1) & (f < f2)
    df = f[1] - f[0]
    return np.sum(p[mask]) * df
In [11]:
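# Largest local maximum of the PSD within the 6-10 Hz theta band.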
def find_theta_peak(p, f):
    mask = (f > 6) & (f < 10)
    p_m = p[mask]
    f_m = f[mask]
    peaks, _ = find_peaks(p_m)
    if len(peaks) == 0:
        # no local maximum in the theta band (e.g. monotone PSD segment)
        return np.nan, np.nan
    idx = np.argmax(p_m[peaks])
    return f_m[peaks[idx]], p_m[peaks[idx]]
In [12]:
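# Full width at half maximum of a spectral peak: find the half-power crossings
# of the PSD on either side of the peak frequency m_f and linearly interpolate
# the crossing frequencies f1 (below the peak) and f2 (above it).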
def compute_half_width(p, f, m_p, m_f):
    if np.isnan(m_p):
        return np.nan, np.nan
    m_p_half = m_p / 2
    half_p = p - m_p_half
    idx_f = np.where(f <= m_f)[0].max()
    idxs_p1, = np.where(np.diff(half_p[:idx_f + 1] > 0) == 1)
    if len(idxs_p1) == 0:
        return np.nan, np.nan
    m1 = idxs_p1.max()
    idxs_p2, = np.where(np.diff(half_p[idx_f:] > 0) == 1)
    if len(idxs_p2) == 0:
        return np.nan, np.nan
    m2 = idxs_p2.min() + idx_f
#     plt.plot(f, p)
#     plt.plot(m_f, m_p, marker='o', ls='none', markersize=10)
#     plt.plot(f[m1], p[m1], marker='x', ls='none', markersize=10, c='r')
#     plt.plot(f[m1+1], p[m1+1], marker='+', ls='none', markersize=10, c='r')
    
#     plt.plot(f[m2], p[m2], marker='x', ls='none', markersize=10, c='k')
#     plt.plot(f[m2+1], p[m2+1], marker='+', ls='none', markersize=10, c='k')
    
#     plt.xlim(4,12)
    assert p[m1] < m_p_half < p[m1+1], (p[m1], m_p_half, p[m1+1])
    assert p[m2] > m_p_half > p[m2+1], (p[m2], m_p_half, p[m2+1])
    
    f1 = interp1d([half_p[m1], half_p[m1 + 1]], [f[m1], f[m1 + 1]])(0)
    f2 = interp1d([half_p[m2], half_p[m2 + 1]], [f[m2], f[m2 + 1]])(0)
    return f1, f2
In [13]:
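# PSD value at the stimulation frequency, obtained by linear interpolation.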
def compute_stim_peak(p, f, s_f):
    if np.isnan(s_f):
        return np.nan
    return interp1d(f, p)(s_f)
In [14]:
NFFT = 10000  # PSD segment length; frequency resolution is Fs / NFFT
theta_band_f1, theta_band_f2 = 6, 10  # theta band (Hz)
In [15]:
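# Per session/channel group: pick the channel with the highest signal-to-noise
# ratio, compute its PSD over the stimulation window, and extract theta- and
# stimulus-peak descriptors (peak frequency, power, half width, band energy).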
def process(row):
    action_id = row['action']
    channel_group = row['channel_group']
    lfp = data_loader.lfp(action_id, channel_group)
    clean_lfp = select_and_clean(lfp)
    snls = signaltonoise(clean_lfp)
    best_channel = np.argmax(snls)
    snl = snls[best_channel]
    
    lim = get_lim(action_id)
    
    mask = get_mask(lfp, lim)
    signal = zscore(clean_lfp[mask, best_channel].ravel())
    
    p_xx, freq = mlab.psd(signal, Fs=lfp.sampling_rate.magnitude, NFFT=NFFT)
    
    theta_f, theta_p_max = find_theta_peak(p_xx, freq)
    
    theta_energy = compute_energy(p_xx, freq, theta_band_f1, theta_band_f2) # theta band 6 - 10 Hz
    
    theta_half_f1, theta_half_f2 = compute_half_width(p_xx, freq, theta_p_max, theta_f)
    
    theta_half_width = theta_half_f2 - theta_half_f1
    
    theta_half_energy = compute_energy(p_xx, freq, theta_half_f1, theta_half_f2) # energy within the theta half-width band
    
    # stim
    
    stim_freq = compute_stim_freq(action_id)
    
    stim_p_max = compute_stim_peak(p_xx, freq, stim_freq)
    
    stim_half_f1, stim_half_f2 = compute_half_width(p_xx, freq, stim_p_max, stim_freq)
    stim_half_width = stim_half_f2 - stim_half_f1
    
    stim_energy = compute_energy(p_xx, freq, stim_half_f1, stim_half_f2)
    
    result = pd.Series({
        'signal_to_noise': snl,
        'best_channel': best_channel,
        'theta_freq': theta_f,
        'theta_peak': theta_p_max,
        'theta_energy': theta_energy,
        'theta_half_f1': theta_half_f1, 
        'theta_half_f2': theta_half_f2,
        'theta_half_width': theta_half_width,
        'theta_half_energy': theta_half_energy,
        'stim_freq': stim_freq,
        'stim_p_max': stim_p_max,
        'stim_half_f1': stim_half_f1, 
        'stim_half_f2': stim_half_f2,
        'stim_half_width': stim_half_width,
        'stim_energy': stim_energy
    })
    return result
In [16]:
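# Run the analysis for every session/channel-group row and join the derived
# measures back onto the channel_groups table.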
results = channel_groups.merge(
    channel_groups.progress_apply(process, axis=1), 
    left_index=True, right_index=True)

In [17]:
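# Recompute and cache the full PSD and frequency axis under each
# '<action_id>_<channel_group>' key, for saving and later plotting.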
psd, freqs = {}, {}
for i, row in tqdm(channel_groups.iterrows(), total=len(channel_groups)):
    action_id = row['action']
    channel_group = row['channel_group']
    action_group = f'{action_id}_{channel_group}'
    lfp = data_loader.lfp(action_id, channel_group)
    clean_lfp = select_and_clean(lfp)
    snls = signaltonoise(clean_lfp)
    best_channel = np.argmax(snls)
    snl = snls[best_channel]
    
    lim = get_lim(action_id)
    
    mask = get_mask(lfp, lim)
    signal = zscore(clean_lfp[mask, best_channel].ravel())
    
    p_xx, freq = mlab.psd(signal, Fs=lfp.sampling_rate.magnitude, NFFT=NFFT)
    psd.update({action_group: p_xx})
    freqs.update({action_group: freq})

In [18]:
# feather needs a DataFrame, so wrap the dicts (one column per action/channel group)
pd.DataFrame(psd).to_feather(output / 'data' / 'psd.feather')
pd.DataFrame(freqs).to_feather(output / 'data' / 'freqs.feather')
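A quick check that the saved tables load back as expected (optional):

In [ ]:
# sanity check: each column is one '<action_id>_<channel_group>' key
pd.read_feather(output / 'data' / 'psd.feather').head()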

Save to expipe

In [ ]:
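# create (or fetch) the expipe action that will hold the parameters,
# results table and this notebook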
action = project.require_action("stimulus-lfp-response")
In [ ]:
action.modules['parameters'] = {
    'NFFT': NFFT,
    'theta_band_f1': theta_band_f1,
    'theta_band_f2': theta_band_f2
}
In [ ]:
action.data['results'] = 'results.csv'
results.to_csv(action.data_path('results'), index=False)
In [ ]:
copy_tree(str(output), str(action.data_path()))
In [ ]:
septum_mec.analysis.registration.store_notebook(action, "10-calculate-stimulus-lfp-response.ipynb")
In [ ]: