diff --git a/actions/calculate-statistics/attributes.yaml b/actions/calculate-statistics/attributes.yaml
new file mode 100644
index 000000000..56a60d791
--- /dev/null
+++ b/actions/calculate-statistics/attributes.yaml
@@ -0,0 +1,6 @@
+registered: '2019-10-04T08:11:18'
+data:
+  units: units.csv
+  results: results.csv
+  notebook: 10_calculate_spatial_statistics.ipynb
+  html: 10_calculate_spatial_statistics.html
diff --git a/actions/calculate-statistics/data/10_calculate_spatial_statistics.html b/actions/calculate-statistics/data/10_calculate_spatial_statistics.html
new file mode 100644
index 000000000..4918b6b5c
--- /dev/null
+++ b/actions/calculate-statistics/data/10_calculate_spatial_statistics.html
@@ -0,0 +1,13679 @@
+%load_ext autoreload
+%autoreload 2
+
+import os
+import expipe
+import pathlib
+import numpy as np
+import spatial_maps.stats as stats
+import septum_mec.analysis.data_processing as dp
+import head_direction.head as head
+import spatial_maps as sp
+import septum_mec.analysis.registration
+import speed_cells.speed as spd
+import septum_mec.analysis.spikes as spikes
+import re
+import joblib
+import multiprocessing
+import shutil
+import psutil
+import pandas as pd
+import matplotlib.pyplot as plt
+import septum_mec
+import scipy.ndimage.measurements
+from distutils.dir_util import copy_tree
+
+from tqdm import tqdm_notebook as tqdm
+from tqdm._tqdm_notebook import tqdm_notebook
+tqdm_notebook.pandas()
+
+max_speed = 1 # m/s, only used for the speed score
+min_speed = 0.02 # m/s, only used for the speed score
+position_sampling_rate = 100 # for interpolation
+position_low_pass_frequency = 6 # for low pass filtering of position
+
+box_size = 1.0
+bin_size = 0.02
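+# two rate-map smoothing widths (m): the narrow one is used below for firing-rate statistics, the wide one for field detection and the autocorrelogram-based measures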
+smoothing_low = 0.03
+smoothing_high = 0.06
+
+project_path = dp.project_path()
+
+project = expipe.get_project(project_path)
+actions = project.actions
+
+identify_neurons = actions['identify-neurons']
+units = pd.read_csv(identify_neurons.data_path('units'))
+units.head()
+
+data_loader = dp.Data(
+ position_sampling_rate=position_sampling_rate,
+ position_low_pass_frequency=position_low_pass_frequency,
+ box_size=box_size, bin_size=bin_size
+)
+
+first_row = units[units['action'] == '1849-060319-3'].iloc[0]
+#first_row = sessions.iloc[50]
+
+def process(row):
+ action_id = row['action']
+ channel_id = row['channel_group']
+ unit_id = row['unit_name']
+
+ # values shared by all units in this recording are computed once, which speeds up the calculations
+ x, y, t, speed = map(data_loader.tracking(action_id).get, ['x', 'y', 't', 'v'])
+ ang, ang_t = map(data_loader.head_direction(action_id).get, ['a', 't'])
+ occupancy_map = data_loader.occupancy(action_id)
+ xbins, ybins = data_loader.spatial_bins
+ box_size_, bin_size_ = data_loader.box_size_, data_loader.bin_size_
+ prob_dist = data_loader.prob_dist(action_id)
+
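+ # the occupancy map is smoothed at both widths so that each rate map divides spike and occupancy maps smoothed identically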
+ smooth_low_occupancy_map = sp.maps.smooth_map(occupancy_map, bin_size=bin_size_, smoothing=smoothing_low)
+ smooth_high_occupancy_map = sp.maps.smooth_map(occupancy_map, bin_size=bin_size_, smoothing=smoothing_high)
+
+ spike_times = data_loader.spike_train(action_id, channel_id, unit_id)
+
+ # common
+ spike_map = sp.maps._spike_map(x, y, t, spike_times, xbins, ybins)
+
+ smooth_low_spike_map = sp.maps.smooth_map(spike_map, bin_size=bin_size_, smoothing=smoothing_low)
+ smooth_high_spike_map = sp.maps.smooth_map(spike_map, bin_size=bin_size_, smoothing=smoothing_high)
+
+ smooth_low_rate_map = smooth_low_spike_map / smooth_low_occupancy_map
+ smooth_high_rate_map = smooth_high_spike_map / smooth_high_occupancy_map
+
+ # find fields with laplace
+ fields_laplace = sp.separate_fields_by_laplace(smooth_high_rate_map)
+ fields = fields_laplace.copy() # working copy, cross-validated against the Ismakov method below
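+ # measure each field's area in bins and discard fields smaller than 9 bins (9 x 2 cm x 2 cm = 36 cm^2)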
+ fields_areas = scipy.ndimage.measurements.sum(
+ np.ones_like(fields), fields, index=np.arange(fields.max() + 1))
+ fields_area = fields_areas[fields]
+ fields[fields_area < 9.0] = 0
+
+ # find fields with Ismakov-method
+ fields_ismakov, radius = sp.separate_fields_by_distance(smooth_high_rate_map)
+ fields_ismakov_real = fields_ismakov * bin_size
+ approved_fields = []
+
+ # remove fields not found by both methods
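+ # each entry of fields_ismakov is a peak bin coordinate; a Laplace field is kept only if an Ismakov peak falls inside it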
+ for point in fields_ismakov:
+ field_id = fields[tuple(point)]
+ approved_fields.append(field_id)
+
+ for field_id in np.arange(1, fields.max() + 1):
+ if field_id not in approved_fields:
+ fields[fields == field_id] = 0
+
+ # varying statistics
+ average_rate = len(spike_times) / (t.max() - t.min())
+
+ max_rate = smooth_low_rate_map.max()
+
+ out_field_mean_rate = smooth_low_rate_map[np.where(fields == 0)].mean()
+ in_field_mean_rate = smooth_low_rate_map[np.where(fields != 0)].mean()
+ max_field_mean_rate = smooth_low_rate_map[np.where(fields == 1)].mean()
+
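+ # inter-spike-interval variability; the CV is 1 for a Poisson spike train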
+ interspike_interval = np.diff(spike_times)
+ interspike_interval_cv = interspike_interval.std() / interspike_interval.mean()
+
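+ # grid spacing and orientation are estimated from the peaks of the spatial autocorrelogram of the rate map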
+ autocorrelogram = sp.autocorrelation(smooth_high_rate_map)
+ peaks = sp.fields.find_peaks(autocorrelogram)
+ real_peaks = peaks * bin_size
+ autocorrelogram_box_size = box_size * autocorrelogram.shape[0] / smooth_high_rate_map.shape[0]
+ spacing, orientation = sp.spacing_and_orientation(real_peaks, autocorrelogram_box_size)
+ orientation *= 180 / np.pi
+
+ selectivity = stats.selectivity(smooth_low_rate_map, prob_dist)
+
+ sparsity = stats.sparsity(smooth_low_rate_map, prob_dist)
+
+ gridness = sp.gridness(smooth_high_rate_map)
+
+ border_score = sp.border_score(smooth_high_rate_map, fields_laplace)
+
+ information_rate = stats.information_rate(smooth_high_rate_map, prob_dist)
+
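+ # burst statistics; threshold=0.01 s presumably groups spikes separated by less than 10 ms into bursts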
+ single_spikes, bursts, bursty_spikes = spikes.find_bursts(spike_times, threshold=0.01)
+ burst_event_ratio = np.sum(bursts) / (np.sum(single_spikes) + np.sum(bursts))
+ bursty_spike_ratio = np.sum(bursty_spikes) / (np.sum(bursty_spikes) + np.sum(single_spikes))
+ mean_spikes_per_burst = np.sum(bursty_spikes) / np.sum(bursts)
+
+ speed_score = spd.speed_correlation(
+ speed, t, spike_times, min_speed=min_speed, max_speed=max_speed)
+
+ ang_bin, ang_rate = head.head_direction_rate(spike_times, ang, ang_t)
+
+ head_mean_ang, head_mean_vec_len = head.head_direction_score(ang_bin, ang_rate)
+
+ result = pd.Series({
+ 'average_rate': average_rate,
+ 'speed_score': speed_score,
+ 'out_field_mean_rate': out_field_mean_rate,
+ 'in_field_mean_rate': in_field_mean_rate,
+ 'max_field_mean_rate': max_field_mean_rate,
+ 'max_rate': max_rate,
+ 'sparsity': sparsity,
+ 'selectivity': selectivity,
+ 'interspike_interval_cv': float(interspike_interval_cv),
+ 'burst_event_ratio': burst_event_ratio,
+ 'bursty_spike_ratio': bursty_spike_ratio,
+ 'gridness': gridness,
+ 'border_score': border_score,
+ 'information_rate': information_rate,
+ 'head_mean_ang': head_mean_ang,
+ 'head_mean_vec_len': head_mean_vec_len,
+ 'spacing': spacing,
+ 'orientation': orientation
+ })
+ return result
+
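+# run the pipeline once on a single known unit as a quick sanity check before processing every unit below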
+process(first_row)
+
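+# progress_apply is provided by the tqdm pandas integration enabled above; merging on the index attaches the computed statistics to the unit metadata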
+results = units.merge(
+ units.progress_apply(process, axis=1),
+ left_index=True, right_index=True)
+
+output_path = pathlib.Path("output") / "calculate-statistics"
+output_path.mkdir(parents=True, exist_ok=True)
+
+units.to_csv(output_path / "units.csv", index=False)
+results.to_csv(output_path / "results.csv", index=False)
+
+statistics_action = project.require_action("calculate-statistics")
+
+statistics_action.data["units"] = "units.csv"
+statistics_action.data["results"] = "results.csv"
+copy_tree(output_path, str(statistics_action.data_path()))
+
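+# store_notebook presumably copies the notebook (and its HTML export) into the action's data folder, matching the attributes.yaml entries above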
+septum_mec.analysis.registration.store_notebook(statistics_action, "10_calculate_spatial_statistics.ipynb")
+
+
\n", + " | action | \n", + "channel_group | \n", + "unit_name | \n", + "
---|---|---|---|
0 | \n", + "1834-150319-3 | \n", + "0 | \n", + "71 | \n", + "
1 | \n", + "1834-150319-3 | \n", + "0 | \n", + "75 | \n", + "
2 | \n", + "1834-120319-4 | \n", + "0 | \n", + "85 | \n", + "
3 | \n", + "1834-120319-1 | \n", + "0 | \n", + "1 | \n", + "
4 | \n", + "1834-120319-2 | \n", + "0 | \n", + "39 | \n", + "