From 7c18a9f762af1c50e66b8ebd29433baa749cf26a Mon Sep 17 00:00:00 2001
From: Mikkel Elle Lepperød
Date: Wed, 9 Oct 2019 14:24:33 +0200
Subject: [PATCH] auto commit actions/calculate-statistics

---
 actions/calculate-statistics/attributes.yaml      |    12 +-
 .../data/10_calculate_spatial_statistics.html     | 27298 ++++++++--------
 .../data/10_calculate_spatial_statistics.ipynb    |   933 +-
 3 files changed, 14076 insertions(+), 14167 deletions(-)

diff --git a/actions/calculate-statistics/attributes.yaml b/actions/calculate-statistics/attributes.yaml
index 56a60d791..74a2c3c66 100644
--- a/actions/calculate-statistics/attributes.yaml
+++ b/actions/calculate-statistics/attributes.yaml
@@ -1,6 +1,6 @@
-registered: '2019-10-04T08:11:18'
-data:
-  units: units.csv
-  results: results.csv
-  notebook: 10_calculate_spatial_statistics.ipynb
-  html: 10_calculate_spatial_statistics.html
+registered: '2019-10-04T08:11:18'
+data:
+  units: units.csv
+  results: results.csv
+  notebook: 10_calculate_spatial_statistics.ipynb
+  html: 10_calculate_spatial_statistics.html

[the removed and re-added yaml lines are identical; only invisible whitespace/line endings changed]

diff --git a/actions/calculate-statistics/data/10_calculate_spatial_statistics.html b/actions/calculate-statistics/data/10_calculate_spatial_statistics.html
index 4918b6b5c..9b43bc021 100644
--- a/actions/calculate-statistics/data/10_calculate_spatial_statistics.html
+++ b/actions/calculate-statistics/data/10_calculate_spatial_statistics.html
@@ -1,13679 +1,13619 @@
[nbconvert-rendered HTML of the notebook; the head/style boilerplate and the superseded render are omitted. The updated render follows; it differs from the previous render exactly where the .ipynb diff at the end differs.]

In [1]:
%load_ext autoreload
%autoreload 2

In [2]:
import os
import expipe
import pathlib
import numpy as np
import spatial_maps.stats as stats
import septum_mec.analysis.data_processing as dp
import head_direction.head as head
import spatial_maps as sp
import septum_mec.analysis.registration
import speed_cells.speed as spd
import septum_mec.analysis.spikes as spikes
import re
import joblib
import multiprocessing
import shutil
import psutil
import pandas as pd
import matplotlib.pyplot as plt
import septum_mec
import scipy.ndimage.measurements
from distutils.dir_util import copy_tree

from tqdm import tqdm_notebook as tqdm
from tqdm._tqdm_notebook import tqdm_notebook
tqdm_notebook.pandas()

17:02:20 [I] klustakwik KlustaKwik2 version 0.2.6

In [3]:
max_speed = 1  # m/s, only used for the speed score
min_speed = 0.02  # m/s, only used for the speed score
position_sampling_rate = 100  # Hz, for interpolation
position_low_pass_frequency = 6  # Hz, for low-pass filtering of position

box_size = [1.0, 1.0]
bin_size = 0.02
smoothing_low = 0.03
smoothing_high = 0.06
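
These parameters pin down the map geometry: a 1.0 m x 1.0 m box at 0.02 m bins gives 50 x 50 rate maps, and the two smoothing widths correspond to 1.5 and 3 bins. A quick check of that arithmetic (illustrative only, not part of the notebook):

import numpy as np

box_size = [1.0, 1.0]  # m
bin_size = 0.02        # m

# bins per side of the rate map
print(np.asarray(box_size) / bin_size)   # [50. 50.]
# smoothing widths expressed in bins
print(0.03 / bin_size, 0.06 / bin_size)  # 1.5 3.0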

In [4]:
project_path = dp.project_path()

project = expipe.get_project(project_path)
actions = project.actions

In [5]:
identify_neurons = actions['identify-neurons']
units = pd.read_csv(identify_neurons.data_path('units'))
units.head()

Out[5]:
          action  channel_group  max_depth_delta  max_dissimilarity                               unit_id  unit_name
0  1834-010319-1              0              100               0.05  8d8cecbe-e2e5-4020-9c94-9573ca55cdfc          2
1  1834-010319-1              0              100               0.05  5b7fc3e8-b76d-4eed-a876-9ba184e508ac         39
2  1834-010319-3              0              100               0.05  1b42831d-5d71-4cb1-ba85-b5019b56ca2e          1
3  1834-010319-3              0              100               0.05  270fb3b3-3a7d-4060-bc1a-bc68d2ecab1a         12
4  1834-010319-3              0              100               0.05  6da7e1db-2d4f-4bd7-b45c-a1855aaa2fec         72

In [6]:
data_loader = dp.Data(
    position_sampling_rate=position_sampling_rate,
    position_low_pass_frequency=position_low_pass_frequency,
    box_size=box_size, bin_size=bin_size
)

In [7]:
first_row = units[units['action'] == '1849-060319-3'].iloc[0]
#first_row = sessions.iloc[50]

In [ ]:
def process(row):
    action_id = row['action']
    channel_id = row['channel_group']
    unit_id = row['unit_name']

    # values common to all units in a session == faster calculations
    x, y, t, speed = map(data_loader.tracking(action_id).get, ['x', 'y', 't', 'v'])
    ang, ang_t = map(data_loader.head_direction(action_id).get, ['a', 't'])
    occupancy_map = data_loader.occupancy(action_id)
    xbins, ybins = data_loader.spatial_bins
    box_size_, bin_size_ = data_loader.box_size_, data_loader.bin_size_
    prob_dist = data_loader.prob_dist(action_id)

    smooth_low_occupancy_map = sp.maps.smooth_map(occupancy_map, bin_size=bin_size_, smoothing=smoothing_low)
    smooth_high_occupancy_map = sp.maps.smooth_map(occupancy_map, bin_size=bin_size_, smoothing=smoothing_high)

    spike_times = data_loader.spike_train(action_id, channel_id, unit_id)

    # common
    spike_map = sp.maps._spike_map(x, y, t, spike_times, xbins, ybins)

    smooth_low_spike_map = sp.maps.smooth_map(spike_map, bin_size=bin_size_, smoothing=smoothing_low)
    smooth_high_spike_map = sp.maps.smooth_map(spike_map, bin_size=bin_size_, smoothing=smoothing_high)

    smooth_low_rate_map = smooth_low_spike_map / smooth_low_occupancy_map
    smooth_high_rate_map = smooth_high_spike_map / smooth_high_occupancy_map

    # find fields with the Laplace method
    fields_laplace = sp.separate_fields_by_laplace(smooth_high_rate_map)
    fields = fields_laplace.copy()  # to be cleaned against the Ismakov method
    fields_areas = scipy.ndimage.measurements.sum(
        np.ones_like(fields), fields, index=np.arange(fields.max() + 1))
    fields_area = fields_areas[fields]
    fields[fields_area < 9.0] = 0  # discard fields smaller than 9 bins

    # find fields with the Ismakov method
    fields_ismakov, radius = sp.separate_fields_by_distance(smooth_high_rate_map)
    fields_ismakov_real = fields_ismakov * bin_size
    approved_fields = []

    # keep only fields found by both methods
    for point in fields_ismakov:
        field_id = fields[tuple(point)]
        approved_fields.append(field_id)

    for field_id in np.arange(1, fields.max() + 1):
        if field_id not in approved_fields:
            fields[fields == field_id] = 0

    # per-unit statistics
    average_rate = len(spike_times) / (t.max() - t.min())

    max_rate = smooth_low_rate_map.max()

    out_field_mean_rate = smooth_low_rate_map[np.where(fields == 0)].mean()
    in_field_mean_rate = smooth_low_rate_map[np.where(fields != 0)].mean()
    max_field_mean_rate = smooth_low_rate_map[np.where(fields == 1)].mean()

    interspike_interval = np.diff(spike_times)
    interspike_interval_cv = interspike_interval.std() / interspike_interval.mean()

    autocorrelogram = sp.autocorrelation(smooth_high_rate_map)
    peaks = sp.fields.find_peaks(autocorrelogram)
    real_peaks = peaks * bin_size
    autocorrelogram_box_size = box_size * autocorrelogram.shape[0] / smooth_high_rate_map.shape[0]
    spacing, orientation = sp.spacing_and_orientation(real_peaks, autocorrelogram_box_size)
    orientation *= 180 / np.pi

    selectivity = stats.selectivity(smooth_low_rate_map, prob_dist)

    sparsity = stats.sparsity(smooth_low_rate_map, prob_dist)

    gridness = sp.gridness(smooth_high_rate_map)

    border_score = sp.border_score(smooth_high_rate_map, fields_laplace)

    information_rate = stats.information_rate(smooth_high_rate_map, prob_dist)

    single_spikes, bursts, bursty_spikes = spikes.find_bursts(spike_times, threshold=0.01)
    burst_event_ratio = np.sum(bursts) / (np.sum(single_spikes) + np.sum(bursts))
    bursty_spike_ratio = np.sum(bursty_spikes) / (np.sum(bursty_spikes) + np.sum(single_spikes))
    mean_spikes_per_burst = np.sum(bursty_spikes) / np.sum(bursts)

    speed_score = spd.speed_correlation(
        speed, t, spike_times, min_speed=min_speed, max_speed=max_speed)

    ang_bin, ang_rate = head.head_direction_rate(spike_times, ang, ang_t)

    head_mean_ang, head_mean_vec_len = head.head_direction_score(ang_bin, ang_rate)

    result = pd.Series({
        'average_rate': average_rate,
        'speed_score': speed_score,
        'out_field_mean_rate': out_field_mean_rate,
        'in_field_mean_rate': in_field_mean_rate,
        'max_field_mean_rate': max_field_mean_rate,
        'max_rate': max_rate,
        'sparsity': sparsity,
        'selectivity': selectivity,
        'interspike_interval_cv': float(interspike_interval_cv),
        'burst_event_ratio': burst_event_ratio,
        'bursty_spike_ratio': bursty_spike_ratio,
        'gridness': gridness,
        'border_score': border_score,
        'information_rate': information_rate,
        'head_mean_ang': head_mean_ang,
        'head_mean_vec_len': head_mean_vec_len,
        'spacing': spacing,
        'orientation': orientation
    })
    return result

process(first_row)
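
One thing to watch with the new list-valued box_size: the line computing autocorrelogram_box_size uses the global box_size, not the loader's box_size_. With the old scalar 1.0 the expression was fine, but in Python list * int concatenates and list / int raises TypeError, so this cell would now fail at that line unless the value is converted to an array first. A minimal illustration of the difference (stand-in shapes, not values from this notebook):

import numpy as np

box_size = [1.0, 1.0]
n_autocorr, n_rate = 99, 50  # hypothetical shapes of the two maps

# box_size * n_autocorr repeats the list 99 times, and the division then
# raises: TypeError: unsupported operand type(s) for /: 'list' and 'int'
# box_size * n_autocorr / n_rate

# element-wise scaling, as presumably intended:
print(np.asarray(box_size) * n_autocorr / n_rate)  # [1.98 1.98]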

In [ ]:
results = units.merge(
    units.progress_apply(process, axis=1),
    left_index=True, right_index=True)
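
progress_apply runs process serially, one unit at a time, and with over a thousand units this is the slow step. Since joblib and multiprocessing are imported at the top but never used, a per-row fan-out is the obvious extension. A rough sketch, assuming process and the objects it closes over (data_loader and the parameters) pickle cleanly, which notebook globals do not guarantee:

import multiprocessing

import joblib

n_jobs = multiprocessing.cpu_count()  # or a fixed small number

# fan the per-unit computation out over worker processes
stats_per_unit = joblib.Parallel(n_jobs=n_jobs)(
    joblib.delayed(process)(row) for _, row in units.iterrows()
)

results = units.merge(
    pd.DataFrame(stats_per_unit, index=units.index),
    left_index=True, right_index=True)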

In [ ]:
output_path = pathlib.Path("output") / "calculate-statistics"
output_path.mkdir(parents=True, exist_ok=True)  # parents=True so a fresh checkout works

In [ ]:
units.to_csv(output_path / "units.csv", index=False)
results.to_csv(output_path / "results.csv", index=False)

Store results in Expipe action

In [14]:
statistics_action = project.require_action("calculate-statistics")

In [15]:
statistics_action.data["units"] = "units.csv"
statistics_action.data["results"] = "results.csv"
copy_tree(output_path, str(statistics_action.data_path()))

Out[15]:
['/media/storage/expipe/septum-mec/actions/calculate-statistics/data/results.csv',
 '/media/storage/expipe/septum-mec/actions/calculate-statistics/data/sessions.csv',
 '/media/storage/expipe/septum-mec/actions/calculate-statistics/data/units.csv']
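
Registering the files on the action is what makes them addressable by name: cell 5 consumes the identify-neurons action through exactly the same mechanism. A downstream notebook can therefore read these results back without knowing the storage layout; a sketch following that same pattern (note that copy_tree also carried along a sessions.csv that sat in the output directory, even though only units and results are registered on the action):

import expipe
import pandas as pd
import septum_mec.analysis.data_processing as dp

project = expipe.get_project(dp.project_path())
statistics_action = project.actions['calculate-statistics']

# read the registered artifacts back by their registered names
units = pd.read_csv(statistics_action.data_path('units'))
results = pd.read_csv(statistics_action.data_path('results'))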

In [16]:
septum_mec.analysis.registration.store_notebook(statistics_action, "10_calculate_spatial_statistics.ipynb")

diff --git a/actions/calculate-statistics/data/10_calculate_spatial_statistics.ipynb b/actions/calculate-statistics/data/10_calculate_spatial_statistics.ipynb
index fc2b56dc4..d8e6b058d 100644
--- a/actions/calculate-statistics/data/10_calculate_spatial_statistics.ipynb
+++ b/actions/calculate-statistics/data/10_calculate_spatial_statistics.ipynb
@@ -1,482 +1,451 @@
[whole-file rewrite: a line-ending change touches every JSON line, so git shows the old and new notebook in full. The substantive cell-level differences are excerpted below; everything else matches the render above.]

Cell 2 (imports), logged stderr:
-08:27:22 [I] klustakwik KlustaKwik2 version 0.2.6
+17:02:20 [I] klustakwik KlustaKwik2 version 0.2.6

Cell 3 (parameters), the box is now given per dimension:
-box_size = 1.0
+box_size = [1.0, 1.0]

Cell 5, units.head(): units.csv from identify-neurons gained the columns max_depth_delta, max_dissimilarity and unit_id, and the listed rows changed (see the rendered table above for the new output). The old output was:
-          action  channel_group  unit_name
-0  1834-150319-3              0         71
-1  1834-150319-3              0         75
-2  1834-120319-4              0         85
-3  1834-120319-1              0          1
-4  1834-120319-2              0         39

first_row and process cells: execution counts change from In [9]/In [10] to In [7]/In [ ], and the process cell's previous outputs are cleared, namely the warning
-/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/elephant/statistics.py:835: UserWarning: Instantaneous firing rate approximation contains negative values, possibly caused due to machine precision errors.
and the statistics for the first unit of 1849-060319-3:
-average_rate               3.095328
-speed_score               -0.063922
-out_field_mean_rate        1.837642
-in_field_mean_rate         5.122323
-max_field_mean_rate        8.882211
-max_rate                  23.006163
-sparsity                   0.468122
-selectivity                7.306812
-interspike_interval_cv     3.970863
-burst_event_ratio          0.397921
-bursty_spike_ratio         0.676486
-gridness                  -0.459487
-border_score               0.078474
-information_rate           0.965845
-head_mean_ang              5.788704
-head_mean_vec_len          0.043321
-spacing                    0.624971
-orientation               22.067900
-dtype: float64

results = units.merge(...) cell: the progress-bar widget (max=1281 units) and the previous run's warnings are likewise cleared:
-/home/mikkel/apps/expipe-project/spatial-maps/spatial_maps/stats.py:13: RuntimeWarning: invalid value encountered in log2
-/home/mikkel/apps/expipe-project/spatial-maps/spatial_maps/stats.py:13: RuntimeWarning: divide by zero encountered in log2
-/home/mikkel/apps/expipe-project/spatial-maps/spatial_maps/stats.py:13: RuntimeWarning: invalid value encountered in multiply
-/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/ipykernel_launcher.py:56: RuntimeWarning: Mean of empty slice.
-/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/numpy/core/_methods.py:85: RuntimeWarning: invalid value encountered in double_scalars
-/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/ipykernel_launcher.py:57: RuntimeWarning: Mean of empty slice.
-/home/mikkel/.virtualenvs/expipe/lib/python3.6/site-packages/ipykernel_launcher.py:82: RuntimeWarning: invalid value encountered in long_scalars

Expipe-registration cells: previously unexecuted, now In [14] through In [16]; copy_tree reports the copied files:
+['/media/storage/expipe/septum-mec/actions/calculate-statistics/data/results.csv',
+ '/media/storage/expipe/septum-mec/actions/calculate-statistics/data/sessions.csv',
+ '/media/storage/expipe/septum-mec/actions/calculate-statistics/data/units.csv']

The notebook metadata (kernelspec Python 3, language_info 3.6.8, nbformat 4.2) is unchanged on both sides.