Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updating tests so that they are faster. #160

Merged
merged 4 commits into from
Dec 14, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
name: lint and install
name: tests

on:
push:
branches:
- main
tags:
- '*'
pull_request:

jobs:
Expand All @@ -12,24 +14,25 @@ jobs:
steps:
- uses: neuroinformatics-unit/actions/lint@main


install:
test:
needs: [linting]
name: ${{ matrix.os }} py${{ matrix.python-version }}
runs-on: ${{ matrix.os }}

defaults:
run:
shell: bash -l {0} # https://stackoverflow.com/questions/69070754/shell-bash-l-0-in-github-actions

strategy:
matrix:
os: [windows-latest, ubuntu-latest, macos-latest]
python-version: ["3.8", "3.9", "3.10", "3.11"]

steps:
- uses: actions/checkout@v4
- name: Install dependencies
run:
python3 -m pip install --upgrade pip
pip install .
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[dev]
- name: Test
run: pytest
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
.obsidian
slurm_logs/
derivatives/
tests/data/
tests/data/steve_multi_run
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
Expand Down
6 changes: 3 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ classifiers = [
]

dependencies = [
"spikeinterface==0.98.2",
"probeinterface==0.2.18",
"spikeinterface[full] @git+https://github.com/JoeZiminski/spikeinterface.git@random_chunks_chunk_size_to_warning",
"probeinterface",
"neo==0.12.0",
"submitit",
"PyYAML",
Expand All @@ -30,7 +30,7 @@ dependencies = [
# sorter-specific
"tridesclous",
# "spyking-circus", TODO: this is not straightforward, requires mpi4py. TBD if we want to manage this.
"mountainsort5; platform_system != 'Darwin'",
"mountainsort5",
"docker; platform_system=='Windows'",
"docker; platform_system=='Darwin'",
"spython; platform_system=='Linux'", # I think missing from SI?
Expand Down
38 changes: 38 additions & 0 deletions spikewrap/configs/fast_test_pipeline.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Fast-running pipeline configuration for tests: standard preprocessing
# chain plus sorter options tuned to finish quickly on tiny toy recordings.

'preprocessing':
  # Steps run in numeric order; each entry is [step_name, step_kwargs].
  '1':
    - phase_shift
    - {}
  '2':
    - bandpass_filter
    - freq_min: 300
      freq_max: 6000
  '3':
    - common_reference
    - operator: median
      reference: global

'sorting':
  'kilosort2':
    'car': False  # common average referencing
    'freq_min': 150  # highpass filter cutoff; neither False nor 0 works to turn it off (results in KS error)
  'kilosort2_5':
    'car': False
    'freq_min': 150
  'kilosort3':
    'car': False
    'freq_min': 300
  'mountainsort5':
    'scheme': '2'
    'filter': False
    'whiten': False

'waveforms':
  'ms_before': 2
  'ms_after': 2
  'max_spikes_per_unit': 500
  'return_scaled': True
  # Sparsity Options
  'sparse': True
  'peak_sign': "neg"
  'method': "radius"
  'radius_um': 75
2 changes: 2 additions & 0 deletions spikewrap/configs/test_default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
'mountainsort5':
'scheme': '2'
'filter': False
'whiten': False # TODO: add this back.

'waveforms':
'ms_before': 2
Expand All @@ -38,3 +39,4 @@
'peak_sign': "neg"
'method': "radius"
'radius_um': 75
'chunk_size': 1
6 changes: 3 additions & 3 deletions spikewrap/examples/example_full_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
from spikewrap.pipeline.full_pipeline import run_full_pipeline

base_path = Path(
"/ceph/neuroinformatics/neuroinformatics/scratch/jziminski/ephys/code/spikewrap/tests/data/steve_multi_run/time-short-multises"
# "/ceph/neuroinformatics/neuroinformatics/scratch/jziminski/ephys/test_data/steve_multi_run/1119617/time-short-multises"
# "/ceph/neuroinformatics/neuroinformatics/scratch/jziminski/ephys/code/spikewrap/tests/data/steve_multi_run/time-short-multises"
"/ceph/neuroinformatics/neuroinformatics/scratch/jziminski/ephys/test_data/steve_multi_run/1119617/time-short-multises"
# r"C:\data\ephys\test_data\steve_multi_run\1119617\time-miniscule-mutlises"
# r"C:\data\ephys\test_data\steve_multi_run\1119617\time-short-multises"
# "/ceph/neuroinformatics/neuroinformatics/scratch/jziminski/ephys/test_data/steve_multi_run/1119617/time-short"
Expand Down Expand Up @@ -36,7 +36,7 @@
sessions_and_runs,
config_name,
sorter,
concat_sessions_for_sorting=True, # TODO: validate this at the start, in `run_full_pipeline`
concat_sessions_for_sorting=False, # TODO: validate this at the start, in `run_full_pipeline`
concat_runs_for_sorting=True,
existing_preprocessed_data="skip_if_exists", # this is kind of confusing...
existing_sorting_output="overwrite",
Expand Down
4 changes: 0 additions & 4 deletions spikewrap/pipeline/sort.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from __future__ import annotations

import os
import platform
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Union

Expand Down Expand Up @@ -152,9 +151,6 @@ def _run_sorting(
passed_arguments = locals()
validate.check_function_arguments(passed_arguments)

if sorter == "mountainsort5" and platform.system() == "Darwin":
raise EnvironmentError("Mountainsort is not currently supported on macOS.")

logs = logging_sw.get_started_logger(
utils.get_logging_path(base_path, sub_name),
"sorting",
Expand Down
64 changes: 64 additions & 0 deletions tests/data/small_toy_data/generate_test_data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
"""Generate a tiny toy dataset for fast tests.

For each session/run in ``sessions_and_runs``, creates a 16-channel,
0.05 s spikeinterface toy recording, tags it with shank groupings and
inter-sample shifts, scales it, and saves it under
``<this dir>/rawdata/<sub>/<ses>/ephys/<run>``.
"""
from pathlib import Path

import numpy as np
import spikeinterface.extractors as se
import spikeinterface.preprocessing as spre

print("Starting...")

# Session name -> list of run names making up the toy dataset.
sessions_and_runs = {
    "ses-001": ["ses-001_run-001", "ses-001_run-002"],
    "ses-002": ["ses-002_run-001", "ses-002_run-002"],
    "ses-003": ["ses-003_run-001", "ses-003_run-002"],
}

# Disabled block: optional preprocessing of the generated data, kept for
# manual/interactive use only.
if False:
    from spikewrap.pipeline.preprocess import run_preprocessing

    sub_name = "sub-001_type-test"
    base_path = Path(__file__).parent.resolve()

    from spikewrap.pipeline.load_data import load_data

    preprocess_data = load_data(
        base_path, sub_name, sessions_and_runs, "spikeinterface"
    )

    run_preprocessing(
        preprocess_data,
        "default",
        handle_existing_data="overwrite",
        slurm_batch=False,
        log=False,
    )  # TODO: use config_name for all funcs.


# if False:

sub = "sub-001_type-test"
base_path = Path(__file__).parent.resolve() / "rawdata"

for ses in sessions_and_runs.keys():
    for run in sessions_and_runs[ses]:
        num_channels = 16
        # Very short (0.05 s), single-segment recording keeps tests fast.
        recording, _ = se.toy_example(
            duration=[0.05], num_segments=1, num_channels=num_channels, num_units=2
        )
        # Four shanks of four channels each: groups [0,0,0,0,1,1,1,1,...].
        four_shank_groupings = np.repeat([0, 1, 2, 3], 4)
        recording.set_property("group", four_shank_groupings)
        recording.set_property("inter_sample_shift", np.arange(16) * 0.0001)

        # NOTE(review): writes a private spikeinterface attribute directly.
        recording._main_ids = np.array(
            [f"imec0.ap#AP{i}" for i in range(num_channels)]
        )  # for consistency with spikeglx dataset TODO this is a hack

        recording = spre.scale(recording, gain=50, offset=20)

        output_path = base_path / sub / ses / "ephys" / run

        recording.save(folder=output_path, chunk_size=1000000)

# shifted_recording = spre.phase_shift(recording)
# filtered_recording = spre.bandpass_filter(recording)
# referenced_recording = spre.common_reference(filtered_recording)
# run_sorter("mountainsort5", referenced_recording, remove_existing_folder=True, whiten=False)
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
{
"class": "spikeinterface.core.binaryrecordingextractor.BinaryRecordingExtractor",
"module": "spikeinterface",
"kwargs": {
"file_paths": [
"traces_cached_seg0.raw"
],
"sampling_frequency": 30000.0,
"t_starts": null,
"num_channels": 16,
"dtype": "<f4",
"channel_ids": [
"imec0.ap#AP0",
"imec0.ap#AP1",
"imec0.ap#AP2",
"imec0.ap#AP3",
"imec0.ap#AP4",
"imec0.ap#AP5",
"imec0.ap#AP6",
"imec0.ap#AP7",
"imec0.ap#AP8",
"imec0.ap#AP9",
"imec0.ap#AP10",
"imec0.ap#AP11",
"imec0.ap#AP12",
"imec0.ap#AP13",
"imec0.ap#AP14",
"imec0.ap#AP15"
],
"time_axis": 0,
"file_offset": 0,
"gain_to_uV": [
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0
],
"offset_to_uV": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0
],
"is_filtered": null
},
"version": "0.100.0.dev0",
"annotations": {
"is_filtered": false
},
"properties": {
"group": null,
"location": null,
"gain_to_uV": [
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0
],
"offset_to_uV": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0
]
},
"relative_paths": true
}
Loading