Skip to content

Commit

Permalink
FIX: Green
Browse files Browse the repository at this point in the history
  • Loading branch information
larsoner committed Oct 17, 2023
1 parent 3ce7d8b commit 5cb32a4
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 32 deletions.
60 changes: 29 additions & 31 deletions mne/io/neuralynx/neuralynx.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import os.path as op
from ..base import BaseRaw
from ..._fiff.utils import _read_segments_file
import numpy as np

from ..._fiff.meas_info import create_info
from ...utils import logger, verbose, fill_doc
from ..._fiff.utils import _mult_cal_one
from ...utils import _check_fname, _soft_import, fill_doc, logger, verbose
from ..base import BaseRaw


@fill_doc
Expand Down Expand Up @@ -35,14 +36,12 @@ class RawNeuralynx(BaseRaw):

@verbose
def __init__(self, fname, preload=False, verbose=None):
try:
from neo.io import NeuralynxIO
except Exception:
raise ImportError("Missing the neo-python package") from None
_soft_import("neo", "Reading NeuralynxIO files", strict=True)
from neo.io import NeuralynxIO

datadir = op.abspath(fname)
fname = _check_fname(fname, "read", True, "fname", need_dir=True)

logger.info(f"Checking files in {datadir}")
logger.info(f"Checking files in {fname}")

# get basic file info
nlx_reader = NeuralynxIO(dirname=fname)
Expand All @@ -58,32 +57,31 @@ def __init__(self, fname, preload=False, verbose=None):
n_segments = nlx_reader.header["nb_segment"][0]
block_id = 0 # assumes there's only one block of recording
n_total_samples = sum(
[
nlx_reader.get_signal_size(block_id, segment)
for segment in range(n_segments)
]
nlx_reader.get_signal_size(block_id, segment)
for segment in range(n_segments)
)

# loop over found filenames and collect names and store last sample numbers
last_samps = []
ncs_fnames = []

for chan_key in nlx_reader.ncs_filenames.keys():
ncs_fname = nlx_reader.ncs_filenames[chan_key]
ncs_fnames.append(ncs_fname)
last_samps.append(
n_total_samples - 1
) # assumes the same sample size for all files/channels

super(RawNeuralynx, self).__init__(
info=info,
last_samps=last_samps[0:1],
filenames=ncs_fnames,
last_samps=[n_total_samples - 1],
filenames=[fname],
preload=preload,
)

def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
    """Read a chunk of raw Neuralynx data into ``data``.

    Method of ``RawNeuralynx``. Reads the directory stored in
    ``self._filenames[fi]`` with ``neo.io.NeuralynxIO``, extracts the
    ``start:stop`` sample span, scales from microvolts to volts, and
    scatters the calibrated result into ``data`` via ``_mult_cal_one``.

    Parameters
    ----------
    data : ndarray
        Output buffer filled in place.
    idx : ndarray | slice
        Channel indices to read.
    fi : int
        Index into ``self._filenames`` (the recording directory).
    start, stop : int
        First and one-past-last sample to read.
    cals : ndarray
        Per-channel calibration factors.
    mult : ndarray | None
        Optional projection/calibration matrix.
    """
    # Local import: neo is an optional dependency, checked at __init__ time
    # via _soft_import, so only import it when data is actually read.
    from neo.io import NeuralynxIO

    nlx_reader = NeuralynxIO(dirname=self._filenames[fi])
    bl = nlx_reader.read(lazy=False)
    # TODO: This is massively inefficient -- we should not need to read *all*
    # samples just to get the ones from `idx` channels and `start:stop` time
    # span. But let's start here and make it efficient later.
    all_data = np.concatenate(
        [sig.magnitude for seg in bl[0].segments for sig in seg.analogsignals]
    ).T
    # Slice out only the requested time span (channels x samples).
    block = all_data[:, start:stop]
    # Convert uV to V (neo returns microvolts; MNE stores volts).
    block *= 1e-6
    # Then store the result where it needs to go, applying cals/mult.
    _mult_cal_one(data, block, idx, cals, mult)
3 changes: 2 additions & 1 deletion mne/io/neuralynx/tests/test_neuralynx.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@
import pytest
from numpy.testing import assert_allclose

from mne.datasets.testing import data_path, requires_testing_data
from mne.io import read_raw_neuralynx
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets.testing import data_path, requires_testing_data

testing_path = data_path(download=False) / "neuralynx"

Expand All @@ -30,6 +30,7 @@ def test_neuralynx():
d2 = np.concatenate(
[sig.magnitude for seg in bl[0].segments for sig in seg.analogsignals]
).T
d2 *= 1e-6 # to V
t2 = np.concatenate(
[sig.times.magnitude for seg in bl[0].segments for sig in seg.analogsignals]
).T
Expand Down

0 comments on commit 5cb32a4

Please sign in to comment.