
MRG: Test constant consistency #5332

Merged: 14 commits, Sep 13, 2018
2 changes: 2 additions & 0 deletions doc/whats_new.rst
@@ -54,6 +54,8 @@ Bug

- Fix bug of not showing ERD's in baseline rescaled tfr topomaps if grads are combined by `Erkka Heinila`_

- Fix bug with FIF I/O where strings were written in UTF-8 format instead of Latin-1 by `Eric Larson`_

- Fix bug with reading measurement dates from BrainVision files by `Stefan Appelhoff`_

- Fix bug with `mne.fit_dipole` where the residual was returned as ``ndarray`` instead of :class:`mne.Evoked` instance, by `Eric Larson`_
10 changes: 5 additions & 5 deletions mne/bem.py
@@ -15,8 +15,7 @@
import numpy as np
from scipy import linalg

from .transforms import _ensure_trans, apply_trans
from .io.constants import FIFF
from .io.constants import FIFF, FWD
from .io.write import (start_file, start_block, write_float, write_int,
write_float_matrix, write_int_matrix, end_block,
end_file)
@@ -26,6 +25,7 @@
from .surface import (read_surface, write_surface, complete_surface_info,
_compute_nearest, _get_ico_surface, read_tri,
_fast_cross_nd_sum, _get_solids)
from .transforms import _ensure_trans, apply_trans
from .utils import (verbose, logger, run_subprocess, get_subjects_dir, warn,
_pl, _validate_type)
from .fixes import einsum
@@ -267,7 +267,7 @@ def _fwd_bem_linear_collocation_solution(m):
m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
if len(m['surfs']) == 3:
ip_mult = m['sigma'][1] / m['sigma'][2]
if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:
if ip_mult <= FWD.BEM_IP_APPROACH_LIMIT:
logger.info('IP approach required...')
logger.info(' Matrix coefficients (homog)...')
coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
@@ -278,7 +278,7 @@ def _fwd_bem_linear_collocation_solution(m):
'IP approach...')
_fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
nps)
m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL
m['bem_method'] = FWD.BEM_LINEAR_COLL
logger.info("Solution ready.")


@@ -1521,7 +1521,7 @@ def write_bem_solution(fname, bem):
_write_bem_surfaces_block(fid, bem['surfs'])
# The potential solution
if 'solution' in bem:
if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
if bem['bem_method'] != FWD.BEM_LINEAR_COLL:
raise RuntimeError('Only linear collocation supported')
write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,
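
The `ip_mult <= FWD.BEM_IP_APPROACH_LIMIT` comparison above is the trigger for the isolated-potential (IP) approach in three-layer BEM models. A standalone sketch of that decision, reusing the 0.1 limit this PR moves into the `FWD` namespace (the conductivity values below are illustrative assumptions, not taken from this diff):

```python
# Sketch of the IP-approach decision for a 3-layer BEM model.
# The 0.1 limit matches FWD.BEM_IP_APPROACH_LIMIT introduced in this PR;
# the conductivities are example values only (index 1 ~ skull, index 2 ~ scalp
# in this sketch).
BEM_IP_APPROACH_LIMIT = 0.1

sigma = [0.3, 0.006, 0.3]       # compartment conductivities (S/m), illustrative
ip_mult = sigma[1] / sigma[2]   # 0.02 in this example

if ip_mult <= BEM_IP_APPROACH_LIMIT:
    print('IP approach required...')  # mirrors the logger.info call above
```
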
10 changes: 5 additions & 5 deletions mne/epochs.py
@@ -133,10 +133,10 @@ def _save_split(epochs, fname, part_idx, n_parts):
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]

write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))

write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION,
epochs.selection)

# And now write the next file info in case epochs are split on disk
@@ -2470,10 +2470,10 @@ def _read_one_epoch_file(f, tree, preload):
# Constant 305 was used before v0.11
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = json.loads(tag.data)

@@ -2513,7 +2513,7 @@ def _read_one_epoch_file(f, tree, preload):
tmax = last / info['sfreq']
event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
if mappings is None else mappings)
# In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
# In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = np.arange(len(events))
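
Only the constant names change here: the epochs selection and drop log are still written as an integer tag and a JSON-encoded string tag, but under `FIFF_`-prefixed (tag) constants rather than the misleading `FIFFB_` (block) prefix. A minimal sketch of the JSON round trip used for the drop log, independent of the FIF writer (the example values are made up):

```python
import json

import numpy as np

# The drop log has one entry per original epoch; non-empty entries give the
# reason an epoch was dropped.
drop_log = [[], ['IGNORED'], ['EEG 001'], []]
selection = np.array([0, 3])  # indices of the kept epochs (empty entries)

# Serialize as in _save_split (json.dumps -> string tag), then read back
# as in _read_one_epoch_file (json.loads on the tag data).
encoded = json.dumps(drop_log)
decoded = json.loads(encoded)
assert decoded == drop_log
```
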
2 changes: 1 addition & 1 deletion mne/externals/decorator.py
@@ -59,7 +59,7 @@ def __iter__(self):
def get_init(cls):
return cls.__init__.__func__

DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')

# basic functionality
class FunctionMaker(object):
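
The only change to the vendored decorator module is a raw-string prefix on the regex: `'\s'` inside a plain string literal is an invalid escape sequence that newer Python versions warn about, while the compiled pattern itself is unchanged. A quick standalone check of the pattern:

```python
import re

# Same pattern as in decorator.py, written as a raw string so '\s' and '\w'
# reach the regex engine instead of being treated as string escapes.
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')

match = DEF.match('def spam(x, y):')
assert match is not None and match.group(1) == 'spam'
```
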
4 changes: 2 additions & 2 deletions mne/forward/_compute_forward.py
@@ -17,7 +17,7 @@
from copy import deepcopy

from ..surface import fast_cross_3d, _project_onto_surface
from ..io.constants import FIFF
from ..io.constants import FIFF, FWD
from ..transforms import apply_trans
from ..utils import logger, verbose, _pl
from ..parallel import parallel_func
@@ -682,7 +682,7 @@ def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None):
"""
bem_rr = mults = mri_Q = head_mri_t = None
if not bem['is_sphere']:
if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
if bem['bem_method'] != FWD.BEM_LINEAR_COLL:
raise RuntimeError('only linear collocation supported')
# Store (and apply soon) μ_0/(4π) factor before source computations
mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
7 changes: 3 additions & 4 deletions mne/forward/_field_interpolation.py
@@ -5,8 +5,8 @@
import numpy as np
from scipy import linalg

from ..io.constants import FWD
from ..bem import _check_origin
from ..io.constants import FIFF
from ..io.pick import pick_types, pick_info
from ..surface import get_head_surf, get_meg_helmet_surf

@@ -23,9 +23,8 @@

def _is_axial_coil(coil):
"""Determine if the coil is axial."""
is_ax = coil['coil_class'] in (FIFF.FWD_COILC_MAG,
FIFF.FWD_COILC_AXIAL_GRAD,
FIFF.FWD_COILC_AXIAL_GRAD2)
is_ax = coil['coil_class'] in (
FWD.COILC_MAG, FWD.COILC_AXIAL_GRAD, FWD.COILC_AXIAL_GRAD2)
return is_ax


10 changes: 5 additions & 5 deletions mne/forward/_make_forward.py
@@ -10,9 +10,10 @@
from os import path as op
import numpy as np

from ._compute_forward import _compute_forwards
from ..io import read_info, _loc_to_coil_trans, _loc_to_eeg_loc, Info
from ..io.pick import _has_kit_refs, pick_types, pick_info
from ..io.constants import FIFF
from ..io.constants import FIFF, FWD
from ..transforms import (_ensure_trans, transform_surface_to, apply_trans,
_get_trans, _print_coord_trans, _coord_frame_name,
Transform)
@@ -26,11 +27,10 @@
from ..externals.six import string_types

from .forward import Forward, _merge_meg_eeg_fwds, convert_forward_solution
from ._compute_forward import _compute_forwards


_accuracy_dict = dict(normal=FIFF.FWD_COIL_ACCURACY_NORMAL,
accurate=FIFF.FWD_COIL_ACCURACY_ACCURATE)
_accuracy_dict = dict(normal=FWD.COIL_ACCURACY_NORMAL,
accurate=FWD.COIL_ACCURACY_ACCURATE)


@verbose
@@ -180,7 +180,7 @@ def _create_eeg_el(ch, t=None):
# The electrode location
cosmag = r0ex.copy()
_normalize_vectors(cosmag)
res = dict(chname=ch['ch_name'], coil_class=FIFF.FWD_COILC_EEG, w=w,
res = dict(chname=ch['ch_name'], coil_class=FWD.COILC_EEG, w=w,
accuracy=_accuracy_dict['normal'], type=ch['coil_type'],
coord_frame=t['to'], rmag=r0ex, cosmag=cosmag)
return res
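
In `_create_eeg_el` above, the electrode positions are copied into `cosmag` and normalized to unit length before being stored in the coil definition dict. A rough numpy sketch of that row-wise normalization (mne's `_normalize_vectors` helper is not shown in this diff and may differ in detail):

```python
import numpy as np

# Example electrode integration points (meters); values are illustrative.
r0ex = np.array([[0.00, 0.00, 0.09],
                 [0.03, 0.00, 0.08]])

# Copy, then normalize each row to unit length (guarding against zero rows),
# mirroring the cosmag computation in _create_eeg_el.
cosmag = r0ex.copy()
norms = np.linalg.norm(cosmag, axis=1, keepdims=True)
cosmag /= np.where(norms > 0, norms, 1.0)

assert np.allclose(np.linalg.norm(cosmag, axis=1), 1.0)
```
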
49 changes: 26 additions & 23 deletions mne/io/constants.py
@@ -26,7 +26,7 @@ def __setattr__(self, attr, val):  # noqa: D105
# FIFF version number in use
#
FIFF.FIFFC_MAJOR_VERSION = 1
FIFF.FIFFC_MINOR_VERSION = 3
FIFF.FIFFC_MINOR_VERSION = 4
FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION

#
@@ -129,7 +129,7 @@ def __setattr__(self, attr, val):  # noqa: D105
FIFF.FIFF_ARTEF_REMOVAL = 221
FIFF.FIFF_COORD_TRANS = 222
FIFF.FIFF_HIGHPASS = 223
FIFF.FIFF_CH_CALS = 22 # This will not occur in new files
FIFF.FIFF_CH_CALS = 224 # This will not occur in new files
FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi
FIFF.FIFF_HPI_CORR_COEFF = 226 # HPI curve fit correlations
FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging
@@ -646,27 +646,30 @@ def __setattr__(self, attr, val):  # noqa: D105
#
# FWD Types
#
FIFF.FWD_COIL_UNKNOWN = 0
FIFF.FWD_COILC_UNKNOWN = 0
FIFF.FWD_COILC_EEG = 1000
FIFF.FWD_COILC_MAG = 1
FIFF.FWD_COILC_AXIAL_GRAD = 2
FIFF.FWD_COILC_PLANAR_GRAD = 3
FIFF.FWD_COILC_AXIAL_GRAD2 = 4

FIFF.FWD_COIL_ACCURACY_POINT = 0
FIFF.FWD_COIL_ACCURACY_NORMAL = 1
FIFF.FWD_COIL_ACCURACY_ACCURATE = 2
FWD = BunchConst()

FIFF.FWD_BEM_UNKNOWN = -1
FIFF.FWD_BEM_CONSTANT_COLL = 1
FIFF.FWD_BEM_LINEAR_COLL = 2
FWD.COIL_UNKNOWN = 0
FWD.COILC_UNKNOWN = 0
FWD.COILC_EEG = 1000
FWD.COILC_MAG = 1
FWD.COILC_AXIAL_GRAD = 2
FWD.COILC_PLANAR_GRAD = 3
FWD.COILC_AXIAL_GRAD2 = 4

FIFF.FWD_BEM_IP_APPROACH_LIMIT = 0.1
FWD.COIL_ACCURACY_POINT = 0
FWD.COIL_ACCURACY_NORMAL = 1
FWD.COIL_ACCURACY_ACCURATE = 2

FIFF.FWD_BEM_LIN_FIELD_SIMPLE = 1
FIFF.FWD_BEM_LIN_FIELD_FERGUSON = 2
FIFF.FWD_BEM_LIN_FIELD_URANKAR = 3
FWD.BEM_UNKNOWN = -1
FWD.BEM_CONSTANT_COLL = 1
FWD.BEM_LINEAR_COLL = 2

FWD.BEM_IP_APPROACH_LIMIT = 0.1

FWD.BEM_LIN_FIELD_SIMPLE = 1
FWD.BEM_LIN_FIELD_FERGUSON = 2
FWD.BEM_LIN_FIELD_URANKAR = 3

#
# Data types
@@ -848,11 +851,11 @@ def __setattr__(self, attr, val):  # noqa: D105
FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client

# MNE epochs bookkeeping
FIFF.FIFFB_MNE_EPOCHS_SELECTION = 3800 # the epochs selection
FIFF.FIFFB_MNE_EPOCHS_DROP_LOG = 3801 # the drop log
FIFF.FIFF_MNE_EPOCHS_SELECTION = 3800 # the epochs selection
FIFF.FIFF_MNE_EPOCHS_DROP_LOG = 3801 # the drop log

# MNE annotations
FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations
FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block

# MNE Metadata Dataframes
FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes
FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block
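
The forward-modeling constants now get their own `FWD` namespace (a second `BunchConst` alongside `FIFF`) instead of living as `FWD_*`-prefixed attributes on `FIFF`. A minimal sketch of the attribute-access container idea behind this, using a simplified stand-in class (the real `BunchConst` in mne.io.constants has more behavior than shown here):

```python
# Simplified stand-in for the Bunch-style constant container used for FIFF/FWD.
# It only demonstrates the attribute-access pattern this diff relies on.
class _ConstNamespace(dict):
    def __setattr__(self, attr, val):
        self[attr] = val

    def __getattr__(self, attr):
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)


FWD = _ConstNamespace()
FWD.BEM_UNKNOWN = -1
FWD.BEM_CONSTANT_COLL = 1
FWD.BEM_LINEAR_COLL = 2

# Call sites then compare against FWD.* instead of FIFF.FWD_*:
bem_method = FWD.BEM_LINEAR_COLL
assert bem_method == FWD.BEM_LINEAR_COLL
```
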
2 changes: 1 addition & 1 deletion mne/io/ctf/tests/test_ctf.py
@@ -106,7 +106,7 @@ def test_read_ctf():
assert_allclose(raw.times, raw_c.times)
assert_allclose(raw._cals, raw_c._cals)
assert_equal(raw.info['meas_id']['version'],
raw_c.info['meas_id']['version'])
raw_c.info['meas_id']['version'] + 1)
for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'],
rtol=1e-4, atol=1e-7)
7 changes: 4 additions & 3 deletions mne/io/fiff/tests/test_raw_fiff.py
@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
@@ -513,16 +514,16 @@ def test_io_raw():
rng = np.random.RandomState(0)
tempdir = _TempDir()
# test unicode io
for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
for chars in [u'äöé', 'a']:
with read_raw_fif(fif_fname) as r:
assert ('Raw' in repr(r))
assert (op.basename(fif_fname) in repr(r))
desc1 = r.info['description'] = chars.decode('utf-8')
r.info['description'] = chars
temp_file = op.join(tempdir, 'raw.fif')
r.save(temp_file, overwrite=True)
with read_raw_fif(temp_file) as r2:
desc2 = r2.info['description']
assert_equal(desc1, desc2)
assert desc2 == chars

# Let's construct a simple test for IO first
raw = read_raw_fif(fif_fname).crop(0, 3.5)
4 changes: 2 additions & 2 deletions mne/io/tag.py
@@ -341,9 +341,9 @@ def _read_simple(fid, tag, shape, rlims, dtype):

def _read_string(fid, tag, shape, rlims):
"""Read a string tag."""
# Always decode to unicode.
# Always decode to ISO 8859-1 / latin1 (FIFF standard).
d = _frombuffer_rows(fid, tag.size, dtype='>c', shape=shape, rlims=rlims)
return text_type(d.tostring().decode('utf-8', 'ignore'))
return text_type(d.tostring().decode('latin1', 'ignore'))

Review discussion on this change:

Member Author: FYI @agramfort I saw in the official docs that strings are written this way, not using UTF-8, so I have changed it :(

Member: ok. Do we test IO with utf8 characters?

Member Author: Yep, though I just pushed a commit to more explicitly test it.

Member: this will not break the IO of old files?

Member Author: It might make some of the characters look different. But either way (what we have in master vs what this PR does) some files are broken on read and write. The difference is that what this PR does follows the FIF spec, what we had before did not. So this should probably be in whats_new.rst under the BUG section.
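
The practical consequence under discussion: the FIF format stores strings as Latin-1, so text written with one codec and read back with the other comes out with different characters. A small standalone illustration using the same `äöé` string as the updated test:

```python
# 'äöé' has a one-byte-per-character Latin-1 encoding but a two-byte-per-
# character UTF-8 encoding, so mixing the two codecs mangles the text.
desc = u'äöé'

latin1_bytes = desc.encode('latin1')   # b'\xe4\xf6\xe9'
utf8_bytes = desc.encode('utf-8')      # b'\xc3\xa4\xc3\xb6\xc3\xa9'

assert latin1_bytes.decode('latin1') == desc      # spec-compliant round trip
assert utf8_bytes.decode('latin1') == u'Ã¤Ã¶Ã©'   # old writer + new reader: mojibake
```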



def _read_complex_float(fid, tag, shape, rlims):