diff --git a/.mailmap b/.mailmap index 79ce4939e6..4205e5e22e 100644 --- a/.mailmap +++ b/.mailmap @@ -29,6 +29,7 @@ Basile Pinsard bpinsard Nguyen, Ly lxn2 Ben Cipollini Ben Cipollini Chris Markiewicz Christopher J. Markiewicz +Chris Markiewicz Chris Markiewicz Chris Markiewicz Christopher J. Markiewicz Chris Markiewicz Christopher J. Markiewicz Chris Markiewicz Chris Johnson diff --git a/.travis.yml b/.travis.yml index 28ac4fa5f4..b6e69d09ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,11 +4,10 @@ # for it to be on multiple physical lines, so long as you remember: - There # can't be any leading "-"s - All newlines will be removed, so use ";"s +dist: xenial +sudo: true language: python -# Run jobs on container-based infrastructure, can be overridden per job -sudo: false - cache: directories: - $HOME/.cache/pip @@ -22,11 +21,14 @@ env: - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - PRE_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" python: - - 3.4 - 3.5 - 3.6 + - 3.7 matrix: include: + - python: 3.4 + dist: trusty + sudo: false - python: 2.7 env: - COVERAGE=1 diff --git a/COPYING b/COPYING index 5827950a17..6f03ba5ccd 100644 --- a/COPYING +++ b/COPYING @@ -121,36 +121,40 @@ Sphinx 0.6 doesn't work properly. OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-Ordereddict +OrderedSet ----------- -In ``nibabel/externals/ordereddict.py`` +In ``nibabel/externals/oset.py`` -Copied from: https://pypi.python.org/packages/source/o/ordereddict/ordereddict-1.1.tar.gz#md5=a0ed854ee442051b249bfad0f638bbec +Copied from: https://files.pythonhosted.org/packages/d6/b1/a49498c699a3fda5d635cc1fa222ffc686ea3b5d04b84a3166c4cab0c57b/oset-0.1.3.tar.gz :: - Copyright (c) 2009 Raymond Hettinger - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation files - (the "Software"), to deal in the Software without restriction, - including without limitation the rights to use, copy, modify, merge, - publish, distribute, sublicense, and/or sell copies of the Software, - and to permit persons to whom the Software is furnished to do so, - subject to the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. + Copyright (c) 2009, Raymond Hettinger, and others All rights reserved. + + Package structured based on the one developed to odict Copyright (c) 2010, BlueDynamics Alliance, Austria + + - Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + - Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + + - Neither the name of the BlueDynamics Alliance nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY BlueDynamics Alliance AS IS AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + SHALL BlueDynamics Alliance BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + OF SUCH DAMAGE. mni_icbm152_t1_tal_nlin_asym_09a -------------------------------- diff --git a/Changelog b/Changelog index 153d522629..0ca78d01a8 100644 --- a/Changelog +++ b/Changelog @@ -20,10 +20,46 @@ Nibabel releases Most work on NiBabel so far has been by Matthew Brett (MB), Michael Hanke (MH) Ben Cipollini (BC), Marc-Alexandre Côté (MC), Chris Markiewicz (CM), Stephan -Gerhard (SG) and Eric Larson (EL). +Gerhard (SG), Eric Larson (EL), Yaroslav Halchenko (YOH) and Chris Cheng (CC). References like "pr/298" refer to github pull request numbers. 
+Upcoming release +================ + +New features +------------ +* ``nib-diff`` command line tool for comparing image files (pr/617, pr/672, + pr/678) (CC, reviewed by YOH, Pradeep Raamana and CM) + +Enhancements +------------ +* Speed up reading of numeric arrays in CIFTI2 (pr/655) (Michiel Cottaar, + reviewed by CM) +* Add ``ndim`` property to ``ArrayProxy`` and ``DataobjImage`` (pr/674) (CM, + reviewed by MB) + +Bug fixes +--------- +* Deterministic deduction of slice ordering in degenerate cases (pr/647) + (YOH, reviewed by CM) +* Allow 0ms TR in MGH files (pr/653) (EL, reviewed by CM) +* Allow for PPC64 little-endian long doubles (pr/658) (MB, reviewed by CM) +* Correct construction of FreeSurfer annotation labels (pr/666) (CM, reviewed + by EL, Paul D. McCarthy) +* Fix logic for persisting filehandles with indexed-gzip (pr/679) (Paul D. + McCarthy, reviewed by CM) + +Maintenance +----------- +* Fix semantic error in coordinate systems documentation (pr/646) (Ariel + Rokem, reviewed by CM, MB) +* Test on Python 3.7, minor associated fixes (pr/651) (CM, reviewed by Gregory + R. Lee, MB) + +API changes and deprecations +---------------------------- + 2.3 (Tuesday 12 June 2018) ========================== diff --git a/appveyor.yml b/appveyor.yml index e41aee90c8..772bfa142d 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,6 +12,8 @@ environment: - PYTHON: C:\Python35-x64 - PYTHON: C:\Python36 - PYTHON: C:\Python36-x64 + - PYTHON: C:\Python37 + - PYTHON: C:\Python37-x64 install: # Prepend newly installed Python to the PATH of this build (this cannot be @@ -20,8 +22,7 @@ install: - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% # Install the dependencies of the project. - - pip install numpy scipy matplotlib nose h5py mock - - pip install pydicom + - pip install numpy scipy matplotlib nose h5py mock pydicom - pip install . 
- SET NIBABEL_DATA_DIR=%CD%\nibabel-data diff --git a/bin/nib-diff b/bin/nib-diff new file mode 100755 index 0000000000..2ae66dda9d --- /dev/null +++ b/bin/nib-diff @@ -0,0 +1,17 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +""" +Quick diff summary for a set of neuroimaging files +""" + +from nibabel.cmdline.diff import main + +if __name__ == '__main__': + main() diff --git a/doc/source/coordinate_systems.rst b/doc/source/coordinate_systems.rst index 1576eceb95..ffb24a2e78 100644 --- a/doc/source/coordinate_systems.rst +++ b/doc/source/coordinate_systems.rst @@ -255,7 +255,7 @@ axes *starts* on the right, anterior, superior of the subject, rather than *ending* on the right, anterior, superior. In other words, they would use "RAS" to refer to a coordinate system we would call "LPI". To be safe, we'll call our interpretation of the RAS convention "RAS+", meaning that Right, -Anterior, Posterior are all positive values on these axes. +Anterior, Superior are all positive values on these axes. Some people also use "right" to mean the right hand side when an observer looks at the front of the scanner, from the foot the scanner bed. diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index b3faa21a1f..c74386b0ac 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -42,11 +42,14 @@ used for the lifetime of the ``ArrayProxy``. It should be set to one of ``True``, ``False``, or ``'auto'``. -If ``True``, a single file handle is created and used. If ``False``, a new -file handle is created every time the image is accessed. 
For gzip files, if -``'auto'``, and the optional ``indexed_gzip`` dependency is present, a single -file handle is created and persisted. If ``indexed_gzip`` is not available, -behaviour is the same as if ``keep_file_open is False``. +Management of file handles will be performed either by ``ArrayProxy`` objects, +or by the ``indexed_gzip`` package if it is used. + +If this flag is set to ``True``, a single file handle is created and used. If +``False``, a new file handle is created every time the image is accessed. For +gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is +present, a single file handle is created and persisted. If ``indexed_gzip`` is +not available, behaviour is the same as if ``keep_file_open is False``. If this is set to any other value, attempts to create an ``ArrayProxy`` without specifying the ``keep_file_open`` flag will result in a ``ValueError`` being @@ -160,8 +163,10 @@ def __init__(self, file_like, spec, mmap=True, keep_file_open=None): # Permit any specifier that can be interpreted as a numpy dtype self._dtype = np.dtype(self._dtype) self._mmap = mmap - self._keep_file_open = self._should_keep_file_open(file_like, - keep_file_open) + # Flags to keep track of whether a single ImageOpener is created, and + # whether a single underlying file handle is created. + self._keep_file_open, self._persist_opener = \ + self._should_keep_file_open(file_like, keep_file_open) self._lock = RLock() def __del__(self): @@ -184,16 +189,64 @@ def __setstate__(self, state): self._lock = RLock() def _should_keep_file_open(self, file_like, keep_file_open): - """Called by ``__init__``, and used to determine the final value of - ``keep_file_open``. + """Called by ``__init__``. + + This method determines how to manage ``ImageOpener`` instances, + and the underlying file handles - the behaviour depends on: + + - whether ``file_like`` is an open file handle, or a path to a + ``'.gz'`` file, or a path to a non-gzip file. 
+ - whether ``indexed_gzip`` is present (see + :attr:`.openers.HAVE_INDEXED_GZIP`). + + An ``ArrayProxy`` object uses two internal flags to manage + ``ImageOpener`` instances and underlying file handles. + + - The ``_persist_opener`` flag controls whether a single + ``ImageOpener`` should be created and used for the lifetime of + this ``ArrayProxy``, or whether separate ``ImageOpener`` instances + should be created on each file access. + + - The ``_keep_file_open`` flag controls whether the underlying file + handle should be kept open for the lifetime of this + ``ArrayProxy``, or whether the file handle should be (re-)opened + and closed on each file access. + + The internal ``_keep_file_open`` flag is only relevant if + ``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is + present. + + This method returns the values to be used for the internal + ``_persist_opener`` and ``_keep_file_open`` flags; these values are + derived according to the following rules: - The return value is derived from these rules: + 1. If ``file_like`` is a file(-like) object, both flags are set to + ``False``. - - If ``file_like`` is a file(-like) object, ``False`` is returned. - Otherwise, ``file_like`` is assumed to be a file name. - - If ``keep_file_open`` is ``auto``, and ``indexed_gzip`` is - not available, ``False`` is returned. - - Otherwise, the value of ``keep_file_open`` is returned unchanged. + 2. If ``keep_file_open`` (as passed to :meth:``__init__``) is + ``True``, both internal flags are set to ``True``. + + 3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path + to a ``.gz`` file or ``indexed_gzip`` is not present, both flags + are set to ``False``. + + 4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a + ``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener`` + is set to ``True``, and ``_keep_file_open`` is set to ``False``. + In this case, file handle management is delegated to the + ``indexed_gzip`` library. 
+ + 5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a + ``.gz`` file, and ``indexed_gzip`` is present, both internal flags + are set to ``True``. + + 6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a + path to a ``.gz`` file, or ``indexed_gzip`` is not present, both + internal flags are set to ``False``. + + Note that a value of ``'auto'`` for ``keep_file_open`` will become + deprecated behaviour in version 2.4.0, and support for ``'auto'`` will + be removed in version 3.0.0. Parameters ---------- @@ -206,8 +259,10 @@ def _should_keep_file_open(self, file_like, keep_file_open): Returns ------- - The value of ``keep_file_open`` that will be used by this - ``ArrayProxy``, and passed through to ``ImageOpener`` instances. + A tuple containing: + - ``keep_file_open`` flag to control persistence of file handles + - ``persist_opener`` flag to control persistence of ``ImageOpener`` + objects. """ if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT @@ -216,12 +271,15 @@ def _should_keep_file_open(self, file_like, keep_file_open): '\'auto\', True, False}') # file_like is a handle - keep_file_open is irrelevant if hasattr(file_like, 'read') and hasattr(file_like, 'seek'): - return False - # don't have indexed_gzip - auto -> False - if keep_file_open == 'auto' and not (openers.HAVE_INDEXED_GZIP and - file_like.endswith('.gz')): - return False - return keep_file_open + return False, False + # if the file is a gzip file, and we have_indexed_gzip, + have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz') + if keep_file_open == 'auto': + return have_igzip, have_igzip + elif keep_file_open: + return True, True + else: + return False, have_igzip @property @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0') @@ -232,6 +290,10 @@ def header(self): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def dtype(self): return self._dtype @@ -265,13 +327,14 @@ 
def _get_fileobj(self): A newly created ``ImageOpener`` instance, or an existing one, which provides access to the file. """ - if self._keep_file_open: + if self._persist_opener: if not hasattr(self, '_opener'): self._opener = openers.ImageOpener( self.file_like, keep_open=self._keep_file_open) yield self._opener else: - with openers.ImageOpener(self.file_like) as opener: + with openers.ImageOpener( + self.file_like, keep_open=False) as opener: yield opener def get_unscaled(self): diff --git a/nibabel/casting.py b/nibabel/casting.py index 0ad0d5a5ca..ebdd96d550 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -268,6 +268,9 @@ def type_info(np_type): # 80) but in calculations nexp in fact appears to be 11 as for float64 ret.update(dict(width=width)) return ret + if vals == (105, 11, 16): # correctly detected double double + ret.update(dict(nmant=nmant, nexp=nexp, width=width)) + return ret # Oh dear, we don't recognize the type information. Try some known types # and then give up. At this stage we're expecting exotic longdouble or # their complex equivalent. 
diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 4b3d5fa267..b9919eb2e1 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -517,28 +517,28 @@ def flush_chardata(self): # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) vertices = self.struct_state[-1] - vertices.extend(np.genfromtxt(c, dtype=np.int)) + vertices.extend(np.loadtxt(c, dtype=np.int)) c.close() elif self.write_to == 'VoxelIndices': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) parent = self.struct_state[-1] - parent.voxel_indices_ijk.extend(np.genfromtxt(c, dtype=np.int).reshape(-1, 3)) + parent.voxel_indices_ijk.extend(np.loadtxt(c, dtype=np.int).reshape(-1, 3)) c.close() elif self.write_to == 'VertexIndices': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) index = self.struct_state[-1] - index.extend(np.genfromtxt(c, dtype=np.int)) + index.extend(np.loadtxt(c, dtype=np.int)) c.close() elif self.write_to == 'TransformMatrix': # conversion to numpy array c = BytesIO(data.strip().encode('utf-8')) transform = self.struct_state[-1] - transform.matrix = np.genfromtxt(c, dtype=np.float) + transform.matrix = np.loadtxt(c, dtype=np.float) c.close() elif self.write_to == 'Label': diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py new file mode 100755 index 0000000000..4b8b69381c --- /dev/null +++ b/nibabel/cmdline/diff.py @@ -0,0 +1,365 @@ +#!python +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. 
+# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +""" +Quick summary of the differences among a set of neuroimaging files +""" +from __future__ import division, print_function, absolute_import + +import re +import sys +from collections import OrderedDict +from optparse import OptionParser, Option + +import numpy as np + +import nibabel as nib +import nibabel.cmdline.utils +import hashlib +import os + + +def get_opt_parser(): + # use module docstring for help output + p = OptionParser( + usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__, + version="%prog " + nib.__version__) + + p.add_options([ + Option("-v", "--verbose", action="count", + dest="verbose", default=0, + help="Make more noise. Could be specified multiple times"), + + Option("-H", "--header-fields", + dest="header_fields", default='all', + help="Header fields (comma separated) to be printed as well" + " (if present)"), + + Option("--ma", "--data-max-abs-diff", + dest="data_max_abs_diff", + type=float, + default=0.0, + help="Maximal absolute difference in data between files" + " to tolerate."), + + Option("--mr", "--data-max-rel-diff", + dest="data_max_rel_diff", + type=float, + default=0.0, + help="Maximal relative difference in data between files to" + " tolerate. If --data-max-abs-diff is also specified," + " only the data points with absolute difference greater" + " than that value would be considered for relative" + " difference check."), + Option("--dt", "--datatype", + dest="dtype", + default=np.float64, + help="Enter a numpy datatype such as 'float32'.") + ]) + + return p + + +def are_values_different(*values): + """Generically compare values, return True if different + + Note that comparison is targeting reporting of comparison of the headers + so has following specifics: + - even a difference in data types is considered a difference, i.e. 
1 != 1.0 + - nans are considered to be the "same", although generally nan != nan + """ + value0 = values[0] + + # to not recompute over again + if isinstance(value0, np.ndarray): + try: + # np.asarray for elderly numpys, e.g. 1.7.1 where for + # degenerate arrays (shape ()) it would return a pure scalar + value0_nans = np.asanyarray(np.isnan(value0)) + value0_nonnans = np.asanyarray(np.logical_not(value0_nans)) + # if value0_nans.size == 1: + # import pdb; pdb.set_trace() + if not np.any(value0_nans): + value0_nans = None + except TypeError as exc: + str_exc = str(exc) + # Not implemented in numpy 1.7.1 + if "not supported" in str_exc or "ot implemented" in str_exc: + value0_nans = None + else: + raise + + for value in values[1:]: + if type(value0) != type(value): # if types are different, then we consider them different + return True + elif isinstance(value0, np.ndarray): + if value0.dtype != value.dtype or \ + value0.shape != value.shape: + return True + # there might be nans and they need special treatment + if value0_nans is not None: + value_nans = np.isnan(value) + if np.any(value0_nans != value_nans): + return True + if np.any(value0[value0_nonnans] != value[value0_nonnans]): + return True + elif np.any(value0 != value): + return True + elif value0 is np.nan: + if value is not np.nan: + return True + elif value0 != value: + return True + + return False + + +def get_headers_diff(file_headers, names=None): + """Get difference between headers + + Parameters + ---------- + file_headers: list of actual headers (dicts) from files + names: list of header fields to test + + Returns + ------- + dict + str: list for each header field which differs, return list of + values per each file + """ + difference = OrderedDict() + fields = names + + if names is None: + fields = file_headers[0].keys() + + # for each header field + for field in fields: + values = [header.get(field) for header in file_headers] # get corresponding value + + # if these values are different, store 
them in a dictionary + if are_values_different(*values): + difference[field] = values + + return difference + + +def get_data_hash_diff(files, dtype=np.float64): + """Get difference between md5 values of data + + Parameters + ---------- + files: list of actual files + + Returns + ------- + list + np.array: md5 values of respective files + """ + + md5sums = [ + hashlib.md5(np.ascontiguousarray(nib.load(f).get_fdata(dtype=dtype))).hexdigest() + for f in files + ] + + if len(set(md5sums)) == 1: + return [] + + return md5sums + + +def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): + """Get difference between data + + Parameters + ---------- + files: list of (str or ndarray) + If list of strings is provided -- they must be existing file names + max_abs: float, optional + Maximal absolute difference to tolerate. + max_rel: float, optional + Maximal relative (`abs(diff)/mean(diff)`) difference to tolerate. + If `max_abs` is specified, then those data points with lesser than that + absolute difference, are not considered for relative difference testing + dtype: np, optional + Datatype to be used when extracting data from files + + Returns + ------- + diffs: OrderedDict + An ordered dict with a record per each file which has differences + with other files subsequent detected. Each record is a list of + difference records, one per each file pair. + Each difference record is an Ordered Dict with possible keys + 'abs' or 'rel' showing maximal absolute or relative differences + in the file or the record ('CMP': 'incompat') if file shapes + are incompatible. 
+ """ + + # we are doomed to keep them in RAM now + data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) + for f in files] + diffs = OrderedDict() + for i, d1 in enumerate(data[:-1]): + # populate empty entries for non-compared + diffs1 = [None] * (i + 1) + + for j, d2 in enumerate(data[i + 1:], i + 1): + + if d1.shape == d2.shape: + abs_diff = np.abs(d1 - d2) + mean_abs = (np.abs(d1) + np.abs(d2)) * 0.5 + candidates = np.logical_or(mean_abs != 0, abs_diff != 0) + + if max_abs: + candidates[abs_diff <= max_abs] = False + + max_abs_diff = np.max(abs_diff) + if np.any(candidates): + rel_diff = abs_diff[candidates] / mean_abs[candidates] + if max_rel: + sub_thr = rel_diff <= max_rel + # Since we operated on sub-selected values already, we need + # to plug them back in + candidates[ + tuple((indexes[sub_thr] for indexes in np.where(candidates))) + ] = False + max_rel_diff = np.max(rel_diff) + else: + max_rel_diff = 0 + + if np.any(candidates): + + diff_rec = OrderedDict() # so that abs goes before relative + + diff_rec['abs'] = max_abs_diff.astype(dtype) + diff_rec['rel'] = max_rel_diff.astype(dtype) + diffs1.append(diff_rec) + else: + diffs1.append(None) + + else: + diffs1.append({'CMP': "incompat"}) + + if any(diffs1): + + diffs['DATA(diff %d:)' % (i + 1)] = diffs1 + + return diffs + + +def display_diff(files, diff): + """Format header differences into a nice string + + Parameters + ---------- + files: list of files that were compared so we can print their names + diff: dict of different valued header fields + + Returns + ------- + str + string-formatted table of differences + """ + output = "" + field_width = "{:<15}" + filename_width = "{:<53}" + value_width = "{:<55}" + + output += "These files are different.\n" + output += field_width.format('Field/File') + + for i, f in enumerate(files, 1): + output += "%d:%s" % (i, filename_width.format(os.path.basename(f))) + + output += "\n" + + for key, value in diff.items(): + output += 
field_width.format(key) + + for item in value: + if isinstance(item, dict): + item_str = ', '.join('%s: %s' % i for i in item.items()) + elif item is None: + item_str = '-' + else: + item_str = str(item) + # Value might start/end with some invisible spacing characters so we + # would "condition" it on both ends a bit + item_str = re.sub('^[ \t]+', '<', item_str) + item_str = re.sub('[ \t]+$', '>', item_str) + # and also replace some other invisible symbols with a question + # mark + item_str = re.sub('[\x00]', '?', item_str) + output += value_width.format(item_str) + + output += "\n" + + return output + + +def diff(files, header_fields='all', data_max_abs_diff=None, + data_max_rel_diff=None, dtype=np.float64): + assert len(files) >= 2, "Please enter at least two files" + + file_headers = [nib.load(f).header for f in files] + + # signals "all fields" + if header_fields == 'all': + # TODO: header fields might vary across file types, + # thus prior sensing would be needed + header_fields = file_headers[0].keys() + else: + header_fields = header_fields.split(',') + + diff = get_headers_diff(file_headers, header_fields) + + data_md5_diffs = get_data_hash_diff(files, dtype) + if data_md5_diffs: + # provide details, possibly triggering the ignore of the difference + # in data + data_diffs = get_data_diff(files, + max_abs=data_max_abs_diff, + max_rel=data_max_rel_diff, + dtype=dtype) + if data_diffs: + diff['DATA(md5)'] = data_md5_diffs + diff.update(data_diffs) + + return diff + + +def main(args=None, out=None): + """Getting the show on the road""" + + out = out or sys.stdout + parser = get_opt_parser() + (opts, files) = parser.parse_args(args) + + nibabel.cmdline.utils.verbose_level = opts.verbose + + if nibabel.cmdline.utils.verbose_level < 3: + # suppress nibabel format-compliance warnings + nib.imageglobals.logger.level = 50 + + files_diff = diff( + files, + header_fields=opts.header_fields, + data_max_abs_diff=opts.data_max_abs_diff, + 
data_max_rel_diff=opts.data_max_rel_diff, + dtype=opts.dtype + ) + + if files_diff: + out.write(display_diff(files, files_diff)) + raise SystemExit(1) + else: + out.write("These files are identical.\n") + raise SystemExit(0) diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 8dcd09e261..e701925870 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,13 +5,17 @@ Test running scripts """ -from numpy.testing import (assert_almost_equal, - assert_array_equal) - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) +from nose.tools import assert_equal +from numpy.testing import assert_raises +import nibabel as nib +import numpy as np from nibabel.cmdline.utils import * +from nibabel.cmdline.diff import * +from os.path import (join as pjoin) +from nibabel.testing import data_path +from collections import OrderedDict +from six import StringIO def test_table2string(): @@ -42,3 +46,159 @@ def get_test(self): assert_equal(safe_get(test, "test"), 2) assert_equal(safe_get(test, "failtest"), "-") + + +def test_get_headers_diff(): + fnames = [pjoin(data_path, f) + for f in ('standard.nii.gz', 'example4d.nii.gz')] + actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) + expected_difference = OrderedDict([ + ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), + ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), + ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), + np.array([ 4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), + ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), + ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), + ("pixdim", [np.array([ 1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( + [ -1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 
2.19999909e+00, 2.00000000e+03, 1.00000000e+00, + 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), + ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), + ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), + ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), + ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), + np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), + ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("quatern_b", [np.array(0.0).astype(dtype="float32"), + np.array(-1.9451068140294884e-26).astype(dtype="float32")]), + ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), + ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), + ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), + ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), + ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), + ("srow_x", [np.array([ 1., 0., 0., 0.]).astype(dtype="float32"), + np.array([ -2.00000000e+00, 6.71471565e-19, 9.08102451e-18, + 1.17855103e+02]).astype(dtype="float32")]), + ("srow_y", [np.array([ 0., 3., 0., 0.]).astype(dtype="float32"), + np.array([ -6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype(dtype="float32")]), + ("srow_z", [np.array([ 0., 0., 2., 0.]).astype(dtype="float32"), + np.array([ 8.25548089e-18, 3.23207617e-01, 2.17108178e+00, + -7.24879837e+00]).astype(dtype="float32")])]) + + np.testing.assert_equal(actual_difference, 
expected_difference) + + +def test_display_diff(): + bogus_names = ["hellokitty.nii.gz", "privettovarish.nii.gz"] + + dict_values = OrderedDict([ + ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), + ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]) + ]) + + expected_output = "These files are different.\n" + "Field/File 1:hellokitty.nii.gz" \ + " " \ + "2:privettovarish.nii.gz \n" \ + "datatype " \ + "2 " \ + "4 \n" \ + "bitpix " \ + "8 16" \ + " " \ + "\n" + + assert_equal(display_diff(bogus_names, dict_values), expected_output) + + +def test_get_data_diff(): + # testing for identical files specifically as md5 may vary by computer + test_names = [pjoin(data_path, f) + for f in ('standard.nii.gz', 'standard.nii.gz')] + assert_equal(get_data_hash_diff(test_names), []) + + # testing the maximum relative and absolute differences' different use cases + test_array = np.arange(16).reshape(4, 4) + test_array_2 = np.arange(1, 17).reshape(4, 4) + test_array_3 = np.arange(2, 18).reshape(4, 4) + test_array_4 = np.arange(100).reshape(10, 10) + test_array_5 = np.arange(64).reshape(8, 8) + + # same shape, 2 files + assert_equal(get_data_diff([test_array, test_array_2]), + OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])])) + + # same shape, 3 files + assert_equal(get_data_diff([test_array, test_array_2, test_array_3]), + OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)]), + OrderedDict([('abs', 2), ('rel', 2.0)])]), + ('DATA(diff 2:)', [None, None, + OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])])])) + + # same shape, 2 files, modified maximum abs/rel + assert_equal(get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2), OrderedDict()) + + # different shape, 2 files + assert_equal(get_data_diff([test_array_2, test_array_4]), + OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}])])) + + # different shape, 3 files + 
assert_equal(get_data_diff([test_array_4, test_array_5, test_array_2]), + OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), + ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}])])) + + test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) + assert_equal(type(test_return['DATA(diff 1:)'][1]['abs']), np.float32) + assert_equal(type(test_return['DATA(diff 1:)'][1]['rel']), np.float32) + + test_return_2 = get_data_diff([test_array, test_array_2, test_array_3]) + assert_equal(type(test_return_2['DATA(diff 1:)'][1]['abs']), np.float64) + assert_equal(type(test_return_2['DATA(diff 1:)'][1]['rel']), np.float64) + assert_equal(type(test_return_2['DATA(diff 2:)'][2]['abs']), np.float64) + assert_equal(type(test_return_2['DATA(diff 2:)'][2]['rel']), np.float64) + + +def test_main(): + test_names = [pjoin(data_path, f) + for f in ('standard.nii.gz', 'example4d.nii.gz')] + expected_difference = OrderedDict([ + ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), + ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), + ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), + ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), + ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), + ("pixdim", [np.array([1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( + [-1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, + 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), + ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), + ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), + ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), + ("descrip", 
[np.array("".encode("utf-8")).astype(dtype="S80"), + np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), + ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), + ("quatern_b", [np.array(0.0).astype(dtype="float32"), + np.array(-1.9451068140294884e-26).astype(dtype="float32")]), + ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), + ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), + ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), + ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), + ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), + ("srow_x", [np.array([1., 0., 0., 0.]).astype(dtype="float32"), + np.array([-2.00000000e+00, 6.71471565e-19, 9.08102451e-18, + 1.17855103e+02]).astype(dtype="float32")]), + ("srow_y", [np.array([0., 3., 0., 0.]).astype(dtype="float32"), + np.array([-6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype( + dtype="float32")]), + ("srow_z", [np.array([0., 0., 2., 0.]).astype(dtype="float32"), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e+00, + -7.24879837e+00]).astype(dtype="float32")]), + ('DATA(md5)', ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'])]) + + with assert_raises(SystemExit): + np.testing.assert_equal(main(test_names, StringIO()), expected_difference) + + test_names_2 = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] + + with assert_raises(SystemExit): + assert_equal(main(test_names_2, StringIO()), "These files are identical.") diff --git a/nibabel/dataobj_images.py 
b/nibabel/dataobj_images.py index 66043858d6..86185a7aef 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -28,8 +28,8 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): ---------- dataobj : object Object containg image data. It should be some object that retuns an - array from ``np.asanyarray``. It should have a ``shape`` attribute - or property + array from ``np.asanyarray``. It should have ``shape`` and ``ndim`` + attributes or properties header : None or mapping or header instance, optional metadata for this image format extra : None or mapping, optional @@ -392,6 +392,10 @@ def uncache(self): def shape(self): return self._dataobj.shape + @property + def ndim(self): + return self._dataobj.ndim + @deprecate_with_version('get_shape method is deprecated.\n' 'Please use the ``img.shape`` property ' 'instead.', diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 814e7b85cd..c8abee91a0 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -52,8 +52,8 @@ class FutureWarningMixin(object): >>> with warnings.catch_warnings(record=True) as warns: ... d = D() - ... warns[0].message - FutureWarning("Please, don't use this class",) + ... 
warns[0].message.args[0] + "Please, don't use this class" """ warn_message = 'This class will be removed in future versions' diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 3c0957e11d..c2d343f739 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -680,6 +680,10 @@ def __init__(self, subheader): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def is_proxy(self): return True diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py new file mode 100644 index 0000000000..6bc6ed67a3 --- /dev/null +++ b/nibabel/externals/oset.py @@ -0,0 +1,85 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +"""OrderedSet implementation + +Borrowed from https://pypi.org/project/oset/ +Copyright (c) 2009, Raymond Hettinger, and others All rights reserved. 
+License: BSD-3 +""" + +from __future__ import absolute_import + +from collections import MutableSet + +KEY, PREV, NEXT = range(3) + + +class OrderedSet(MutableSet): + + def __init__(self, iterable=None): + self.end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.map = {} # key --> [key, prev, next] + if iterable is not None: + self |= iterable + + def __len__(self): + return len(self.map) + + def __contains__(self, key): + return key in self.map + + def __getitem__(self, key): + return list(self)[key] + + def add(self, key): + if key not in self.map: + end = self.end + curr = end[PREV] + curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end] + + def discard(self, key): + if key in self.map: + key, prev, next = self.map.pop(key) + prev[NEXT] = next + next[PREV] = prev + + def __iter__(self): + end = self.end + curr = end[NEXT] + while curr is not end: + yield curr[KEY] + curr = curr[NEXT] + + def __reversed__(self): + end = self.end + curr = end[PREV] + while curr is not end: + yield curr[KEY] + curr = curr[PREV] + + def pop(self, last=True): + if not self: + raise KeyError('set is empty') + key = next(reversed(self)) if last else next(iter(self)) + self.discard(key) + return key + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self)) + + def __eq__(self, other): + if isinstance(other, OrderedSet): + return len(self) == len(other) and list(self) == list(other) + return set(self) == set(other) + + def __del__(self): + self.clear() # remove circular references \ No newline at end of file diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 4212574ef9..edce19c6cd 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -81,24 +81,24 @@ def _read_volume_info(fobj): return volume_info -def _pack_rgba(rgba): - """Pack an RGBA sequence into a single integer. 
+def _pack_rgb(rgb): + """Pack an RGB sequence into a single integer. Used by :func:`read_annot` and :func:`write_annot` to generate "annotation values" for a Freesurfer ``.annot`` file. Parameters ---------- - rgba : ndarray, shape (n, 4) - RGBA colors + rgb : ndarray, shape (n, 3) + RGB colors Returns ------- out : ndarray, shape (n, 1) Annotation values for each color. """ - bitshifts = 2 ** np.array([[0], [8], [16], [24]], dtype=rgba.dtype) - return rgba.dot(bitshifts) + bitshifts = 2 ** np.array([[0], [8], [16]], dtype=rgb.dtype) + return rgb.dot(bitshifts) def read_geometry(filepath, read_metadata=False, read_stamp=False): @@ -333,9 +333,13 @@ def read_annot(filepath, orig_ids=False): Annotation file format versions 1 and 2 are supported, corresponding to the "old-style" and "new-style" color table layout. + Note that the output color table ``ctab`` is in RGBT form, where T + (transparency) is 255 - alpha. + See: * https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles#Annotation * https://github.com/freesurfer/freesurfer/blob/dev/matlab/read_annotation.m + * https://github.com/freesurfer/freesurfer/blob/8b88b34/utils/colortab.c Parameters ---------- @@ -352,7 +356,7 @@ def read_annot(filepath, orig_ids=False): Annotation id at each vertex. If a vertex does not belong to any label and orig_ids=False, its id will be set to -1. ctab : ndarray, shape (n_labels, 5) - RGBA + label id colortable array. + RGBT + label id colortable array. names : list of str (python 2), list of bytes (python 3) The names of the labels. The length of the list is n_labels. 
""" @@ -384,7 +388,7 @@ def read_annot(filepath, orig_ids=False): ctab, names = _read_annot_ctab_new_format(fobj, -n_entries) # generate annotation values for each LUT entry - ctab[:, [4]] = _pack_rgba(ctab[:, :4]) + ctab[:, [4]] = _pack_rgb(ctab[:, :3]) if not orig_ids: ord = np.argsort(ctab[:, -1]) @@ -397,6 +401,9 @@ def read_annot(filepath, orig_ids=False): def _read_annot_ctab_old_format(fobj, n_entries): """Read in an old-style Freesurfer color table from `fobj`. + Note that the output color table ``ctab`` is in RGBT form, where T + (transparency) is 255 - alpha. + This function is used by :func:`read_annot`. Parameters @@ -412,7 +419,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): ------- ctab : ndarray, shape (n_entries, 5) - RGBA colortable array - the last column contains all zeros. + RGBT colortable array - the last column contains all zeros. names : list of str The names of the labels. The length of the list is n_entries. """ @@ -430,7 +437,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] names.append(name) - # read RGBA for this entry + # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) return ctab, names @@ -439,6 +446,9 @@ def _read_annot_ctab_old_format(fobj, n_entries): def _read_annot_ctab_new_format(fobj, ctab_version): """Read in a new-style Freesurfer color table from `fobj`. + Note that the output color table ``ctab`` is in RGBT form, where T + (transparency) is 255 - alpha. + This function is used by :func:`read_annot`. Parameters @@ -454,7 +464,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ------- ctab : ndarray, shape (n_labels, 5) - RGBA colortable array - the last column contains all zeros. + RGBT colortable array - the last column contains all zeros. names : list of str The names of the labels. The length of the list is n_labels. 
""" @@ -480,7 +490,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] names.append(name) - # RGBA + # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) return ctab, names @@ -489,9 +499,13 @@ def _read_annot_ctab_new_format(fobj, ctab_version): def write_annot(filepath, labels, ctab, names, fill_ctab=True): """Write out a "new-style" Freesurfer annotation file. + Note that the color table ``ctab`` is in RGBT form, where T (transparency) + is 255 - alpha. + See: * https://surfer.nmr.mgh.harvard.edu/fswiki/LabelsClutsAnnotationFiles#Annotation * https://github.com/freesurfer/freesurfer/blob/dev/matlab/write_annotation.m + * https://github.com/freesurfer/freesurfer/blob/8b88b34/utils/colortab.c Parameters ---------- @@ -500,7 +514,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): labels : ndarray, shape (n_vertices,) Annotation id at each vertex. ctab : ndarray, shape (n_labels, 5) - RGBA + label id colortable array. + RGBT + label id colortable array. names : list of str The names of the labels. The length of the list is n_labels. 
fill_ctab : {True, False} optional @@ -523,8 +537,8 @@ def write_string(s): # Generate annotation values for each ctab entry if fill_ctab: - ctab = np.hstack((ctab[:, :4], _pack_rgba(ctab[:, :4]))) - elif not np.array_equal(ctab[:, [4]], _pack_rgba(ctab[:, :4])): + ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3]))) + elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])): warnings.warn('Annotation values in {} will be incorrect'.format( filepath)) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 927d6126c0..bf92bd962c 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -276,10 +276,15 @@ def set_zooms(self, zooms): ndims = self._ndims() if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) - if np.any(zooms <= 0): - raise HeaderDataError('zooms must be positive') + if np.any(zooms[:3] <= 0): + raise HeaderDataError('Spatial (first three) zooms must be ' + 'positive; got {!r}' + ''.format(tuple(zooms[:3]))) hdr['delta'] = zooms[:3] if len(zooms) == 4: + if zooms[3] < 0: + raise HeaderDataError('TR must be non-negative; got {!r}' + ''.format(zooms[3])) hdr['tr'] = zooms[3] def get_data_shape(self): diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 83da38ed20..1b6065f351 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -16,7 +16,7 @@ from .. 
import (read_geometry, read_morph_data, read_annot, read_label, write_geometry, write_morph_data, write_annot) -from ..io import _pack_rgba +from ..io import _pack_rgb from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data from ...fileslice import strided_scalar @@ -236,8 +236,7 @@ def test_read_write_annot(): # Generate the annotation values for each LUT entry rgbal[:, 4] = (rgbal[:, 0] + rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16) + - rgbal[:, 3] * (2 ** 24)) + rgbal[:, 2] * (2 ** 16)) annot_path = 'c.annot' with InTemporaryDirectory(): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) @@ -287,8 +286,7 @@ def test_write_annot_fill_ctab(): rgbal = np.hstack((rgba, np.zeros((nlabels, 1), dtype=np.int32))) rgbal[:, 4] = (rgbal[:, 0] + rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16) + - rgbal[:, 3] * (2 ** 24)) + rgbal[:, 2] * (2 ** 16)) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) assert_true( @@ -307,7 +305,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): dt = '>i' vdata = np.zeros((nverts, 2), dtype=dt) vdata[:, 0] = np.arange(nverts) - vdata[:, [1]] = _pack_rgba(rgba[labels, :]) + vdata[:, [1]] = _pack_rgb(rgba[labels, :3]) fbytes = b'' # number of vertices fbytes += struct.pack(dt, nverts) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 776c461e18..47e54080c3 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -159,6 +159,8 @@ def test_set_zooms(): (1, 1, 1, 1, 5)): with assert_raises(HeaderDataError): h.set_zooms(zooms) + # smoke test for tr=0 + h.set_zooms((1, 1, 1, 0)) def bad_dtype_mgh(): diff --git a/nibabel/info.py b/nibabel/info.py index 3ae906bc6b..204412c5d2 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -19,8 +19,8 @@ _version_major = 2 _version_minor = 3 _version_micro = 1 -_version_extra = 'dev' -# _version_extra = '' +# 
_version_extra = 'dev' +_version_extra = '' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" __version__ = "%s.%s.%s%s" % (_version_major, diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 5eb077ada0..57042f32f0 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -252,6 +252,10 @@ def __init__(self, minc_file): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def is_proxy(self): return True diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 24c1808df5..056d0dbee9 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -1573,14 +1573,23 @@ def set_slice_times(self, slice_times): so_recoder = self._field_recoders['slice_code'] labels = so_recoder.value_set('label') labels.remove('unknown') + + matching_labels = [] for label in labels: if np.all(st_order == self._slice_time_order( label, n_timed)): - break - else: + matching_labels.append(label) + + if not matching_labels: raise HeaderDataError('slice ordering of %s fits ' 'with no known scheme' % st_order) + if len(matching_labels) > 1: + warnings.warn( + 'Multiple slice orders satisfy: %s. 
Choosing the first one' + % ', '.join(matching_labels) + ) + label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start hdr['slice_end'] = slice_end diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 5fd460b4e1..87e1ac81e6 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -622,6 +622,10 @@ def __init__(self, file_like, header, mmap=True, scaling='dv'): def shape(self): return self._shape + @property + def ndim(self): + return len(self.shape) + @property def dtype(self): return self._dtype diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index fd6cfd5a44..187d5940df 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -349,52 +349,135 @@ def __init__(self, *args, **kwargs): def _count_ImageOpeners(proxy, data, voxels): CountingImageOpener.num_openers = 0 + # expected data is defined in the test_keep_file_open_* tests for i in range(voxels.shape[0]): x, y, z = [int(c) for c in voxels[i, :]] assert proxy[x, y, z] == x * 100 + y * 10 + z return CountingImageOpener.num_openers +@contextlib.contextmanager +def patch_keep_file_open_default(value): + # Patch arrayproxy.KEEP_FILE_OPEN_DEFAULT with the given value + with mock.patch('nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT', value): + yield + + def test_keep_file_open_true_false_invalid(): # Test the behaviour of the keep_file_open __init__ flag, when it is set to - # True or False. - CountingImageOpener.num_openers = 0 - fname = 'testdata' + # True or False. 
Expected behaviour is as follows: + # keep_open | igzip present | persist ImageOpener | igzip.drop_handles + # | and is gzip file | | + # ----------|------------------|---------------------|------------------- + # False | False | False | n/a + # False | True | True | True + # True | False | True | n/a + # True | True | True | False + # 'auto' | False | False | n/a + # 'auto' | True | True | False + # + # Each test tuple contains: + # - file type - gzipped ('gz') or not ('bin'), or an open file handle + # ('open') + # - keep_file_open value passed to ArrayProxy + # - whether or not indexed_gzip is present + # - expected value for internal ArrayProxy._persist_opener flag + # - expected value for internal ArrayProxy._keep_file_open flag + tests = [ + # open file handle - kfo and have_igzip are both irrelevant + ('open', False, False, False, False), + ('open', False, True, False, False), + ('open', True, False, False, False), + ('open', True, True, False, False), + ('open', 'auto', False, False, False), + ('open', 'auto', True, False, False), + # non-gzip file - have_igzip is irrelevant, decision should be made + # solely from kfo flag + ('bin', False, False, False, False), + ('bin', False, True, False, False), + ('bin', True, False, True, True), + ('bin', True, True, True, True), + ('bin', 'auto', False, False, False), + ('bin', 'auto', True, False, False), + # gzip file. If igzip is present, we persist the ImageOpener. 
If kfo + # is 'auto': + # - if igzip is present, kfo -> True + # - otherwise, kfo -> False + ('gz', False, False, False, False), + ('gz', False, True, True, False), + ('gz', True, False, True, True), + ('gz', True, True, True, True), + ('gz', 'auto', False, False, False), + ('gz', 'auto', True, True, True)] + dtype = np.float32 data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) voxels = np.random.randint(0, 10, (10, 3)) + + for test in tests: + filetype, kfo, have_igzip, exp_persist, exp_kfo = test + with InTemporaryDirectory(), \ + mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ + patch_indexed_gzip(have_igzip): + fname = 'testdata.{}'.format(filetype) + # create the test data file + if filetype == 'gz': + with gzip.open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + else: + with open(fname, 'wb') as fobj: + fobj.write(data.tostring(order='F')) + # pass in a file name or open file handle. If the latter, we open + # two file handles, because we're going to create two proxies + # below. 
+ if filetype == 'open': + fobj1 = open(fname, 'rb') + fobj2 = open(fname, 'rb') + else: + fobj1 = fname + fobj2 = fname + try: + proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), + keep_file_open=kfo) + # We also test that we get the same behaviour when the + # KEEP_FILE_OPEN_DEFAULT flag is changed + with patch_keep_file_open_default(kfo): + proxy_def = ArrayProxy(fobj2, ((10, 10, 10), dtype)) + # check internal flags + assert proxy._persist_opener == exp_persist + assert proxy._keep_file_open == exp_kfo + assert proxy_def._persist_opener == exp_persist + assert proxy_def._keep_file_open == exp_kfo + # check persist_opener behaviour - whether one imageopener is + # created for the lifetime of the ArrayProxy, or one is + # created on each access + if exp_persist: + assert _count_ImageOpeners(proxy, data, voxels) == 1 + assert _count_ImageOpeners(proxy_def, data, voxels) == 1 + else: + assert _count_ImageOpeners(proxy, data, voxels) == 10 + assert _count_ImageOpeners(proxy_def, data, voxels) == 10 + # if indexed_gzip is active, check that the file object was + # created correctly - the _opener.fobj will be a + # MockIndexedGzipFile, defined in test_openers.py + if filetype == 'gz' and have_igzip: + assert proxy._opener.fobj._drop_handles == (not exp_kfo) + # if we were using an open file handle, check that the proxy + # didn't close it + if filetype == 'open': + assert not fobj1.closed + assert not fobj2.closed + finally: + del proxy + del proxy_def + if filetype == 'open': + fobj1.close() + fobj2.close() + # Test invalid values of keep_file_open with InTemporaryDirectory(): + fname = 'testdata' with open(fname, 'wb') as fobj: fobj.write(data.tostring(order='F')) - # Test that ArrayProxy(keep_file_open=True) only creates one file - # handle, and that ArrayProxy(keep_file_open=False) creates a file - # handle on every data access. 
- with mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - proxy_no_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open=False) - assert not proxy_no_kfp._keep_file_open - assert _count_ImageOpeners(proxy_no_kfp, data, voxels) == 10 - proxy_kfp = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open=True) - assert proxy_kfp._keep_file_open - assert _count_ImageOpeners(proxy_kfp, data, voxels) == 1 - del proxy_kfp - del proxy_no_kfp - # Test that the keep_file_open flag has no effect if an open file - # handle is passed in - with open(fname, 'rb') as fobj: - for kfo in (True, False, 'auto'): - proxy = ArrayProxy(fobj, ((10, 10, 10), dtype), - keep_file_open=kfo) - assert proxy._keep_file_open is False - for i in range(voxels.shape[0]): - x, y, z = [int(c) for c in voxels[i, :]] - assert proxy[x, y, z] == x * 100 + y * 10 + z - assert not fobj.closed - del proxy - assert not fobj.closed - assert fobj.closed - # Test invalid values of keep_file_open with assert_raises(ValueError): ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=55) with assert_raises(ValueError): @@ -403,109 +486,6 @@ def test_keep_file_open_true_false_invalid(): ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='cauto') -def test_keep_file_open_auto(): - # Test the behaviour of the keep_file_open __init__ flag, when it is set to - # 'auto'. - # if indexed_gzip is present, the ArrayProxy should persist its ImageOpener. - # Otherwise the ArrayProxy should drop openers. 
- dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) - voxels = np.random.randint(0, 10, (10, 3)) - with InTemporaryDirectory(): - fname = 'testdata.gz' - with gzip.open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # If have_indexed_gzip, then the arrayproxy should create one - # ImageOpener - with patch_indexed_gzip(True), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open == 'auto' - assert _count_ImageOpeners(proxy, data, voxels) == 1 - # If no have_indexed_gzip, then keep_file_open should be False - with patch_indexed_gzip(False), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open is False - assert _count_ImageOpeners(proxy, data, voxels) == 10 - # If not a gzip file, keep_file_open should be False - fname = 'testdata' - with open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # regardless of whether indexed_gzip is present or not - with patch_indexed_gzip(True), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open is False - assert _count_ImageOpeners(proxy, data, voxels) == 10 - with patch_indexed_gzip(False), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener): - CountingImageOpener.num_openers = 0 - proxy = ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open='auto') - assert proxy._keep_file_open is False - assert _count_ImageOpeners(proxy, data, voxels) == 10 - - -@contextlib.contextmanager -def patch_keep_file_open_default(value): - # Patch arrayproxy.KEEP_FILE_OPEN_DEFAULT with the given value - 
with mock.patch('nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT', value): - yield - - -def test_keep_file_open_default(): - # Test the behaviour of the keep_file_open __init__ flag, when the - # arrayproxy.KEEP_FILE_OPEN_DEFAULT value is changed - dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) - with InTemporaryDirectory(): - fname = 'testdata.gz' - with gzip.open(fname, 'wb') as fobj: - fobj.write(data.tostring(order='F')) - # If KEEP_FILE_OPEN_DEFAULT is False, ArrayProxy instances should - # interpret keep_file_open as False - with patch_keep_file_open_default(False): - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is False - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is False - # If KEEP_FILE_OPEN_DEFAULT is True, ArrayProxy instances should - # interpret keep_file_open as True - with patch_keep_file_open_default(True): - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is True - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is True - # If KEEP_FILE_OPEN_DEFAULT is auto, ArrayProxy instances should - # interpret it as auto if indexed_gzip is present, False otherwise. 
- with patch_keep_file_open_default('auto'): - with patch_indexed_gzip(False): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open is False - with patch_indexed_gzip(True): - proxy = ArrayProxy(fname, ((10, 10, 10), dtype)) - assert proxy._keep_file_open == 'auto' - # KEEP_FILE_OPEN_DEFAULT=any other value should cuse an error to be - # raised - with patch_keep_file_open_default('badvalue'): - assert_raises(ValueError, ArrayProxy, fname, ((10, 10, 10), - dtype)) - with patch_keep_file_open_default(None): - assert_raises(ValueError, ArrayProxy, fname, ((10, 10, 10), - dtype)) - - def test_pickle_lock(): # Test that ArrayProxy can be pickled, and that thread lock is created diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py new file mode 100644 index 0000000000..4f99ca145f --- /dev/null +++ b/nibabel/tests/test_diff.py @@ -0,0 +1,74 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Test diff +""" +from __future__ import division, print_function, absolute_import + +from os.path import (dirname, join as pjoin, abspath) +import numpy as np + + +DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) + +from nibabel.cmdline.diff import are_values_different + + +def test_diff_values_int(): + large = 10**30 + assert not are_values_different(0, 0) + assert not are_values_different(1, 1) + assert not are_values_different(large, large) + assert are_values_different(0, 1) + assert are_values_different(1, 2) + assert are_values_different(1, large) + + +def test_diff_values_float(): + assert not are_values_different(0., 0.) + assert not are_values_different(0., 0., 0.) # can take more + assert not are_values_different(1.1, 1.1) + assert are_values_different(0., 1.1) + assert are_values_different(0., 0, 1.1) + assert are_values_different(1., 2.) 
+ + +def test_diff_values_mixed(): + assert are_values_different(1.0, 1) + assert are_values_different(1.0, "1") + assert are_values_different(1, "1") + assert are_values_different(1, None) + assert are_values_different(np.ndarray([0]), 'hey') + assert not are_values_different(None, None) + + +def test_diff_values_array(): + from numpy import nan, array, inf + a_int = array([1, 2]) + a_float = a_int.astype(float) + + assert are_values_different(a_int, a_float) + assert are_values_different(a_int, a_int, a_float) + assert are_values_different(np.arange(3), np.arange(1, 4)) + assert are_values_different(np.arange(3), np.arange(4)) + assert are_values_different(np.arange(4), np.arange(4).reshape((2, 2))) + # no broadcasting should kick in - shape difference + assert are_values_different(array([1]), array([1, 1])) + assert not are_values_different(a_int, a_int) + assert not are_values_different(a_float, a_float) + + # nans - we consider them "the same" for the purpose of these comparisons + assert not are_values_different(nan, nan) + assert not are_values_different(nan, nan, nan) + assert are_values_different(nan, nan, 1) + assert are_values_different(1, nan, nan) + assert not are_values_different(array([nan, nan]), array([nan, nan])) + assert not are_values_different(array([nan, nan]), array([nan, nan]), array([nan, nan])) + assert not are_values_different(array([nan, 1]), array([nan, 1])) + assert are_values_different(array([nan, nan]), array([nan, 1])) + assert are_values_different(array([0, nan]), array([nan, 0])) + assert are_values_different(array([1, 2, 3, nan]), array([nan, 3, 5, 4])) + assert are_values_different(nan, 1.0) + assert are_values_different(array([1, 2, 3, nan]), array([3, 4, 5, nan])) + # and some inf should not be a problem + assert not are_values_different(array([0, inf]), array([0, inf])) + assert are_values_different(array([0, inf]), array([inf, 0])) diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 
3022265df4..96376270b1 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -30,6 +30,16 @@ LD_INFO = type_info(np.longdouble) +def dtt2dict(dtt): + """ Create info dictionary from numpy type + """ + info = np.finfo(dtt) + return dict(min=info.min, max=info.max, + nexp=info.nexp, nmant=info.nmant, + minexp=info.minexp, maxexp=info.maxexp, + width=np.dtype(dtt).itemsize) + + def test_type_info(): # Test routine to get min, max, nmant, nexp for dtt in np.sctypes['int'] + np.sctypes['uint']: @@ -42,42 +52,35 @@ def test_type_info(): assert_equal(infod['min'].dtype.type, dtt) assert_equal(infod['max'].dtype.type, dtt) for dtt in IEEE_floats + [np.complex64, np.complex64]: - info = np.finfo(dtt) infod = type_info(dtt) - assert_equal(dict(min=info.min, max=info.max, - nexp=info.nexp, nmant=info.nmant, - minexp=info.minexp, maxexp=info.maxexp, - width=np.dtype(dtt).itemsize), - infod) + assert_equal(dtt2dict(dtt), infod) assert_equal(infod['min'].dtype.type, dtt) assert_equal(infod['max'].dtype.type, dtt) # What is longdouble? 
- info = np.finfo(np.longdouble) - dbl_info = np.finfo(np.float64) + ld_dict = dtt2dict(np.longdouble) + dbl_dict = dtt2dict(np.float64) infod = type_info(np.longdouble) - width = np.dtype(np.longdouble).itemsize - vals = (info.nmant, info.nexp, width) + vals = tuple(ld_dict[k] for k in ('nmant', 'nexp', 'width')) # Information for PPC head / tail doubles from: # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html if vals in ((52, 11, 8), # longdouble is same as double (63, 15, 12), (63, 15, 16), # intel 80 bit (112, 15, 16), # real float128 (106, 11, 16)): # PPC head, tail doubles, expected values - assert_equal(dict(min=info.min, max=info.max, - minexp=info.minexp, maxexp=info.maxexp, - nexp=info.nexp, nmant=info.nmant, width=width), - infod) - elif vals == (1, 1, 16): # bust info for PPC head / tail longdoubles - assert_equal(dict(min=dbl_info.min, max=dbl_info.max, - minexp=-1022, maxexp=1024, - nexp=11, nmant=106, width=16), - infod) + pass + elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles + # min and max broken, copy from infod + ld_dict.update({k: infod[k] for k in ('min', 'max')}) + elif vals == (1, 1, 16): # another bust info for PPC head / tail longdoubles + ld_dict = dbl_dict.copy() + ld_dict.update(dict(nmant=106, width=16)) elif vals == (52, 15, 12): - exp_res = type_info(np.float64) - exp_res['width'] = width - assert_equal(exp_res, infod) + width = ld_dict['width'] + ld_dict = dbl_dict.copy() + ld_dict['width'] = width else: - raise ValueError("Unexpected float type to test") + raise ValueError("Unexpected float type {} to test".format(np.longdouble)) + assert_equal(ld_dict, infod) def test_nmant(): @@ -103,7 +106,7 @@ def test_check_nmant_nexp(): # Check against type_info for t in ok_floats(): ti = type_info(t) - if ti['nmant'] != 106: # This check does not work for PPC double pair + if ti['nmant'] not in (105, 106): # This check does not work for PPC double pair 
assert_true(_check_nmant(t, ti['nmant'])) # Test fails for longdouble after blacklisting of OSX powl as of numpy # 1.12 - see https://github.com/numpy/numpy/issues/8307 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index c53b012cc2..ba51878715 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -202,6 +202,7 @@ def validate_data_interface(self, imaker, params): # Check get data returns array, and caches img = imaker() assert_equal(img.shape, img.dataobj.shape) + assert_equal(img.ndim, len(img.shape)) assert_data_similar(img.dataobj, params) for meth_name in self.meth_names: if params['is_proxy']: @@ -210,6 +211,8 @@ def validate_data_interface(self, imaker, params): self._check_array_interface(imaker, meth_name) # Data shape is same as image shape assert_equal(img.shape, getattr(img, meth_name)().shape) + # Data ndim is same as image ndim + assert_equal(img.ndim, getattr(img, meth_name)().ndim) # Values to get_data caching parameter must be 'fill' or # 'unchanged' assert_raises(ValueError, img.get_data, caching='something') @@ -394,6 +397,17 @@ def validate_shape(self, imaker, params): # Read only assert_raises(AttributeError, setattr, img, 'shape', np.eye(4)) + def validate_ndim(self, imaker, params): + # Validate shape + img = imaker() + # Same as expected ndim + assert_equal(img.ndim, len(params['shape'])) + # Same as array ndim if passed + if 'data' in params: + assert_equal(img.ndim, params['data'].ndim) + # Read only + assert_raises(AttributeError, setattr, img, 'ndim', 5) + def validate_shape_deprecated(self, imaker, params): # Check deprecated get_shape API img = imaker() diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 1c6fb989b5..78f876ec7d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -38,7 +38,12 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from ..testing import data_path, 
suppress_warnings, runif_extra_has +from ..testing import ( + clear_and_catch_warnings, + data_path, + runif_extra_has, + suppress_warnings, +) from . import test_analyze as tana from . import test_spm99analyze as tspm @@ -558,6 +563,22 @@ def test_slice_times(self): assert_equal(hdr['slice_end'], 5) assert_array_almost_equal(hdr['slice_duration'], 0.1) + # Ambiguous case + hdr2 = self.header_class() + hdr2.set_dim_info(slice=2) + hdr2.set_slice_duration(0.1) + hdr2.set_data_shape((1, 1, 2)) + with clear_and_catch_warnings() as w: + warnings.simplefilter("always") + hdr2.set_slice_times([0.1, 0]) + assert len(w) == 1 + # but always must be choosing sequential one first + assert_equal(hdr2.get_value_label('slice_code'), 'sequential decreasing') + # and the other direction + hdr2.set_slice_times([0, 0.1]) + assert_equal(hdr2.get_value_label('slice_code'), 'sequential increasing') + + def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index ca1654bf9a..6b5f231fc3 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -107,7 +107,7 @@ def test_BinOpener(): class MockIndexedGzipFile(GzipFile): def __init__(self, *args, **kwargs): - kwargs.pop('drop_handles', False) + self._drop_handles = kwargs.pop('drop_handles', False) super(MockIndexedGzipFile, self).__init__(*args, **kwargs) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 285674083b..7280c5552d 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -108,6 +108,14 @@ def validate_shape(self, pmaker, params): # Read only assert_raises(AttributeError, setattr, prox, 'shape', params['shape']) + def validate_ndim(self, pmaker, params): + # Check shape + prox, fio, hdr = pmaker() + assert_equal(prox.ndim, len(params['shape'])) + # Read only + assert_raises(AttributeError, setattr, prox, + 'ndim', 
len(params['shape'])) + def validate_is_proxy(self, pmaker, params): # Check shape prox, fio, hdr = pmaker() diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 9756a16747..2c17c33fd1 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -67,6 +67,40 @@ def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): assert_equal(fname, stdout[:len(fname)]) assert_re_in(expected_re, stdout[len(fname):]) + +def check_nib_diff_examples(): + fnames = [pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'example4d.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames, check_code=False) + checked_fields = ["Field/File", "regular", "dim_info", "dim", "datatype", "bitpix", "pixdim", "slice_end", + "xyzt_units", "cal_max", "descrip", "qform_code", "sform_code", "quatern_b", + "quatern_c", "quatern_d", "qoffset_x", "qoffset_y", "qoffset_z", "srow_x", + "srow_y", "srow_z", "DATA(md5)", "DATA(diff 1:)"] + for item in checked_fields: + assert_true(item in stdout) + + fnames2 = [pjoin(DATA_PATH, f) + for f in ('example4d.nii.gz', 'example4d.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames2, check_code=False) + assert_equal(stdout, "These files are identical.") + + fnames3 = [pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames3, check_code=False) + for item in checked_fields: + assert_true(item in stdout) + + fnames4 = [pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz')] + code, stdout, stderr = run_command(['nib-diff'] + fnames4, check_code=False) + assert_equal(stdout, "These files are identical.") + + code, stdout, stderr = run_command(['nib-diff', '--dt', 'float64'] + fnames, check_code=False) + for item in checked_fields: + assert_true(item in stdout) + + + @script_test def test_nib_ls(): yield check_nib_ls_example4d @@ -150,6 +184,11 @@ def 
test_help(): assert_equal(stderr, '') +@script_test +def test_nib_diff(): + yield check_nib_diff_examples + + @script_test def test_nib_nifti_dx(): # Test nib-nifti-dx script diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index f528555d05..40d5ebc41e 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -64,7 +64,7 @@ def test_assert_allclose_safely(): def assert_warn_len_equal(mod, n_in_context): mod_warns = mod.__warningregistry__ - # Python 3.4 appears to clear any pre-existing warnings of the same type, + # Python 3 appears to clear any pre-existing warnings of the same type, # when raising warnings inside a catch_warnings block. So, there is a # warning generated by the tests within the context manager, but no # previous warnings. @@ -84,18 +84,15 @@ def test_clear_and_catch_warnings(): assert_equal(my_mod.__warningregistry__, {}) # Without specified modules, don't clear warnings during context with clear_and_catch_warnings(): - warnings.simplefilter('ignore') warnings.warn('Some warning') assert_warn_len_equal(my_mod, 1) # Confirm that specifying module keeps old warning, does not add new with clear_and_catch_warnings(modules=[my_mod]): - warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 1) # Another warning, no module spec does add to warnings dict, except on - # Python 3.4 (see comments in `assert_warn_len_equal`) + # Python 3 (see comments in `assert_warn_len_equal`) with clear_and_catch_warnings(): - warnings.simplefilter('ignore') warnings.warn('Another warning') assert_warn_len_equal(my_mod, 2) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index e442b508d8..b7a510e337 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -13,6 +13,7 @@ import warnings import gzip import bz2 +from collections import OrderedDict from os.path import exists, splitext from operator import mul from functools import reduce @@ -22,6 +23,7 @@ from 
.casting import (shared_range, type_info, OK_FLOATS) from .openers import Opener from .deprecated import deprecate_with_version +from .externals.oset import OrderedSet sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -78,7 +80,7 @@ class Recoder(object): 2 ''' - def __init__(self, codes, fields=('code',), map_maker=dict): + def __init__(self, codes, fields=('code',), map_maker=OrderedDict): ''' Create recoder object ``codes`` give a sequence of code, alias sequences @@ -97,7 +99,7 @@ def __init__(self, codes, fields=('code',), map_maker=dict): Parameters ---------- - codes : seqence of sequences + codes : sequence of sequences Each sequence defines values (codes) that are equivalent fields : {('code',) string sequence}, optional names by which elements in sequences can be accessed @@ -133,13 +135,15 @@ def add_codes(self, code_syn_seqs): Examples -------- - >>> code_syn_seqs = ((1, 'one'), (2, 'two')) + >>> code_syn_seqs = ((2, 'two'), (1, 'one')) >>> rc = Recoder(code_syn_seqs) >>> rc.value_set() == set((1,2)) True >>> rc.add_codes(((3, 'three'), (1, 'first'))) >>> rc.value_set() == set((1,2,3)) True + >>> print(rc.value_set()) # set is actually ordered + OrderedSet([2, 1, 3]) ''' for code_syns in code_syn_seqs: # Add all the aliases @@ -186,7 +190,7 @@ def keys(self): return self.field1.keys() def value_set(self, name=None): - ''' Return set of possible returned values for column + ''' Return OrderedSet of possible returned values for column By default, the column is the first column. 
@@ -212,7 +216,7 @@ def value_set(self, name=None): d = self.field1 else: d = self.__dict__[name] - return set(d.values()) + return OrderedSet(d.values()) # Endian code aliases diff --git a/setup.py b/setup.py index b0f5bc093c..27f85d3e99 100755 --- a/setup.py +++ b/setup.py @@ -119,6 +119,7 @@ def main(**extra_args): pjoin('bin', 'nib-nifti-dx'), pjoin('bin', 'nib-tck2trk'), pjoin('bin', 'nib-trk2tck'), + pjoin('bin', 'nib-diff'), ], cmdclass = cmdclass, **extra_args