diff --git a/.travis.yml b/.travis.yml
index b6e69d09ba..f6f07e513c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -96,7 +96,7 @@ before_install:
     - virtualenv --python=python venv
     - source venv/bin/activate
     - python --version # just to check
-    - pip install -U pip wheel  # needed at one point
+    - pip install -U pip "setuptools>=27.0" wheel
     - retry pip install nose flake8 mock  # always
     - pip install $EXTRA_PIP_FLAGS $DEPENDS $OPTIONAL_DEPENDS
     - if [ "${COVERAGE}" == "1" ]; then
diff --git a/appveyor.yml b/appveyor.yml
index 772bfa142d..93438cfc0f 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -22,6 +22,7 @@ install:
   - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
 
   # Install the dependencies of the project.
+  - python -m pip install --upgrade pip setuptools wheel
   - pip install numpy scipy matplotlib nose h5py mock pydicom
   - pip install .
   - SET NIBABEL_DATA_DIR=%CD%\nibabel-data
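
One detail worth noting on the Windows side: a bare `pip install --upgrade pip` can fail there because the running pip.exe cannot overwrite itself, so the new upgrade line invokes pip as a module via `python -m pip`, keeping the executable out of its own upgrade path.
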
diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py
index 9a2dcec728..ce71b92bcc 100644
--- a/nibabel/cifti2/tests/test_cifti2.py
+++ b/nibabel/cifti2/tests/test_cifti2.py
@@ -58,7 +58,7 @@ def test_cifti2_metadata():
     assert_equal(md.data, dict(metadata_test))
 
     assert_equal(list(iter(md)), list(iter(collections.OrderedDict(metadata_test))))
-    
+
     md.update({'a': 'aval', 'b': 'bval'})
     assert_equal(md.data, dict(metadata_test))
 
@@ -310,7 +310,7 @@ def test_matrix():
 
     assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_none)
     assert_equal(m.mapped_indices, [])
-   
+
     h = ci.Cifti2Header(matrix=m)
     assert_equal(m.mapped_indices, [])
     m.insert(0, mim_0)
diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py
index 24b17706b8..e485533cd7 100644
--- a/nibabel/externals/netcdf.py
+++ b/nibabel/externals/netcdf.py
@@ -37,7 +37,7 @@
 
 import numpy as np  # noqa
 from ..py3k import asbytes, asstr
-from numpy import fromstring, ndarray, dtype, empty, array, asarray
+from numpy import frombuffer, ndarray, dtype, empty, array, asarray
 from numpy import little_endian as LITTLE_ENDIAN
 from functools import reduce
 
@@ -519,7 +519,7 @@ def _read(self):
         if not magic == b'CDF':
             raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                             self.filename)
-        self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
+        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
 
         # Read file headers and set data.
         self._read_numrecs()
@@ -608,7 +608,7 @@ def _read_var_array(self):
                 # Calculate size to avoid problems with vsize (above)
                 a_size = reduce(mul, shape, 1) * size
                 if self.file_bytes >= 0 and begin_ + a_size > self.file_bytes:
-                    data = fromstring(b'\x00'*a_size, dtype=dtype_)
+                    data = frombuffer(b'\x00'*a_size, dtype=dtype_)
                 elif self.use_mmap:
                     mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
                     data = ndarray.__new__(ndarray, shape, dtype=dtype_,
@@ -622,7 +622,7 @@ def _read_var_array(self):
                     buf = self.fp.read(a_size)
                     if len(buf) < a_size:
                         buf = b'\x00'*a_size
-                    data = fromstring(buf, dtype=dtype_)
+                    data = frombuffer(buf, dtype=dtype_)
                     data.shape = shape
                     self.fp.seek(pos)
 
@@ -644,7 +644,7 @@ def _read_var_array(self):
             else:
                 pos = self.fp.tell()
                 self.fp.seek(begin)
-                rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
+                rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes)
                 rec_array.shape = (self._recs,)
                 self.fp.seek(pos)
 
@@ -687,7 +687,7 @@ def _read_values(self):
         self.fp.read(-count % 4)  # read padding
 
         if typecode is not 'c':
-            values = fromstring(values, dtype='>%s' % typecode)
+            values = frombuffer(values, dtype='>%s' % typecode)
             if values.shape == (1,):
                 values = values[0]
         else:
@@ -705,14 +705,14 @@ def _pack_int(self, value):
     _pack_int32 = _pack_int
 
     def _unpack_int(self):
-        return int(fromstring(self.fp.read(4), '>i')[0])
+        return int(frombuffer(self.fp.read(4), '>i')[0])
     _unpack_int32 = _unpack_int
 
     def _pack_int64(self, value):
         self.fp.write(array(value, '>q').tostring())
 
     def _unpack_int64(self):
-        return fromstring(self.fp.read(8), '>q')[0]
+        return frombuffer(self.fp.read(8), '>q')[0]
 
     def _pack_string(self, s):
         count = len(s)
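
The `fromstring` -> `frombuffer` substitutions in this file (and in parse_gifti_fast.py and nifti1.py below) track NumPy's deprecation of `np.fromstring` for binary input. Both calls interpret raw bytes the same way, but `frombuffer` returns a zero-copy view rather than a fresh array, so a view onto an immutable `bytes` object comes back read-only. A minimal sketch with plain NumPy, nothing nibabel-specific:

    import numpy as np

    raw = b'\x01\x00\x00\x00\x02\x00\x00\x00'    # two little-endian int32s
    arr = np.frombuffer(raw, dtype='<i4')        # zero-copy view onto raw
    print(arr)                                   # [1 2]
    print(arr.flags.writeable)                   # False: bytes is immutable

    arr = np.frombuffer(bytearray(raw), dtype='<i4')
    print(arr.flags.writeable)                   # True: bytearray is mutable

That read-only behavior is what motivates the bytearray/readinto changes further down in this patch.
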
diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py
index 4cdbd3d768..de02f4c76b 100644
--- a/nibabel/gifti/parse_gifti_fast.py
+++ b/nibabel/gifti/parse_gifti_fast.py
@@ -47,7 +47,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
         dec = base64.b64decode(data.encode('ascii'))
         dt = data_type_codes.type[datatype]
         sh = tuple(shape)
-        newarr = np.fromstring(dec, dtype=dt)
+        newarr = np.frombuffer(dec, dtype=dt)
         if len(newarr.shape) != len(sh):
             newarr = newarr.reshape(sh, order=ord)
 
@@ -59,7 +59,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
         zdec = zlib.decompress(dec)
         dt = data_type_codes.type[datatype]
         sh = tuple(shape)
-        newarr = np.fromstring(zdec, dtype=dt)
+        newarr = np.frombuffer(zdec, dtype=dt)
         if len(newarr.shape) != len(sh):
             newarr = newarr.reshape(sh, order=ord)
 
diff --git a/nibabel/info.py b/nibabel/info.py
index 204412c5d2..56cdcb2c80 100644
--- a/nibabel/info.py
+++ b/nibabel/info.py
@@ -209,4 +209,5 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__):
 ISRELEASE = _version_extra == ''
 VERSION = __version__
 PROVIDES = ["nibabel", 'nisext']
-REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION]
+REQUIRES = ["numpy>=%s" % NUMPY_MIN_VERSION,
+            'bz2file; python_version < "3.0"']
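
The 'bz2file; python_version < "3.0"' entry is a PEP 508 environment marker: pip evaluates the condition against the installing interpreter and pulls in bz2file only on Python 2. A sketch of how such a marker evaluates, assuming a setuptools recent enough to parse marker syntax:

    import pkg_resources

    req = pkg_resources.Requirement.parse('bz2file; python_version < "3.0"')
    print(req.marker)             # python_version < "3.0"
    print(req.marker.evaluate())  # True on Python 2, False on Python 3
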
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 60ff818e57..84cfed956a 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -579,7 +579,7 @@ def from_fileobj(klass, fileobj, size, byteswap):
             # otherwise there should be a full extension header
             if not len(ext_def) == 8:
                 raise HeaderDataError('failed to read extension header')
-            ext_def = np.fromstring(ext_def, dtype=np.int32)
+            ext_def = np.frombuffer(ext_def, dtype=np.int32)
             if byteswap:
                 ext_def = ext_def.byteswap()
             # be extra verbose
diff --git a/nibabel/openers.py b/nibabel/openers.py
index f64ab23b37..e551404561 100644
--- a/nibabel/openers.py
+++ b/nibabel/openers.py
@@ -9,7 +9,10 @@
 """ Context manager openers for various fileobject types
 """
 
-import bz2
+import sys
+if sys.version_info[0] < 3:
+    from bz2file import BZ2File
+else:
+    from bz2 import BZ2File
 import gzip
-import sys
 import warnings
@@ -127,7 +131,7 @@ class Opener(object):
         for \*args
     """
     gz_def = (_gzip_open, ('mode', 'compresslevel', 'keep_open'))
-    bz2_def = (bz2.BZ2File, ('mode', 'buffering', 'compresslevel'))
+    bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel'))
     compress_ext_map = {
         '.gz': gz_def,
         '.bz2': bz2_def,
@@ -209,6 +213,9 @@ def fileno(self):
     def read(self, *args, **kwargs):
         return self.fobj.read(*args, **kwargs)
 
+    def readinto(self, *args, **kwargs):
+        return self.fobj.readinto(*args, **kwargs)
+
     def write(self, *args, **kwargs):
         return self.fobj.write(*args, **kwargs)
 
diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py
index 1ba2d76625..31f0be0ab5 100644
--- a/nibabel/streamlines/tck.py
+++ b/nibabel/streamlines/tck.py
@@ -405,18 +405,21 @@ def _read(cls, fileobj, header, buffer_size=4):
             n_streams = 0
 
             while not eof:
+                buff = bytearray(buffer_size)
+                n_read = f.readinto(buff)
+                eof = n_read != buffer_size
+                if eof:
+                    buff = buff[:n_read]
 
-                bytes_read = f.read(buffer_size)
-                buffs.append(bytes_read)
-                eof = len(bytes_read) != buffer_size
+                buffs.append(buff)
 
                 # Make sure we've read enough to find a streamline delimiter.
-                if fiber_marker not in bytes_read:
+                if fiber_marker not in buff:
                     # If we've read the whole file, then fail.
                     if eof:
                         # Could have minimal buffering, and have read only the
                         # EOF delimiter
-                        buffs = [b''.join(buffs)]
+                        buffs = [bytearray().join(buffs)]
                         if not buffs[0] == eof_marker:
                             raise DataError(
                                 "Cannot find a streamline delimiter. This file"
@@ -425,15 +428,13 @@ def _read(cls, fileobj, header, buffer_size=4):
                         # Otherwise read a bit more.
                         continue
 
-                all_parts = b''.join(buffs).split(fiber_marker)
+                all_parts = bytearray().join(buffs).split(fiber_marker)
                 point_parts, buffs = all_parts[:-1], all_parts[-1:]
                 point_parts = [p for p in point_parts if p != b'']
 
                 for point_part in point_parts:
                     # Read floats.
                     pts = np.frombuffer(point_part, dtype=dtype)
-                    # Enforce ability to write to underlying bytes object
-                    pts.flags.writeable = True
                     # Convert data to little-endian if needed.
                     yield pts.astype('<f4', copy=False).reshape([-1, 3])
 
diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py
index d6d0edef6c..f5752bbe8b 100644
--- a/nibabel/streamlines/tests/test_tck.py
+++ b/nibabel/streamlines/tests/test_tck.py
@@ -3,7 +3,7 @@
 import numpy as np
 from os.path import join as pjoin
 
-from six import BytesIO
+from io import BytesIO
 from nibabel.py3k import asbytes
 
 from ..array_sequence import ArraySequence
diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py
index c4c10bbcbd..a0a3d8a1f3 100644
--- a/nibabel/streamlines/tests/test_trk.py
+++ b/nibabel/streamlines/tests/test_trk.py
@@ -103,7 +103,7 @@ def test_load_complex_file(self):
     def trk_with_bytes(self, trk_key='simple_trk_fname', endian='<'):
         """ Return example trk file bytes and struct view onto bytes """
         with open(DATA[trk_key], 'rb') as fobj:
-            trk_bytes = fobj.read()
+            trk_bytes = bytearray(fobj.read())
         dt = trk_module.header_2_dtype.newbyteorder(endian)
         trk_struct = np.ndarray((1,), dt, buffer=trk_bytes)
         trk_struct.flags.writeable = True
diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py
index 36a605d53f..629673e943 100644
--- a/nibabel/streamlines/trk.py
+++ b/nibabel/streamlines/trk.py
@@ -556,11 +556,11 @@ def _read_header(fileobj):
         start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None
 
         with Opener(fileobj) as f:
-
-            # Read the header in one block.
-            header_str = f.read(header_2_dtype.itemsize)
-            header_rec = np.fromstring(string=header_str, dtype=header_2_dtype)
-
+            # Read the header in one block, directly into a mutable bytearray:
+            # np.frombuffer can then give a zero-copy, writable view onto it.
+            header_buf = bytearray(header_2_dtype.itemsize)
+            f.readinto(header_buf)
+            header_rec = np.frombuffer(buffer=header_buf, dtype=header_2_dtype)
             # Check endianness
             endianness = native_code
             if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
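
Mutability is the point of the new header read: because the buffer behind the structured record is a bytearray, `np.frombuffer` hands back a writable view, and any in-place fix-up of a header field writes straight through to the buffer with no intermediate copy. A small demonstration using an illustrative two-field dtype, not the real TRK header dtype:

    import numpy as np

    hdr_dtype = np.dtype([('hdr_size', '<i4'), ('voxel_size', '<f4', 3)])
    buf = bytearray(hdr_dtype.itemsize)      # as if filled by f.readinto(buf)
    rec = np.frombuffer(buf, dtype=hdr_dtype)
    rec['hdr_size'] = 1000                   # writes through to buf
    assert bytes(buf[:4]) == b'\xe8\x03\x00\x00'
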
diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py
index 6b5f231fc3..6aeb66aaf7 100644
--- a/nibabel/tests/test_openers.py
+++ b/nibabel/tests/test_openers.py
@@ -10,12 +10,11 @@
 import os
 import contextlib
 from gzip import GzipFile
-from bz2 import BZ2File
 from io import BytesIO, UnsupportedOperation
 from distutils.version import StrictVersion
 
 from ..py3k import asstr, asbytes
-from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP
+from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP, BZ2File
 from ..tmpdirs import InTemporaryDirectory
 from ..volumeutils import BinOpener
 
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index aa145af2e9..29c0edaf07 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -46,7 +46,7 @@
                            _dt_min_max,
                            _write_data,
                            )
-from ..openers import Opener
+from ..openers import Opener, BZ2File
 from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range)
 
 from numpy.testing import (assert_array_almost_equal,
@@ -71,7 +71,7 @@ def test__is_compressed_fobj():
     with InTemporaryDirectory():
         for ext, opener, compressed in (('', open, False),
                                         ('.gz', gzip.open, True),
-                                        ('.bz2', bz2.BZ2File, True)):
+                                        ('.bz2', BZ2File, True)):
             fname = 'test.bin' + ext
             for mode in ('wb', 'rb'):
                 fobj = opener(fname, mode)
@@ -94,7 +94,7 @@ def make_array(n, bytes):
     with InTemporaryDirectory():
         for n, opener in itertools.product(
                 (256, 1024, 2560, 25600),
-                (open, gzip.open, bz2.BZ2File)):
+                (open, gzip.open, BZ2File)):
             in_arr = np.arange(n, dtype=dtype)
             # Write array to file
             fobj_w = opener(fname, 'wb')
@@ -103,7 +103,8 @@ def make_array(n, bytes):
             # Read back from file
             fobj_r = opener(fname, 'rb')
             try:
-                contents1 = fobj_r.read()
+                contents1 = bytearray(4 * n)
+                fobj_r.readinto(contents1)
                 # Second element is 1
                 assert_false(contents1[0:8] == b'\x00' * 8)
                 out_arr = make_array(n, contents1)
@@ -114,7 +115,8 @@ def make_array(n, bytes):
                 assert_equal(contents1[:8], b'\x00' * 8)
                 # Reread, to get unmodified contents
                 fobj_r.seek(0)
-                contents2 = fobj_r.read()
+                contents2 = bytearray(4 * n)
+                fobj_r.readinto(contents2)
                 out_arr2 = make_array(n, contents2)
                 assert_array_equal(in_arr, out_arr2)
                 assert_equal(out_arr[1], 0)
diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py
index 1880aa63bf..2b8349d369 100644
--- a/nibabel/volumeutils.py
+++ b/nibabel/volumeutils.py
@@ -12,7 +12,6 @@
 import sys
 import warnings
 import gzip
-import bz2
 from collections import OrderedDict
 from os.path import exists, splitext
 from operator import mul
@@ -21,7 +20,7 @@
 import numpy as np
 
 from .casting import (shared_range, type_info, OK_FLOATS)
-from .openers import Opener
+from .openers import Opener, BZ2File
 from .deprecated import deprecate_with_version
 from .externals.oset import OrderedSet
 
@@ -40,10 +39,7 @@
 default_compresslevel = 1
 
 #: file-like classes known to hold compressed data
-COMPRESSED_FILE_LIKES = (gzip.GzipFile, bz2.BZ2File)
-
-#: file-like classes known to return string values that are safe to modify
-SAFE_STRINGERS = (gzip.GzipFile, bz2.BZ2File)
+COMPRESSED_FILE_LIKES = (gzip.GzipFile, BZ2File)
 
 
 class Recoder(object):
@@ -530,7 +526,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True):
     else:
         data_bytes = infile.read(n_bytes)
         n_read = len(data_bytes)
-        needs_copy = not isinstance(infile, SAFE_STRINGERS)
+        needs_copy = True
     if n_bytes != n_read:
         raise IOError('Expected {0} bytes, got {1} bytes from {2}\n'
                       ' - could the file be damaged?'.format(
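
`needs_copy` becomes unconditionally True on this branch because the trick behind SAFE_STRINGERS no longer exists: the old code relied on being able to take a writable array view onto the fresh string returned by a gzip/bz2 `read()` (a Python 2-era buffer quirk), whereas `np.frombuffer` views onto `bytes` are always read-only, so the data must be copied before the array can ever be modified. A sketch of the rationale:

    import numpy as np

    data_bytes = b'\x00' * 8                  # as returned by infile.read()
    arr = np.frombuffer(data_bytes, dtype='<i4')
    assert not arr.flags.writeable            # view onto immutable bytes
    arr = arr.copy()                          # the needs_copy = True path
    arr[0] = 42                               # now safe to modify
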
diff --git a/setup.py b/setup.py
index 27f85d3e99..222ad8562a 100755
--- a/setup.py
+++ b/setup.py
@@ -19,14 +19,7 @@
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')
 
-# For some commands, use setuptools.
-if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
-            'install_egg_info', 'egg_info', 'easy_install', 'bdist_wheel',
-            'bdist_mpkg')).intersection(sys.argv)) > 0:
-    # setup_egg imports setuptools setup, thus monkeypatching distutils.
-    import setup_egg  # noqa
-
-from distutils.core import setup
+from setuptools import setup
 
 # Commit hash writing, and dependency checking
 from nisext.sexts import (get_comrec_build, package_check, install_scripts_bat,
@@ -77,8 +70,8 @@ def main(**extra_args):
           author_email=INFO.AUTHOR_EMAIL,
           platforms=INFO.PLATFORMS,
           version=INFO.VERSION,
-          requires=INFO.REQUIRES,
           provides=INFO.PROVIDES,
+          install_requires=INFO.REQUIRES,
           packages     = ['nibabel',
                           'nibabel.externals',
                           'nibabel.externals.tests',
@@ -127,4 +120,6 @@ def main(**extra_args):
 
 
 if __name__ == "__main__":
+    # Do not use nisext's dynamically updated install_requires
+    extra_setuptools_args.pop('install_requires', None)
     main(**extra_setuptools_args)
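
The setup.py move from distutils to setuptools is what gives the rest of the patch teeth: distutils' `requires` keyword is inert metadata that pip never enforces, while setuptools' `install_requires` is resolved at install time and understands the environment-marker syntax used for bz2file in info.py. A condensed sketch of the resulting call, with illustrative values rather than the full nibabel setup.py:

    from setuptools import setup

    setup(
        name='nibabel',
        install_requires=[
            'numpy>=1.7.1',                     # enforced by pip at install time
            'bz2file; python_version < "3.0"',  # PEP 508 marker: Python 2 only
        ],
    )

The `extra_setuptools_args.pop('install_requires', None)` guard then keeps nisext's dynamically built requirement list from colliding with the static one passed to `setup()` inside `main()`.
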