diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..c4121b1
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,23 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+ run-hooks:
+ name: Run pre-commit hooks
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out Git repository
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v3
+ with:
+ python-version: 3.7
+
+ - name: Install Python dependencies
+ run: pip install pre-commit
+
+ - name: Run pre-commit hooks
+ run: pre-commit run --all-files
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 3fb3648..9d65bd4 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -25,4 +25,4 @@ jobs:
run: coverage run -m unittest discover -v nrrd/tests
- name: Upload Coverage to Codecov
- uses: codecov/codecov-action@v2
\ No newline at end of file
+ uses: codecov/codecov-action@v2
diff --git a/.gitignore b/.gitignore
index 8321bd5..dd6df2b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,4 +10,4 @@ MANIFEST
docs/build/
# coverage.py files
-.coverage
\ No newline at end of file
+.coverage
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..91a4b27
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,25 @@
+exclude: "docs"
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.3.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v2.34.0
+ hooks:
+ - id: pyupgrade
+ args: [--py37-plus]
+
+ - repo: https://github.com/timothycrosley/isort
+ rev: 5.10.1
+ hooks:
+ - id: isort
+
+ - repo: https://gitlab.com/pycqa/flake8
+ rev: 3.9.2
+ hooks:
+ - id: flake8
+ args: ["--config=setup.cfg"]
diff --git a/MANIFEST.in b/MANIFEST.in
index a942b47..a659298 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
include AUTHORS
include README.md
-include LICENSE
\ No newline at end of file
+include LICENSE
diff --git a/README.rst b/README.rst
index 5d9d5a7..c5c8c3a 100644
--- a/README.rst
+++ b/README.rst
@@ -25,7 +25,7 @@
pynrrd
======
-pynrrd is a pure-Python module for reading and writing `NRRD `_ files into and
+pynrrd is a pure-Python module for reading and writing `NRRD `_ files into and
from numpy arrays.
Dependencies
@@ -46,7 +46,7 @@ Install via pip and GitHub
.. code-block:: bash
pip install git+https://github.com/mhe/pynrrd.git
-
+
Install from source (recommended for contributing to pynrrd)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For developers that want to contribute to pynrrd, you can clone the pynrrd repository and install it using the following commands:
@@ -74,20 +74,24 @@ The tests can be run via the following command from the base directory:
python -m unittest discover -v nrrd/tests
+**Format and Lint code**
+
+This repository uses pre-commit hooks to format and lint the code, and they are enforced in CI. See `pre-commit <https://pre-commit.com>`_ for details.
+
Example usage
-------------
.. code-block:: python
import numpy as np
import nrrd
-
+
# Some sample numpy data
data = np.zeros((5,4,3,2))
filename = 'testdata.nrrd'
-
+
# Write to a NRRD file
nrrd.write(filename, data)
-
+
# Read the data back from file
readdata, header = nrrd.read(filename)
print(readdata.shape)
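
Note on the README addition above: contributors can reproduce the CI lint job locally with the exact commands the lint workflow runs, i.e. pip install pre-commit followed by pre-commit run --all-files; running pre-commit install additionally wires the same hooks into git commit.
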
diff --git a/nrrd/formatters.py b/nrrd/formatters.py
index 10ab36c..580289e 100644
--- a/nrrd/formatters.py
+++ b/nrrd/formatters.py
@@ -31,7 +31,7 @@ def format_number(x):
# floating point number.
# The g option is used rather than f because g precision uses significant digits while f is just the number of
# digits after the decimal. (NRRD C implementation uses g).
- value = '{:.17g}'.format(x)
+ value = f'{x:.17g}'
else:
value = str(x)
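
A quick, illustrative check (not part of the patch) that the f-string rewrite in format_number is behaviour-preserving: both spellings emit identical text, and 17 significant digits are enough for the 'g' format to round-trip a double.

    x = 1 / 3
    assert '{:.17g}'.format(x) == f'{x:.17g}'  # old and new spellings agree
    assert float(f'{x:.17g}') == x             # .17g round-trips an IEEE 754 double
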
diff --git a/nrrd/reader.py b/nrrd/reader.py
index 334f0d1..755e4a7 100644
--- a/nrrd/reader.py
+++ b/nrrd/reader.py
@@ -1,4 +1,3 @@
-# encoding: utf-8
import bz2
import os
import re
@@ -321,9 +320,9 @@ def read_data(header, fh=None, filename=None, index_order='F'):
Filename of the header file. Only necessary if data is detached from the header. This is used to get the
absolute data path.
index_order : {'C', 'F'}, optional
- Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
- slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
- from fastest-varying to slowest-varying (e.g. (x, y, z)).
+ Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered
+ from slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are
+ ordered from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
@@ -452,8 +451,8 @@ def read_data(header, fh=None, filename=None, index_order='F'):
fh.close()
if total_data_points != data.size:
- raise NRRDError('Size of the data does not equal the product of all the dimensions: {0}-{1}={2}'
- .format(total_data_points, data.size, total_data_points - data.size))
+ raise NRRDError(f'Size of the data does not equal the product of all the dimensions: '
+ f'{total_data_points}-{data.size}={total_data_points - data.size}')
# In the NRRD header, the fields are specified in Fortran order, i.e, the first index is the one that changes
# fastest and last index changes slowest. This needs to be taken into consideration since numpy uses C-order
@@ -475,7 +474,9 @@ def read(filename, custom_field_map=None, index_order='F'):
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
.. note::
- Users should be aware that the `index_order` argument needs to be consistent between `nrrd.read` and `nrrd.write`. I.e., reading an array with `index_order='F'` will result in a transposed version of the original data and hence the writer needs to be aware of this.
+ Users should be aware that the `index_order` argument needs to be consistent between `nrrd.read` and
+ `nrrd.write`. I.e., reading an array with `index_order='F'` will result in a transposed version of the
+ original data and hence the writer needs to be aware of this.
Parameters
----------
@@ -485,9 +486,9 @@ def read(filename, custom_field_map=None, index_order='F'):
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
index_order : {'C', 'F'}, optional
- Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
- slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
- from fastest-varying to slowest-varying (e.g. (x, y, z)).
+ Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered
+ from slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are
+ ordered from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
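
The reworded index_order note can be made concrete with a small sketch (illustrative only; the file name is made up): writing and reading with the same index order round-trips the shape, while mixing orders yields the transpose.

    import numpy as np
    import nrrd

    data = np.zeros((5, 4, 3))
    nrrd.write('example.nrrd', data, index_order='F')

    f_data, _ = nrrd.read('example.nrrd', index_order='F')
    assert f_data.shape == (5, 4, 3)   # same order on both sides: shape preserved

    c_data, _ = nrrd.read('example.nrrd', index_order='C')
    assert c_data.shape == (3, 4, 5)   # mismatched orders: transposed view
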
diff --git a/nrrd/tests/data/test1d_ascii.nrrd b/nrrd/tests/data/test1d_ascii.nrrd
index a3513f5..9478d17 100644
--- a/nrrd/tests/data/test1d_ascii.nrrd
+++ b/nrrd/tests/data/test1d_ascii.nrrd
@@ -35,4 +35,3 @@ encoding: ASCII
25
26
27
-
diff --git a/nrrd/tests/data/test2d_ascii.nrrd b/nrrd/tests/data/test2d_ascii.nrrd
index 3c8a86b..34dde50 100644
--- a/nrrd/tests/data/test2d_ascii.nrrd
+++ b/nrrd/tests/data/test2d_ascii.nrrd
@@ -17,4 +17,3 @@ encoding: ASCII
19 20 21
22 23 24
25 26 27
-
diff --git a/nrrd/tests/data/test_customFields.nrrd b/nrrd/tests/data/test_customFields.nrrd
index ca06a62..f0b522e 100644
--- a/nrrd/tests/data/test_customFields.nrrd
+++ b/nrrd/tests/data/test_customFields.nrrd
@@ -45,4 +45,3 @@ double matrix:= (1.2,0.3,0) (0,1.5,0) (0,-0.55,1.6)
25
26
27
-
diff --git a/nrrd/tests/test_formatting.py b/nrrd/tests/test_formatting.py
index 8077f93..df6d615 100644
--- a/nrrd/tests/test_formatting.py
+++ b/nrrd/tests/test_formatting.py
@@ -1,12 +1,13 @@
import os
import sys
-# Required specifically in each module so that searches happen at the parent directory for importing modules
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-
import numpy as np
-from nrrd.tests.util import *
+
import nrrd
+from nrrd.tests.util import *
+
+# Required specifically in each module so that searches happen at the parent directory for importing modules
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
class TestFieldFormatting(unittest.TestCase):
diff --git a/nrrd/tests/test_parsing.py b/nrrd/tests/test_parsing.py
index 37275e2..9d5904f 100644
--- a/nrrd/tests/test_parsing.py
+++ b/nrrd/tests/test_parsing.py
@@ -1,11 +1,12 @@
import os
import sys
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-
import numpy as np
-from nrrd.tests.util import *
+
import nrrd
+from nrrd.tests.util import *
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
class TestFieldParsing(unittest.TestCase):
diff --git a/nrrd/tests/test_reading.py b/nrrd/tests/test_reading.py
index 364a5b2..d3a39e5 100644
--- a/nrrd/tests/test_reading.py
+++ b/nrrd/tests/test_reading.py
@@ -1,24 +1,25 @@
import os
import sys
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-
import numpy as np
-from nrrd.tests.util import *
+
import nrrd
+from nrrd.tests.util import *
+
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-class TestReadingFunctions(object):
+class TestReadingFunctions:
def setUp(self):
- self.expected_header = {u'dimension': 3,
- u'encoding': 'raw',
- u'endian': 'little',
- u'kinds': ['domain', 'domain', 'domain'],
- u'sizes': np.array([30, 30, 30]),
- u'space': 'left-posterior-superior',
- u'space directions': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
- u'space origin': np.array([0, 0, 0]),
- u'type': 'short'}
+ self.expected_header = {'dimension': 3,
+ 'encoding': 'raw',
+ 'endian': 'little',
+ 'kinds': ['domain', 'domain', 'domain'],
+ 'sizes': np.array([30, 30, 30]),
+ 'space': 'left-posterior-superior',
+ 'space directions': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
+ 'space origin': np.array([0, 0, 0]),
+ 'type': 'short'}
self.expected_data = np.fromfile(RAW_DATA_FILE_PATH, np.int16).reshape((30, 30, 30))
if self.index_order == 'F':
@@ -43,7 +44,7 @@ def test_read_header_only_with_filename(self):
def test_read_detached_header_only(self):
expected_header = self.expected_header
- expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
+ expected_header['data file'] = os.path.basename(RAW_DATA_FILE_PATH)
with open(RAW_NHDR_FILE_PATH, 'rb') as fh:
header = nrrd.read_header(fh)
@@ -61,7 +62,7 @@ def test_read_header_and_data_filename(self):
def test_read_detached_header_and_data(self):
expected_header = self.expected_header
- expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
+ expected_header['data file'] = os.path.basename(RAW_DATA_FILE_PATH)
data, header = nrrd.read(RAW_NHDR_FILE_PATH, index_order=self.index_order)
@@ -73,8 +74,8 @@ def test_read_detached_header_and_data(self):
def test_read_detached_header_and_data_with_byteskip_minus1(self):
expected_header = self.expected_header
- expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
- expected_header[u'byte skip'] = -1
+ expected_header['data file'] = os.path.basename(RAW_DATA_FILE_PATH)
+ expected_header['byte skip'] = -1
data, header = nrrd.read(RAW_BYTESKIP_NHDR_FILE_PATH, index_order=self.index_order)
@@ -86,10 +87,10 @@ def test_read_detached_header_and_data_with_byteskip_minus1(self):
def test_read_detached_header_and_nifti_data_with_byteskip_minus1(self):
expected_header = self.expected_header
- expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
- expected_header[u'byte skip'] = -1
- expected_header[u'encoding'] = 'gzip'
- expected_header[u'data file'] = 'BallBinary30x30x30.nii.gz'
+ expected_header['data file'] = os.path.basename(RAW_DATA_FILE_PATH)
+ expected_header['byte skip'] = -1
+ expected_header['encoding'] = 'gzip'
+ expected_header['data file'] = 'BallBinary30x30x30.nii.gz'
data, header = nrrd.read(GZ_BYTESKIP_NIFTI_NHDR_FILE_PATH, index_order=self.index_order)
@@ -100,18 +101,17 @@ def test_read_detached_header_and_nifti_data_with_byteskip_minus1(self):
self.assertTrue(data.flags['WRITEABLE'])
def test_read_detached_header_and_nifti_data(self):
- with self.assertRaisesRegex(nrrd.NRRDError, 'Size of the data does not equal '
- + 'the product of all the dimensions: 27000-27176=-176'):
+ with self.assertRaisesRegex(
+ nrrd.NRRDError, 'Size of the data does not equal the product of all the dimensions: 27000-27176=-176'):
nrrd.read(GZ_NIFTI_NHDR_FILE_PATH, index_order=self.index_order)
def test_read_detached_header_and_data_with_byteskip_minus5(self):
- with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid byteskip, allowed values '
- + 'are greater than or equal to -1'):
+ with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid byteskip, allowed values are greater than or equal to -1'):
nrrd.read(RAW_INVALID_BYTESKIP_NHDR_FILE_PATH, index_order=self.index_order)
def test_read_header_and_gz_compressed_data(self):
expected_header = self.expected_header
- expected_header[u'encoding'] = 'gzip'
+ expected_header['encoding'] = 'gzip'
data, header = nrrd.read(GZ_NRRD_FILE_PATH, index_order=self.index_order)
@@ -123,9 +123,9 @@ def test_read_header_and_gz_compressed_data(self):
def test_read_header_and_gz_compressed_data_with_byteskip_minus1(self):
expected_header = self.expected_header
- expected_header[u'encoding'] = 'gzip'
- expected_header[u'type'] = 'int16'
- expected_header[u'byte skip'] = -1
+ expected_header['encoding'] = 'gzip'
+ expected_header['type'] = 'int16'
+ expected_header['byte skip'] = -1
data, header = nrrd.read(GZ_BYTESKIP_NRRD_FILE_PATH, index_order=self.index_order)
@@ -137,7 +137,7 @@ def test_read_header_and_gz_compressed_data_with_byteskip_minus1(self):
def test_read_header_and_bz2_compressed_data(self):
expected_header = self.expected_header
- expected_header[u'encoding'] = 'bzip2'
+ expected_header['encoding'] = 'bzip2'
data, header = nrrd.read(BZ2_NRRD_FILE_PATH, index_order=self.index_order)
@@ -149,8 +149,8 @@ def test_read_header_and_bz2_compressed_data(self):
def test_read_header_and_gz_compressed_data_with_lineskip3(self):
expected_header = self.expected_header
- expected_header[u'encoding'] = 'gzip'
- expected_header[u'line skip'] = 3
+ expected_header['encoding'] = 'gzip'
+ expected_header['line skip'] = 3
data, header = nrrd.read(GZ_LINESKIP_NRRD_FILE_PATH, index_order=self.index_order)
@@ -161,16 +161,16 @@ def test_read_header_and_gz_compressed_data_with_lineskip3(self):
self.assertTrue(data.flags['WRITEABLE'])
def test_read_raw_header(self):
- expected_header = {u'type': 'float', u'dimension': 3, u'min': 0, u'max': 35.4}
+ expected_header = {'type': 'float', 'dimension': 3, 'min': 0, 'max': 35.4}
header = nrrd.read_header(('NRRD0005', 'type: float', 'dimension: 3', 'min: 0', 'max: 35.4'))
self.assertEqual(expected_header, header)
- expected_header = {u'my extra info': u'my : colon-separated : values'}
+ expected_header = {'my extra info': 'my : colon-separated : values'}
header = nrrd.read_header(('NRRD0005', 'my extra info:=my : colon-separated : values'))
np.testing.assert_equal(expected_header, header)
def test_read_dup_field_error_and_warn(self):
- expected_header = {u'type': 'float', u'dimension': 3}
+ expected_header = {'type': 'float', 'dimension': 3}
header_txt_tuple = ('NRRD0005', 'type: float', 'dimension: 3', 'type: float')
with self.assertRaisesRegex(nrrd.NRRDError, "Duplicate header field: 'type'"):
@@ -187,12 +187,12 @@ def test_read_dup_field_error_and_warn(self):
nrrd.reader.ALLOW_DUPLICATE_FIELD = False
def test_read_header_and_ascii_1d_data(self):
- expected_header = {u'dimension': 1,
- u'encoding': 'ASCII',
- u'kinds': ['domain'],
- u'sizes': [27],
- u'spacings': [1.0458000000000001],
- u'type': 'unsigned char'}
+ expected_header = {'dimension': 1,
+ 'encoding': 'ASCII',
+ 'kinds': ['domain'],
+ 'sizes': [27],
+ 'spacings': [1.0458000000000001],
+ 'type': 'unsigned char'}
data, header = nrrd.read(ASCII_1D_NRRD_FILE_PATH, index_order=self.index_order)
@@ -204,12 +204,12 @@ def test_read_header_and_ascii_1d_data(self):
self.assertTrue(data.flags['WRITEABLE'])
def test_read_header_and_ascii_2d_data(self):
- expected_header = {u'dimension': 2,
- u'encoding': 'ASCII',
- u'kinds': ['domain', 'domain'],
- u'sizes': [3, 9],
- u'spacings': [1.0458000000000001, 2],
- u'type': 'unsigned short'}
+ expected_header = {'dimension': 2,
+ 'encoding': 'ASCII',
+ 'kinds': ['domain', 'domain'],
+ 'sizes': [3, 9],
+ 'spacings': [1.0458000000000001, 2],
+ 'type': 'unsigned short'}
data, header = nrrd.read(ASCII_2D_NRRD_FILE_PATH, index_order=self.index_order)
@@ -248,44 +248,44 @@ def test_read_simple_4d_nrrd(self):
self.assertTrue(data.flags['WRITEABLE'])
def test_custom_fields_without_field_map(self):
- expected_header = {u'dimension': 1,
- u'encoding': 'ASCII',
- u'kinds': ['domain'],
- u'sizes': [27],
- u'spacings': [1.0458000000000001],
- u'int': '24',
- u'double': '25.5566',
- u'string': 'This is a long string of information that is important.',
- u'int list': '1 2 3 4 5 100',
- u'double list': '0.2 0.502 0.8',
- u'string list': 'words are split by space in list',
- u'int vector': '(100, 200, -300)',
- u'double vector': '(100.5,200.3,-300.99)',
- u'int matrix': '(1,0,0) (0,1,0) (0,0,1)',
- u'double matrix': '(1.2,0.3,0) (0,1.5,0) (0,-0.55,1.6)',
- u'type': 'unsigned char'}
+ expected_header = {'dimension': 1,
+ 'encoding': 'ASCII',
+ 'kinds': ['domain'],
+ 'sizes': [27],
+ 'spacings': [1.0458000000000001],
+ 'int': '24',
+ 'double': '25.5566',
+ 'string': 'This is a long string of information that is important.',
+ 'int list': '1 2 3 4 5 100',
+ 'double list': '0.2 0.502 0.8',
+ 'string list': 'words are split by space in list',
+ 'int vector': '(100, 200, -300)',
+ 'double vector': '(100.5,200.3,-300.99)',
+ 'int matrix': '(1,0,0) (0,1,0) (0,0,1)',
+ 'double matrix': '(1.2,0.3,0) (0,1.5,0) (0,-0.55,1.6)',
+ 'type': 'unsigned char'}
header = nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH)
self.assertEqual(header, expected_header)
def test_custom_fields_with_field_map(self):
- expected_header = {u'dimension': 1,
- u'encoding': 'ASCII',
- u'kinds': ['domain'],
- u'sizes': [27],
- u'spacings': [1.0458000000000001],
- u'int': 24,
- u'double': 25.5566,
- u'string': 'This is a long string of information that is important.',
- u'int list': np.array([1, 2, 3, 4, 5, 100]),
- u'double list': np.array([0.2, 0.502, 0.8]),
- u'string list': ['words', 'are', 'split', 'by', 'space', 'in', 'list'],
- u'int vector': np.array([100, 200, -300]),
- u'double vector': np.array([100.5, 200.3, -300.99]),
- u'int matrix': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
- u'double matrix': np.array([[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]]),
- u'type': 'unsigned char'}
+ expected_header = {'dimension': 1,
+ 'encoding': 'ASCII',
+ 'kinds': ['domain'],
+ 'sizes': [27],
+ 'spacings': [1.0458000000000001],
+ 'int': 24,
+ 'double': 25.5566,
+ 'string': 'This is a long string of information that is important.',
+ 'int list': np.array([1, 2, 3, 4, 5, 100]),
+ 'double list': np.array([0.2, 0.502, 0.8]),
+ 'string list': ['words', 'are', 'split', 'by', 'space', 'in', 'list'],
+ 'int vector': np.array([100, 200, -300]),
+ 'double vector': np.array([100.5, 200.3, -300.99]),
+ 'int matrix': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
+ 'double matrix': np.array([[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]]),
+ 'type': 'unsigned char'}
custom_field_map = {'int': 'int',
'double': 'double',
@@ -355,7 +355,7 @@ def test_invalid_encoding(self):
nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
def test_detached_header_no_filename(self):
- self.expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
+ self.expected_header['data file'] = os.path.basename(RAW_DATA_FILE_PATH)
with open(RAW_NHDR_FILE_PATH, 'rb') as fh:
header = nrrd.read_header(fh)
diff --git a/nrrd/tests/test_writing.py b/nrrd/tests/test_writing.py
index 7ff22ee..431fbd4 100644
--- a/nrrd/tests/test_writing.py
+++ b/nrrd/tests/test_writing.py
@@ -1,16 +1,13 @@
import io
-import os
-import sys
-
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-
import tempfile
+
import numpy as np
-from nrrd.tests.util import *
+
import nrrd
+from nrrd.tests.util import *
-class TestWritingFunctions(object):
+class TestWritingFunctions:
def setUp(self):
self.temp_write_dir = tempfile.mkdtemp('nrrdtest')
self.data_input, _ = nrrd.read(RAW_NRRD_FILE_PATH, index_order=self.index_order)
@@ -19,10 +16,10 @@ def setUp(self):
self.expected_data = fh.read()
def write_and_read_back(self, encoding=None, level=9):
- output_filename = os.path.join(self.temp_write_dir, 'testfile_{}_{}.nrrd'.format(encoding, str(level)))
+ output_filename = os.path.join(self.temp_write_dir, f'testfile_{encoding}_{str(level)}.nrrd')
headers = {}
if encoding is not None:
- headers[u'encoding'] = encoding
+ headers['encoding'] = encoding
nrrd.write(output_filename, self.data_input, headers, compression_level=level,
index_order=self.index_order)
@@ -37,21 +34,21 @@ def test_write_default_header(self):
self.write_and_read_back()
def test_write_raw(self):
- self.write_and_read_back(u'raw')
+ self.write_and_read_back('raw')
def test_write_gz(self):
- self.write_and_read_back(u'gzip')
+ self.write_and_read_back('gzip')
def test_write_bzip2(self):
- self.write_and_read_back(u'bzip2')
+ self.write_and_read_back('bzip2')
def test_write_gz_level1(self):
- filename = self.write_and_read_back(u'gzip', level=1)
+ filename = self.write_and_read_back('gzip', level=1)
self.assertLess(os.path.getsize(GZ_NRRD_FILE_PATH), os.path.getsize(filename))
def test_write_bzip2_level1(self):
- _ = self.write_and_read_back(u'bzip2', level=1)
+ _ = self.write_and_read_back('bzip2', level=1)
# note: we don't currently assert reduction here, because with the binary ball test data,
# the output size does not change at different bz2 levels.
@@ -61,7 +58,7 @@ def test_write_ascii_1d(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_ascii_1d.nrrd')
x = np.arange(1, 28)
- nrrd.write(output_filename, x, {u'encoding': 'ascii'}, index_order=self.index_order)
+ nrrd.write(output_filename, x, {'encoding': 'ascii'}, index_order=self.index_order)
# Read back the same file
data, header = nrrd.read(output_filename, index_order=self.index_order)
@@ -72,7 +69,7 @@ def test_write_ascii_2d(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_ascii_2d.nrrd')
x = np.arange(1, 28).reshape((3, 9), order=self.index_order)
- nrrd.write(output_filename, x, {u'encoding': 'ascii'}, index_order=self.index_order)
+ nrrd.write(output_filename, x, {'encoding': 'ascii'}, index_order=self.index_order)
# Read back the same file
data, header = nrrd.read(output_filename, index_order=self.index_order)
@@ -83,7 +80,7 @@ def test_write_ascii_3d(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_ascii_3d.nrrd')
x = np.arange(1, 28).reshape((3, 3, 3), order=self.index_order)
- nrrd.write(output_filename, x, {u'encoding': 'ascii'}, index_order=self.index_order)
+ nrrd.write(output_filename, x, {'encoding': 'ascii'}, index_order=self.index_order)
# Read back the same file
data, header = nrrd.read(output_filename, index_order=self.index_order)
@@ -96,7 +93,7 @@ def test_write_custom_fields_without_custom_field_map(self):
data, header = nrrd.read(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, index_order=self.index_order)
nrrd.write(output_filename, data, header, index_order=self.index_order)
- with open(output_filename, 'r') as fh:
+ with open(output_filename) as fh:
lines = fh.readlines()
# Strip newline from end of line
@@ -136,7 +133,7 @@ def test_write_custom_fields_with_custom_field_map(self):
data, header = nrrd.read(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, custom_field_map, index_order=self.index_order)
nrrd.write(output_filename, data, header, custom_field_map=custom_field_map, index_order=self.index_order)
- with open(output_filename, 'r') as fh:
+ with open(output_filename) as fh:
lines = fh.readlines()
# Strip newline from end of line
@@ -164,7 +161,7 @@ def test_write_detached_raw_as_nrrd(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
output_data_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nrrd')
- nrrd.write(output_data_filename, self.data_input, {u'encoding': 'raw'}, detached_header=True,
+ nrrd.write(output_data_filename, self.data_input, {'encoding': 'raw'}, detached_header=True,
relative_data_path=False, index_order=self.index_order)
# Read back the same file
@@ -176,7 +173,7 @@ def test_write_detached_raw_as_nrrd(self):
def test_write_detached_raw_odd_extension(self):
output_data_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nrrd2')
- nrrd.write(output_data_filename, self.data_input, {u'encoding': 'raw'}, detached_header=True,
+ nrrd.write(output_data_filename, self.data_input, {'encoding': 'raw'}, detached_header=True,
index_order=self.index_order)
# Read back the same file
@@ -189,14 +186,14 @@ def test_write_fake_encoding(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid encoding specification while writing NRRD file: fake'):
- nrrd.write(output_filename, self.data_input, {u'encoding': 'fake'}, index_order=self.index_order)
+ nrrd.write(output_filename, self.data_input, {'encoding': 'fake'}, index_order=self.index_order)
def test_write_detached_raw(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_detached_raw.nhdr')
# Data & header are still detached even though detached_header is False because the filename is .nhdr
# Test also checks detached data filename that it is relative (default value)
- nrrd.write(output_filename, self.data_input, {u'encoding': 'raw'}, detached_header=False,
+ nrrd.write(output_filename, self.data_input, {'encoding': 'raw'}, detached_header=False,
index_order=self.index_order)
# Read back the same file
@@ -211,7 +208,7 @@ def test_write_detached_gz(self):
# Data & header are still detached even though detached_header is False because the filename is .nhdr
# Test also checks detached data filename that it is absolute
- nrrd.write(output_filename, self.data_input, {u'encoding': 'gz'}, detached_header=False,
+ nrrd.write(output_filename, self.data_input, {'encoding': 'gz'}, detached_header=False,
relative_data_path=False, index_order=self.index_order)
# Read back the same file
@@ -225,7 +222,7 @@ def test_write_detached_bz2(self):
# Data & header are still detached even though detached_header is False because the filename is .nhdr
# Test also checks detached data filename that it is relative (default value)
- nrrd.write(output_filename, self.data_input, {u'encoding': 'bz2'}, detached_header=False,
+ nrrd.write(output_filename, self.data_input, {'encoding': 'bz2'}, detached_header=False,
index_order=self.index_order)
# Read back the same file
@@ -239,7 +236,7 @@ def test_write_detached_ascii(self):
# Data & header are still detached even though detached_header is False because the filename is .nhdr
# Test also checks detached data filename that it is relative (default value)
- nrrd.write(output_filename, self.data_input, {u'encoding': 'txt'}, detached_header=False,
+ nrrd.write(output_filename, self.data_input, {'encoding': 'txt'}, detached_header=False,
index_order=self.index_order)
# Read back the same file
@@ -261,7 +258,7 @@ def test_remove_endianness(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_remove_endianness.nrrd')
x = np.arange(1, 28)
- nrrd.write(output_filename, x, {u'encoding': 'ascii', u'endian': 'little', 'space': 'right-anterior-superior',
+ nrrd.write(output_filename, x, {'encoding': 'ascii', 'endian': 'little', 'space': 'right-anterior-superior',
'space dimension': 3}, index_order=self.index_order)
# Read back the same file
@@ -285,20 +282,20 @@ def test_invalid_index_order(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_invalid_index_order.nrrd')
with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid index order'):
- nrrd.write(output_filename, np.zeros((3,9)), index_order=None)
+ nrrd.write(output_filename, np.zeros((3, 9)), index_order=None)
def test_quoted_string_list_header(self):
output_filename = os.path.join(self.temp_write_dir, 'testfile_ascii_3d.nrrd')
x = np.arange(1, 28).reshape((3, 3, 3), order=self.index_order)
nrrd.write(output_filename, x, {
- u'encoding': 'ascii',
- u'units': ['mm', 'cm', 'in'],
- u'space units': ['mm', 'cm', 'in'],
- u'labels': ['X', 'Y', 'f(log(X, 10), Y)'],
+ 'encoding': 'ascii',
+ 'units': ['mm', 'cm', 'in'],
+ 'space units': ['mm', 'cm', 'in'],
+ 'labels': ['X', 'Y', 'f(log(X, 10), Y)'],
}, index_order=self.index_order)
- with open(output_filename, 'r') as fh:
+ with open(output_filename) as fh:
lines = fh.readlines()
# Strip newline from end of line
diff --git a/nrrd/writer.py b/nrrd/writer.py
index 552907f..548f455 100644
--- a/nrrd/writer.py
+++ b/nrrd/writer.py
@@ -83,7 +83,7 @@ def _format_field_value(value, field_type):
elif field_type == 'string list':
return ' '.join(value)
elif field_type == 'quoted string list':
- return ' '.join('"{0}"'.format(x) for x in value)
+ return ' '.join(f'"{x}"' for x in value)
elif field_type == 'int vector':
return format_vector(value)
elif field_type == 'double vector':
@@ -169,9 +169,9 @@ def _write_header(file, header, custom_field_map=None):
# Custom fields are written as key/value pairs with a := instead of : delimiter
if x >= custom_field_start_index:
- file.write(('%s:=%s\n' % (field, value_str)).encode('ascii'))
+ file.write((f'{field}:={value_str}\n').encode('ascii'))
else:
- file.write(('%s: %s\n' % (field, value_str)).encode('ascii'))
+ file.write((f'{field}: {value_str}\n').encode('ascii'))
# Write the closing extra newline
file.write(b'\n')
@@ -295,9 +295,9 @@ def write(file, data, header=None, detached_header=False, relative_data_path=Tru
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
compression_level : :class:`int`
- Integer between 1 and 9 specifying the compression level when using a compressed encoding (gzip or bzip). A value
- of :obj:`1` compresses the data the least amount and is the fastest, while a value of :obj:`9` compresses the
- data the most and is the slowest.
+ Integer between 1 and 9 specifying the compression level when using a compressed encoding (gzip or bzip).
+ A value of :obj:`1` compresses the data the least amount and is the fastest, while a value of :obj:`9`
+ compresses the data the most and is the slowest.
index_order : {'C', 'F'}, optional
Specifies the index order used for writing. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
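
A short usage sketch for the compression_level parameter documented above (file names and data are illustrative; gzip is shown, bzip2 behaves analogously):

    import numpy as np
    import nrrd

    data = (np.arange(64 ** 3) % 100).astype(np.int16).reshape((64, 64, 64))

    # Level 1: fastest, least compression.
    nrrd.write('fast.nrrd', data, {'encoding': 'gzip'}, compression_level=1)
    # Level 9: slowest, best compression.
    nrrd.write('small.nrrd', data, {'encoding': 'gzip'}, compression_level=9)
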
diff --git a/requirements.txt b/requirements.txt
index b6517e7..ea11541 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
numpy>=1.11.1
-numpydoc
\ No newline at end of file
+numpydoc
diff --git a/setup.cfg b/setup.cfg
index c34b498..a2c7e81 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,4 +2,12 @@
# This flag says that the code is written to work on both Python 2 and Python
# 3. If at all possible, it is good practice to do this. If you cannot, you
# will need to generate wheels for each Python version that you support.
-universal=1
\ No newline at end of file
+universal=1
+
+[flake8]
+max-line-length = 120
+ignore =
+ # F405 may be undefined, or defined from star imports
+ F405,
+ # F403 unable to detect undefined names from star imports
+ F403
diff --git a/setup.py b/setup.py
index 2d3c9d4..a860f0d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,13 +1,13 @@
import os
-from setuptools import setup, find_packages
+from setuptools import find_packages, setup
from nrrd._version import __version__
currentPath = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
-with open(os.path.join(currentPath, 'README.rst'), 'r') as fh:
+with open(os.path.join(currentPath, 'README.rst')) as fh:
longDescription = fh.read()
longDescription = '\n' + longDescription