Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 49 additions & 20 deletions lib/iris/tests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,13 +34,16 @@
from six.moves import (filter, input, map, range, zip) # noqa
import six

import codecs
import collections
import contextlib
import difflib
import filecmp
import functools
import gzip
import hashlib
import inspect
import json
import io
import logging
import os
Expand Down Expand Up @@ -620,16 +623,21 @@ def _unique_id(self):

"""
# Obtain a consistent ID for the current test.

# NB. unittest.TestCase.id() returns different values depending on
# whether the test has been run explicitly, or via test discovery.
# For example:
# python tests/test_plot.py => '__main__.TestContourf.test_tx'
# ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
bits = self.id().split('.')[-3:]
bits = self.id().split('.')
if bits[0] == '__main__':
file_name = os.path.basename(sys.modules['__main__'].__file__)
floc = sys.modules['__main__'].__file__
path, file_name = os.path.split(os.path.abspath(floc))
bits[0] = os.path.splitext(file_name)[0]
folder, location = os.path.split(path)
bits = [location] + bits
while location not in ['iris', 'example_tests']:
folder, location = os.path.split(folder)
bits = [location] + bits
test_id = '.'.join(bits)

# Derive the sequential assertion ID within the test
Expand All @@ -652,21 +660,22 @@ def _ensure_folder(self, path):
logger.warning('Creating folder: %s', dir_path)
os.makedirs(dir_path)

def check_graphic(self, tol=_DEFAULT_IMAGE_TOLERANCE):
"""Checks the CRC matches for the current matplotlib.pyplot figure, and closes the figure."""
def check_graphic(self, tol=None):
"""
Checks the CRC matches for the current matplotlib.pyplot figure,
and closes the figure.

"""

unique_id = self._unique_id()

figure = plt.gcf()

repo_fname = os.path.join(os.path.dirname(__file__), 'results', 'imagerepo.json')
with open(repo_fname, 'rb') as fi:
repo = json.load(codecs.getreader('utf-8')(fi))

try:
expected_fname = os.path.join(os.path.dirname(__file__),
'results', 'visual_tests',
unique_id + '.png')

if not os.path.isdir(os.path.dirname(expected_fname)):
os.makedirs(os.path.dirname(expected_fname))

#: The path where the images generated by the tests should go.
image_output_directory = os.path.join(os.path.dirname(__file__),
'result_image_comparison')
Expand Down Expand Up @@ -695,18 +704,38 @@ def check_graphic(self, tol=_DEFAULT_IMAGE_TOLERANCE):

figure.savefig(result_fname)

if not os.path.exists(expected_fname):
warnings.warn('Created image for test %s' % unique_id)
shutil.copy2(result_fname, expected_fname)
# XXX: Deal with a new test result i.e. it's not in the repo

err = mcompare.compare_images(expected_fname, result_fname, tol=tol)
# hash the created image using sha1
with open(result_fname, 'rb') as res_file:
sha1 = hashlib.sha1(res_file.read())
if unique_id not in repo:
msg = 'Image comparison failed: Created image {} for test {}.'
raise ValueError(msg.format(result_fname, unique_id))
Copy link
Member

@bjlittle bjlittle Oct 6, 2016

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@marqh You've changed the logic here from the original, which I kinda disagree with.

As it now stands, any developer who adds a new graphics test has to manually craft the changes to imagerepo.json and manage the hash renaming of the .png ... this doesn't scale very well.

I'll propose an alternative approach on your rebase_hash branch as a PR that (I believe) improves this work-flow. We can discuss that further there ... so hang tight for that.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@marqh Okay, I'm working on that rebase_hash branch PR ... in-bound soon, thanks

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I understand your concerns and I am keen to see a work flow for updating lots of images

However, in your previous implementation, the effect was that tests with no checksum to check would pass, both for developers and for Travis.
This is a more serious problem. I simplified the logic to allow me to fix this problem, which I deemed critical.
If you are working with respect to this branch, please make sure that an implementation does not allow tests to pass where there is no entry in the imagerepo or where the image checksum comparison fails.

Any tooling to help with updating must (IMHO) treat these as test failures, and then help the developer fix them — not enable them to pass.

I hope that point of view is clear and coherent

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@marqh I completely understand your concerns, and I think that there is a suitable compromise to be had here to meet all our needs.

I don't think it would be wise of me to mix this into this PR - let's do that in a separate PR 👍

else:
uris = repo[unique_id]
# Cherry-pick the registered expected hashes from the
# test case uri/s.
expected = [os.path.splitext(os.path.basename(uri))[0]
for uri in uris]

if sha1.hexdigest() not in expected:
emsg = 'Actual SHA1 {} not in expected {} for test {}.'
emsg = emsg.format(sha1.hexdigest(), expected, unique_id)
if _DISPLAY_FIGURES:
print('Image comparison would have failed. '
'Message: %s' % emsg)
else:
raise ValueError('Image comparison failed.'
' Message: {}'.format(emsg))

else:
# There is no difference between the actual and expected
# result, so remove the actual result file.
os.remove(result_fname)

if _DISPLAY_FIGURES:
if err:
print('Image comparison would have failed. Message: %s' % err)
plt.show()
else:
assert not err, 'Image comparison failed. Message: %s' % err

finally:
plt.close()
Expand Down
Loading