Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save patkujawa-wf/1f569d245bbfc73f83a3 to your computer and use it in GitHub Desktop.
Gold/approved file testing all methods in a test class against every file in a directory via metaclass metaprogramming in Python
import glob
import inspect
import os
import re
import unittest
# http://superuser.com/a/748264/43406. NOTE ^ in front for negation
# Raw string: the original non-raw literal contained '\[' '\]' '\.' escape
# sequences, which are invalid in a plain string (SyntaxWarning on modern
# Python 3). The character class lists the SAFE characters; everything
# else gets replaced.
_unsafe_filename_regex = re.compile(r'([^\[\]\.0-9a-zA-Z-,;_])')


def safe_filename(unsafe_filename):
    """
    Return *unsafe_filename* with every character outside the safe set
    (brackets, dot, alphanumerics, '-', ',', ';', '_') replaced by '_'.
    """
    return _unsafe_filename_regex.sub('_', unsafe_filename)
def mkdir_p(path_):
    """
    Create *path_* and any missing parent directories, like the shell's
    ``mkdir -p``.

    An already-existing directory is silently accepted; every other
    ``OSError`` propagates to the caller.
    """
    # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    import errno
    try:
        os.makedirs(path_)
    except OSError as exc:  # Python >2.5
        # Only swallow "already exists, and it really is a directory".
        if exc.errno != errno.EEXIST or not os.path.isdir(path_):
            raise
def non_inherited_methods(klass):
    """
    Return a ``{name: function}`` dict of the methods defined directly on
    *klass*.

    Inherited methods are excluded because only the class's own
    ``__dict__`` is inspected.
    """
    # inspect.ismethod doesn't work when using the class __dict__: the
    # entries there are plain functions, so test with isfunction instead.
    # Use .items() rather than Python-2-only .iteritems() so this also
    # runs on Python 3.
    return {
        name: func
        for name, func in klass.__dict__.items()
        if inspect.isfunction(func)
    }
class RunOnAllFilesMetaClass(type):
    """
    Allows writing a test method once and running it on all files.
    It works by you setting `__metaclass__ = RunOnAllFilesMetaClass` as a class
    prop in your test class. Then, at runtime, this will find all methods
    that don't start with 'test_' and create new test methods on your class
    which call your test method, passing in the path to the input file.
    See http://stackoverflow.com/a/3468410/116891

    NOTE(review): the `__metaclass__` class attribute is honored only by
    Python 2; on Python 3 the class would need
    ``class Tests(unittest.TestCase, metaclass=RunOnAllFilesMetaClass)``.
    """
    def __init__(cls, what, bases=None, dict=None):
        # Input files are found with a glob hard-coded to 'a/*/*.json',
        # split and re-joined so it uses the local OS path separator.
        # The pattern is resolved relative to the current working
        # directory at class-creation time.
        all_input_file_paths = glob.glob(
            os.path.join(*'a/*/*.json'.split('/'))
        )
        # freeze, don't iterate, the methods, because we'll be adding to them
        test_class_methods = non_inherited_methods(cls).items()
        for func_name, func in test_class_methods:
            # Skip normal test methods
            if func_name.startswith('test_'):
                continue
            for fpath in all_input_file_paths:
                def test_func(self, enclosed_file_path=fpath,
                              enclosed_func=func):
                    # NOTE: we need to have the kwargs or we will end up
                    # with a closure of just the last func and file
                    enclosed_func(self, enclosed_file_path)
                test_method_name = cls.make_test_method_name(fpath, func_name)
                # Add the test method to the class
                setattr(cls, test_method_name, test_func)
        super(RunOnAllFilesMetaClass, cls).__init__(what, bases, dict)

    @staticmethod
    def make_test_method_name(fpath, func_name):
        """
        Return a valid name for a test method.
        :param basestring fpath: full path of the input file
        :param basestring func_name: name of the original function that will
        be called and passed a file path to assert against
        :return: a valid method name that will be detected by test runners
        :rtype : basestring
        """
        # In this case, we want the test to be named after the original
        # test function, in addition to part of the path to the file that
        # will be tested.
        parent_dir, file_name_with_ext = os.path.split(fpath)
        file_name, ext = os.path.splitext(file_name_with_ext)
        # Keep only the immediate subdirectory name (e.g. 'b' of 'a/b').
        parent_dir, subdir_name = os.path.split(parent_dir)
        # safe_filename turns unsafe characters (e.g. spaces) into '_'
        # so the result is a legal Python identifier for test runners.
        test_method_name = safe_filename('test_{}_{}_{}'.format(
            func_name, subdir_name, file_name
        ))
        return test_method_name
class JsonToXmlConversionApprovedFileComparisonTests(unittest.TestCase):
    """
    Approval ("gold file") tests: convert each JSON input to XML and
    compare against a stored approved file.
    """
    # Python-2-style metaclass hook: generates one test method per
    # (non-'test_' method, input file) pair at class-creation time.
    __metaclass__ = RunOnAllFilesMetaClass

    @unittest.skip(
        'only for manual testing; meta class will test all files for us'
    )
    def test_one_specific_file(self):
        # Kept for debugging one input by hand; normally skipped.
        relative_path = 'a/b/text.json'
        # split/join so the path uses the local OS separator
        fpath = os.path.join('.', *relative_path.split('/'))
        self.compare_to_approved(fpath)

    def compare_to_approved(self, json_input_path):
        """
        Convert *json_input_path* and assert the XML equals the gold file
        at <parent>/approved/<name>.xml. The actual output is always
        written to <parent>/received/<name>.xml for manual diffing.
        """
        parent_dir, filename = os.path.split(json_input_path)
        filename, ext = os.path.splitext(filename)
        assert ext == '.json'
        out_filename = '{}.xml'.format(filename)
        actual_xml = convert(json_input_path)
        # NOTE(review): basestring exists only on Python 2.
        assert isinstance(actual_xml, basestring)
        # Dump received output to file for manual diffing, if desired
        received_dir = os.path.join(parent_dir, 'received')
        mkdir_p(received_dir)
        with open(os.path.join(received_dir, out_filename), 'w') as f:
            f.write(actual_xml)
        # Compare to approved/gold file
        approved_dir = os.path.join(parent_dir, 'approved')
        try:
            matching_approved_file = os.path.join(approved_dir, out_filename)
            with open(matching_approved_file, 'r') as f:
                expected_xml = f.read()
            self.assertSequenceEqual(actual_xml, expected_xml)
        except IOError as exc:
            # A missing/unreadable approved file is reported as a test
            # failure (not an error), naming the offending input path.
            print('no approved file or error reading it', str(exc))
            self.fail('no approved file to compare against for {}'.format(
                json_input_path
            ))

    def should_produce_valid_xml(self, json_input_path):
        # Cheap sanity check; the metaclass runs it for every input file
        # because the name does not start with 'test_'.
        actual_xml = convert(json_input_path)
        self.assertTrue(actual_xml.startswith('<'))
        self.assertTrue(actual_xml.endswith('/>'))
def convert(json_file_path):
    """
    Stand-in converter: return a placeholder XML string embedding the
    given input path (no real JSON-to-XML conversion happens here).
    """
    template = '<NotRealXml inputFile="{}" />'
    return template.format(json_file_path)

Provided that there is a directory structure like the following in the same directory as the test file, the shell command below should show similar output.

❯ tree a
a
└── b
    ├── approved
    │   └── text.xml
    ├── empty\ file.json
    ├── received
    │   └── text.xml
    └── text.json

3 directories, 4 files
❯ nosetests test_multiple_files_same_asserts_using_meta_programming.py --verbosity=3 --rednose
nose.config: INFO: Ignoring files matching ['^\\.', '^_', '^setup\\.py$']
test_compare_to_approved_b_empty_file (test.test_multiple_files_same_asserts_using_meta_programming.JsonToXmlConversionApprovedFileComparisonTests) ... FAILED
test_compare_to_approved_b_text (test.test_multiple_files_same_asserts_using_meta_programming.JsonToXmlConversionApprovedFileComparisonTests) ... passed
test_one_specific_file (test.test_multiple_files_same_asserts_using_meta_programming.JsonToXmlConversionApprovedFileComparisonTests) ... skipped
test_should_produce_valid_xml_b_empty_file (test.test_multiple_files_same_asserts_using_meta_programming.JsonToXmlConversionApprovedFileComparisonTests) ... passed
test_should_produce_valid_xml_b_text (test.test_multiple_files_same_asserts_using_meta_programming.JsonToXmlConversionApprovedFileComparisonTests) ... passed

-----------------------------------------------------------------------------
1) FAIL: test_compare_to_approved_b_empty_file (test.test_multiple_files_same_asserts_using_meta_programming.JsonToXmlConversionApprovedFileComparisonTests)

   Traceback (most recent call last):
    test_multiple_files_same_asserts_using_meta_programming.py line 70 in test_func
      enclosed_func(self, enclosed_file_path)
    test_multiple_files_same_asserts_using_meta_programming.py line 138 in compare_to_approved
      json_input_path
   AssertionError: no approved file to compare against for a/b/empty file.json

   -------------------- >> begin captured stdout << ---------------------
   ('no approved file or error reading it', "[Errno 2] No such file or directory: 'a/b/approved/empty file.xml'")

   --------------------- >> end captured stdout << ----------------------


-----------------------------------------------------------------------------
5 tests run in 0.0 seconds.
1 FAILED, 1 skipped (3 tests passed)

Notice that a test is named test_compare_to_approved_b_text, so the metaclass took the name of the method doing the assertions (compare_to_approved) and combined it with the file that it found (a/b/text.json) to make a descriptive test name.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment