Last active
May 13, 2024 12:17
-
-
Save juancarlospaco/040fbe326631e638f2a540fe8c1f2092 to your computer and use it in GitHub Desktop.
Unittest Templates
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
# -*- coding: utf-8 -*- | |
"""Unittest with DocTests.""" | |
import doctest | |
import unittest | |
from random import randint | |
# Randomize the order tests run in.  The default comparator returns
# -1 if x < y, 0 if x == y, 1 if x > y; answering randomly shuffles it.
def _random_test_order(_, x, y):
    return randint(-1, 1)

unittest.TestLoader.sortTestMethodsUsing = _random_test_order
def setUpModule():
    """Module-level fixture: runs once BEFORE any test in this module."""
    pass  # Nothing to prepare in this template.
def tearDownModule():
    """Module-level fixture: runs once AFTER all tests in this module."""
    pass  # Nothing to clean up in this template.
def load_tests(loader, tests, ignore):
    """Hook: add this module's DocTests to the unittest suite.

    unittest calls this protocol function automatically; the returned
    suite runs together with the rest of the unit tests.
    """
    # module=None makes DocTestSuite inspect the calling module.
    doc_suite = doctest.DocTestSuite(module=None, setUp=None, tearDown=None)
    tests.addTests(doc_suite)
    return tests
class TestName(unittest.TestCase):
    """Template TestCase showing the common fixtures and assert methods."""

    # Show full assertion diffs; forbid per-instance attributes.
    maxDiff, __slots__ = None, ()

    def setUp(self):
        """Prepare the test fixture. Runs BEFORE each test method."""
        pass

    def tearDown(self):
        """Tear down the test fixture. Runs AFTER each test method."""
        pass

    # NOTE: the original template overrode TestCase.addCleanup() with a
    # no-op.  addCleanup() is real unittest API (not an empty hook), and
    # a no-op override silently disables every registered cleanup, so
    # the override was removed.  Register cleanups from setUp() or from
    # tests with self.addCleanup(func, *args, **kwargs).

    @classmethod
    def setUpClass(cls):
        """Runs once BEFORE all tests of this class (prefer setUp())."""
        pass

    @classmethod
    def tearDownClass(cls):
        """Runs once AFTER all tests of this class (prefer tearDown())."""
        pass

    @unittest.skip("Demonstrating skipping")  # Unconditional skip.
    @unittest.skipIf("boolean_condition", "Reason to Skip Test here.")  # Skips when condition is truthy.
    @unittest.skipUnless("boolean_condition", "Reason to Skip Test here.")  # Skips when condition is falsy.
    @unittest.expectedFailure  # Test MUST fail; a failure counts as OK.
    def test_dummy(self):
        self.skipTest("Just examples, use as template!.")  # Skips this test only
        self.assertEqual(a, b)  # a == b
        self.assertNotEqual(a, b)  # a != b
        self.assertTrue(x)  # bool(x) is True
        self.assertFalse(x)  # bool(x) is False
        self.assertIs(a, b)  # a is b
        self.assertIsNot(a, b)  # a is not b
        self.assertIsNone(x)  # x is None
        self.assertIsNotNone(x)  # x is not None
        self.assertIn(a, b)  # a in b
        self.assertNotIn(a, b)  # a not in b
        self.assertIsInstance(a, b)  # isinstance(a, b)
        self.assertNotIsInstance(a, b)  # not isinstance(a, b)
        self.assertAlmostEqual(a, b)  # round(a-b, 7) == 0
        self.assertNotAlmostEqual(a, b)  # round(a-b, 7) != 0
        self.assertGreater(a, b)  # a > b
        self.assertGreaterEqual(a, b)  # a >= b
        self.assertLess(a, b)  # a < b
        self.assertLessEqual(a, b)  # a <= b
        self.assertRegex(s, r)  # r.search(s)
        self.assertNotRegex(s, r)  # not r.search(s)
        # assertItemsEqual() was Python 2 only (AttributeError on py3);
        # use assertCountEqual() below instead.
        self.assertDictContainsSubset(a, b)  # DEPRECATED: key/value pairs of a exist in b
        self.assertCountEqual(a, b)  # a and b have the same elements in the same number, regardless of their order
        # Compare different types of objects
        self.assertMultiLineEqual(a, b)  # Compare strings
        self.assertSequenceEqual(a, b)  # Compare sequences
        self.assertListEqual(a, b)  # Compare lists
        self.assertTupleEqual(a, b)  # Compare tuples
        self.assertSetEqual(a, b)  # Compare sets
        self.assertDictEqual(a, b)  # Compare dicts
        # To Test code that MUST Raise Exceptions:
        self.assertRaises(SomeException, callable, *args, **kwds)  # callable Must raise SomeException
        with self.assertRaises(SomeException) as cm:
            do_something_that_raises()  # This line Must raise SomeException
        # To Test code that MUST Raise Warnings (see std lib warning module):
        self.assertWarns(SomeWarning, callable, *args, **kwds)  # callable Must raise SomeWarning
        with self.assertWarns(SomeWarning) as cm:
            do_something_that_warns()  # This line Must raise SomeWarning
        # Assert messages on a Logger log object.
        self.assertLogs(logger, level)
        with self.assertLogs('foo', level='INFO') as cm:
            logging.getLogger('foo').info('example message')  # cm.output is 'example message'
# Idiomatic entry-point guard.  The original used
# ``__name__.__contains__("__main__")`` — a substring test that would
# also match any module whose name merely contains "__main__".
if __name__ == "__main__":
    print(__doc__)
    unittest.main()
    # Run just 1 test.
    # unittest.main(defaultTest='TestFoo.test_foo', warnings='ignore')
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
# -*- coding: utf-8 -*- | |
"""Unittest with Performance test.""" | |
import cProfile | |
import os | |
import sys | |
import unittest | |
from datetime import datetime | |
from random import randint | |
try: | |
import resource | |
except ImportError: | |
resource = None # MS Window dont have resource | |
# Randomize the order tests run in.  The default comparator returns
# -1 if x < y, 0 if x == y, 1 if x > y; answering randomly shuffles it.
def _random_test_order(_, x, y):
    return randint(-1, 1)

unittest.TestLoader.sortTestMethodsUsing = _random_test_order
def setUpModule():
    """Module-level fixture: runs once BEFORE any test in this module."""
    pass  # Nothing to prepare in this template.
def tearDownModule():
    """Module-level fixture: runs once AFTER all tests in this module."""
    pass  # Nothing to clean up in this template.
class PerformanceTestCase(unittest.TestCase):
    """TestCase that profiles every test with cProfile.

    setUp() starts a timer and the profiler; tearDown() stops them and
    prints elapsed wall-clock time (Seconds), memory usage in Megabytes
    (Linux only) and the cProfile stats of the test that just ran.
    (The original docstring referenced 'pprofile'/'statprof' and claimed
    Microseconds/Bytes; the code uses cProfile, Seconds and Megabytes.)
    """

    # Show full assertion diffs; forbid per-instance attributes.
    maxDiff, __slots__ = None, ()
    # Kept for backward compatibility; setUp() overrides it per test.
    started = datetime.now()
    # NOTE: shared by all tests of the class, so stats accumulate.
    profiler = cProfile.Profile()

    def setUp(self):
        """Start the timer and the profiler for the upcoming test."""
        super(PerformanceTestCase, self).setUp()
        # Reset per test: the class attribute is evaluated at import
        # time, so `elapsed` used to measure time since import instead
        # of the duration of the single test.
        self.started = datetime.now()
        self.profiler.enable()

    def tearDown(self):
        """Stop profiling, then print time, memory and profiler stats."""
        super(PerformanceTestCase, self).tearDown()
        self.profiler.disable()
        elapsed = (datetime.now() - self.started).total_seconds()
        if sys.platform.startswith("linux") and resource:
            # Physical RAM in Megabytes (page size * page count = bytes).
            total_ram = int(os.sysconf('SC_PAGE_SIZE') *
                            os.sysconf('SC_PHYS_PAGES') / 1_024 / 1_024)
            # On Linux ru_maxrss is reported in Kilobytes, NOT pages:
            # the old `* resource.getpagesize()` overstated usage ~4096x.
            used_ram = int(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1_024)
            # No spaces inside format specs: f"{x:.2% }" is a ValueError.
            print(f"""Memory used: {used_ram / total_ram:.2%}
            {used_ram:_} of {total_ram:_} Megabytes.""")
        print(f"{self.id()}, Time elapsed: {elapsed} Seconds.")
        self.profiler.print_stats()

    # NOTE: the original template overrode TestCase.addCleanup() with a
    # no-op.  addCleanup() is real unittest API (not an empty hook), and
    # a no-op override silently disables every registered cleanup, so
    # the override was removed.  Register cleanups from setUp() or from
    # tests with self.addCleanup(func, *args, **kwargs).

    @classmethod
    def setUpClass(cls):
        """Runs once BEFORE all tests of this class (prefer setUp())."""
        pass

    @classmethod
    def tearDownClass(cls):
        """Runs once AFTER all tests of this class (prefer tearDown())."""
        pass

    @unittest.skip("Demonstrating skipping")  # Unconditional skip.
    @unittest.skipIf("boolean_condition", "Reason to Skip Test here.")  # Skips when condition is truthy.
    @unittest.skipUnless("boolean_condition", "Reason to Skip Test here.")  # Skips when condition is falsy.
    @unittest.expectedFailure  # Test MUST fail; a failure counts as OK.
    def test_dummy(self):
        self.skipTest("Just examples, use as template!.")  # Skips this test only
        self.assertEqual(a, b)  # a == b
        self.assertNotEqual(a, b)  # a != b
        self.assertTrue(x)  # bool(x) is True
        self.assertFalse(x)  # bool(x) is False
        self.assertIs(a, b)  # a is b
        self.assertIsNot(a, b)  # a is not b
        self.assertIsNone(x)  # x is None
        self.assertIsNotNone(x)  # x is not None
        self.assertIn(a, b)  # a in b
        self.assertNotIn(a, b)  # a not in b
        self.assertIsInstance(a, b)  # isinstance(a, b)
        self.assertNotIsInstance(a, b)  # not isinstance(a, b)
        self.assertAlmostEqual(a, b)  # round(a-b, 7) == 0
        self.assertNotAlmostEqual(a, b)  # round(a-b, 7) != 0
        self.assertGreater(a, b)  # a > b
        self.assertGreaterEqual(a, b)  # a >= b
        self.assertLess(a, b)  # a < b
        self.assertLessEqual(a, b)  # a <= b
        self.assertRegex(s, r)  # r.search(s)
        self.assertNotRegex(s, r)  # not r.search(s)
        # assertItemsEqual() was Python 2 only (AttributeError on py3);
        # use assertCountEqual() below instead.
        self.assertDictContainsSubset(a, b)  # DEPRECATED: key/value pairs of a exist in b
        self.assertCountEqual(a, b)  # a and b have the same elements in the same number, regardless of their order
        # Compare different types of objects
        self.assertMultiLineEqual(a, b)  # Compare strings
        self.assertSequenceEqual(a, b)  # Compare sequences
        self.assertListEqual(a, b)  # Compare lists
        self.assertTupleEqual(a, b)  # Compare tuples
        self.assertSetEqual(a, b)  # Compare sets
        self.assertDictEqual(a, b)  # Compare dicts
        # To Test code that MUST Raise Exceptions:
        self.assertRaises(SomeException, callable, *args, **kwds)  # callable Must raise SomeException
        with self.assertRaises(SomeException) as cm:
            do_something_that_raises()  # This line Must raise SomeException
        # To Test code that MUST Raise Warnings (see std lib warning module):
        self.assertWarns(SomeWarning, callable, *args, **kwds)  # callable Must raise SomeWarning
        with self.assertWarns(SomeWarning) as cm:
            do_something_that_warns()  # This line Must raise SomeWarning
        # Assert messages on a Logger log object.
        self.assertLogs(logger, level)
        with self.assertLogs('foo', level='INFO') as cm:
            logging.getLogger('foo').info('example message')  # cm.output is 'example message'
# Idiomatic entry-point guard.  The original used
# ``__name__.__contains__("__main__")`` — a substring test that would
# also match any module whose name merely contains "__main__".
if __name__ == "__main__":
    print(__doc__)
    unittest.main()
    # Run just 1 test.
    # unittest.main(defaultTest='TestFoo.test_foo', warnings='ignore')
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
# -*- coding: utf-8 -*- | |
"""Unittest.""" | |
import unittest | |
from random import randint | |
# Randomize the order tests run in.  The default comparator returns
# -1 if x < y, 0 if x == y, 1 if x > y; answering randomly shuffles it.
def _random_test_order(_, x, y):
    return randint(-1, 1)

unittest.TestLoader.sortTestMethodsUsing = _random_test_order
def setUpModule():
    """Module-level fixture: runs once BEFORE any test in this module."""
    pass  # Nothing to prepare in this template.
def tearDownModule():
    """Module-level fixture: runs once AFTER all tests in this module."""
    pass  # Nothing to clean up in this template.
class TestName(unittest.TestCase):
    """Template TestCase showing the common fixtures and assert methods."""

    # Show full assertion diffs; forbid per-instance attributes.
    maxDiff, __slots__ = None, ()

    def setUp(self):
        """Prepare the test fixture. Runs BEFORE each test method."""
        pass

    def tearDown(self):
        """Tear down the test fixture. Runs AFTER each test method."""
        pass

    # NOTE: the original template overrode TestCase.addCleanup() with a
    # no-op.  addCleanup() is real unittest API (not an empty hook), and
    # a no-op override silently disables every registered cleanup, so
    # the override was removed.  Register cleanups from setUp() or from
    # tests with self.addCleanup(func, *args, **kwargs).

    @classmethod
    def setUpClass(cls):
        """Runs once BEFORE all tests of this class (prefer setUp())."""
        pass

    @classmethod
    def tearDownClass(cls):
        """Runs once AFTER all tests of this class (prefer tearDown())."""
        pass

    @unittest.skip("Demonstrating skipping")  # Unconditional skip.
    @unittest.skipIf("boolean_condition", "Reason to Skip Test here.")  # Skips when condition is truthy.
    @unittest.skipUnless("boolean_condition", "Reason to Skip Test here.")  # Skips when condition is falsy.
    @unittest.expectedFailure  # Test MUST fail; a failure counts as OK.
    def test_dummy(self):
        self.skipTest("Just examples, use as template!.")  # Skips this test only
        self.assertEqual(a, b)  # a == b
        self.assertNotEqual(a, b)  # a != b
        self.assertTrue(x)  # bool(x) is True
        self.assertFalse(x)  # bool(x) is False
        self.assertIs(a, b)  # a is b
        self.assertIsNot(a, b)  # a is not b
        self.assertIsNone(x)  # x is None
        self.assertIsNotNone(x)  # x is not None
        self.assertIn(a, b)  # a in b
        self.assertNotIn(a, b)  # a not in b
        self.assertIsInstance(a, b)  # isinstance(a, b)
        self.assertNotIsInstance(a, b)  # not isinstance(a, b)
        self.assertAlmostEqual(a, b)  # round(a-b, 7) == 0
        self.assertNotAlmostEqual(a, b)  # round(a-b, 7) != 0
        self.assertGreater(a, b)  # a > b
        self.assertGreaterEqual(a, b)  # a >= b
        self.assertLess(a, b)  # a < b
        self.assertLessEqual(a, b)  # a <= b
        self.assertRegex(s, r)  # r.search(s)
        self.assertNotRegex(s, r)  # not r.search(s)
        # assertItemsEqual() was Python 2 only (AttributeError on py3);
        # use assertCountEqual() below instead.
        self.assertDictContainsSubset(a, b)  # DEPRECATED: key/value pairs of a exist in b
        self.assertCountEqual(a, b)  # a and b have the same elements in the same number, regardless of their order
        # Compare different types of objects
        self.assertMultiLineEqual(a, b)  # Compare strings
        self.assertSequenceEqual(a, b)  # Compare sequences
        self.assertListEqual(a, b)  # Compare lists
        self.assertTupleEqual(a, b)  # Compare tuples
        self.assertSetEqual(a, b)  # Compare sets
        self.assertDictEqual(a, b)  # Compare dicts
        # To Test code that MUST Raise Exceptions:
        self.assertRaises(SomeException, callable, *args, **kwds)  # callable Must raise SomeException
        with self.assertRaises(SomeException) as cm:
            do_something_that_raises()  # This line Must raise SomeException
        # To Test code that MUST Raise Warnings (see std lib warning module):
        self.assertWarns(SomeWarning, callable, *args, **kwds)  # callable Must raise SomeWarning
        with self.assertWarns(SomeWarning) as cm:
            do_something_that_warns()  # This line Must raise SomeWarning
        # Assert messages on a Logger log object.
        self.assertLogs(logger, level)
        with self.assertLogs('foo', level='INFO') as cm:
            logging.getLogger('foo').info('example message')  # cm.output is 'example message'
# Idiomatic entry-point guard.  The original used
# ``__name__.__contains__("__main__")`` — a substring test that would
# also match any module whose name merely contains "__main__".
if __name__ == "__main__":
    print(__doc__)
    unittest.main()
    # Run just 1 test.
    # unittest.main(defaultTest='TestFoo.test_foo', warnings='ignore')
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# -*- coding: utf-8 -*- | |
import unittest | |
class TestName(unittest.TestCase): | |
maxDiff, __slots__ = None, () | |
def test_dummy(self): | |
self.assertEqual(a, b) # a == b | |
self.assertTrue(x) # bool(x) is True |
Hi, thanks for sharing. I hit one issue with the Performance template when using pytest parametrize
.
I am sure there is no issue for my code:
@pytest.mark.parametrize( "name, age",
[
("John Doe", 8),
("Jane Doe", 9),
("Foo Bar", 12),
])
def test_say_hello(self, name, age):
"""Test the say_hello method."""
print(f">>> testing {name} {age}<<<")
But when I run the unit test script, the error below occurs:
================================================= FAILURES =================================================
____________________________________ PerformanceTestCase.test_say_hello ____________________________________
self = <unittest.case._Outcome object at 0x1020802d0>
test_case = <tests.test_unittest_performance_template.PerformanceTestCase testMethod=test_say_hello>
subTest = False
@contextlib.contextmanager
def testPartExecutor(self, test_case, subTest=False):
old_success = self.success
self.success = True
try:
> yield
/opt/homebrew/Cellar/[email protected]/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/unittest/case.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/homebrew/Cellar/[email protected]/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/unittest/case.py:623: in run
self._callTestMethod(testMethod)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tests.test_unittest_performance_template.PerformanceTestCase testMethod=test_say_hello>
method = <bound method PerformanceTestCase.test_say_hello of <tests.test_unittest_performance_template.PerformanceTestCase testMethod=test_say_hello>>
def _callTestMethod(self, method):
> if method() is not None:
E TypeError: PerformanceTestCase.test_say_hello() missing 2 required positional arguments: 'name' and 'age'
/opt/homebrew/Cellar/[email protected]/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/unittest/case.py:579: TypeError
---------- coverage: platform darwin, python 3.11.6-final-0 ----------
Coverage HTML written to dir htmlcov
========================================= short test summary info ==========================================
FAILED tests/test_unittest_performance_template.py::PerformanceTestCase::test_say_hello - TypeError: PerformanceTestCase.test_say_hello() missing 2 required positional arguments: 'name' and 'age'
I have tried my best to figure out which line has the error, but failed. Could you help me out? Thanks.
Updated on 24Mar2023 22:27
I came up with a work-around by removing the unnecessary (unit test class) code. But why does the previous one not work?
import pytest
import os
import sys
import resource
import cProfile
from datetime import datetime
class TestPerformance:
maxDiff, __slots__ = None, ()
started = datetime.now()
profiler = cProfile.Profile()
def setup_method(self, method):
"""This starts statprof module to check Performance."""
self.profiler.enable()
def teardown_method(self, method):
"""This stops statprof module checks,display Performance results."""
self.profiler.disable()
elapsed = (datetime.now() - self.started).total_seconds()
if sys.platform.startswith("linux") and resource:
total_ram = int(os.sysconf('SC_PAGE_SIZE') *
os.sysconf('SC_PHYS_PAGES') / 1_024 / 1_024)
used_ram = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss *
resource.getpagesize() / 1_024 / 1_024)
print(f"""Memory used: {used_ram / total_ram:.2%}
{used_ram:_} of {total_ram:_} Megabytes.""")
print(f"{method.__name__}, Time elapsed: {elapsed} Seconds.")
self.profiler.print_stats()
@pytest.mark.parametrize("name",
[
"John Doe",
"Jane Doe",
"Foo Bar",
])
def test_say_hello(self, name):
"""Test the say_hello method."""
print(f"Hello {name}")
# If you want to run the test using pytest, just run:
# pytest tests/test_unittest_performance_template.py
Not designed to be run with pytest, so maybe its a pytest bug.
Not designed to be run with pytest, so maybe its a pytest bug.
Thanks for replying. Closing the issue. Thanks again for your template!
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
https://www.peterbe.com/plog/how-to-do-performance-micro-benchmarks-in-python ❓
https://devdocs.io/python~3.6/library/statistics ❓