Skip to content

Instantly share code, notes, and snippets.

@Kyungpyo-Kim
Last active August 1, 2025 06:26
Show Gist options
  • Save Kyungpyo-Kim/d77d650e7b9760a07a36114d51406044 to your computer and use it in GitHub Desktop.
import subprocess
import sys
import os
import pytest
from pathlib import Path
class TestPythonFileExecution:
    """Execute Python files located at paths relative to this test file."""

    @pytest.fixture(autouse=True)
    def setup_method(self):
        """Record the directory containing this test file before every test."""
        self.base_path = Path(__file__).parent

    def _run(self, script, extra_args=(), env=None):
        """Run *script* with the current interpreter and return the CompletedProcess.

        ``check=False`` so a non-zero exit code is reported via the return
        value rather than an exception; a 30 second timeout guards against
        hung scripts.
        """
        return subprocess.run(
            [sys.executable, str(script), *extra_args],
            capture_output=True,
            text=True,
            timeout=30,
            env=env,
            check=False,
        )

    def test_single_python_file(self):
        """A single script should exist and exit with status 0."""
        script = self.base_path / "src" / "main.py"
        assert script.exists(), f"File does not exist: {script}"
        result = self._run(script)
        assert result.returncode == 0, f"File execution failed. Error: {result.stderr}"
        # Optional: validate output content
        # assert "expected output" in result.stdout

    def test_python_file_with_arguments(self):
        """A script invoked with command line arguments should exit cleanly."""
        script = self.base_path / "scripts" / "process.py"
        assert script.exists(), f"File does not exist: {script}"
        result = self._run(script, ("--input", "test_data"))
        assert result.returncode == 0, f"File execution failed. Error: {result.stderr}"

    @pytest.mark.parametrize("file_path", [
        "utils/helper.py",
        "modules/calculator.py",
        "data/processor.py"
    ])
    def test_multiple_python_files(self, file_path):
        """Each listed script should run without error; missing ones are skipped."""
        script = self.base_path / file_path
        if not script.exists():
            pytest.skip(f"File does not exist: {script}")
        result = self._run(script)
        assert result.returncode == 0, f"{file_path} execution failed. Error: {result.stderr}"

    def test_python_file_with_imports(self):
        """A script importing sibling modules runs with PYTHONPATH pointing here."""
        script = self.base_path / "app" / "main.py"
        if not script.exists():
            pytest.skip(f"File does not exist: {script}")
        # Extend the child's import path so its sibling modules resolve.
        child_env = os.environ.copy()
        child_env['PYTHONPATH'] = str(self.base_path)
        result = self._run(script, env=child_env)
        assert result.returncode == 0, f"File execution failed. Error: {result.stderr}"

    def test_python_file_output_validation(self):
        """Check the script's stdout/stderr content, not just its exit code."""
        script = self.base_path / "examples" / "hello.py"
        if not script.exists():
            pytest.skip(f"File does not exist: {script}")
        result = self._run(script)
        assert result.returncode == 0, f"File execution failed. Error: {result.stderr}"
        assert "Hello" in result.stdout, "Expected output not found"
        assert "Error" not in result.stderr, "Error message found"

    def test_python_file_syntax_check(self):
        """Byte-compile the file to catch syntax errors without executing it."""
        script = self.base_path / "src" / "module.py"
        if not script.exists():
            pytest.skip(f"File does not exist: {script}")
        # ``-m py_compile`` parses and compiles only; the module body never runs.
        result = subprocess.run(
            [sys.executable, "-m", "py_compile", str(script)],
            capture_output=True,
            text=True,
            check=False
        )
        assert result.returncode == 0, f"Syntax error found: {result.stderr}"
@pytest.fixture
def python_files():
    """Provide every non-test ``*.py`` file found under this test's directory."""
    root = Path(__file__).parent
    # Exclude pytest's own test modules from the candidates.
    return [
        candidate
        for candidate in root.glob("**/*.py")
        if not candidate.name.startswith("test_")
    ]
class TestAllPythonFiles:
    """Automatically test every non-test Python file found under this directory."""

    def test_all_files_execute_successfully(self, python_files):
        """Run every discovered file and report all failures at once.

        Failures are collected instead of stopping at the first one, so a
        single run shows every broken script.
        """
        failed_files = []
        for file_path in python_files:
            try:
                result = subprocess.run(
                    [sys.executable, str(file_path)],
                    capture_output=True,
                    text=True,
                    timeout=30,
                    check=False
                )
            except subprocess.TimeoutExpired:
                # A hung script is a failure too; previously this exception
                # escaped and aborted the whole test instead of being reported.
                failed_files.append({
                    'file': file_path,
                    'error': 'execution timed out after 30 seconds'
                })
                continue
            if result.returncode != 0:
                failed_files.append({
                    'file': file_path,
                    'error': result.stderr.strip()
                })
        if failed_files:
            error_msg = "\n".join(
                f"{f['file']}: {f['error']}" for f in failed_files
            )
            pytest.fail(f"The following files failed to execute:\n{error_msg}")

    # BUG FIX: ``pytest.lazy_fixture`` does not exist in pytest itself (it was
    # provided by the third-party pytest-lazy-fixture plugin), so the original
    # parametrize raised AttributeError at collection time.  Fixtures cannot be
    # used as parametrize values anyway; collect the file list at import time,
    # mirroring the ``python_files`` fixture's filtering.
    @pytest.mark.parametrize(
        "file_path",
        [
            p for p in Path(__file__).parent.glob("**/*.py")
            if not p.name.startswith("test_")
        ],
        ids=str,
    )
    def test_individual_file_execution(self, file_path):
        """Each discovered file is executed as its own parametrized test case."""
        result = subprocess.run(
            [sys.executable, str(file_path)],
            capture_output=True,
            text=True,
            timeout=30,
            check=False
        )
        assert result.returncode == 0, f"File {file_path} failed: {result.stderr}"
# Custom pytest markers
# NOTE: module-level ``pytestmark`` applies the listed marks to every test in
# this module, regardless of where in the file the assignment appears.
pytestmark = [
    pytest.mark.integration,  # Mark all tests as integration tests
]
# Pytest configuration functions
def pytest_configure(config):
    """Register this module's custom markers with pytest."""
    marker_lines = (
        "integration: mark test as integration test",
        "slow: mark test as slow running",
    )
    for marker_line in marker_lines:
        config.addinivalue_line("markers", marker_line)
def pytest_collection_modifyitems(config, items):
    """Automatically tag potentially long-running tests with the ``slow`` marker."""
    slow_hints = ("multiple_files", "all_files")
    for item in items:
        # Bulk-execution tests spawn many subprocesses and tend to be slow.
        if any(hint in item.name for hint in slow_hints):
            item.add_marker(pytest.mark.slow)
# Helper functions for custom test discovery
def collect_python_files(base_path=None, patterns=None):
    """Gather Python files under *base_path* matching *patterns*, skipping test files.

    Defaults to this file's directory and the recursive ``**/*.py`` pattern.
    """
    root = Path(__file__).parent if base_path is None else base_path
    globs = ["**/*.py"] if patterns is None else patterns
    # Flatten all pattern matches and drop pytest's own test modules.
    return [
        candidate
        for pattern in globs
        for candidate in root.glob(pattern)
        if not candidate.name.startswith("test_")
    ]
# Pytest hooks
# NOTE(review): pytest only honors ``pytest_addoption`` defined in conftest.py
# or in a plugin; in a plain test module it is ignored — move it to conftest.py
# if the options are not recognized.
def pytest_addoption(parser):
    """Add custom command line options.

    ``--file-pattern``: glob pattern selecting which files to test.
    ``--timeout``: per-file execution timeout in seconds.
    """
    parser.addoption(
        "--file-pattern",
        action="store",
        default="**/*.py",
        help="Pattern for files to test"
    )
    parser.addoption(
        "--timeout",
        action="store",
        # Pass the default as an int: the original used the string "30",
        # which only worked because argparse re-parses string defaults
        # through ``type``.  An explicit int is unambiguous.
        default=30,
        type=int,
        help="Timeout for file execution in seconds"
    )
@pytest.fixture
def file_pattern(request):
    """Expose the ``--file-pattern`` command line option as a fixture."""
    option_name = "--file-pattern"
    return request.config.getoption(option_name)
@pytest.fixture
def execution_timeout(request):
    """Expose the ``--timeout`` command line option (seconds) as a fixture."""
    option_name = "--timeout"
    return request.config.getoption(option_name)
# Example usage with custom fixtures
class TestWithCustomOptions:
"""Test class demonstrating custom options usage"""
def test_with_custom_pattern(self, file_pattern, execution_timeout):
"""Test using custom file pattern and timeout"""
base_path = Path(__file__).parent
files = list(base_path.glob(file_pattern))
for file_path in files:
if file_path.name.startswith("test_"):
continue
result = subprocess.run(
[sys.executable, str(file_path)],
capture_output=True,
text=True,
timeout=execution_timeout,
check=False
)
assert result.returncode == 0, f"File {file_path} failed: {result.stderr}"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment