Created
October 23, 2019 10:42
-
-
Save mbukatov/9287daffc1bb79d225946d45b64feb27 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
==================================== ERRORS ====================================
______________________ ERROR at setup of test_deployment _______________________
request = <SubRequest 'cluster' for <Function test_deployment>>
log_cli_level = 'INFO'
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
    """
    This fixture initiates deployment for both OCP and OCS clusters.
    Specific platform deployment classes will handle the fine details
    of action
    """
    log.info(f"All logs located at {ocsci_log_path()}")
    teardown = config.RUN['cli_params']['teardown']
    deploy = config.RUN['cli_params']['deploy']
    factory = dep_factory.DeploymentFactory()
    deployer = factory.get_deployment()
    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:
        def cluster_teardown_finalizer():
            deployer.destroy_cluster(log_cli_level)
        request.addfinalizer(cluster_teardown_finalizer)
        log.info("Will teardown cluster because --teardown was provided")
    # Download client
    force_download = (
        config.RUN['cli_params'].get('deploy')
        and config.DEPLOYMENT['force_download_client']
    )
    get_openshift_client(force_download=force_download)
    if deploy:
        # Deploy cluster
>       deployer.deploy_cluster(log_cli_level)
tests/conftest.py:723:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
ocs_ci/deployment/deployment.py:90: in deploy_cluster
    self.deploy_ocs()
ocs_ci/deployment/deployment.py:479: in deploy_ocs
    pods_list
ocs_ci/ocs/resources/pod.py:819: in validate_pods_are_respinned_and_running_state
    pod_obj = pod.get()
ocs_ci/ocs/resources/ocs.py:85: in get
    resource_name=self.name, out_yaml_format=out_yaml_format
ocs_ci/ocs/ocp.py:168: in get
    return self.exec_oc_cmd(command)
ocs_ci/ocs/ocp.py:100: in exec_oc_cmd
    out = run_cmd(cmd=oc_cmd, secrets=secrets, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cmd = ['oc', '-n', 'openshift-monitoring', '--kubeconfig', '/home/ocsqe/data/cluster-2019-10-23.1/auth/kubeconfig', 'get', ...]
secrets = None, kwargs = {}
masked_cmd = 'oc -n openshift-monitoring --kubeconfig /home/ocsqe/data/cluster-2019-10-23.1/auth/kubeconfig get Pod alertmanager-main-0 -n openshift-monitoring -o yaml'
r = CompletedProcess(args=['oc', '-n', 'openshift-monitoring', '--kubeconfig', '/home/ocsqe/data/cluster-2019-10-23.1/auth...-o', 'yaml'], returncode=1, stdout=b'', stderr=b'Error from server (NotFound): pods "alertmanager-main-0" not found\n')
def run_cmd(cmd, secrets=None, **kwargs):
    """
    Run an arbitrary command locally
    Args:
        cmd (str): command to run
        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(**kwargs)
    Raises:
        CommandFailed: In case the command execution fails
    Returns:
        (str) Decoded stdout of command
    """
    masked_cmd = mask_secrets(cmd, secrets)
    log.info(f"Executing command: {masked_cmd}")
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    r = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        **kwargs
    )
    log.debug(f"Command output: {r.stdout.decode()}")
    if r.stderr and not r.returncode:
        log.warning(f"Command warning: {mask_secrets(r.stderr.decode(), secrets)}")
    if r.returncode:
        raise CommandFailed(
>           f"Error during execution of command: {masked_cmd}."
            f"\nError is {mask_secrets(r.stderr.decode(), secrets)}"
        )
E   ocs_ci.ocs.exceptions.CommandFailed: Error during execution of command: oc -n openshift-monitoring --kubeconfig /home/ocsqe/data/cluster-2019-10-23.1/auth/kubeconfig get Pod alertmanager-main-0 -n openshift-monitoring -o yaml.
E   Error is Error from server (NotFound): pods "alertmanager-main-0" not found
ocs_ci/utility/utils.py:410: CommandFailed
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.