@dmccuk
Last active October 21, 2025 08:38
#!/usr/bin/env python3
import os
import sys
import json
import socket
import ssl
import traceback
import requests
from urllib.parse import urlparse
from requests.utils import get_environ_proxies

def jprint(obj):
    print(json.dumps(obj, indent=2))

def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

# Inputs
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

if not SN_HOST:
    print("SN_HOST is missing", file=sys.stderr)
    sys.exit(1)

base_url = f"https://{SN_HOST}"
test_url = f"{base_url}/api/now/table/sys_user?sysparm_limit=1"

# Show runtime info
jprint({
    "step": "runtime",
    "python_executable": sys.executable,
    "python_version": sys.version,
})

# 1, show proxy env
proxy_env = {k: os.getenv(k) for k in ["HTTP_PROXY","http_proxy","HTTPS_PROXY","https_proxy","NO_PROXY","no_proxy"]}
jprint({"step": "env", "proxy_env": proxy_env})

# 2, DNS resolution
try:
    ip = socket.gethostbyname(SN_HOST)
    jprint({"step": "dns", "host": SN_HOST, "resolved_ip": ip})
except Exception as e:
    jprint({"step": "dns", "host": SN_HOST, "error": f"{e}"})
    sys.exit(1)

# 3, raw TCP connect test to ServiceNow 443
try:
    with socket.create_connection((SN_HOST, 443), timeout=10) as s:
        jprint({"step": "tcp_sn", "host": SN_HOST, "port": 443, "connect_ok": True})
except Exception as e:
    jprint({"step": "tcp_sn", "host": SN_HOST, "port": 443, "connect_ok": False, "error": f"{e}"})

# 4, minimal TLS handshake
try:
    ctx = ssl.create_default_context()
    if not SN_VERIFY_SSL:
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    with socket.create_connection((SN_HOST, 443), timeout=10) as raw:
        with ctx.wrap_socket(raw, server_hostname=SN_HOST) as ssock:
            cert = ssock.getpeercert()
            jprint({
                "step": "tls",
                "handshake_ok": True,
                "subject": cert.get("subject", []),
                "issuer": cert.get("issuer", []),
            })
except Exception as e:
    jprint({"step": "tls", "handshake_ok": False, "error": f"{e}"})

# 5, see how requests resolves proxies for this URL
auto = get_environ_proxies(test_url)
jprint({"step": "requests_auto_proxy", "for_url": test_url, "resolved": auto})

def parse_proxy(url):
    if not url:
        return None, None
    u = urlparse(url)
    host = u.hostname
    port = u.port or (443 if u.scheme == "https" else 80)
    return host, port

proxy_url = auto.get("https") or os.getenv("HTTPS_PROXY") or os.getenv("https_proxy") or os.getenv("HTTP_PROXY") or os.getenv("http_proxy")
phost, pport = parse_proxy(proxy_url)

# 6, raw TCP to the proxy, this is what matters when requests is using a proxy
if phost:
    try:
        with socket.create_connection((phost, pport), timeout=10) as s:
            jprint({"step": "tcp_proxy", "host": phost, "port": pport, "connect_ok": True})
    except Exception as e:
        jprint({"step": "tcp_proxy", "host": phost, "port": pport, "connect_ok": False, "error": f"{e}"})
else:
    jprint({"step": "tcp_proxy", "note": "no proxy detected for this URL"})

# helper to run a requests GET with a given session and label
def try_get(label, session, url, verify):
    try:
        r = session.get(
            url,
            auth=(SN_USERNAME, SN_PASSWORD) if SN_USERNAME or SN_PASSWORD else None,
            headers={"Accept": "application/json"},
            timeout=20,
            verify=verify,
        )
        out = {"step": "https", "label": label, "status": r.status_code}
        try:
            js = r.json()
            out["json_top_keys"] = list(js.keys())
        except Exception:
            out["body_snippet"] = r.text[:200]
        jprint(out)
        return r
    except requests.exceptions.ProxyError as e:
        jprint({"step": "https", "label": label, "error": "ProxyError", "detail": str(e)})
    except requests.exceptions.SSLError as e:
        jprint({"step": "https", "label": label, "error": "SSLError", "detail": str(e)})
    except requests.exceptions.ConnectTimeout:
        jprint({"step": "https", "label": label, "error": "ConnectTimeout"})
    except requests.exceptions.ReadTimeout:
        jprint({"step": "https", "label": label, "error": "ReadTimeout"})
    except Exception as e:
        jprint({"step": "https", "label": label, "error": "Exception", "detail": str(e)})
        traceback.print_exc()
    return None

# 7, sessions
s_env = requests.Session()
s_env.trust_env = True

s_noproxy = requests.Session()
s_noproxy.trust_env = False
s_noproxy.proxies = {"http": None, "https": None}

# 8, run four checks, with and without proxy, verify on and off
try_get("env_proxy_verify_on",  s_env,     test_url, verify=SN_VERIFY_SSL)
try_get("env_proxy_verify_off", s_env,     test_url, verify=False)
try_get("noproxy_verify_on",    s_noproxy, test_url, verify=SN_VERIFY_SSL)
try_get("noproxy_verify_off",   s_noproxy, test_url, verify=False)

print("Done")

#!/usr/bin/env python3
import os, sys, json, socket, ssl, requests, traceback
from urllib.parse import urlparse

def jprint(obj):
    print(json.dumps(obj, indent=2))

def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1","true","yes","on"}

# Required
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

if not SN_HOST:
    print("SN_HOST is missing", file=sys.stderr)
    sys.exit(1)

base_url = f"https://{SN_HOST}"
test_url = f"{base_url}/api/now/table/sys_user?sysparm_limit=1"

# 1, show proxy env
proxy_env = {k: os.getenv(k) for k in ["HTTP_PROXY","http_proxy","HTTPS_PROXY","https_proxy","NO_PROXY","no_proxy"]}
jprint({"step":"env", "proxy_env": proxy_env})

# 2, DNS resolution
try:
    ip = socket.gethostbyname(SN_HOST)
    jprint({"step":"dns", "host": SN_HOST, "resolved_ip": ip})
except Exception as e:
    jprint({"step":"dns", "host": SN_HOST, "error": f"{e}"})
    sys.exit(1)

# 3, raw TCP connect test to 443
try:
    with socket.create_connection((SN_HOST, 443), timeout=10) as s:
        jprint({"step":"tcp", "connect_443_ok": True})
except Exception as e:
    jprint({"step":"tcp", "connect_443_ok": False, "error": f"{e}"})

# 4, minimal TLS handshake, optional, can help see TLS interception problems
try:
    ctx = ssl.create_default_context()
    if not SN_VERIFY_SSL:
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    with socket.create_connection((SN_HOST, 443), timeout=10) as raw:
        with ctx.wrap_socket(raw, server_hostname=SN_HOST) as ssock:
            cert = ssock.getpeercert()
            jprint({"step":"tls", "handshake_ok": True, "subject": cert.get("subject", []), "issuer": cert.get("issuer", [])})
except Exception as e:
    jprint({"step":"tls", "handshake_ok": False, "error": f"{e}"})

# helper to run a requests GET with a given session and label
def try_get(label, session, url, verify):
    try:
        r = session.get(url, auth=(SN_USERNAME, SN_PASSWORD), headers={"Accept":"application/json"}, timeout=20, verify=verify)
        out = {"step":"https", "label": label, "status": r.status_code}
        try:
            js = r.json()
            out["json_top_keys"] = list(js.keys())
        except Exception:
            out["body_snippet"] = r.text[:200]
        jprint(out)
        return r
    except requests.exceptions.ProxyError as e:
        jprint({"step":"https", "label": label, "error":"ProxyError", "detail": str(e)})
    except requests.exceptions.SSLError as e:
        jprint({"step":"https", "label": label, "error":"SSLError", "detail": str(e)})
    except requests.exceptions.ConnectTimeout:
        jprint({"step":"https", "label": label, "error":"ConnectTimeout"})
    except requests.exceptions.ReadTimeout:
        jprint({"step":"https", "label": label, "error":"ReadTimeout"})
    except Exception as e:
        jprint({"step":"https", "label": label, "error":"Exception", "detail": str(e)})
        traceback.print_exc()
    return None

# 5, requests session that uses environment proxies
s_env = requests.Session()
s_env.trust_env = True

# 6, requests session that forces no proxy
s_noproxy = requests.Session()
s_noproxy.trust_env = False
s_noproxy.proxies = {"http": None, "https": None}

# 7, run four checks, with and without proxy, with verify on and off
try_get("env_proxy_verify_on",  s_env,     test_url, verify=SN_VERIFY_SSL)
try_get("env_proxy_verify_off", s_env,     test_url, verify=False)
try_get("noproxy_verify_on",    s_noproxy, test_url, verify=SN_VERIFY_SSL)
try_get("noproxy_verify_off",   s_noproxy, test_url, verify=False)

print("Done")

Grab Morpheus variables with Python:

#!/usr/bin/env python3
import json
import sys

def main():
    try:
        # Morpheus injects variables into stdin as JSON
        data = json.load(sys.stdin)
    except Exception as e:
        print(json.dumps({"error": f"Could not parse Morpheus input: {e}"}))
        sys.exit(1)

    # Dump everything it gave us
    print(json.dumps(data, indent=2))

    # Show some specific common keys if they exist
    morpheus = data.get("morpheus", {})
    out = {
        "instance_name": morpheus.get("instance", {}).get("name"),
        "instance_type": morpheus.get("instance", {}).get("instanceType", {}).get("code"),
        "instance_context": morpheus.get("instance", {}).get("instanceContext"),
        "server_platform": morpheus.get("server", {}).get("platform"),
        "server_os_type": morpheus.get("server", {}).get("osType"),
        "cloud": morpheus.get("cloud", {}).get("name"),
        "group": morpheus.get("group", {}).get("name"),
    }
    print("\n--- Extracted sample values ---")
    print(json.dumps(out, indent=2))

if __name__ == "__main__":
    main()

Basics for using Ansible to raise Change tickets in ServiceNow:


0) Prereqs (once)

Install the collection:

ansible-galaxy collection install servicenow.itsm

(The certified servicenow.itsm collection includes change_request, change_request_info, api_info, etc.).

Auth options: username/password or OAuth. Store creds as env vars or an Ansible Vault. (The collection supports direct API auth; Red Hat’s docs cover the integration at a high level.)

Typical env vars you can set in your controller:

export SN_HOST="yourinstance.service-now.com"
export SN_USERNAME="ansible_bot"
export SN_PASSWORD="***"

1) Prove connectivity (read-only “smoke test”)

Use servicenow.itsm.api_info (fastest ping without modifying anything), or list CRs with change_request_info.

Playbook (api_info):

- name: SN connectivity check
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  tasks:
    - name: Call a simple GET to verify auth & reachability
      api_info:
        resource: /api/now/table/sys_properties
        params:
          sysparm_limit: 1
      register: api_probe

    - name: Assert we got data back
      assert:
        that:
          - api_probe.status == 200

api_info is part of the same collection and is perfect for a smoke test.

Alternative (list a few CRs):

- name: SN change list check
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  tasks:
    - name: List some change requests
      change_request_info:
        query:
          - state: = new
        limit: 3
      register: list_cr

    - debug: var=list_cr.records

change_request_info just reads—safe for proving RBAC + API access.

2) Raise a basic Change (normal change)

Minimum required fields vary by your SN workflows, policies, and plugins (e.g., conflict detection can require planned dates + CI). But universally useful fields are short_description, type (normal/standard/emergency), optional category/priority/risk/impact, and assignment_group. ServiceNow’s docs emphasise these core attributes and note planned start/end & CI can be required if conflict detection is enabled.

Playbook (create a Normal Change):

- name: Create a normal change via Ansible
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  vars:
    # Example - use env vars or Vault in real use
    sn_instance: "{{ lookup('env','SN_HOST') }}"
  tasks:
    - name: Create (or update) a normal Change
      change_request:
        state: new
        type: normal
        short_description: "Deploy web tier patch to prod (via Ansible)"
        description: "Rolling patch of web tier; no downtime expected."
        category: "Software"
        impact: "2"          # 1=High, 2=Med, 3=Low (your instance may differ)
        urgency: "3"         # optional, pairs with impact -> priority
        priority: "4"        # some orgs let SN derive this
        cmdb_ci: "prod-web-01"     # may be required with conflict detection
        assignment_group: "Platform Ops"
        planned_start_date: "2025-09-11 21:00:00"
        planned_end_date:   "2025-09-11 22:00:00"
      register: cr

    - debug:
        msg: "Created/updated Change {{ cr.record.number }} (sys_id={{ cr.record.sys_id }})"

Module: servicenow.itsm.change_request.

Using a Standard Change template

If your process mandates a Standard Change, supply the template name so SN applies predefined fields/approvals:

    - name: Create a standard change from template
      change_request:
        state: new
        type: standard
        template: "Linux Web Patch - No Downtime"
        short_description: "StdChg: Linux web patch (Ansible)"

Templates are the recommended way to enforce consistent Standard Changes; SN community docs outline template usage and caveats.

3) Field reality check (what’s “really” required?)

It depends on your configuration: business rules, UI policies, mandatory form fields, and plugins (e.g., conflict detection) can hard-require planned_start/end and cmdb_ci.

Assignment group/state ties: many workflows require an assignment_group once you move past new to assess/schedule/implement. (Common in the change_request state mapping used by the collection; see the sketch after this list.)

Standard Change specifics: often must come from an approved template; free-form creation may be restricted.
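
To illustrate the assignment-group point, a minimal sketch, assuming the SN_HOST, SN_USERNAME and SN_PASSWORD environment variables are exported as in the prereqs; the change number, group name and target state are illustrative and depend on your instance's change model:

- name: Move an existing change past new, with an assignment group set
  servicenow.itsm.change_request:
    number: "CHG0030001"              # hypothetical existing change
    state: assess                     # transitions past new generally expect an assignment group
    assignment_group: "Platform Ops"  # must be a real group in your instance
  register: updated_cr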

4) Troubleshooting quick hits

401/403: wrong creds or missing roles; verify the bot user has rights on change_request table (read/write). (Integration overview + collection usage.)

Validation errors: your instance has extra mandatory fields; run a dry read of an existing good Change with change_request_info to see which fields are typically populated (see the sketch after this list), or use a template.

Field choices don’t match (e.g., impact, urgency): admins can rename values; the collection allows mappings/overrides if needed.
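
A minimal sketch of that dry read, assuming CHG0030001 is a known-good change in your instance and the SN_* environment variables are set:

- name: Read back a known-good change to see which fields it has populated
  servicenow.itsm.change_request_info:
    number: "CHG0030001"   # hypothetical change number
  register: good_cr

- name: Show the full record
  ansible.builtin.debug:
    var: good_cr.records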

Optional: Prove end-to-end in one run

GET with api_info (assert 200).

CREATE with change_request.

READ BACK with change_request_info using number: "{{ cr.record.number }}".

This gives you a single job log that proves connectivity, creation, and retrieval, the “basic set” you mentioned. Modules and flows above are current for the servicenow.itsm collection and the Red Hat guidance on the certified integration.
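
A minimal end-to-end sketch along those lines, using ansible.builtin.uri for the read-only probe (as the connectivity playbooks later in this gist do) and the collection modules for the create and read-back; it assumes the SN_* environment variables are exported and that short_description plus type are enough to create a change in your instance:

- name: ServiceNow end-to-end smoke test
  hosts: localhost
  gather_facts: false
  tasks:
    - name: GET probe against the Table API
      ansible.builtin.uri:
        url: "https://{{ lookup('env', 'SN_HOST') }}/api/now/table/sys_properties?sysparm_limit=1"
        method: GET
        force_basic_auth: true
        url_username: "{{ lookup('env', 'SN_USERNAME') }}"
        url_password: "{{ lookup('env', 'SN_PASSWORD') }}"
        headers:
          Accept: application/json
        status_code: 200
      register: probe

    - name: Create a change
      servicenow.itsm.change_request:
        state: new
        type: normal
        short_description: "End-to-end probe via Ansible"
      register: cr

    - name: Read the change back by number
      servicenow.itsm.change_request_info:
        number: "{{ cr.record.number }}"
      register: readback

    - name: Assert we created and retrieved the same change
      ansible.builtin.assert:
        that:
          - readback.records | length == 1
          - readback.records[0].number == cr.record.number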

If you share which type of change LondonIAC prefers (Normal vs Standard + template name) and whether conflict detection is on, I can tailor the playbook to exactly match your instance’s mandatory fields and naming.

You said: can you write this as a run book of how to get started with Ansible and ServiceNow. Add additional steps in if required. I'm familiar with Ansible so should be OK on the code. Go through the pre-reqs and all the steps to check connectivity and then onto raising basic changes in the system. Include if any information is needed from the ServiceNow team so I can ask for that as well.

ChatGPT said:

Got it — here’s a step-by-step run book you can follow (or hand to colleagues) to get from zero → first Change raised with Ansible into ServiceNow. I’ve structured it as a typical run book with objectives, prerequisites, steps, and checkpoints, and included what you’ll need from your ServiceNow admins.

Run Book: Getting Started with Ansible + ServiceNow (Change Automation)

  1. Objectives

Confirm network/API connectivity between your Ansible control node and the ServiceNow instance.

Prove authentication works with a safe read-only API call.

Successfully raise a basic Change Request in ServiceNow via Ansible.

Document what information/config is required from the ServiceNow team.

  2. Prerequisites

Ansible Environment

Ansible (>=2.9; ideally the latest stable release you use internally).

Python libraries required by Ansible collections (requests, PyYAML).

Internet or network access to your ServiceNow instance (usually https://<instance>.service-now.com).

ServiceNow Access

From your ServiceNow admins, request:

Integration user account:

Username + password or OAuth client credentials.

Role permissions: at minimum, read on sys_properties (for API probe) and read/write on change_request table.

Instance URL (FQDN of your tenant).

Mandatory field list:

Which fields are required to create a Normal Change (e.g., category, assignment_group, CI, planned start/end).

Whether conflict detection plugin is enabled (affects required CI/dates).

If Standard Changes are enforced: template names available.

Assignment groups or teams to which automated Changes should be routed.

(Optional) Approved Change model/type (Normal vs Standard) for automation.

Ansible Collection

Install the certified ServiceNow collection:

ansible-galaxy collection install servicenow.itsm

  3. Authentication Setup

Choose one of:

Basic auth (username + password).

OAuth (preferred in enterprise setups).

Store creds securely:

Use Ansible Vault for playbooks, or

Environment variables (e.g., SN_HOST, SN_USERNAME, SN_PASSWORD).
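
A minimal sketch of the Vault approach, assuming a secrets.yml encrypted with ansible-vault; the variable names mirror the playbooks later in this gist:

# secrets.yml  (encrypt with: ansible-vault encrypt secrets.yml)
sn_host: "yourinstance.service-now.com"
sn_username: "ansible_bot"
sn_password: "change-me"
sn_verify_ssl: true

Playbooks then load it via vars_files: [secrets.yml], and you supply the vault password at run time (for example with --ask-vault-pass or a vault password file).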

  4. Step 1 – Connectivity Check

4.1 Ping the API (read-only)

Use the api_info module to hit a neutral endpoint:

- name: Verify ServiceNow connectivity
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  tasks:
    - name: Probe sys_properties table
      api_info:
        resource: /api/now/table/sys_properties
        params:
          sysparm_limit: 1
      register: probe

    - assert:
        that:
          - probe.status == 200

✅ Checkpoint: Run returns with status: 200 and one record. You’ve proven:

DNS resolution

TLS connectivity

Creds are valid

Role allows at least read

  5. Step 2 – List Existing Change Requests

Good sanity check that you can read the change_request table:

- name: Fetch 3 most recent CRs
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  tasks:
    - name: Query for recent CRs
      change_request_info:
        limit: 3
      register: cr_list

    - debug: var=cr_list.records

✅ Checkpoint: Output includes CR numbers (CHG00xxxxx) with sys_ids and short_descriptions.

  6. Step 3 – Create a Basic Change (Normal)

Ask your SN team first which fields are mandatory. In many orgs, short_description + type are enough; in others, assignment_group, CI, or planned start/end are required.

- name: Create a Normal Change
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  tasks:
    - name: Raise CR
      change_request:
        state: new
        type: normal
        short_description: "Patch deployment via Ansible"
        description: "Automated patch rollout on web tier servers."
        category: "Software"
        impact: "2"
        urgency: "3"
        assignment_group: "Platform Ops"
        planned_start_date: "2025-09-11 21:00:00"
        planned_end_date: "2025-09-11 22:00:00"
      register: new_cr

    - debug:
        msg: "Created Change {{ new_cr.record.number }}"

✅ Checkpoint: Playbook completes successfully and prints a valid CHG number.

  7. Step 4 – (Optional) Create a Standard Change

If your org enforces Standard Changes via templates:

- name: Create from StdChg Template
  hosts: localhost
  gather_facts: false
  collections: [servicenow.itsm]
  tasks:
    - name: Raise StdChg
      change_request:
        state: new
        type: standard
        template: "Linux Web Patch - No Downtime"
        short_description: "StdChg via Ansible"

✅ Checkpoint: New Change created using the approved template.

  8. Step 5 – End-to-End Validation

Retrieve the created CR using change_request_info with the returned number.

Confirm in ServiceNow UI that the record exists, with correct fields populated.

Verify workflow state is as expected (e.g., new or scheduled).

  9. Troubleshooting Quick Reference

401/403 errors: check creds/roles.

Field validation errors: ask SN team which fields are mandatory or whether conflict detection is on.

Assignment issues: ensure the assignment_group exists in SN and the integration user can set it.

State transitions blocked: some orgs lock state changes via business rules—coordinate with SN team.

  10. Deliverables from ServiceNow Team

When you engage your SN team, request:

✅ Integration user credentials with proper roles

✅ Instance URL

✅ Mandatory field list for change_request table

✅ Whether conflict detection is enabled

✅ Approved Change type/model (Normal vs Standard)

✅ Template names for Standard Changes

✅ Assignment group(s) to use

Final Notes

Start with read-only probes before writing Changes.

Use templates if your org already has Standard Changes defined.

Work with your SN team to align on governance: approvals, required data, and assignment policies.

Once you have one Change working end-to-end, you can expand into roles, playbook reuse, and CI/CD pipelines.

sn_connectivity_check.yml

---
- name: ServiceNow connectivity check
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    # toggle which auth path to use
    sn_use_oauth: true
    # API path to hit for a simple GET
    sn_test_path: "/api/now/table/sys_user?sysparm_limit=1"
    # SSL verify toggle
    sn_verify_ssl: true

  # If you need to go through a proxy, uncomment environment below and set values
  # environment:
  #   http_proxy: "{{ lookup('env', 'http_proxy') | default(omit) }}"
  #   https_proxy: "{{ lookup('env', 'https_proxy') | default(omit) }}"
  #   no_proxy: "{{ lookup('env', 'no_proxy') | default(omit) }}"

  tasks:
    - name: Check DNS resolution for ServiceNow host
      ansible.builtin.getent:
        database: hosts
        key: "{{ sn_host }}"
      register: sn_dns

    - name: Show resolved addresses
      ansible.builtin.debug:
        msg: "Resolved {{ sn_host }} to {{ sn_dns.ansible_facts.getent_hosts[sn_host] | default([]) }}"

    - name: OAuth, get token
      when: sn_use_oauth
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/oauth_token.do"
        method: POST
        body_format: form-urlencoded
        body:
          grant_type: password
          username: "{{ sn_username }}"
          password: "{{ sn_password }}"
          client_id: "{{ sn_client_id }}"
          client_secret: "{{ sn_client_secret }}"
        return_content: true
        status_code: 200
        validate_certs: "{{ sn_verify_ssl }}"
      register: sn_oauth

    - name: Parse access token
      when: sn_use_oauth
      ansible.builtin.set_fact:
        sn_access_token: "{{ (sn_oauth.json.access_token | default('')) }}"

    - name: Fail if no access token returned
      when: sn_use_oauth and (sn_access_token | length == 0)
      ansible.builtin.fail:
        msg: "OAuth did not return an access_token"

    - name: Test API with OAuth
      when: sn_use_oauth
      ansible.builtin.uri:
        url: "https://{{ sn_host }}{{ sn_test_path }}"
        method: GET
        headers:
          Authorization: "Bearer {{ sn_access_token }}"
          Accept: "application/json"
        return_content: true
        status_code: 200
        validate_certs: "{{ sn_verify_ssl }}"
      register: sn_test_oauth

    - name: Test API with basic auth
      when: not sn_use_oauth
      ansible.builtin.uri:
        url: "https://{{ sn_host }}{{ sn_test_path }}"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
        return_content: true
        status_code: 200
        validate_certs: "{{ sn_verify_ssl }}"
      register: sn_test_basic

    - name: Show API response summary
      ansible.builtin.debug:
        msg:
          method: "{{ 'oauth' if sn_use_oauth else 'basic' }}"
          http_status: >-
            {{ (sn_test_oauth.status | default(sn_test_basic.status | default(''))) }}
          body_preview: >-
            {{ (sn_test_oauth.json | default(sn_test_basic.json | default({}))) | to_nice_json | regex_replace('\n',' ') | regex_replace('\s+',' ') | truncate(300, end='...') }}

    - name: Final assert, require HTTP 200
      ansible.builtin.assert:
        that:
          - "(sn_test_oauth.status | default(sn_test_basic.status)) == 200"
        fail_msg: "ServiceNow connectivity check failed"
        success_msg: "ServiceNow connectivity check succeeded"

sn_get_user.yml

---
- name: Get my ServiceNow user
  hosts: localhost
  gather_facts: false
  connection: local

  vars_files:
    - secrets.yml  # holds sn_host, sn_username, sn_password, or oauth bits

  vars:
    sn_verify_ssl: "{{ sn_verify_ssl | default(true) }}"
    sn_user_query: "user_name={{ sn_username }}"

  tasks:
    - name: Fetch my user record
      servicenow.itsm.api_info:
        instance: "{{ sn_host }}"
        username: "{{ sn_username }}"
        password: "{{ sn_password }}"
        resource: table
        table: sys_user
        sysparm_query: "{{ sn_user_query }}"
        sysparm_limit: 1
        validate_certs: "{{ sn_verify_ssl }}"
      register: user_info

    - name: Show key fields
      debug:
        msg:
          name: "{{ user_info.json.result[0].name | default('') }}"
          user_name: "{{ user_info.json.result[0].user_name | default('') }}"
          email: "{{ user_info.json.result[0].email | default('') }}"
          sys_id: "{{ user_info.json.result[0].sys_id | default('') }}"

    - name: Assert we got a record
      assert:
        that:
          - user_info.status == 200
          - user_info.json.result | length > 0
        success_msg: "User lookup succeeded"
        fail_msg: "User lookup failed"

sn_change_smoke.yml

---
- name: Create and remove a test change, smoke test
  hosts: localhost
  gather_facts: false
  connection: local

  vars_files:
    - secrets.yml

  vars:
    sn_verify_ssl: "{{ sn_verify_ssl | default(true) }}"
    sn_change_short: "ANSIBLE SMOKE TEST, please ignore"
    sn_change_desc: "Created by Ansible to validate connectivity and permissions, will be removed in this run"
    sn_change_type: "normal"   # normal, standard, emergency

  tasks:
    - name: Create a test change
      servicenow.itsm.change_request:
        instance:
          host: "{{ sn_host }}"
          username: "{{ sn_username }}"
          password: "{{ sn_password }}"
          validate_certs: "{{ sn_verify_ssl }}"
        state: new
        short_description: "{{ sn_change_short }}"
        description: "{{ sn_change_desc }}"
        type: "{{ sn_change_type }}"
      register: chg

    - name: Show new change
      debug:
        msg:
          - "number: {{ chg.record.number }}"
          - "sys_id: {{ chg.record.sys_id }}"
          - "state: {{ chg.record.state | default('') }}"
          - "url: https://{{ sn_host }}/nav_to.do?uri=change_request.do?sys_id={{ chg.record.sys_id }}"

    - name: Remove the test change
      servicenow.itsm.change_request:
        instance:
          host: "{{ sn_host }}"
          username: "{{ sn_username }}"
          password: "{{ sn_password }}"
          validate_certs: "{{ sn_verify_ssl }}"
        state: absent
        number: "{{ chg.record.number }}"

using URI:

---
- name: Get my ServiceNow user, via uri
  hosts: localhost
  gather_facts: false
  connection: local

  vars_files:
    - secrets.yml   # sn_host, sn_username, sn_password, sn_verify_ssl

  tasks:
    - name: Fetch one sys_user by user_name
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/sys_user?sysparm_limit=1&sysparm_query=user_name={{ sn_username | urlencode }}"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: application/json
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: 200
      register: sn_user

    - name: Show key fields
      ansible.builtin.debug:
        msg:
          http_status: "{{ sn_user.status }}"
          name: "{{ sn_user.json.result[0].name | default('') }}"
          user_name: "{{ sn_user.json.result[0].user_name | default('') }}"
          email: "{{ sn_user.json.result[0].email | default('') }}"
          sys_id: "{{ sn_user.json.result[0].sys_id | default('') }}"

sn_standard_change_create.yml

---
- name: Create a Standard Change from template sys_id
  hosts: localhost
  gather_facts: false
  connection: local

  vars_files:
    - secrets.yml   # sn_host, sn_username, sn_password, sn_verify_ssl

  vars:
    # Template sys_id for the Standard Change you want to instantiate
    sc_template_sys_id: "i9743hfe84f408hf4h83fy"

    # Change data you want to submit
    requested_by: "user1"                      # often a username or sys_id, depends on your instance
    cmdb_ci: "fhdkjd7sishs9ss9s9sjksksd"       # usually a CI sys_id
    u_environment: "production"
    u_whost_role: "ds8ssksjfhfh7f6fd5d4s"
    start_date: "2025-09-12 09:56:43"          # must be exactly YYYY-MM-DD HH:MM:SS
    end_date:   "2025-09-12 12:56:43"          # 3 hours later, same exact format
    category: "Other"
    assigned_to: "user1"                       # user name or sys_id per your instance
    u_reason_for_change: "Maintenance"

    # If you need a proxy, uncomment:
    # proxy_env:
    #   https_proxy: "{{ lookup('env','https_proxy') | default(omit) }}"
    #   http_proxy:  "{{ lookup('env','http_proxy')  | default(omit) }}"
    #   no_proxy:    "{{ lookup('env','no_proxy')    | default(omit) }}"

  tasks:
    - name: Calculate dynamic start and end times
      ansible.builtin.set_fact:
        start_date: "{{ lookup('pipe', 'date -u -d \"+1 minute\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        end_date:   "{{ lookup('pipe', 'date -u -d \"+181 minutes\" +\"%Y-%m-%d %H:%M:%S\"') }}"
      # Explanation:
      # +1 minute for start_date
      # +181 minutes (1 + 180) for end_date = 3 hours after start

    - name: Validate date formats
      ansible.builtin.assert:
        that:
          - start_date is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')
          - end_date   is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')
        success_msg: "Start and end dates are correctly formatted"
        fail_msg: "Dates must be exactly YYYY-MM-DD HH:MM:SS"

    - name: Build request body
      ansible.builtin.set_fact:
        sc_payload:
          requested_by: "{{ requested_by }}"
          cmdb_ci: "{{ cmdb_ci }}"
          u_environment: "{{ u_environment }}"
          u_whost_role: "{{ u_whost_role }}"
          start_date: "{{ start_date }}"
          end_date: "{{ end_date }}"
          category: "{{ category }}"
          assigned_to: "{{ assigned_to }}"
          u_reason_for_change: "{{ u_reason_for_change }}"

    - name: Create Standard Change from template
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/sn_chg_rest/change/standard/{{ sc_template_sys_id }}"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ sc_payload }}"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code:
          - 200
          - 201
      # environment: "{{ proxy_env | default(omit) }}"
      register: sc_out

    - name: Show created change details
      ansible.builtin.debug:
        msg:
          http_status: "{{ sc_out.status }}"
          number: "{{ sc_out.json.result.number | default(sc_out.json.number | default('')) }}"
          sys_id: "{{ sc_out.json.result.sys_id | default(sc_out.json.sys_id | default('')) }}"
          state: "{{ sc_out.json.result.state | default(sc_out.json.state | default('')) }}"
          planned_start: "{{ sc_out.json.result.start_date | default(sc_out.json.start_date | default('')) }}"
          planned_end:   "{{ sc_out.json.result.end_date   | default(sc_out.json.end_date   | default('')) }}"
          url: "https://{{ sn_host }}/nav_to.do?uri=change_request.do?sys_id={{ sc_out.json.result.sys_id | default(sc_out.json.sys_id | default('')) }}"

    - name: Final assert, require a change number in response
      ansible.builtin.assert:
        that:
          - "(sc_out.json.result.number | default(sc_out.json.number | default(''))) | length > 0"
        success_msg: "Standard Change created successfully"
        fail_msg: "ServiceNow did not return a change number. Check permissions, template sys_id, and field values."

sn_standard_change_create_implementation.yml

---
- name: Create Standard Change, then set state to Implementing
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    # Secrets from Morpheus Cypher
    sn_host: "{{ lookup('cypher', 'secret=servicenow/host') }}"
    sn_username: "{{ lookup('cypher', 'secret=servicenow/username') }}"
    sn_password: "{{ lookup('cypher', 'secret=servicenow/password') }}"
    sn_verify_ssl: "{{ (lookup('cypher', 'secret=servicenow/verify_ssl') | default('true')) | bool }}"

    # Template and change fields, prefer Morpheus Option Types
    sc_api_version: "v1"
    sc_template_sys_id: "{{ morpheus['customOptions']['scTemplateSysId'] | default('i9743hfe84f408hf4h83fy') }}"

    requested_by: "{{ morpheus['customOptions']['requestedBy'] | default('user1') }}"
    assigned_to:  "{{ morpheus['customOptions']['assignedTo']  | default('user1') }}"
    cmdb_ci:      "{{ morpheus['customOptions']['cmdbCi'] | default('fhdkjd7sishs9ss9s9sjksksd') }}"
    u_environment: "{{ morpheus['customOptions']['environment'] | default('production') }}"
    u_whost_role:  "{{ morpheus['customOptions']['whostRole'] | default('ds8ssksjfhfh7f6fd5d4s') }}"
    category:   "{{ morpheus['customOptions']['category'] | default('Other') }}"
    u_reason_for_change: "{{ morpheus['customOptions']['reason'] | default('Maintenance') }}"

    # Timing
    start_offset_sec: "{{ (morpheus['customOptions']['startOffsetSec'] | default(15)) | int }}"
    duration_sec: "{{ (morpheus['customOptions']['durationSec'] | default(10800)) | int }}"
    wait_for_planned_start: "{{ (morpheus['customOptions']['waitForPlannedStart'] | default(true)) | bool }}"

  tasks:
    - name: Compute planned start and end, UTC
      ansible.builtin.set_fact:
        start_date: "{{ lookup('pipe', 'date -u -d \"+' ~ start_offset_sec ~ ' seconds\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        end_date:   "{{ lookup('pipe', 'date -u -d \"+' ~ ((start_offset_sec | int) + (duration_sec | int)) ~ ' seconds\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        start_epoch: "{{ lookup('pipe', 'date -u -d \"+' ~ start_offset_sec ~ ' seconds\" +\"%s\"') | int }}"
        now_epoch:   "{{ lookup('pipe', 'date -u +\"%s\"') | int }}"

    - name: Validate date formats
      ansible.builtin.assert:
        that:
          - start_date is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')
          - end_date   is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')

    - name: Build create URL and body
      ansible.builtin.set_fact:
        sc_url: "https://{{ sn_host }}/api/sn_chg_rest/{{ sc_api_version }}/change/standard/{{ sc_template_sys_id }}"
        sc_payload:
          requested_by: "{{ requested_by }}"
          assigned_to:  "{{ assigned_to }}"
          cmdb_ci:      "{{ cmdb_ci }}"
          u_environment: "{{ u_environment }}"
          u_whost_role:  "{{ u_whost_role }}"
          start_date: "{{ start_date }}"
          end_date:   "{{ end_date }}"
          category:   "{{ category }}"
          u_reason_for_change: "{{ u_reason_for_change }}"

    - name: Log endpoint and body
      ansible.builtin.debug:
        msg:
          url: "{{ sc_url }}"
          payload: "{{ sc_payload | to_nice_json }}"
          start_epoch: "{{ start_epoch }}"
          now_epoch: "{{ now_epoch }}"

    - name: Create Standard Change from template
      ansible.builtin.uri:
        url: "{{ sc_url }}"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ sc_payload }}"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        follow_redirects: all
        timeout: 120
        status_code: [200, 201]
      register: sc_out

    - name: Extract and sanitize change identifiers
      ansible.builtin.set_fact:
        cr_sys_id: >-
          {{ sc_out.json.result.sys_id.value
             | default(sc_out.json.result.sys_id)
             | default(sc_out.json.sys_id)
             | string | trim | regex_replace('\\s+', '') }}
        cr_number: >-
          {{ sc_out.json.result.number.value
             | default(sc_out.json.result.number)
             | default(sc_out.json.number)
             | string | trim }}

    - name: Show created change
      ansible.builtin.debug:
        msg:
          http_status: "{{ sc_out.status }}"
          sys_id: "{{ cr_sys_id }}"
          number: "{{ cr_number }}"
          planned_start: "{{ start_date }}"
          planned_end: "{{ end_date }}"
          nav_url: "https://{{ sn_host }}/nav_to.do?uri=change_request.do?sys_id={{ cr_sys_id }}"

    - name: Compute wait seconds until planned start, plus 5s buffer
      when: wait_for_planned_start | bool
      ansible.builtin.set_fact:
        wait_seconds: "{{ [ ((start_epoch | int) - (now_epoch | int) + 5), 0 ] | max }}"

    - name: Wait until planned start
      when: wait_for_planned_start | bool and (wait_seconds | int) > 0
      ansible.builtin.pause:
        seconds: "{{ wait_seconds | int }}"
        prompt: "Waiting for planned start window"

    - name: Log that no wait was needed
      when: wait_for_planned_start | bool and (wait_seconds | int) == 0
      ansible.builtin.debug:
        msg: "No wait needed, planned start already reached"

    - name: Build Table API URL for update
      ansible.builtin.set_fact:
        cr_table_url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}"

    - name: Log Table API URL
      ansible.builtin.debug:
        msg:
          update_url: "{{ cr_table_url }}"
          body_numeric_state: "{{ {'state': -2} | to_nice_json }}"
          body_text_state: "{{ {'state': 'Implementing'} | to_nice_json }}"

    - name: Move change to Implementing by numeric state, -2
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: -2
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: cr_update_numeric
      failed_when: false

    - name: Decide if numeric state worked
      ansible.builtin.set_fact:
        _numeric_ok: "{{ cr_update_numeric.status | default(0) | int == 200 and (cr_update_numeric.json.result.state | default('')) | string | lower is search('implement') }}"

    - name: Fallback, move change to Implementing by text state
      when: not _numeric_ok
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: "Implementing"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: cr_update_text

    - name: Show update result
      ansible.builtin.debug:
        msg:
          numeric_try_status: "{{ cr_update_numeric.status | default('n/a') }}"
          numeric_try_state: "{{ cr_update_numeric.json.result.state | default('n/a') }}"
          text_try_status: "{{ cr_update_text.status | default('n/a') }}"
          text_try_state: "{{ cr_update_text.json.result.state | default('n/a') }}"

    - name: Verify change is in Implementing
      vars:
        final_state: >-
          {{ (cr_update_text.json.result.state
              if (cr_update_text.json is defined)
              else cr_update_numeric.json.result.state) | default('') | string | lower }}
      ansible.builtin.assert:
        that:
          - "final_state is search('implement')"
        success_msg: "Change moved to Implementing"
        fail_msg: "Could not move change to Implementing, check ACLs or required transitions"

    - name: Persist identifiers to Morpheus attributes for post stage
      local_action:
        module: morpheus_facts
        facts:
          SN_CHANGE_NUMBER: "{{ cr_number }}"
          SN_CHANGE_SYS_ID: "{{ cr_sys_id }}"


Post task, close the change after a successful VM build:

---
- name: Close a ServiceNow change after successful VM build
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    # Secrets from Morpheus Cypher
    sn_host: "{{ lookup('cypher', 'secret=servicenow/host') }}"
    sn_username: "{{ lookup('cypher', 'secret=servicenow/username') }}"
    sn_password: "{{ lookup('cypher', 'secret=servicenow/password') }}"
    sn_verify_ssl: "{{ (lookup('cypher', 'secret=servicenow/verify_ssl') | default('true')) | bool }}"

    # Prefer identifiers written by the pre task into instance attributes, then env, then empty
    cr_sys_id: >-
      {{
        morpheus['instance']['attributes']['SN_CHANGE_SYS_ID'] | default('') |
        default(lookup('env','SN_CHANGE_SYS_ID') | default('', true), true)
      }}
    cr_number: >-
      {{
        morpheus['instance']['attributes']['SN_CHANGE_NUMBER'] | default('') |
        default(lookup('env','SN_CHANGE_NUMBER') | default('', true), true)
      }}

    # Fallback artifact location, optional
    change_ref_path: "/data/build_artifacts/change_ref.json"

    # Closure fields, can also be Option Types if needed
    close_state_numeric: "{{ (morpheus['customOptions']['closeStateNumeric'] | default(0)) | int }}"
    close_state_text: "{{ morpheus['customOptions']['closeStateText'] | default('Closed') }}"
    close_code: "{{ morpheus['customOptions']['closeCode'] | default('Successful') }}"
    close_notes: "{{ morpheus['customOptions']['closeNotes'] | default('Closed by automation after successful VM build') }}"
    set_work_end_now: "{{ (morpheus['customOptions']['setWorkEndNow'] | default(true)) | bool }}"

  tasks:
    - name: Try to load artifact if identifiers are missing
      when: (cr_sys_id | trim) == '' and (cr_number | trim) == ''
      ansible.builtin.slurp:
        src: "{{ change_ref_path }}"
      register: _ref
      failed_when: false

    - name: Set identifiers from artifact if available
      when: _ref is defined and _ref.content is defined
      ansible.builtin.set_fact:
        cr_sys_id: "{{ cr_sys_id | default('', true) | trim or (_ref.content | b64decode | from_json).sys_id | default('') }}"
        cr_number: "{{ cr_number | default('', true) | trim or (_ref.content | b64decode | from_json).number | default('') }}"

    - name: Resolve sys_id by number if still missing
      when: (cr_sys_id | trim) == '' and (cr_number | trim) != ''
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request?sysparm_limit=1&sysparm_fields=sys_id,number,state&sysparm_query=number={{ cr_number | urlencode }}"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: _lookup

    - name: Save sanitized sys_id
      when: cr_sys_id | trim == ''
      ansible.builtin.set_fact:
        cr_sys_id: "{{ _lookup.json.result[0].sys_id | string | trim | regex_replace('\\s+', '') }}"

    - name: Build Table API URL
      ansible.builtin.set_fact:
        cr_table_url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}"

    - name: Compute timestamps
      ansible.builtin.set_fact:
        _now_display: "{{ lookup('pipe', 'date -u +\"%Y-%m-%d %H:%M:%S\"') }}"

    - name: Build closure body
      ansible.builtin.set_fact:
        _base_close_fields: >-
          {{
            {
              'close_code': close_code,
              'close_notes': close_notes
            }
            | combine( {'work_end': _now_display} if set_work_end_now | bool else {} )
          }}

    - name: First try, close using numeric state
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ _base_close_fields | combine({'state': close_state_numeric}) }}"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: _close_numeric
      failed_when: false

    - name: Decide if numeric close worked
      ansible.builtin.set_fact:
        _numeric_ok: "{{ _close_numeric.status | default(0) | int == 200 }}"

    - name: Fallback, close using textual state
      when: not _numeric_ok
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ _base_close_fields | combine({'state': close_state_text}) }}"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: _close_text

    - name: Pause briefly to let workflow update the record
      ansible.builtin.pause:
        seconds: 10

    - name: Read final record with display values
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,close_code,close_notes,work_end"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: _final

    - name: Normalize state
      ansible.builtin.set_fact:
        _state_obj: >-
          {{ _final.json.result.state
             if (_final.json.result.state is defined and _final.json.result.state is mapping)
             else {'value': _final.json.result.state | default(''),
                   'display_value': _final.json.result.state | default('')} }}

    - name: Show closure outcome
      ansible.builtin.debug:
        msg:
          number: "{{ _final.json.result.number | default(cr_number) }}"
          state_display: "{{ _state_obj.display_value | default('') }}"
          state_value: "{{ _state_obj.value | default('') }}"
          close_code: "{{ _final.json.result.close_code | default('') }}"
          close_notes: "{{ _final.json.result.close_notes | default('') }}"
          work_end: "{{ _final.json.result.work_end | default('') }}"

    # Facts set in a set_fact task are not visible to other keys in the same task,
    # so normalize the state first, then derive the flags from it.
    - name: Normalize the display state
      ansible.builtin.set_fact:
        _state_disp_lower: "{{ (_state_obj.display_value | default('') | string | lower) }}"

    - name: Decide if change is already closed
      ansible.builtin.set_fact:
        _already_closed: "{{ 'closed' in _state_disp_lower }}"
        _in_review: "{{ 'review' in _state_disp_lower }}"

    - name: Close via Change REST close action if not already closed
      when: not _already_closed
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/sn_chg_rest/v1/change/{{ cr_sys_id }}/close"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          close_code: "{{ close_code }}"
          close_notes: "{{ close_notes }}"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
        timeout: 120
      register: cr_close_action
      failed_when: false

    - name: Fallback, set state to Closed via Table API if action failed
      when: not _already_closed and (cr_close_action.status | default(0) | int != 200)
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: >-
          {{
            {
              'state': 'Closed',
              'close_code': close_code,
              'close_notes': close_notes
            }
            | combine( {'work_end': _now_display} if set_work_end_now | bool else {} )
          }}
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
        timeout: 120
      register: cr_close_patch

    - name: Pause briefly after close operation
      when: not _already_closed
      ansible.builtin.pause:
        seconds: 10

    - name: Read final record again with display values
      when: not _already_closed
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,close_code,close_notes,work_end"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: _final_close

    - name: Normalize final state
      when: not _already_closed
      ansible.builtin.set_fact:
        _final_state_disp: "{{ (_final_close.json.result.state.display_value | default(_final_close.json.result.state | default(''))) | string | lower }}"
        _final_close_code: "{{ (_final_close.json.result.close_code.display_value | default(_final_close.json.result.close_code | default(''))) | string }}"
        _final_close_notes: "{{ _final_close.json.result.close_notes | default('') }}"
        _final_work_end: "{{ _final_close.json.result.work_end | default('') }}"

    - name: Show final closure outcome
      when: not _already_closed
      ansible.builtin.debug:
        msg:
          number: "{{ _final_close.json.result.number | default(cr_number | default('')) }}"
          state: "{{ _final_state_disp }}"
          close_code: "{{ _final_close_code }}"
          close_notes: "{{ _final_close_notes }}"
          work_end: "{{ _final_work_end }}"

    - name: Assert final state is Closed
      when: not _already_closed
      ansible.builtin.assert:
        that:
          - "'closed' in _final_state_disp"
        success_msg: "Change is Closed"
        fail_msg: "Change still not Closed, check whether the close action is enabled or required fields"

What you will build

An Ansible integration and two Ansible tasks in Morpheus, a pre task that opens the Standard Change and moves it to Implementing, and a post task that closes it.

Secrets in Morpheus Cypher instead of secrets.yml.

A safe handoff of SN_CHANGE_NUMBER and SN_CHANGE_SYS_ID from pre to post.

Step 1, put ServiceNow secrets into Morpheus Cypher

Create these secrets, names are up to you, these are easy to read ones.

secret/servicenow/host, your ServiceNow hostname only, no scheme, example, company.service-now.com

secret/servicenow/username

secret/servicenow/password

secret/servicenow/verify_ssl, optional, true or false

In Ansible inside Morpheus you can pull them with the built in Cypher lookup, example:

Replace your vars_files entry for secrets.yml with the equivalent Cypher lookups:

vars:
  sn_host: "{{ lookup('cypher', 'secret=servicenow/host') }}"
  sn_username: "{{ lookup('cypher', 'secret=servicenow/username') }}"
  sn_password: "{{ lookup('cypher', 'secret=servicenow/password') }}"
  sn_verify_ssl: "{{ (lookup('cypher', 'secret=servicenow/verify_ssl') | default('true')) | bool }}"

Docs: the Morpheus Ansible integration guide and the Cypher documentation cover the vars map and Cypher lookups.

Step 2, decide where non secret variables live

A clean split works well.

Template and change fields that vary per environment or per catalog item, put them in Morpheus Option Types so the requestor, or the catalog item, supplies them, then read them in Ansible as {{ morpheus['customOptions']['fieldName'] }}. Examples, requested_by, assigned_to, u_environment, u_whost_role, category, reason. The Ansible guide shows using the morpheus map in Jinja.

Mostly static settings, keep in the playbook vars, example, sc_api_version, start_offset_sec, duration_sec.

Values computed at runtime, keep them in Ansible set_fact, as you already do.

Tip, the Variables page shows the general map structure, including how the morpheus map is injected for automation. The Python snippet there parallels the Ansible map structure, example access patterns.

Step 3, import your playbooks as Morpheus tasks

Integration, Library, Automation, Integrations, add Ansible Git or Ansible Controller if you have Tower, then:

Library, Automation, Tasks, Add, Task Type, Ansible. Pick your repo and playbook path. Execute Target, Local, your playbooks already say hosts localhost and connection local, so running them on the appliance is the simplest and avoids agent or SSH targeting. This is a supported pattern.

Create two tasks:

Task, pre, code, open_snow_std_change Ansible playbook, your first playbook, with secrets.yml removed and the Cypher lookups added. Execute Target, Local.

Task, post, code, close_snow_change Ansible playbook, your second playbook, with its identifier logic adjusted as described below. Execute Target, Local.

Step 4, pass SN_CHANGE_NUMBER and SN_CHANGE_SYS_ID from pre to post

You have two good choices. Pick one and stick with it.

Option A, recommended, Write Attributes, official and visible in the UI

Store the values from the pre task into the instance attributes map, then read them in the post task.

End of your pre playbook, do not change the Ansible, keep your current logic, but you need the values available to Morpheus. Since Ansible tasks do not expose a result payload to Morpheus results parsing, use a separate Write Attributes task right after your pre Ansible task.

Add Library, Automation, Tasks, Add, Task Type, Write Attributes, code, write_snow_attrs. In the Attributes JSON, reference variables that your pre task wrote somewhere Morpheus can read. Easiest path is to have the pre playbook write the values into the instance attributes itself, or, if you prefer a no code change approach, write a tiny library script that echoes a JSON blob and set the script Result Type to JSON, then feed it to Write Attributes. Most teams simply make the pre Ansible playbook call the Morpheus API to set attributes, but there is an even easier way, see Option B.

In general, Write Attributes persists values across workflow phases, and Morpheus calls out this exact use case: store values from one phase for access in another phase. You can reference them later as <%= instance.attributes.SN_CHANGE_NUMBER %> and <%= instance.attributes.SN_CHANGE_SYS_ID %>. Docs and tip are here. Morpheus Data Documentation

In your post playbook, prefer the instance attribute values, keep your file fallback as last resort:

vars:
  cr_sys_id: >-
    {{ (morpheus['instance']['attributes']['SN_CHANGE_SYS_ID'] | default(''))
       | default(lookup('env','SN_CHANGE_SYS_ID') | default('', true), true) }}
  cr_number: >-
    {{ (morpheus['instance']['attributes']['SN_CHANGE_NUMBER'] | default(''))
       | default(lookup('env','SN_CHANGE_NUMBER') | default('', true), true) }}

That makes the post task self contained, it will read the attributes written during pre.

Notes on results versus attributes, and why Write Attributes: Task Results are designed for chaining output to later tasks within the same workflow phase, while Write Attributes is the documented pattern to persist values across phases and onto the instance. Morpheus Data Documentation +1

Option B, keep your existing morpheus_facts line

You already have:

- name: Save change number to Morpheus
  local_action:
    module: morpheus_facts
    facts:
      SN_CHANGE_NUMBER: "{{ sc_out.json.result.number | default(sc_out.json.number) }}"
      SN_CHANGE_SYS_ID: "{{ sc_out.json.result.sys_id | default(sc_out.json.sys_id) }}"

Inside Morpheus, that module writes instance level facts, which you can read later as instance attributes, the same access pattern as Option A, <%= instance.attributes.SN_CHANGE_NUMBER %>. If you stay with this, you do not need an extra Write Attributes task. If you want a fully no-magic approach that is visible in the UI, Option A is still preferred because you can inspect attributes directly on the instance record without digging through play logs.

Step 5, wire the workflow

Library, Automation, Workflows, Add, Provisioning Workflow.

Pre Provision, add Task, open_snow_std_change, then if you use Option A add Task, write_snow_attrs.

Provision, your VM build.

Post Provision, add Task, close_snow_change.

Workflows and their phases are documented here, plus the Target and Task Type matrix for Ansible is on the Tasks page. Morpheus Data Documentation +1

Step 6, small edits to your playbooks for Morpheus

Changes to the pre playbook:

Replace secrets.yml include with Cypher lookups, shown above.

Keep hosts, localhost, connection, local, executes fine as a Local task.

Optional, make requested_by and assigned_to come from Morpheus inputs, example:

vars:
  requested_by: "{{ morpheus['customOptions']['requestedBy'] | default('user1') }}"
  assigned_to: "{{ morpheus['customOptions']['assignedTo'] | default('user1') }}"
  u_environment: "{{ morpheus['customOptions']['environment'] | default('production') }}"

Ansible inside Morpheus can read these as shown in the Ansible integration guide. Morpheus Data Documentation

Changes to the post playbook:

Prepend the vars stanza from Option A above so it prefers instance attributes.

Leave your artifact slurp fallback in place, it is a nice safety net.

Step 7, test it once outside a real build

Create a small Operational Workflow with only the pre Ansible task, plus Write Attributes if you chose Option A, run it against a test instance, then verify in the Instance, Attributes section that SN_CHANGE_NUMBER and SN_CHANGE_SYS_ID are present.

Run the post Ansible task alone against the same instance, confirm it reads those values and closes the same change.

Where to keep each variable, quick map

Secrets, use Cypher, sn_host, sn_username, sn_password, sn_verify_ssl. Morpheus Data Documentation

Per environment knobs, use Option Types and read via morpheus.customOptions, requested_by, assigned_to, u_environment, u_whost_role, category, u_reason_for_change. Morpheus Data Documentation

Runtime values that must cross phases, use instance attributes, SN_CHANGE_NUMBER, SN_CHANGE_SYS_ID, written by Write Attributes or morpheus_facts, then read via morpheus['instance']['attributes'][...]. Morpheus Data Documentation

A couple of operational tips

Your pre task waits until the planned start, which holds the workflow open, so make sure your Ansible task timeouts and any corporate reverse proxy timeouts allow your intended wait window. You can retry a task or cancel from the workflow view if needed. Morpheus Data Documentation +1

For Ansible tasks that run Local, keep an eye on the Requests log in Morpheus for ServiceNow calls and on the Task History for stdout parsing.

V4

---
- name: Create Standard Change, then set state to Implementing
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    # ServiceNow connection, adjust as you prefer
    sn_host: "{{ lookup('cypher', 'secret=servicenow/host') | default('company.service-now.com') }}"
    sn_username: "{{ lookup('cypher', 'secret=servicenow/username') | default('svc_ansible') }}"
    sn_password: "{{ lookup('cypher', 'secret=servicenow/password') | default('SuperSecret123!') }}"
    sn_verify_ssl: "{{ (lookup('cypher', 'secret=servicenow/verify_ssl') | default('true')) | bool }}"

    # Template and change fields, prefer Morpheus Option Types when available
    sc_api_version: "v1"
    sc_template_sys_id: "{{ morpheus['customOptions']['scTemplateSysId'] | default('i9743hfe84f408hf4h83fy') }}"

    requested_by: "{{ morpheus['customOptions']['requestedBy'] | default('user1') }}"
    assigned_to:  "{{ morpheus['customOptions']['assignedTo']  | default('user1') }}"
    cmdb_ci:      "{{ morpheus['customOptions']['cmdbCi'] | default('fhdkjd7sishs9ss9s9sjksksd') }}"
    u_environment: "{{ morpheus['customOptions']['environment'] | default('production') }}"
    u_whost_role:  "{{ morpheus['customOptions']['whostRole'] | default('ds8ssksjfhfh7f6fd5d4s') }}"
    category:   "{{ morpheus['customOptions']['category'] | default('Other') }}"
    u_reason_for_change: "{{ morpheus['customOptions']['reason'] | default('Maintenance') }}"

    # Timing
    start_offset_sec: "{{ (morpheus['customOptions']['startOffsetSec'] | default(15)) | int }}"
    duration_sec: "{{ (morpheus['customOptions']['durationSec'] | default(10800)) | int }}"
    wait_for_planned_start: "{{ (morpheus['customOptions']['waitForPlannedStart'] | default(true)) | bool }}"

  tasks:
    - name: Compute planned start and end, UTC
      ansible.builtin.set_fact:
        start_date: "{{ lookup('pipe', 'date -u -d \"+' ~ start_offset_sec ~ ' seconds\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        end_date:   "{{ lookup('pipe', 'date -u -d \"+' ~ ((start_offset_sec | int) + (duration_sec | int)) ~ ' seconds\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        start_epoch: "{{ lookup('pipe', 'date -u -d \"+' ~ start_offset_sec ~ ' seconds\" +\"%s\"') | int }}"
        now_epoch:   "{{ lookup('pipe', 'date -u +\"%s\"') | int }}"

    - name: Validate date formats
      ansible.builtin.assert:
        that:
          - start_date is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')
          - end_date   is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')

    - name: Build create URL and body
      ansible.builtin.set_fact:
        sc_url: "https://{{ sn_host }}/api/sn_chg_rest/{{ sc_api_version }}/change/standard/{{ sc_template_sys_id }}"
        sc_payload:
          requested_by: "{{ requested_by }}"
          assigned_to:  "{{ assigned_to }}"
          cmdb_ci:      "{{ cmdb_ci }}"
          u_environment: "{{ u_environment }}"
          u_whost_role:  "{{ u_whost_role }}"
          start_date: "{{ start_date }}"
          end_date:   "{{ end_date }}"
          category:   "{{ category }}"
          u_reason_for_change: "{{ u_reason_for_change }}"

    - name: Log endpoint and body
      ansible.builtin.debug:
        msg:
          url: "{{ sc_url }}"
          payload: "{{ sc_payload | to_nice_json }}"
          start_epoch: "{{ start_epoch }}"
          now_epoch: "{{ now_epoch }}"

    - name: Create Standard Change from template
      ansible.builtin.uri:
        url: "{{ sc_url }}"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ sc_payload }}"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        follow_redirects: all
        timeout: 120
        status_code: [200, 201]
      register: sc_out

    - name: Extract and sanitize change identifiers
      ansible.builtin.set_fact:
        cr_sys_id: >-
          {{ sc_out.json.result.sys_id.value
             | default(sc_out.json.result.sys_id)
             | default(sc_out.json.sys_id)
             | string | trim | regex_replace('\\s+', '') }}
        cr_number: >-
          {{ sc_out.json.result.number.value
             | default(sc_out.json.result.number)
             | default(sc_out.json.number)
             | string | trim }}

    - name: Show created change
      ansible.builtin.debug:
        msg:
          http_status: "{{ sc_out.status }}"
          sys_id: "{{ cr_sys_id }}"
          number: "{{ cr_number }}"
          planned_start: "{{ start_date }}"
          planned_end: "{{ end_date }}"
          nav_url: "https://{{ sn_host }}/nav_to.do?uri=change_request.do?sys_id={{ cr_sys_id }}"

    - name: Compute wait seconds until planned start, plus 5s buffer
      when: wait_for_planned_start | bool
      ansible.builtin.set_fact:
        wait_seconds: "{{ [ ((start_epoch | int) - (now_epoch | int) + 5), 0 ] | max }}"

    - name: Wait until planned start
      when: wait_for_planned_start | bool and (wait_seconds | int) > 0
      ansible.builtin.pause:
        seconds: "{{ wait_seconds | int }}"
        prompt: "Waiting for planned start window"

    - name: Log that no wait was needed
      when: wait_for_planned_start | bool and (wait_seconds | int) == 0
      ansible.builtin.debug:
        msg: "No wait needed, planned start already reached"

    - name: Build Table API URL for update
      ansible.builtin.set_fact:
        cr_table_url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}"

    - name: Log Table API URL
      ansible.builtin.debug:
        msg:
          update_url: "{{ cr_table_url }}"
          body_numeric_state: "{{ {'state': -2} | to_nice_json }}"
          body_text_state: "{{ {'state': 'Implementing'} | to_nice_json }}"

    - name: Move change to Implementing by numeric state, -2
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: -2
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: cr_update_numeric
      failed_when: false

    - name: Decide if numeric state worked
      ansible.builtin.set_fact:
        _numeric_ok: "{{ cr_update_numeric.status | default(0) | int == 200 and (cr_update_numeric.json.result.state | default('')) | string | lower is search('implement') }}"

    - name: Fallback, move change to Implementing by text state
      when: not _numeric_ok
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: "Implementing"
        validate_certs: "{{ sn_verify_ssl }}"
        return_content: true
        status_code: [200]
      register: cr_update_text

    - name: Show update result
      ansible.builtin.debug:
        msg:
          numeric_try_status: "{{ cr_update_numeric.status | default('n/a') }}"
          numeric_try_state: "{{ cr_update_numeric.json.result.state | default('n/a') }}"
          text_try_status: "{{ cr_update_text.status | default('n/a') }}"
          text_try_state: "{{ cr_update_text.json.result.state | default('n/a') }}"

    # your previously working verification block, unchanged
    - name: Read change once for verification
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,stage"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: cr_verify

    - name: Normalize state and stage
      ansible.builtin.set_fact:
        _state_obj: >-
          {{ cr_verify.json.result.state
             if (cr_verify.json.result.state is defined and cr_verify.json.result.state is mapping)
             else {'value': cr_verify.json.result.state | default(''),
                   'display_value': cr_verify.json.result.state | default('')} }}
        _stage_obj: >-
          {{ cr_verify.json.result.stage
             if (cr_verify.json.result.stage is defined and cr_verify.json.result.stage is mapping)
             else {'value': cr_verify.json.result.stage | default(''),
                   'display_value': cr_verify.json.result.stage | default('')} }}

    - name: Show state and stage
      ansible.builtin.debug:
        msg:
          number: "{{ cr_number }}"
          state_display: "{{ _state_obj.display_value | default('') }}"
          state_value: "{{ _state_obj.value | default('') }}"
          stage_display: "{{ _stage_obj.display_value | default('') }}"
          stage_value: "{{ _stage_obj.value | default('') }}"

    - name: Verify Implementing
      vars:
        state_disp: "{{ (_state_obj.display_value | default('')) | string | lower }}"
        state_val:  "{{ (_state_obj.value | default('')) | string }}"
        stage_disp: "{{ (_stage_obj.display_value | default('')) | string | lower }}"
      ansible.builtin.assert:
        that:
          - "'implement' in state_disp or 'implement' in stage_disp or state_val == '-2'"
        success_msg: "Change is Implementing"
        fail_msg: "Not Implementing yet, increase pause or check workflow"

    - name: Persist identifiers to Morpheus attributes for post stage
      local_action:
        module: morpheus_facts
        facts:
          SN_CHANGE_NUMBER: "{{ cr_number }}"
          SN_CHANGE_SYS_ID: "{{ cr_sys_id }}"

PRE:

---
- name: Pre, emit key=value for Morpheus results
  hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    - name: Export identifiers
      ansible.builtin.debug:
        msg: |
          SN_CHANGE_NUMBER=CHG123456
          SN_CHANGE_SYS_ID=abcdef123456
      changed_when: false


POST:

---
- name: Next task in same phase, read env
  hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    - name: Show env seen by this task
      ansible.builtin.debug:
        msg:
          TEST_FACT: "{{ lookup('env','TEST_FACT') | default('not set', true) }}"
          OTHER_FACT: "{{ lookup('env','OTHER_FACT') | default('not set', true) }}"

Two example Attributes JSON bodies (for a Write Attributes task) showing the two ways to reference the earlier task's output, via the immediately preceding task's result, or via a named task result, here a task with code kvPre:

{
  "SN_CHANGE_NUMBER": "<%= previousTask.result.SN_CHANGE_NUMBER %>",
  "SN_CHANGE_SYS_ID": "<%= previousTask.result.SN_CHANGE_SYS_ID %>"
}

{
  "SN_CHANGE_NUMBER": "<%= results.kvPre.SN_CHANGE_NUMBER %>",
  "SN_CHANGE_SYS_ID": "<%= results.kvPre.SN_CHANGE_SYS_ID %>"
}

mkdir -p /tmp/certs
awk 'BEGIN {c=0;} /BEGIN CERTIFICATE/ {c++} {print > "/tmp/certs/cert." c ".pem"}' \
    /etc/pki/tls/certs/ca-bundle.crt

for cert in /tmp/certs/cert.*.pem; do
    enddate=$(openssl x509 -enddate -noout -in "$cert" | cut -d= -f2)
    if ! openssl x509 -checkend 0 -noout -in "$cert" >/dev/null; then
        echo "Expired: $cert ($enddate)"
    fi
done
  
# Success ⇒ your trusted roots are OK. Failure ⇒ trust is broken/stale.
curl -fsS --cacert /etc/pki/tls/certs/ca-bundle.crt https://rubygems.org/ > /dev/null \
  && echo "TRUST OK (curl)" \
  || echo "TRUST BROKEN (curl) — likely expired/missing CA"

gem env
grep -H . /etc/gemrc ~/.gemrc 2>/dev/null || true
gem sources -l
env | egrep -i 'http_proxy|https_proxy|no_proxy|RUBYGEMS_HOST|GEM_HOST_API_KEY|BUNDLE_'
bundle config get mirror.https://rubygems.org 2>/dev/null

# Metadata / index
curl -I https://api.rubygems.org/specs.4.8.gz
curl -I 'https://rubygems.org/api/v1/dependencies?gems=rake'

# Gem payload (pick a known-good version)
curl -I https://rubygems.org/downloads/rake-13.2.1.gem

# Use the same proxy curl uses (if any)
gem install rake --clear-sources --source https://rubygems.org \
  --http-proxy "${https_proxy:-${HTTPS_PROXY:-}}" -V

# Or bypass proxy completely
gem install rake --clear-sources --source https://rubygems.org \
  --no-http-proxy -V

####
Your proxy being “HTTP only” can absolutely be the culprit
If your environment variables point RubyGems at an HTTP forward proxy, RubyGems (Net::HTTP) will send HTTPS requests through that proxy, typically via an HTTP CONNECT tunnel, so TLS still runs to the origin unless the proxy intercepts it. Some proxies/WAFs treat the following differently than they treat your plain curl to the homepage or to a single download URL:

Specific endpoints (e.g., api.rubygems.org/specs.4.8.gz, /api/v1/dependencies, quick/Marshal.4.8/...)

HTTP methods (HEAD, Range)

User-Agents (RubyGems/Net::HTTP)

That mismatch often yields 403 in RubyGems while curl to the homepage or a direct /downloads/*.gem returns 200.

Quick, targeted checks
Run these GET and HEAD probes (they mirror what RubyGems uses):

# Indexes
curl -I https://api.rubygems.org/specs.4.8.gz
curl -sD - 'https://rubygems.org/api/v1/dependencies?gems=rake' | head

# Gem download (you already saw 200 here)
curl -I https://rubygems.org/downloads/rake-13.2.1.gem

# Legacy Marshal endpoint some older clients still hit:
curl -I https://rubygems.org/quick/Marshal.4.8/rake-13.2.1.gemspec.rz


If any of those return 403, your proxy/WAF needs an allowlist tweak for those paths/hosts or for Range requests.

See whether gem is actually using the proxy

env | egrep -i 'http_proxy|https_proxy|no_proxy'
grep -H . /etc/gemrc ~/.gemrc 2>/dev/null || true


If http_proxy/https_proxy are set, RubyGems will use them.

A proxy that only supports HTTP can still relay HTTPS, but may enforce rules that hit RubyGems endpoints harder than your curl tests.
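
To see the difference yourself, force curl through the same proxy value RubyGems would pick up and compare with a direct request; a quick sketch:

# Same index request, sent via the proxy RubyGems would use
curl -I --proxy "${https_proxy:-$HTTPS_PROXY}" https://api.rubygems.org/specs.4.8.gz

# Same request without any proxy, if direct egress is allowed
curl -I --noproxy '*' https://api.rubygems.org/specs.4.8.gz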

Workarounds / fixes you can try immediately

Bypass the proxy just for RubyGems (if your network allows direct egress):

gem install rake --clear-sources --source https://rubygems.org --no-http-proxy -V
# or temporarily:
NO_PROXY='rubygems.org,api.rubygems.org' no_proxy='rubygems.org,api.rubygems.org' \
  gem install rake --clear-sources --source https://rubygems.org -V


Force RubyGems to use the same proxy settings as curl (if curl succeeds to the same endpoints):

gem install rake --clear-sources --source https://rubygems.org \
  --http-proxy "${https_proxy:-${HTTPS_PROXY:-${http_proxy:-${HTTP_PROXY:-}}}}" -V


Update RubyGems (older clients hit legacy paths that are more likely to be blocked):

gem --version
sudo gem update --system


Ask the network team to allowlist (and allow redirects + HTTP Range) for:

rubygems.org, api.rubygems.org

rubygems.global.ssl.fastly.net (Fastly CDN)

*.s3.amazonaws.com (gem files)

If you want to be extra sure which host gem reaches
Even if the client won’t print URLs, you can watch the SNI/DNS with a quick capture:

sudo tcpdump -n -i any 'tcp port 443 and (host rubygems.org or host api.rubygems.org)'


If you see it connecting somewhere else (mirror/proxy host), that explains the 403.

gem sources -l shows the logical gem repository origins you’ve configured (e.g. https://rubygems.org/). It does not list every backing host or API endpoint the client talks to behind the scenes.
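
For reference, its output is just the configured source list, typically something like:

$ gem sources -l
*** CURRENT SOURCES ***

https://rubygems.org/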

When your source is https://rubygems.org/, the RubyGems client will internally fetch metadata from service endpoints like:

https://api.rubygems.org/… (indexes, dependencies, compact index)

https://rubygems.org/downloads/... (the actual .gem payloads)

(via CDN) rubygems.global.ssl.fastly.net and sometimes S3 for storage

Those are implementation details of the rubygems.org source, so they don’t appear in gem sources -l. Think of gem sources -l as showing the front door, while the client is free to use back rooms (API/CDN hosts) to fulfill requests.

How to see what the client is really hitting

If your RubyGems version doesn’t print URLs with -V, you can still observe the hosts:

Temporarily bypass/force proxy to test: NO_PROXY='rubygems.org,api.rubygems.org' gem install rake --clear-sources --source https://rubygems.org -V

Watch connections: sudo tcpdump -n -i any 'tcp port 443 and (host rubygems.org or host api.rubygems.org)'

What to allowlist (even though they’re not in gem sources -l)

rubygems.org

api.rubygems.org

rubygems.global.ssl.fastly.net

*.s3.amazonaws.com

…and allow GET + HEAD, redirects, and Range requests.

So: it doesn’t show up in the sources list because api.rubygems.org isn’t a configured source—it’s a service endpoint used by the rubygems.org source.

PRE:

---
- name: Pre, write dummy identifiers to Morpheus Cypher using bearer token from Cypher
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    # dummy data to prove the flow
    cr_number: "CHG123456"
    cr_sys_id: "abcdef123456"

    # fixed appliance URL, replace with your real base URL
    _appliance_url: "https://morpheus.mycompany.com"

    # where your bearer token lives in Morpheus Cypher, adjust to your path
    _api_token_secret_path: "secret/vault/token"

  tasks:
    - name: Resolve requested instance name from morpheus
      ansible.builtin.set_fact:
        _req_name: >-
          {{
            (morpheus['instance']['name'] | default(''))
            or (morpheus['instanceName'] | default(''))
            or (morpheus['instance']['hostname'] | default(''))
          }}

    - name: Build Cypher key for this request
      ansible.builtin.set_fact:
        _cypher_key: "servicenow/changerefs/{{ _req_name | regex_replace('[^A-Za-z0-9_.-]', '_') }}"

    # pull the hostname for no_proxy
    - name: Derive appliance host for proxy bypass
      ansible.builtin.set_fact:
        _appliance_host: "{{ _appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"

    - name: Build no_proxy list
      ansible.builtin.set_fact:
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    - name: Read Morpheus API bearer token from Cypher
      ansible.builtin.set_fact:
        bearer_token: "{{ lookup('cypher', 'secret=' + _api_token_secret_path) | default('', true) | trim }}"

    - name: Show inputs we will use
      ansible.builtin.debug:
        msg:
          cypher_key: "{{ _cypher_key }}"
          appliance_url: "{{ _appliance_url }}"
          appliance_host: "{{ _appliance_host }}"
          api_token_present: "{{ (bearer_token | length) > 0 }}"
          api_token_snippet: "{{ bearer_token[0:8] if bearer_token|length > 0 else 'missing' }}"
          cr_number: "{{ cr_number }}"
          cr_sys_id: "{{ cr_sys_id }}"

    - name: Fail clearly if required values are missing
      ansible.builtin.assert:
        that:
          - _req_name | length > 0
          - bearer_token | length > 0
        success_msg: "Inputs look good"
        fail_msg: >-
          Missing required values, check instance name or API token.
          Ensure a bearer token exists in Cypher at {{ _api_token_secret_path }}

    - name: Persist identifiers to Morpheus Cypher
      ansible.builtin.uri:
        url: "{{ _appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: PUT
        headers:
          Authorization: "Bearer {{ bearer_token }}"
          Content-Type: "application/json"
          Accept: "application/json"
        body_format: json
        body:
          value: "{{ {'number': cr_number, 'sys_id': cr_sys_id} | to_json }}"
        status_code: [200, 201, 204]
        validate_certs: false     # set true again after testing
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyput

    - name: Verify the Cypher write
      ansible.builtin.uri:
        url: "{{ _appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ bearer_token }}"
          Accept: "application/json"
        status_code: [200]
        validate_certs: false     # set true again after testing
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget
      changed_when: false

    - name: Show stored value
      ansible.builtin.debug:
        msg: "{{ _cyget.json.data.value | default('missing') }}"

POST:

---
- name: Post, read identifiers from Morpheus Cypher and clean up
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    # fixed appliance URL, replace with your real base URL
    _appliance_url: "https://morpheus.mycompany.com"

    # where your bearer token lives in Morpheus Cypher, adjust if needed
    _api_token_secret_path: "secret/vault/token"

  tasks:
    - name: Resolve requested instance name from morpheus
      ansible.builtin.set_fact:
        _req_name: >-
          {{
            (morpheus['instance']['name'] | default(''))
            or (morpheus['instanceName'] | default(''))
            or (morpheus['instance']['hostname'] | default(''))
          }}

    - name: Build Cypher key
      ansible.builtin.set_fact:
        _cypher_key: "servicenow/changerefs/{{ _req_name | regex_replace('[^A-Za-z0-9_.-]', '_') }}"

    - name: Derive appliance host for proxy bypass
      ansible.builtin.set_fact:
        _appliance_host: "{{ _appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    - name: Read Morpheus API bearer token from Cypher
      ansible.builtin.set_fact:
        bearer_token: "{{ lookup('cypher', 'secret=' + _api_token_secret_path) | default('', true) | trim }}"

    - name: Debug URL and headers before GET
      ansible.builtin.debug:
        msg:
          url: "{{ _appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
          bearer_token_present: "{{ (bearer_token | length) > 0 }}"
          no_proxy: "{{ _no_proxy }}"

 
    - name: Read identifiers from Morpheus Cypher
      ansible.builtin.uri:
        url: "{{ _appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ bearer_token }}"
          Accept: "application/json"
        status_code: [200, 404]
        validate_certs: false     # set true after testing
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget
      changed_when: false

    - name: Set identifiers from Cypher result
      when: _cyget.status | int == 200 and (_cyget.json.data.value | default('')) != ''
      ansible.builtin.set_fact:
        cr_number: "{{ (_cyget.json.data.value | from_json).number | default('') }}"
        cr_sys_id: "{{ (_cyget.json.data.value | from_json).sys_id | default('') }}"

    - name: Fail clearly if identifiers are missing
      when: (cr_number | default('') | trim) == '' or (cr_sys_id | default('') | trim) == ''
      ansible.builtin.fail:
        msg: "Missing change identifiers, expected at {{ _cypher_key }}"

    - name: Show identifiers we retrieved
      ansible.builtin.debug:
        msg:
          cr_number: "{{ cr_number }}"
          cr_sys_id: "{{ cr_sys_id }}"

    - name: Delete Cypher secret now that we are done
      ansible.builtin.uri:
        url: "{{ _appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: DELETE
        headers:
          Authorization: "Bearer {{ bearer_token }}"
          Accept: "application/json"
        status_code: [200, 204, 404]
        validate_certs: false     # set true after testing
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      changed_when: false

---
- name: Probe Morpheus variables
  hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    - name: List top level keys on morpheus map
      ansible.builtin.debug:
        msg: "{{ morpheus.keys() | list }}"

    - name: Show common fields if present
      ansible.builtin.debug:
        msg:
          instance_name: "{{ morpheus['instance']['name'] | default(morpheus['instanceName'] | default('missing')) }}"
          instance_hostname: "{{ morpheus['instance']['hostname'] | default('missing') }}"
          custom_options: "{{ morpheus['customOptions'] | default({}) }}"
          user: "{{ morpheus['user'] | default({}) }}"
          app: "{{ morpheus['app'] | default({}) }}"

    - name: Probe for execution lease keys
      ansible.builtin.debug:
        msg:
          has_executionLeaseToken: "{{ 'executionLeaseToken' in morpheus }}"
          has_executionLease: "{{ 'executionLease' in morpheus }}"
          executionLeaseToken_snippet: "{{ (morpheus.get('executionLeaseToken',''))[0:10] if morpheus.get('executionLeaseToken','') else 'missing' }}"
          executionLease_snippet: "{{ (morpheus.get('executionLease',''))[0:10] if morpheus.get('executionLease','') else 'missing' }}"

    - name: Resolve execution lease from any known key or fallback extra var
      ansible.builtin.set_fact:
        _lease: >-
          {{
            morpheus.get('executionLeaseToken','')
            or morpheus.get('executionLease','')
            or MORPHEUS_EXECUTION_LEASE | default('')
          }}

    - name: Show resolved execution lease status
      ansible.builtin.debug:
        msg:
          lease_present: "{{ (_lease | length) > 0 }}"
          lease_value_snippet: "{{ _lease[0:10] if _lease|length > 0 else 'missing' }}"

NEW PRE:

---
- name: Create Standard Change, set to Implementing, persist identifiers to Cypher
  hosts: localhost
  gather_facts: false
  connection: local

  vars_files:
    - secrets.yml   # sn_host, sn_username, sn_password, sn_verify_ssl

  vars:
    sc_api_version: "v1"
    sc_template_sys_id: "i9743hfe84f408hf4h83fy"

    requested_by: "user1"
    assigned_to:  "user1"
    cmdb_ci:      "fhdkjd7sishs9ss9s9sjksksd"
    u_environment: "production"
    u_whost_role:  "ds8ssksjfhfh7f6fd5d4s"
    category:   "Other"
    u_reason_for_change: "Maintenance"

    start_offset_sec: 15
    duration_sec: 10800
    wait_for_planned_start: true

    # Morpheus appliance config for Cypher API
    appliance_url: "https://morpheus.mycompany.com"   # change to your URL
    api_token_secret_path: "secret/vault/token"        # Cypher path that holds a Morpheus API bearer token
    cypher_prefix: "servicenow/changerefs"             # base path in Cypher for these refs
    appliance_validate_certs: false                    # set true once TLS path is clean

  tasks:
    - name: Compute planned start and end, UTC
      ansible.builtin.set_fact:
        start_date: "{{ lookup('pipe', 'date -u -d \"+' ~ start_offset_sec ~ ' seconds\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        end_date:   "{{ lookup('pipe', 'date -u -d \"+' ~ (start_offset_sec + duration_sec) ~ ' seconds\" +\"%Y-%m-%d %H:%M:%S\"') }}"
        start_epoch: "{{ lookup('pipe', 'date -u -d \"+' ~ start_offset_sec ~ ' seconds\" +\"%s\"') | int }}"
        now_epoch:   "{{ lookup('pipe', 'date -u +\"%s\"') | int }}"

    - name: Validate date formats
      ansible.builtin.assert:
        that:
          - start_date is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')
          - end_date   is match('^\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}$')

    - name: Build create URL and body
      ansible.builtin.set_fact:
        sc_url: "https://{{ sn_host }}/api/sn_chg_rest/{{ sc_api_version }}/change/standard/{{ sc_template_sys_id }}"
        sc_payload:
          requested_by: "{{ requested_by }}"
          assigned_to:  "{{ assigned_to }}"
          cmdb_ci:      "{{ cmdb_ci }}"
          u_environment: "{{ u_environment }}"
          u_whost_role:  "{{ u_whost_role }}"
          start_date: "{{ start_date }}"
          end_date:   "{{ end_date }}"
          category:   "{{ category }}"
          u_reason_for_change: "{{ u_reason_for_change }}"

    - name: Create Standard Change from template
      ansible.builtin.uri:
        url: "{{ sc_url }}"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ sc_payload }}"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        follow_redirects: all
        timeout: 120
        status_code: [200, 201]
      register: sc_out

    - name: Extract and sanitize change identifiers
      ansible.builtin.set_fact:
        cr_sys_id: >-
          {{ sc_out.json.result.sys_id.value
             | default(sc_out.json.result.sys_id)
             | default(sc_out.json.sys_id)
             | string | trim | regex_replace('\\s+', '') }}
        cr_number: >-
          {{ sc_out.json.result.number.value
             | default(sc_out.json.result.number)
             | default(sc_out.json.number)
             | string | trim }}

    - name: Show created change
      ansible.builtin.debug:
        msg:
          http_status: "{{ sc_out.status }}"
          sys_id: "{{ cr_sys_id }}"
          number: "{{ cr_number }}"
          planned_start: "{{ start_date }}"
          planned_end: "{{ end_date }}"
          nav_url: "https://{{ sn_host }}/nav_to.do?uri=change_request.do?sys_id={{ cr_sys_id }}"

    - name: Compute wait seconds until planned start, plus 5s buffer
      when: wait_for_planned_start | bool
      ansible.builtin.set_fact:
        wait_seconds: "{{ [ ((start_epoch | int) - (now_epoch | int) + 5), 0 ] | max }}"

    - name: Wait until planned start
      when: wait_for_planned_start | bool and (wait_seconds | int) > 0
      ansible.builtin.pause:
        seconds: "{{ wait_seconds | int }}"
        prompt: "Waiting for planned start window"

    - name: Log that no wait was needed
      when: wait_for_planned_start | bool and (wait_seconds | int) == 0
      ansible.builtin.debug:
        msg: "No wait needed, planned start already reached"

    - name: Build Table API URL for update
      ansible.builtin.set_fact:
        cr_table_url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}"

    - name: Move change to Implementing by numeric state, -2
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: -2
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: cr_update_numeric
      failed_when: false

    - name: Decide if numeric state worked
      ansible.builtin.set_fact:
        _numeric_ok: "{{ cr_update_numeric.status | default(0) | int == 200 and (cr_update_numeric.json.result.state | default('')) | string | lower is search('implement') }}"

    - name: Fallback, move change to Implementing by text state
      when: not _numeric_ok
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: "Implementing"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: cr_update_text
      failed_when: false

    # Verification that worked before, checks state and stage in display values
    - name: Read change once for verification
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,stage"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: cr_verify

    - name: Normalize state and stage
      ansible.builtin.set_fact:
        _state_obj: >-
          {{ cr_verify.json.result.state
             if (cr_verify.json.result.state is defined and cr_verify.json.result.state is mapping)
             else {'value': cr_verify.json.result.state | default(''),
                   'display_value': cr_verify.json.result.state | default('')} }}
        _stage_obj: >-
          {{ cr_verify.json.result.stage
             if (cr_verify.json.result.stage is defined and cr_verify.json.result.stage is mapping)
             else {'value': cr_verify.json.result.stage | default(''),
                   'display_value': cr_verify.json.result.stage | default('')} }}

    - name: Verify Implementing
      vars:
        state_disp: "{{ (_state_obj.display_value | default('')) | string | lower }}"
        state_val:  "{{ (_state_obj.value | default('')) | string }}"
        stage_disp: "{{ (_stage_obj.display_value | default('')) | string | lower }}"
      ansible.builtin.assert:
        that:
          - "'implement' in state_disp or 'implement' in stage_disp or state_val == '-2'"
        success_msg: "Change is Implementing"
        fail_msg: "Not Implementing yet, increase pause or check workflow"

    # Persist identifiers to Morpheus Cypher for later phases

    - name: Resolve requested instance name for key
      ansible.builtin.set_fact:
        _req_name: >-
          {{
            morpheus['instance']['name'] | default(morpheus['instanceName'] | default(morpheus['instance']['hostname'] | default('')))
          }}

    - name: Build Cypher key
      ansible.builtin.set_fact:
        _cypher_key: "{{ cypher_prefix }}/{{ _req_name | regex_replace('[^A-Za-z0-9_.-]', '_') }}"

    - name: Derive appliance host for proxy bypass
      ansible.builtin.set_fact:
        _appliance_host: "{{ appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"

    - name: Build no_proxy list
      ansible.builtin.set_fact:
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    - name: Read Morpheus API bearer token from Cypher
      ansible.builtin.set_fact:
        _api_token: "{{ lookup('cypher', 'secret=' ~ api_token_secret_path) | default('', true) | trim }}"

    - name: Fail if API token missing
      ansible.builtin.assert:
        that:
          - _api_token | length > 0
        fail_msg: "Missing Morpheus API token at {{ api_token_secret_path }}"

    # Store as flat JSON under value to avoid json in json
    - name: Persist identifiers to Morpheus Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: PUT
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Content-Type: "application/json"
          Accept: "application/json"
        body_format: json
        body:
          value:
            number: "{{ cr_number }}"
            sys_id: "{{ cr_sys_id }}"
        status_code: [200, 201, 204]
        validate_certs: "{{ appliance_validate_certs | bool }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"

    - name: Verify the Cypher write
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Accept: "application/json"
        status_code: [200]
        validate_certs: "{{ appliance_validate_certs | bool }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget
      changed_when: false

    - name: Show stored refs
      ansible.builtin.debug:
        msg:
          cypher_key: "{{ _cypher_key }}"
          value: "{{ _cyget.json.data.value | default({}) }}"

NEW POST:

---
- name: Close a ServiceNow change using identifiers from Morpheus Cypher
  hosts: localhost
  gather_facts: false
  connection: local

  vars_files:
    - secrets.yml   # sn_host, sn_username, sn_password, sn_verify_ssl

  vars:
    # Cypher and appliance config must mirror Pre
    appliance_url: "https://morpheus.mycompany.com"   # change to your URL
    api_token_secret_path: "secret/vault/token"        # same token path
    cypher_prefix: "servicenow/changerefs"
    appliance_validate_certs: false                    # set true when TLS path is clean

    # Defaults if Cypher not found
    cr_sys_id: ""
    cr_number: ""

    # Closure fields
    close_state_numeric: 0
    close_state_text: "Closed"
    close_code: "Successful"
    close_notes: "Closed by automation after successful VM build"
    set_work_end_now: true

  tasks:
    - name: Resolve requested instance name for key
      ansible.builtin.set_fact:
        _req_name: >-
          {{
            morpheus['instance']['name'] | default(morpheus['instanceName'] | default(morpheus['instance']['hostname'] | default('')))
          }}

    - name: Build Cypher key
      ansible.builtin.set_fact:
        _cypher_key: "{{ cypher_prefix }}/{{ _req_name | regex_replace('[^A-Za-z0-9_.-]', '_') }}"

    - name: Derive appliance host for proxy bypass
      ansible.builtin.set_fact:
        _appliance_host: "{{ appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"

    - name: Build no_proxy list
      ansible.builtin.set_fact:
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    - name: Read Morpheus API bearer token from Cypher
      ansible.builtin.set_fact:
        _api_token: "{{ lookup('cypher', 'secret=' ~ api_token_secret_path) | default('', true) | trim }}"

    - name: Read identifiers from Morpheus Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Accept: "application/json"
        status_code: [200, 404]
        validate_certs: "{{ appliance_validate_certs | bool }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget
      changed_when: false

    - name: Set identifiers from Cypher if present
      when: _cyget.status | int == 200
      ansible.builtin.set_fact:
        cr_number: "{{ _cyget.json.data.value.number | default(cr_number) }}"
        cr_sys_id: "{{ _cyget.json.data.value.sys_id | default(cr_sys_id) }}"

    - name: Fail clearly if identifiers are missing
      when: (cr_number | default('') | trim) == '' or (cr_sys_id | default('') | trim) == ''
      ansible.builtin.fail:
        msg: "Missing change identifiers, expected at {{ _cypher_key }}"

    - name: Build Table API URL
      ansible.builtin.set_fact:
        cr_table_url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}"

    - name: Compute timestamps
      ansible.builtin.set_fact:
        _now_display: "{{ lookup('pipe', 'date -u +\"%Y-%m-%d %H:%M:%S\"') }}"

    - name: Build closure body
      ansible.builtin.set_fact:
        _base_close_fields: >-
          {{
            {
              'close_code': close_code,
              'close_notes': close_notes
            }
            | combine( {'work_end': _now_display} if set_work_end_now | bool else {} )
          }}

    - name: First try, close using numeric state
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ _base_close_fields | combine({'state': close_state_numeric}) }}"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _close_numeric
      failed_when: false

    - name: Decide if numeric close worked
      ansible.builtin.set_fact:
        _numeric_ok: "{{ _close_numeric.status | default(0) | int == 200 }}"

    - name: Fallback, close using textual state
      when: not _numeric_ok
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: "{{ _base_close_fields | combine({'state': close_state_text}) }}"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _close_text

    - name: Pause briefly to let workflow update the record
      ansible.builtin.pause:
        seconds: 10

    - name: Read final record with display values
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,close_code,close_notes,work_end"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _final

    - name: Normalize state for display
      ansible.builtin.set_fact:
        _state_obj: >-
          {{ _final.json.result.state
             if (_final.json.result.state is defined and _final.json.result.state is mapping)
             else {'value': _final.json.result.state | default(''),
                   'display_value': _final.json.result.state | default('')} }}

    - name: Show closure outcome
      ansible.builtin.debug:
        msg:
          number: "{{ _final.json.result.number | default(cr_number) }}"
          state_display: "{{ _state_obj.display_value | default('') }}"
          state_value: "{{ _state_obj.value | default('') }}"
          close_code: "{{ _final.json.result.close_code | default('') }}"
          close_notes: "{{ _final.json.result.close_notes | default('') }}"
          work_end: "{{ _final.json.result.work_end | default('') }}"

    # Best effort, delete the Cypher secret now that we are done
    - name: Delete Cypher secret for this instance
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: DELETE
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Accept: "application/json"
        status_code: [200, 204, 404]
        validate_certs: "{{ appliance_validate_certs | bool }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      changed_when: false

    - name: Assert final state is Closed
      ansible.builtin.assert:
        that:
          - "'closed' in (_state_obj.display_value | string | lower)"
        success_msg: "Change is Closed"
        fail_msg: "Change still not Closed, check whether the close action is enabled or required fields"

UPDATE, revised tasks section for the post playbook:

  tasks:
    # Build the same Cypher key the pre playbook used
    - name: Resolve requested instance name for key
      ansible.builtin.set_fact:
        _req_name: >-
          {{
            morpheus['instance']['name'] | default(
              morpheus['instanceName'] | default(
                morpheus['instance']['hostname'] | default('')
              )
            )
          }}

    - name: Build Cypher key
      ansible.builtin.set_fact:
        _cypher_key: "{{ cypher_prefix }}/{{ _req_name | regex_replace('[^A-Za-z0-9_.-]', '_') }}"

    # Morpheus API proxy bypass helpers
    - name: Derive appliance host for proxy bypass
      ansible.builtin.set_fact:
        _appliance_host: "{{ appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"

    - name: Build no_proxy list
      ansible.builtin.set_fact:
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    # Read Morpheus API bearer token
    - name: Read Morpheus API bearer token from Cypher
      ansible.builtin.set_fact:
        _api_token: "{{ lookup('cypher', 'secret=' ~ api_token_secret_path) | default('', true) | trim }}"

    # Pull identifiers from Cypher
    - name: Read identifiers from Morpheus Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Accept: "application/json"
        status_code: [200, 404]
        validate_certs: "{{ appliance_validate_certs | bool }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget
      changed_when: false

    - name: Fail clearly if no identifiers
      when: _cyget.status | int != 200
      ansible.builtin.fail:
        msg: "Missing change identifiers in Cypher at {{ _cypher_key }}"

    - name: Set identifiers from Cypher value
      ansible.builtin.set_fact:
        cr_number: "{{ _cyget.json.data.value.number | default('') }}"
        cr_sys_id: "{{ _cyget.json.data.value.sys_id | default('') }}"

    - name: Validate identifiers
      ansible.builtin.assert:
        that:
          - cr_number | length > 0
          - cr_sys_id | length > 0
        fail_msg: "Identifiers empty in Cypher value at {{ _cypher_key }}"

    # ServiceNow close workflow

    - name: Build Table API URL
      ansible.builtin.set_fact:
        cr_table_url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}"

    - name: Compute timestamps
      ansible.builtin.set_fact:
        _now_display: "{{ lookup('pipe', 'date -u +\"%Y-%m-%d %H:%M:%S\"') }}"

    - name: Read current state with display values
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,stage"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _cur

    - name: Normalize state and stage
      ansible.builtin.set_fact:
        _state_obj: >-
          {{ _cur.json.result.state
             if (_cur.json.result.state is defined and _cur.json.result.state is mapping)
             else {'value': _cur.json.result.state | default(''),
                   'display_value': _cur.json.result.state | default('')} }}
        _stage_obj: >-
          {{ _cur.json.result.stage
             if (_cur.json.result.stage is defined and _cur.json.result.stage is mapping)
             else {'value': _cur.json.result.stage | default(''),
                   'display_value': _cur.json.result.stage | default('')} }}
        _state_disp: "{{ (_cur.json.result.state.display_value | default(_cur.json.result.state | default(''))) | string | lower }}"
        _stage_disp: "{{ (_cur.json.result.stage.display_value | default(_cur.json.result.stage | default(''))) | string | lower }}"

    # If not already Closed, and not already in Review, push to Review first, some workflows require this
    - name: Move to Review by numeric state first
      when: "'closed' not in _state_disp and 'closed' not in _stage_disp and 'review' not in _state_disp and 'review' not in _stage_disp"
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: -3
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _to_review_numeric
      failed_when: false

    - name: Fallback, move to Review by text state
      when: "'closed' not in _state_disp and 'closed' not in _stage_disp and 'review' not in _state_disp and 'review' not in _stage_disp and (_to_review_numeric.status | default(0) | int != 200)"
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          state: "Review"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _to_review_text
      failed_when: false

    - name: Pause briefly after Review transition
      when: "'closed' not in _state_disp and 'closed' not in _stage_disp"
      ansible.builtin.pause:
        seconds: 10

    # Build closure body, add work_end if requested
    - name: Build closure body
      ansible.builtin.set_fact:
        _base_close_fields: >-
          {{
            {
              'close_code': close_code,
              'close_notes': close_notes
            }
            | combine( {'work_end': _now_display} if set_work_end_now | bool else {} )
          }}

    # Official close action first
    - name: Close via Change REST close action
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/sn_chg_rest/v1/change/{{ cr_sys_id }}/close"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          close_code: "{{ close_code }}"
          close_notes: "{{ close_notes }}"
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
        timeout: 120
      register: cr_close_action
      failed_when: false

    # Fallback, hard set state to Closed with close fields
    - name: Fallback, set state to Closed via Table API if action failed
      when: cr_close_action.status | default(0) | int != 200
      ansible.builtin.uri:
        url: "{{ cr_table_url }}"
        method: PATCH
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body: >-
          {{
            {
              'state': close_state_text,
              'close_code': close_code,
              'close_notes': close_notes
            }
            | combine( {'work_end': _now_display} if set_work_end_now | bool else {} )
          }}
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
        timeout: 120
      register: cr_close_patch

    - name: Pause briefly after close operation
      ansible.builtin.pause:
        seconds: 10

    - name: Read final record with display values
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,state,close_code,close_notes,work_end,stage"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: "{{ sn_verify_ssl | default(true) }}"
        return_content: true
        status_code: [200]
      register: _final

    - name: Normalize final state and stage
      ansible.builtin.set_fact:
        _final_state_disp: "{{ (_final.json.result.state.display_value | default(_final.json.result.state | default(''))) | string | lower }}"
        _final_stage_disp: "{{ (_final.json.result.stage.display_value | default(_final.json.result.stage | default(''))) | string | lower }}"

    - name: Show final closure outcome
      ansible.builtin.debug:
        msg:
          number: "{{ _final.json.result.number | default(cr_number) }}"
          state: "{{ _final_state_disp }}"
          stage: "{{ _final_stage_disp }}"
          close_code: "{{ _final.json.result.close_code | default('') }}"
          close_notes: "{{ _final.json.result.close_notes | default('') }}"
          work_end: "{{ _final.json.result.work_end | default('') }}"

    - name: Assert final state is Closed
      ansible.builtin.assert:
        that:
          - "'closed' in _final_state_disp"
        success_msg: "Change is Closed"
        fail_msg: "Change still not Closed, check whether the close action is enabled or required fields"

    # Cleanup, delete Cypher secret, best effort
    - name: Delete Cypher secret for this instance
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ _cypher_key | urlencode }}"
        method: DELETE
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Accept: "application/json"
        status_code: [200, 204, 404]
        validate_certs: "{{ appliance_validate_certs | bool }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      changed_when: false

MYSQL STUFF:

2) Check the replication channel and status (ClusterSet relay logs)
mysql --defaults-file=/home/<youruser>/.my.cnf -e \
"SELECT CHANNEL_NAME FROM performance_schema.replication_connection_status;"

mysql --defaults-file=/home/<youruser>/.my.cnf -e \
"SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
|| mysql --defaults-file=/home/<youruser>/.my.cnf -e \
"SHOW SLAVE STATUS FOR CHANNEL 'clusterset_replication'\G"


Confirm:

Replica_IO_Running: Yes, Replica_SQL_Running: Yes

Seconds_Behind_Source: 0 (or very small and dropping)

Relay_Log_Space explains your big *-relay-bin-clusterset_replication.* files

Note: PURGE BINARY LOGS won't reclaim these on this replica, it only removes the server's own binary logs, not relay logs. You want a relay log reset here.

Check the replica status for the error:

mysql --defaults-file=/home/<youruser>/.my.cnf -e \
"SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
|| mysql --defaults-file=/home/<youruser>/.my.cnf -e \
"SHOW SLAVE STATUS FOR CHANNEL 'clusterset_replication'\G"


Look at:

Last_SQL_Error

Last_Error

Last_SQL_Errno

These will tell you exactly why the SQL thread stopped (e.g., duplicate key, missing table, disk space, etc.).

Do this now
# Start just the SQL applier on the clusterset channel
mysql -u ClusterAdmin -p -e "START REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';"

# Re-check status
mysql -u ClusterAdmin -p -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G"


1) Stop replication & GR on this node
# Stop async applier & IO for the clusterset channel
mysql -u ClusterAdmin -p -e "STOP REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "STOP SLAVE FOR CHANNEL 'clusterset_replication';"

# Make sure we don't accidentally bootstrap
mysql -u ClusterAdmin -p -e "SET GLOBAL group_replication_bootstrap_group=OFF;"

# Leave the group cleanly
mysql -u ClusterAdmin -p -e "STOP GROUP_REPLICATION;"

2) Enable relay-log recovery & purge the broken relay logs

This makes MySQL discard local relay logs and reconstruct them from the source at restart.

# Turn on automatic recovery (persist across restart)
mysql -u ClusterAdmin -p -e "SET PERSIST relay_log_recovery = ON;"
# (relay_log_purge is ON by default in 8.0; make sure)
mysql -u ClusterAdmin -p -e "SET PERSIST relay_log_purge = ON;"

What to do instead (works now)
0) (Optional) Confirm GTID is ON
mysql -u ClusterAdmin -p -e "SELECT @@gtid_mode, @@enforce_gtid_consistency;"


If you see ON / ON, follow Path A. If GTIDs are OFF, use Path B.

Path A — GTID ON (most Cluster/ClusterSet setups)

You can safely throw away the bad relay logs and re-fetch from the source using GTIDs:

# Stop async replication for the channel
mysql -u ClusterAdmin -p -e "STOP REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "STOP SLAVE FOR CHANNEL 'clusterset_replication';"

# Make sure Group Replication isn't blocking the applier
mysql -u ClusterAdmin -p -e "STOP GROUP_REPLICATION; SET GLOBAL group_replication_bootstrap_group=OFF;"

# Purge the corrupt relay logs but KEEP the connection info
mysql -u ClusterAdmin -p -e "RESET REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "RESET SLAVE FOR CHANNEL 'clusterset_replication';"

# (Re)start mysqld to get a clean slate (RHEL 9 service name)
sudo systemctl restart mysqld
sudo tail -n 80 /var/log/mysqld.log

# Rejoin Group Replication
mysql -u ClusterAdmin -p -e "START GROUP_REPLICATION;"

# Start the SQL applier (the GR hook should allow it now)
mysql -u ClusterAdmin -p -e "START REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "START SLAVE SQL_THREAD FOR CHANNEL 'clusterset_replication';"

# Verify it's applying
mysql -u ClusterAdmin -p -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
| egrep 'Replica_IO_Running|Replica_SQL_Running|Replica_SQL_Running_State|Seconds_Behind_Source'


Once it reaches Seconds_Behind_Source = 0, reclaim space (rotates to fresh relay logs):

mysql -u ClusterAdmin -p -e "\
STOP REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication'; \
RESET REPLICA FOR CHANNEL 'clusterset_replication'; \
START REPLICA FOR CHANNEL 'clusterset_replication';"

Path B — GTID OFF (less common in ClusterSet, but just in case)

You must point the replica at a known good source log file/position.

On the source (primary):

mysql -u ClusterAdmin -p -h <PRIMARY_HOST> -e "SHOW MASTER STATUS\G"


Note File and Position (or use a safe bookmark you recorded earlier).

On the replica (this node):

mysql -u ClusterAdmin -p -e "STOP REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "STOP SLAVE FOR CHANNEL 'clusterset_replication';"

# Wipe bad relay logs, keep connection
mysql -u ClusterAdmin -p -e "RESET REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "RESET SLAVE FOR CHANNEL 'clusterset_replication';"

# Point to the source coordinates you collected
mysql -u ClusterAdmin -p -e "\
CHANGE REPLICATION SOURCE TO
  SOURCE_LOG_FILE='<File_from_master>',
  SOURCE_LOG_POS=<Position_from_master>
FOR CHANNEL 'clusterset_replication';" \
|| mysql -u ClusterAdmin -p -e "\
CHANGE MASTER TO
  MASTER_LOG_FILE='<File_from_master>',
  MASTER_LOG_POS=<Position_from_master>
FOR CHANNEL 'clusterset_replication';"

# Restart & rejoin GR, then start applier
sudo systemctl restart mysqld
mysql -u ClusterAdmin -p -e "START GROUP_REPLICATION;"
mysql -u ClusterAdmin -p -e "START REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "START SLAVE FOR CHANNEL 'clusterset_replication';"

If you still prefer relay_log_recovery

That setting can be made persistent via config (since runtime SET is read-only on your build):

Edit /etc/my.cnf (or a drop-in in /etc/my.cnf.d/):

[mysqld]
relay_log_recovery=ON
relay_log_purge=ON


Restart:

sudo systemctl restart mysqld


This forces relay-log reconstruction at startup. It’s helpful if you expect to bounce nodes during cleanup.
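
After the restart, a quick sanity check that both settings stuck:

mysql -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'relay_log_%';"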

Quick sanity checks (after either path)
# GR member health
mysql -u ClusterAdmin -p -e "SELECT MEMBER_HOST,MEMBER_STATE,MEMBER_ROLE FROM performance_schema.replication_group_members;"

# Channel health
mysql -u ClusterAdmin -p -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
| egrep 'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Retrieved_Gtid_Set|Executed_Gtid_Set'


If anything here still blocks (e.g., GR won’t go ONLINE, or applier hook error returns), paste those two outputs and we’ll zero in on the next single command to run.

#################

You’ve got two separate root causes showing up now:

Encrypted relay logs can’t be opened (keyring problem).

Relay log parse failures (the old, partially written/corrupted relay files from when /var filled).

You can fix this in one of two ways. If you’re on a tight clock, take Path B (temporary: turn off relay-log encryption on the replica) to get the cluster healthy fast, then re-enable encryption after.
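
Not sure which of the two you are looking at? A quick scan of the error log usually shows both symptoms (path assumed to be /var/log/mysqld.log, the RHEL default):

# keyring symptom, messages about keys/keyring around relay log opens
sudo grep -iE 'keyring' /var/log/mysqld.log | tail -n 20
# corruption symptom, relay log read/parse errors from when /var filled
sudo grep -iE 'relay log' /var/log/mysqld.log | tail -n 20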

Path A — Keep relay-log encryption (fix the keyring)

(Preferred if you must keep encryption at rest on replicas.)

1) Inspect keyring + encryption settings
# Is relay-log encryption enabled?
mysql -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'relay_log_encryption';"

# What keyring is in use / is it loaded?
mysql -u ClusterAdmin -p -e "
SELECT PLUGIN_NAME, PLUGIN_STATUS
FROM information_schema.PLUGINS
WHERE PLUGIN_NAME LIKE 'keyring%';
"
mysql -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'keyring_file_data';"


Common RHEL/MySQL 8 layout:

keyring_file plugin with keyring_file_data=/var/lib/mysql-keyring/keyring
(or /var/lib/mysql/keyring/keyring).

Check the file exists and perms/SELinux are correct:

# Adjust path to whatever SHOW VARIABLES returned
sudo ls -l /var/lib/mysql-keyring
sudo ls -l /var/lib/mysql-keyring/keyring
sudo chown mysql:mysql /var/lib/mysql-keyring /var/lib/mysql-keyring/keyring
sudo chmod 600 /var/lib/mysql-keyring/keyring
# SELinux label so mysqld can read it:
sudo semanage fcontext -a -t mysqld_db_t "/var/lib/mysql-keyring(/.*)?"
sudo restorecon -Rv /var/lib/mysql-keyring


If the keyring file is missing or zero-length and you have no backup: you can’t decrypt old encrypted relay logs (that’s OK; we’ll discard them and re-fetch).

2) Stop GR & the replica channel
mysql -u ClusterAdmin -p -e "STOP REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "STOP SLAVE FOR CHANNEL 'clusterset_replication';"
mysql -u ClusterAdmin -p -e "STOP GROUP_REPLICATION; SET GLOBAL group_replication_bootstrap_group=OFF;"

3) Discard broken relay logs, keep connection info
mysql -u ClusterAdmin -p -e "RESET REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "RESET SLAVE FOR CHANNEL 'clusterset_replication';"
sudo systemctl restart mysqld

4) Rejoin GR, then start applier
mysql -u ClusterAdmin -p -e "START GROUP_REPLICATION;"
mysql -u ClusterAdmin -p -e "START REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "START SLAVE SQL_THREAD FOR CHANNEL 'clusterset_replication';"
mysql -u ClusterAdmin -p -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
  | egrep 'Replica_IO_Running|Replica_SQL_Running|Replica_SQL_Running_State|Seconds_Behind_Source'


If it still says “unable to fetch a keyring key,” the keyring plugin still isn’t loading or can’t read its file. Fix the path/permissions/SELinux, restart mysqld, and retry step 4.
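
If the plugin is not loading at all, the usual fix (assuming the classic keyring_file plugin rather than a keyring component, and the file path you verified above) is to load it early via /etc/my.cnf and restart:

[mysqld]
early-plugin-load=keyring_file.so
keyring_file_data=/var/lib/mysql-keyring/keyring

sudo systemctl restart mysqld
mysql -u ClusterAdmin -p -e "SELECT PLUGIN_NAME, PLUGIN_STATUS FROM information_schema.PLUGINS WHERE PLUGIN_NAME LIKE 'keyring%';"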

Path B — Fastest recovery: temporarily disable relay-log encryption on this replica

Relay logs are ephemeral; if security policy allows, this is the quickest way through the keyring roadblock.

1) Stop GR & replica, discard relay logs
mysql -u ClusterAdmin -p -e "STOP REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "STOP SLAVE FOR CHANNEL 'clusterset_replication';"
mysql -u ClusterAdmin -p -e "STOP GROUP_REPLICATION; SET GLOBAL group_replication_bootstrap_group=OFF;"
mysql -u ClusterAdmin -p -e "RESET REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "RESET SLAVE FOR CHANNEL 'clusterset_replication';"

2) Turn relay-log encryption OFF in config (runtime is read-only)

Edit /etc/my.cnf (or a drop-in in /etc/my.cnf.d/), under [mysqld]:

[mysqld]
relay_log_encryption=OFF
relay_log_purge=ON
# optional but recommended if you want auto-recovery on restarts later:
# relay_log_recovery=ON


Restart:

sudo systemctl restart mysqld
sudo tail -n 80 /var/log/mysqld.log

3) Rejoin GR, start the applier
mysql -u ClusterAdmin -p -e "START GROUP_REPLICATION;"
mysql -u ClusterAdmin -p -e "START REPLICA FOR CHANNEL 'clusterset_replication';" \
  || mysql -u ClusterAdmin -p -e "START SLAVE FOR CHANNEL 'clusterset_replication';"
mysql -u ClusterAdmin -p -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
  | egrep 'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Retrieved_Gtid_Set|Executed_Gtid_Set'


Let it catch up. When Seconds_Behind_Source=0, run a quick rotate to keep relay logs small:

mysql -u ClusterAdmin -p -e "\
STOP REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication'; \
RESET REPLICA FOR CHANNEL 'clusterset_replication'; \
START REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';"

4) (Later) Re-enable encryption safely

Once the keyring is fixed and stable, flip relay_log_encryption=ON back in /etc/my.cnf, restart mysqld, and optionally RESET REPLICA again to rotate to freshly encrypted relay logs.
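
A minimal sketch of that later step, assuming the same channel name and config file as above:

# in /etc/my.cnf under [mysqld]: relay_log_encryption=ON, then
sudo systemctl restart mysqld
mysql -u ClusterAdmin -p -e "START GROUP_REPLICATION;"   # only needed if group_replication_start_on_boot is OFF
mysql -u ClusterAdmin -p -e "STOP REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication'; RESET REPLICA FOR CHANNEL 'clusterset_replication'; START REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';"
mysql -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'relay_log_encryption';"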

Extra integrity checks (recommended)

On the primary (source): make sure its binlogs are OK (so the replica isn’t pulling corrupt events):

mysql -u ClusterAdmin -p -h <PRIMARY_HOST> -e "SHOW BINARY LOGS;"
# Spot-check the latest binlog file:
mysqlbinlog --verify-binlog-checksum --base64-output=DECODE-ROWS -v -v --host=<PRIMARY_HOST> -u ClusterAdmin -p --read-from-remote-server <latest-binlog> | head


Network to primary stable? (replica host)

nc -vz <PRIMARY_HOST> 3306
# If GR also flapped earlier:
nc -vz <peer_host> 33061


Disk/FS health on replica:

df -h /var
sudo dmesg | tail -n 50
sudo smartctl -H /dev/$(lsblk -ndo PKNAME $(df --output=source /var | tail -1) | tr -d '\n') 2>/dev/null || true

What I’d do right now

Given the “cannot fetch keyring key to open an encrypted relay” message, I’d take Path B to get green quickly (turn off relay-log encryption on the replica only, discard relay logs, restart, rejoin GR, start applier). Then, when calm, fix/restore the keyring file and re-enable encryption.

If you want, paste these two outputs and I’ll tell you exactly which tiny edit you need:

mysql -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'relay_log_encryption';"
mysql -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'keyring_file_data';"
sudo ls -l $(mysql -N -u ClusterAdmin -p -e "SHOW VARIABLES LIKE 'keyring_file_data';" | awk '{print $2}')

COLLECTOR:

#!/usr/bin/env bash
# mysql_support_collect.sh — RHEL9 / Oracle MySQL 8.x / mysqld
# Creates /tmp/<host>_<UTCtimestamp>_mysql_support.tgz
# Uses --defaults-file for MySQL auth (no -p). Requires sudo for a few system logs.

set -Eeuo pipefail

# ===== User knobs =====
DEFAULTS="${DEFAULTS:-$HOME/.my.cnf}"   # e.g., export DEFAULTS=/home/dennis/.my.cnf
SVC="${SVC:-mysqld}"
JOURNAL_SINCE="${JOURNAL_SINCE:-2 days ago}"   # change if you want a longer/shorter window
INCLUDE_CONFIGS="${INCLUDE_CONFIGS:-1}"        # set to 0 to skip copying /etc/my.cnf*
LOG_TAIL_LINES="${LOG_TAIL_LINES:-4000}"

# ===== Setup =====
MYSQL_BIN="${MYSQL_BIN:-mysql}"
MYSQL="${MYSQL_BIN} --defaults-file=${DEFAULTS}"
HOST="$(hostname -f 2>/dev/null || hostname)"
TS="$(date -u +%Y%m%dT%H%M%SZ)"
OUT="/tmp/mysql_support_${HOST}_${TS}"
mkdir -p "$OUT"

banner() { echo -e "\n### $* ###\n"; }
save_cmd() { banner "$*" | tee -a "$OUT/runbook.txt"; }

# ===== Sanity: defaults file present & readable by client =====
if [[ ! -f "$DEFAULTS" ]]; then
  echo "Defaults file not found: $DEFAULTS" >&2; exit 1
fi
chmod 600 "$DEFAULTS" || true
if ! my_print_defaults --defaults-file="$DEFAULTS" client >/dev/null 2>&1; then
  echo "WARNING: my_print_defaults could not read $DEFAULTS (format/perms?). Continuing anyway..." >&2
fi

# Record environment
{
  echo "host=$HOST"
  echo "utc_now=$(date -u)"
  echo "local_now=$(date)"
  echo "defaults_file=$DEFAULTS"
  echo "service=$SVC"
  echo "journal_since=$JOURNAL_SINCE"
} > "$OUT/meta.txt"

# ===== System / OS =====
save_cmd "OS / kernel / time / SELinux"
{
  echo "# /etc/redhat-release"; cat /etc/redhat-release 2>/dev/null || true
  echo -e "\n# uname -a"; uname -a
  echo -e "\n# uptime"; uptime || true
  echo -e "\n# timedatectl"; timedatectl 2>/dev/null || true
  echo -e "\n# SELinux"; sestatus 2>/dev/null || true
} > "$OUT/os.txt"

save_cmd "Service + logs (journal since: $JOURNAL_SINCE)"
systemctl status "$SVC" --no-pager > "$OUT/systemctl_${SVC}.txt" 2>&1 || true
sudo journalctl -u "$SVC" --since "$JOURNAL_SINCE" > "$OUT/journal_${SVC}.log" 2>&1 || true
sudo tail -n "$LOG_TAIL_LINES" /var/log/${SVC}.log > "$OUT/${SVC}.log.tail" 2>&1 || true

# ===== Disk / FS =====
save_cmd "Disk / FS usage"
{
  echo "## df -h"; df -h
  echo -e "\n## df -i"; df -i
  echo -e "\n## lsblk"; lsblk -o NAME,FSTYPE,SIZE,MOUNTPOINT,TYPE | sed 's/  */ /g'
  echo -e "\n## mounts for /var and datadir"; mount | egrep ' /var|mysql' || true
  echo -e "\n## du -sh /var/lib/mysql"; sudo du -sh /var/lib/mysql 2>/dev/null || true
  echo -e "\n## Relay/binlog files in datadir"; sudo ls -lh /var/lib/mysql/*relay* /var/lib/mysql/mysql-bin.* 2>/dev/null || true
  echo -e "\n## Largest 50 files in datadir"; sudo find /var/lib/mysql -xdev -type f -printf '%s %p\n' 2>/dev/null | sort -nr | head -n 50
} > "$OUT/disk.txt"

save_cmd "Kernel messages (tail)"
sudo dmesg | tail -n 500 > "$OUT/dmesg.tail" 2>&1 || true

# ===== Network =====
save_cmd "Listeners / firewall"
{
  echo "## ss -lntp (3306/33061)"; ss -lntp | egrep '3306|33061' || true
  echo -e "\n## firewall-cmd --list-ports"; sudo firewall-cmd --list-ports 2>/dev/null || true
  echo -e "\n## firewall-cmd --list-services"; sudo firewall-cmd --list-services 2>/dev/null || true
} > "$OUT/network.txt"

# ===== MySQL: versions, core vars =====
save_cmd "MySQL versions / vars"
$MYSQL -e "SELECT VERSION() AS version, @@version_comment AS build\G" > "$OUT/mysql_version.txt" 2>&1 || true
$MYSQL -e "SELECT
  @@server_uuid            AS server_uuid,
  @@server_id              AS server_id,
  @@hostname               AS hostname,
  @@port                   AS port,
  @@socket                 AS socket,
  @@datadir                AS datadir,
  @@log_error              AS log_error,
  @@read_only              AS read_only,
  @@super_read_only        AS super_read_only,
  @@gtid_mode              AS gtid_mode,
  @@enforce_gtid_consistency AS enforce_gtid_consistency,
  @@log_bin                AS log_bin,
  @@binlog_format          AS binlog_format,
  @@binlog_encryption      AS binlog_encryption,
  @@relay_log_purge        AS relay_log_purge
\G" > "$OUT/mysql_core_vars.txt" 2>&1 || true

# Key plugins / GR variables
$MYSQL -e "SELECT PLUGIN_NAME, PLUGIN_STATUS FROM information_schema.PLUGINS WHERE PLUGIN_NAME IN ('group_replication','keyring_file','keyring_encrypted_file','keyring_aws');" > "$OUT/mysql_plugins.txt" 2>&1 || true
$MYSQL -e "SHOW VARIABLES LIKE 'group_replication%';" > "$OUT/mysql_gr_vars.txt" 2>&1 || true
$MYSQL -e "SHOW STATUS LIKE 'group_replication_%';" > "$OUT/mysql_gr_status.txt" 2>&1 || true
$MYSQL -e "SHOW STATUS LIKE 'group_replication_primary_member';" > "$OUT/mysql_gr_primary.txt" 2>&1 || true

# ===== Group Replication membership =====
save_cmd "GR membership"
$MYSQL -e "SELECT MEMBER_ID, MEMBER_HOST, MEMBER_PORT, MEMBER_STATE, MEMBER_ROLE FROM performance_schema.replication_group_members;" > "$OUT/gr_members.txt" 2>&1 || true

# ===== Channels / replication status =====
save_cmd "Replication channels / status"
$MYSQL -e "SELECT CHANNEL_NAME FROM performance_schema.replication_connection_configuration;" > "$OUT/channels.txt" 2>&1 || true

# For each channel, dump status
channels=$($MYSQL -N -e "SELECT CHANNEL_NAME FROM performance_schema.replication_connection_configuration;" 2>/dev/null || true)
if [[ -z "${channels:-}" ]]; then
  channels="clusterset_replication group_replication_recovery"  # common names, try anyway
fi
for ch in $channels; do
  $MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL '$ch'\G" > "$OUT/replica_status_${ch}.txt" 2>&1 || \
  $MYSQL -e "SHOW SLAVE STATUS FOR CHANNEL '$ch'\G"    > "$OUT/replica_status_${ch}.txt" 2>&1 || true
done

# Applier/coordinator snapshots
$MYSQL -e "SELECT * FROM performance_schema.replication_applier_status\G" > "$OUT/applier_status.txt" 2>&1 || true
$MYSQL -e "SELECT THREAD_ID, SERVICE_STATE, LAST_ERROR_NUMBER, LAST_ERROR_MESSAGE FROM performance_schema.replication_applier_status_by_worker\G" > "$OUT/applier_workers.txt" 2>&1 || true
$MYSQL -e "SHOW BINARY LOGS;" > "$OUT/show_binary_logs.txt" 2>&1 || true
$MYSQL -e "SHOW MASTER STATUS\G" > "$OUT/show_master_status.txt" 2>&1 || true

# ===== Connectivity tests to seeds (33061) & MySQL (3306) =====
save_cmd "Connectivity to GR group seeds / peers"
$MYSQL -N -e "SELECT @@group_replication_group_seeds;" > "$OUT/gr_seeds.txt" 2>&1 || true
SEEDS=$(awk '{print $1}' "$OUT/gr_seeds.txt" 2>/dev/null | tr -d '\r' | tr ',' ' ' || true)
{
  echo "Seeds: ${SEEDS:-<none>}"
  if command -v nc >/dev/null 2>&1; then
    for hp in $SEEDS; do
      host="${hp%%:*}"; port="${hp##*:}"
      echo; echo "## nc -vz $host $port"; nc -vz -w2 "$host" "$port" 2>&1 || true
      echo "## nc -vz $host 3306"; nc -vz -w2 "$host" 3306 2>&1 || true
    done
  else
    echo "nc (netcat) not found; skipping seed connectivity tests."
  fi
} > "$OUT/seed_connectivity.txt"

# ===== Config copies (sanitized) =====
if [[ "$INCLUDE_CONFIGS" == "1" ]]; then
  save_cmd "Copying sanitized configs (/etc/my.cnf*)"
  mkdir -p "$OUT/configs"
  for path in /etc/my.cnf /etc/my.cnf.d/*.cnf; do
    [[ -f "$path" ]] || continue
    out="$OUT/configs/$(basename "$path").redacted"
    sudo sed -E \
      -e 's/([Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd]\s*=\s*).*/\1[REDACTED]/' \
      -e 's/(IDENTIFIED[[:space:]]+BY[[:space:]]+).*/\1[REDACTED]/' \
      -e 's/(ssl-key|ssl_cert|ssl_ca|ssl-key-file|ssl_cert_file|ssl_ca_file)(\s*=\s*).*/\1\2[REDACTED]/' \
      "$path" > "$out" 2>/dev/null || sudo cp -a "$path" "$out" 2>/dev/null || true
  done
else
  echo "Skipping config copies (INCLUDE_CONFIGS=$INCLUDE_CONFIGS)" > "$OUT/configs_skipped.txt"
fi

# ===== Package & finish =====
TARBALL="/tmp/${HOST}_${TS}_mysql_support.tgz"
tar -C /tmp -czf "$TARBALL" "$(basename "$OUT")"
echo
echo "Created support bundle: $TARBALL"
echo "Review/redact as needed, then attach to your HPE ticket."


export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

$MYSQL -e "SELECT MEMBER_HOST,MEMBER_STATE,MEMBER_ROLE FROM performance_schema.replication_group_members;"
$MYSQL -e "SHOW STATUS LIKE 'group_replication_cluster_status';"
$MYSQL -e "SHOW STATUS LIKE 'group_replication_primary_member';"
$MYSQL -e "SELECT @@global.gtid_executed\G"
# Make sure it can’t flap the group
$MYSQL -e "STOP GROUP_REPLICATION; SET GLOBAL group_replication_bootstrap_group=OFF;"
sudo systemctl stop mysqld  # or leave mysqld up but GR stopped while you repair


On the broken node:

# Clear any damaged relay logs on BOTH channels
$MYSQL -e "STOP REPLICA FOR CHANNEL 'clusterset_replication';" || $MYSQL -e "STOP SLAVE FOR CHANNEL 'clusterset_replication';"
$MYSQL -e "STOP REPLICA FOR CHANNEL 'group_replication_recovery';" || true

$MYSQL -e "RESET REPLICA FOR CHANNEL 'clusterset_replication';" || $MYSQL -e "RESET SLAVE FOR CHANNEL 'clusterset_replication';"
$MYSQL -e "RESET REPLICA FOR CHANNEL 'group_replication_recovery';" || true

# Make sure we’re not requiring at-rest encryption (no keyring needed during fix)
$MYSQL -e "SHOW VARIABLES LIKE 'binlog_encryption';"   # keep OFF for now

sudo systemctl restart mysqld


Now rejoin it to the group and let recovery run:

$MYSQL -e "START GROUP_REPLICATION;"
$MYSQL -e "SELECT MEMBER_HOST,MEMBER_STATE,MEMBER_ROLE FROM performance_schema.replication_group_members;"
$MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" | egrep 'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Last_SQL_Error'


Let Seconds_Behind_Source reach 0. Then rotate relay logs once to reclaim space:

$MYSQL -e "STOP REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';
RESET REPLICA FOR CHANNEL 'clusterset_replication';
START REPLICA SQL_THREAD FOR CHANNEL 'clusterset_replication';"

# GR members & health
$MYSQL -e "SELECT MEMBER_HOST,MEMBER_STATE,MEMBER_ROLE FROM performance_schema.replication_group_members;"
$MYSQL -e "SHOW GLOBAL STATUS LIKE 'group_replication_cluster_status';"
$MYSQL -e "SELECT MEMBER_HOST,COUNT_TRANSACTIONS_IN_QUEUE
           FROM performance_schema.replication_group_member_stats;"

# ClusterSet / async channel on the cluster PRIMARY
$MYSQL -e "SELECT CHANNEL_NAME,SERVICE_STATE,LAST_ERROR_NUMBER,LAST_ERROR_MESSAGE
           FROM performance_schema.replication_connection_status;"

$MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL 'clusterset_replication'\G" \
  || $MYSQL -e "SHOW SLAVE STATUS FOR CHANNEL 'clusterset_replication'\G"

export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

# Who is PRIMARY?
$MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';"


# What channels exist / service state?
$MYSQL -e "SELECT CHANNEL_NAME,SERVICE_STATE,LAST_ERROR_NUMBER,LAST_ERROR_MESSAGE FROM performance_schema.replication_connection_status;"

# Full status for the channel
$MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL 'cluster_replication'\G" || $MYSQL -e "SHOW SLAVE STATUS FOR CHANNEL 'cluster_replication'\G"


# Start
$MYSQL -e "START REPLICA FOR CHANNEL 'cluster_replication';" || $MYSQL -e "START SLAVE FOR CHANNEL 'cluster_replication';"

# If it fails or was previously stuck, do a clean rotate and retry:
$MYSQL -e "STOP REPLICA SQL_THREAD FOR CHANNEL 'cluster_replication'; RESET REPLICA FOR CHANNEL 'cluster_replication'; START REPLICA FOR CHANNEL 'cluster_replication';" || $MYSQL -e "STOP SLAVE SQL_THREAD FOR CHANNEL 'cluster_replication'; RESET SLAVE FOR CHANNEL 'cluster_replication'; START SLAVE FOR CHANNEL 'cluster_replication';"


$MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL 'cluster_replication'\G" | egrep \
'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Auto_Position|Last_IO_Error|Last_SQL_Error'


export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

$MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL 'cluster_replication'\G" | egrep \
'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Auto_Position|Retrieved_Gtid_Set|Executed_Gtid_Set|Last_IO_Error|Last_SQL_Error'


export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

# Group Replication: all members ONLINE, one PRIMARY
$MYSQL -e "SELECT MEMBER_HOST,MEMBER_STATE,MEMBER_ROLE FROM performance_schema.replication_group_members;"
$MYSQL -e "SHOW GLOBAL STATUS LIKE 'group_replication_cluster_status';"

# Queues must be 0 on all members (fully in-sync)
$MYSQL -e "SELECT MEMBER_HOST,COUNT_TRANSACTIONS_IN_QUEUE
           FROM performance_schema.replication_group_member_stats;"

# ClusterSet/async channel on the PRIMARY (your channel name is cluster_replication)
$MYSQL -e "SHOW REPLICA STATUS FOR CHANNEL 'cluster_replication'\G" \
  | egrep 'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Auto_Position|Last_IO_Error|Last_SQL_Error'


export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

# Show queues per member (should all be 0)
$MYSQL -e "
SELECT m.MEMBER_HOST,
       m.MEMBER_STATE,
       m.MEMBER_ROLE,
       s.COUNT_TRANSACTIONS_IN_QUEUE
FROM performance_schema.replication_group_member_stats s
JOIN performance_schema.replication_group_members m
  ON s.MEMBER_ID = m.MEMBER_ID
ORDER BY m.MEMBER_ROLE DESC, m.MEMBER_HOST;
"

# Only show members with a non-zero queue (handy to spot stragglers)
$MYSQL -e "
SELECT m.MEMBER_HOST,
       s.COUNT_TRANSACTIONS_IN_QUEUE
FROM performance_schema.replication_group_member_stats s
JOIN performance_schema.replication_group_members m
  ON s.MEMBER_ID = m.MEMBER_ID
WHERE s.COUNT_TRANSACTIONS_IN_QUEUE > 0;
"

export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

# Who is PRIMARY?
PRIMARY="$($MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';")"
echo "PRIMARY = $PRIMARY"

# Channel states on each node (PRIMARY should have cluster_replication=ON; others OFF)
for H in $($MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members;"); do
  echo "== $H =="
  $MYSQL -h "$H" -e "SELECT CHANNEL_NAME,SERVICE_STATE,LAST_ERROR_NUMBER,LAST_ERROR_MESSAGE
                      FROM performance_schema.replication_connection_status;"
done

  
# A) GR queue & role snapshot (all nodes)
$MYSQL -e "
SELECT m.MEMBER_HOST, m.MEMBER_STATE, m.MEMBER_ROLE, s.COUNT_TRANSACTIONS_IN_QUEUE
FROM performance_schema.replication_group_member_stats s
JOIN performance_schema.replication_group_members m ON s.MEMBER_ID=m.MEMBER_ID
ORDER BY m.MEMBER_ROLE DESC, m.MEMBER_HOST;"

# B) GTID equality across nodes (identical hashes = fully in-sync)
for H in $($MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members;"); do
  $MYSQL -h "$H" -N -e "SELECT @@hostname, MD5(@@GLOBAL.gtid_executed);"
done



export DEFAULTS="$HOME/.my.cnf"
MYSQL="mysql --defaults-file=$DEFAULTS"

PRIMARY="$($MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members WHERE MEMBER_ROLE='PRIMARY';")"

echo "PRIMARY=$PRIMARY"

# A) GR health and roles
$MYSQL -e "SELECT MEMBER_HOST,MEMBER_STATE,MEMBER_ROLE FROM performance_schema.replication_group_members;"
$MYSQL -e "SHOW GLOBAL STATUS LIKE 'group_replication_cluster_status';"

# B) Queues (must be all zero)
$MYSQL -e "
SELECT m.MEMBER_HOST, m.MEMBER_ROLE, s.COUNT_TRANSACTIONS_IN_QUEUE
FROM performance_schema.replication_group_member_stats s
JOIN performance_schema.replication_group_members m ON s.MEMBER_ID=m.MEMBER_ID
ORDER BY m.MEMBER_ROLE DESC, m.MEMBER_HOST;"

# C) RO flags (PRIMARY must be OFF/OFF; secondaries ON/ON)
for H in $($MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members;"); do
  echo "== $H =="; $MYSQL -h "$H" -e "SELECT @@read_only ro, @@super_read_only sro\G"
done

# D) Channel states on each node (cluster_replication must be ON only on PRIMARY)
for H in $($MYSQL -N -e "SELECT MEMBER_HOST FROM performance_schema.replication_group_members;"); do
  echo "== $H =="; $MYSQL -h "$H" -e "SELECT CHANNEL_NAME,SERVICE_STATE,LAST_ERROR_NUMBER,LAST_ERROR_MESSAGE FROM performance_schema.replication_connection_status;"
done

# E) ClusterSet channel detailed status (on the PRIMARY only)
$MYSQL -h "$PRIMARY" -e "SHOW REPLICA STATUS FOR CHANNEL 'cluster_replication'\G" | egrep 'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source|Auto_Position|Last_IO_Error|Last_SQL_Error'



# On PRIMARY (should be OFF)
$MYSQL -e "SET GLOBAL super_read_only=OFF; SET GLOBAL read_only=OFF;"

# On a secondary (should be ON)
$MYSQL -e "SET GLOBAL read_only=ON; SET GLOBAL super_read_only=ON;"



---
- name: Simple Cypher write and read demo
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    appliance_url: "https://morpheus.mycompany.com"   # replace with your Morpheus URL
    api_token_secret_path: "secret/vault/token"       # Cypher path to your Bearer token
    cypher_key: "demo/testName"
    appliance_validate_certs: false                   # flip true once certs are good

  tasks:
    - name: Derive appliance host for no_proxy
      ansible.builtin.set_fact:
        _appliance_host: "{{ appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    - name: Read Morpheus API bearer token
      ansible.builtin.set_fact:
        _api_token: "{{ lookup('cypher', 'secret=' ~ api_token_secret_path) | trim }}"

    - name: Write key=value into Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ cypher_key | urlencode }}"
        method: PUT
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Content-Type: "application/json"
          Accept: "application/json"
        body_format: json
        body:
          value: "{{ {'name': 'cody'} | to_json }}"
        status_code: [200, 201, 204]
        validate_certs: "{{ appliance_validate_certs }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"

    - name: Read back the value from Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ _api_token }}"
          Accept: "application/json"
        status_code: [200]
        validate_certs: "{{ appliance_validate_certs }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget

    - name: Show retrieved value
      ansible.builtin.debug:
        msg: "{{ _cyget.json.data.value }}"

OUTSIDE:

---
- name: Simple Cypher write and read demo (outside Morpheus)
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    appliance_url: "https://morpheus.mycompany.com"   # your Morpheus URL
    bearer_token: "PASTE_YOUR_API_BEARER_TOKEN_HERE"
    cypher_key: "demo/testName"
    appliance_validate_certs: false                   # flip true once certs are good

  tasks:
    - name: Derive appliance host for no_proxy
      ansible.builtin.set_fact:
        _appliance_host: "{{ appliance_url | regex_replace('^https?://', '') | regex_replace('/.*', '') }}"
        _no_proxy: "{{ _appliance_host }},{{ _appliance_host }}:443,localhost,127.0.0.1"

    - name: Write key=value into Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ cypher_key | urlencode }}"
        method: PUT
        headers:
          Authorization: "Bearer {{ bearer_token }}"
          Content-Type: "application/json"
          Accept: "application/json"
        body_format: json
        body:
          value: "{{ {'name': 'cody'} | to_json }}"
        status_code: [200, 201, 204]
        validate_certs: "{{ appliance_validate_certs }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"

    - name: Read back the value from Cypher
      ansible.builtin.uri:
        url: "{{ appliance_url }}/api/cypher/secret/{{ cypher_key | urlencode }}"
        method: GET
        headers:
          Authorization: "Bearer {{ bearer_token }}"
          Accept: "application/json"
        status_code: [200]
        validate_certs: "{{ appliance_validate_certs }}"
        use_proxy: no
      environment:
        NO_PROXY: "{{ _no_proxy }}"
        no_proxy: "{{ _no_proxy }}"
      register: _cyget

    - name: Show retrieved value
      ansible.builtin.debug:
        msg: "{{ _cyget.json.data.value }}"

POWERSHELL ON RHEL 9:

1. Update your system

sudo dnf update -y

2. Enable Microsoft’s package repository

Download and install the Microsoft repo config for RHEL:

sudo rpm -Uvh https://packages.microsoft.com/config/rhel/9/packages-microsoft-prod.rpm

This adds the repo so that dnf can fetch Microsoft packages.

3. Install PowerShell

sudo dnf install -y powershell

4. Run PowerShell

After installation, start it with:

pwsh

You’ll drop into the PowerShell prompt (PS />).

✅ Notes

You’ll get the latest stable PowerShell (pwsh) from Microsoft’s repo.

If you need the preview version, there’s also a powershell-preview package in the same repo.

Removal is just:

sudo dnf remove powershell

- name: Set ServiceNow template ID by platform
  ansible.builtin.set_fact:
    # 111... is the Windows template sys_id, 222... the Linux one. Inline "# comment" text inside
    # the Jinja block would leak into the rendered value, so select with a ternary instead.
    sc_template_sys_id: >-
      {{ '11111111111111111111111111111111'
         if (morpheus['instance']['platform'] | lower == 'windows')
         else '22222222222222222222222222222222' }}

- name: Show which template sys_id was chosen
  ansible.builtin.debug:
    msg:
      platform: "{{ morpheus['instance']['platform'] }}"
      sc_template_sys_id: "{{ sc_template_sys_id }}"
mkdir -p ~/morpheus-cache && cd ~/morpheus-cache
cat > Gemfile <<'EOF'
source "https://rubygems.org"
gem "morpheus-cli", "8.0.7"
EOF

# keep gems under ./vendor and also save the .gem files to ./vendor/cache
bundle config set path vendor/bundle
bundle lock --add-platform ruby
bundle install            # resolves and installs to vendor/bundle
bundle package --all      # writes every .gem file to vendor/cache
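
On the offline box, vendor/cache is enough to install without reaching rubygems.org (a sketch; the morpheus executable name assumes the default morpheus-cli gem layout):

# copy ~/morpheus-cache (Gemfile, Gemfile.lock, vendor/cache) to the target, then:
cd ~/morpheus-cache
bundle config set path vendor/bundle
bundle install --local            # installs only from vendor/cache
bundle exec morpheus --version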

Is a replication channel configured

mysqlsh --sql -e "SELECT CHANNEL_NAME, HOST AS MASTER_HOST, PORT AS MASTER_PORT, USER AS MASTER_USER, AUTO_POSITION FROM performance_schema.replication_connection_configuration;"

OR: mysqlsh --sql -e "SELECT * FROM performance_schema.replication_connection_configuration\G"

What is the current state and last error

mysqlsh --sql -e "SHOW REPLICA STATUS\G"

On older versions

mysqlsh --sql -e "SHOW SLAVE STATUS\G"


🔹 1. Understand how Morpheus Approval Policies work

Approval Policies live under Administration → Policies in the Morpheus UI.

You can set a policy on Clouds, Groups, Tenants, or Users.

When someone requests a provision, Morpheus checks the policy before starting orchestration.

By default, Approvals can be manual (e.g. via Morpheus UI), or via ServiceNow plugin / REST task integration.

This is before build kicks off, which solves your timing issue.

🔹 2. Decide how to call ServiceNow

You have two routes:

Use the built-in Morpheus ServiceNow integration (if licensed & enabled)

Morpheus can talk directly to ServiceNow’s Change API.

The Change record is created automatically when a request is made.

The request won’t progress until SNOW approval is marked complete.

Roll your own Ansible/REST Task

You can create a Task in Morpheus (type: Ansible Playbook or REST).

This task will POST to ServiceNow’s Change API.

You wire that into the Approval Policy, so that every provision request first calls your Ansible Playbook.

Your playbook can:

Create the Change in ServiceNow (uri module).

Poll for its status until it’s Approved/Rejected.

Exit with success/failure accordingly.

🔹 3. Playbook to create the SNOW ticket
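
A minimal sketch of that playbook, using the uri module against the Standard Change API; sn_host, the credentials and the template sys_id are placeholders you would feed in from Morpheus or Cypher:

---
- name: Create ServiceNow change for a Morpheus provision request
  hosts: localhost
  gather_facts: false
  connection: local

  vars:
    sn_host: "myinstance.service-now.com"                    # placeholder
    sn_username: "api_user"                                  # placeholder
    sn_password: "secret"                                    # placeholder, pull from Cypher in practice
    sc_template_sys_id: "11111111111111111111111111111111"   # placeholder Standard Change template

  tasks:
    - name: Create Standard Change from template
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/sn_chg_rest/v1/change/standard/{{ sc_template_sys_id }}"
        method: POST
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers:
          Accept: "application/json"
          Content-Type: "application/json"
        body_format: json
        body:
          short_description: "Morpheus provision request"
        validate_certs: true
        return_content: true
        status_code: [200, 201]
      register: _cr_create

    - name: Show the created change record (number and sys_id may come back as value/display_value objects)
      ansible.builtin.debug:
        var: _cr_create.json.result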

🔹 4. Hooking into Morpheus Approval

Create a Task from the playbook above.

Create a Workflow if needed, but for Approval, the key is:

In Administration → Policies → Approvals, select Custom / Task-based Approval.

Assign your Ansible Task there.

Scope the Policy to your Cloud/Group so it runs before any VM build.

Now every provision request will:

Trigger the Approval check.

Run your Ansible Task → Create SNOW Change.

Wait for approval status.

Only then allow the VM build to proceed.

🔹 5. Extra: Polling for Approval

If you want Morpheus to wait until the Change is approved/rejected in ServiceNow:

Extend the playbook to GET the Change record status in a loop until it’s “Approved” or “Rejected”.

Exit with 0 (OK) for approved, or non-zero for rejected → Morpheus will halt build.
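
A sketch of that polling step as two extra tasks in the same playbook; cr_sys_id is assumed to have been captured from the create response, the approval field is read from the change_request table, and the 30 x 60 second retry window is an assumption to tune:

    - name: Wait for the change to be approved or rejected
      ansible.builtin.uri:
        url: "https://{{ sn_host }}/api/now/table/change_request/{{ cr_sys_id }}?sysparm_display_value=true&sysparm_fields=number,approval,state"
        method: GET
        force_basic_auth: true
        url_username: "{{ sn_username }}"
        url_password: "{{ sn_password }}"
        headers: { Accept: "application/json" }
        validate_certs: true
        return_content: true
        status_code: [200]
      register: _cr_poll
      until: (_cr_poll.json.result.approval | lower) in ['approved', 'rejected']
      retries: 30
      delay: 60

    - name: Fail the approval task if the change was rejected
      ansible.builtin.assert:
        that:
          - (_cr_poll.json.result.approval | lower) == 'approved'
        fail_msg: "Change {{ _cr_poll.json.result.number }} was rejected, halting the build"
        success_msg: "Change {{ _cr_poll.json.result.number }} approved, provisioning may proceed"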
  
  #!/usr/bin/env python3
"""
Create a ServiceNow Standard Change from a template, move it to Implementing, then
persist { number, sys_id } in Morpheus Cypher.

Config comes from env vars or CLI flags. Safe defaults are provided where it makes sense.

Tested with Python 3.9 plus requests.

Usage examples, environment driven
  export SN_HOST="myinstance.service-now.com"
  export SN_USERNAME="api_user"
  export SN_PASSWORD="secret"
  export SN_VERIFY_SSL="true"
  export MORPHEUS_URL="https://morpheus.mycompany.com"
  export MORPHEUS_BEARER_TOKEN="xxxx"  (a Morpheus API token)
  export MORPHEUS_INSTANCE_NAME="myvm01"
  export MORPHEUS_INSTANCE_CONTEXT="production"
  export MORPHEUS_SERVER_PLATFORM="rhel"
  python3 snow_change_create_and_store.py

Or with flags
  python3 snow_change_create_and_store.py \
    --sn-host myinstance.service-now.com \
    --sn-username api_user \
    --sn-password secret \
    --morpheus-url https://morpheus.mycompany.com \
    --morpheus-token xxxx \
    --instance-name myvm01 \
    --instance-context production \
    --server-platform rhel
"""

import argparse
import json
import os
import re
import sys
import time
from datetime import datetime, timedelta, timezone

import requests


def env_bool(name, default):
    v = os.getenv(name, None)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}


def now_utc():
    return datetime.now(timezone.utc)


def fmt_ts(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")  # ServiceNow display format


def sanitize_key(s):
    return re.sub(r"[^A-Za-z0-9_.-]", "_", s or "")


def pick_template_sys_id(server_platform, rhel_template, windows_template):
    plat = (server_platform or "").strip().lower()
    return rhel_template if plat == "rhel" else windows_template


def parse_args():
    p = argparse.ArgumentParser(description="Create SNOW Standard Change and persist identifiers to Morpheus Cypher")

    # ServiceNow
    p.add_argument("--sn-host", default=os.getenv("SN_HOST", ""))
    p.add_argument("--sn-username", default=os.getenv("SN_USERNAME", ""))
    p.add_argument("--sn-password", default=os.getenv("SN_PASSWORD", ""))
    p.add_argument("--sn-verify-ssl", default=env_bool("SN_VERIFY_SSL", True), action="store_true")

    # Inputs equivalent to your vars
    p.add_argument("--requested-by", default=os.getenv("SN_REQUESTED_BY", "user1"))
    p.add_argument("--assigned-to", default=os.getenv("SN_ASSIGNED_TO", "user1"))
    p.add_argument("--cmdb-ci", default=os.getenv("SN_CMDB_CI", "fhdkjd7sishs9ss9s9sjksksd"))
    p.add_argument("--u-whost-role", default=os.getenv("SN_U_WH0ST_ROLE", "ds8ssksjfhfh7f6fd5d4s"))
    p.add_argument("--category", default=os.getenv("SN_CATEGORY", "Other"))
    p.add_argument("--u-reason-for-change", default=os.getenv("SN_REASON", "Maintenance"))

    p.add_argument("--start-offset-sec", type=int, default=int(os.getenv("SN_START_OFFSET_SEC", "15")))
    p.add_argument("--duration-sec", type=int, default=int(os.getenv("SN_DURATION_SEC", "10800")))
    p.add_argument("--wait-for-planned-start", default=env_bool("SN_WAIT_FOR_PLANNED_START", True), action="store_true")

    # These two are dynamic from Morpheus context
    p.add_argument("--instance-context", default=os.getenv("MORPHEUS_INSTANCE_CONTEXT", ""))  # used as u_environment
    p.add_argument("--server-platform", default=os.getenv("MORPHEUS_SERVER_PLATFORM", ""))    # used to pick template

    # Template sys ids (you can hardcode your real ones here)
    p.add_argument("--rhel-template-sys-id", default=os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "11111rhel1111111111111111111111111"))
    p.add_argument("--windows-template-sys-id", default=os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "22222222222222222222222222222222"))

    # API version
    p.add_argument("--sc-api-version", default=os.getenv("SN_SC_API_VERSION", "v1"))

    # Morpheus Cypher
    p.add_argument("--morpheus-url", default=os.getenv("MORPHEUS_URL", ""))
    p.add_argument("--morpheus-token", default=os.getenv("MORPHEUS_BEARER_TOKEN", ""))
    p.add_argument("--cypher-prefix", default=os.getenv("MORPHEUS_CYPHER_PREFIX", "servicenow/changerefs"))
    p.add_argument("--instance-name", default=os.getenv("MORPHEUS_INSTANCE_NAME", ""))
    p.add_argument("--appliance-validate-certs", default=env_bool("MORPHEUS_VALIDATE_CERTS", False), action="store_true")

    return p.parse_args()


def main():
    a = parse_args()

    # Required checks
    if not a.sn_host or not a.sn_username or not a.sn_password:
        print("Missing ServiceNow connection settings, set SN_HOST, SN_USERNAME, SN_PASSWORD or use flags", file=sys.stderr)
        sys.exit(2)

    # Resolve environment from Morpheus instanceContext with fallback to production
    u_environment = a.instance_context.strip() or "production"

    # Choose template id based on server platform (rhel means use RHEL template, else Windows)
    sc_template_sys_id = pick_template_sys_id(a.server_platform, a.rhel_template_sys_id, a.windows_template_sys_id)

    # Compute windows (UTC)
    start_dt = now_utc() + timedelta(seconds=a.start_offset_sec)
    end_dt = start_dt + timedelta(seconds=a.duration_sec)
    start_date = fmt_ts(start_dt)
    end_date = fmt_ts(end_dt)
    start_epoch = int(start_dt.timestamp())
    now_epoch = int(now_utc().timestamp())

    print("Computed times,", dict(start_date=start_date, end_date=end_date), flush=True)

    # Build create payload
    sc_url = f"https://{a.sn_host}/api/sn_chg_rest/{a.sc_api_version}/change/standard/{sc_template_sys_id}"
    sc_payload = {
        "requested_by": a.requested_by,
        "assigned_to": a.assigned_to,
        "cmdb_ci": a.cmdb_ci,
        "u_environment": u_environment,
        "u_whost_role": a.u_whost_role,
        "start_date": start_date,
        "end_date": end_date,
        "category": a.category,
        "u_reason_for_change": a.u_reason_for_change,
    }

    sess = requests.Session()
    sess.auth = (a.sn_username, a.sn_password)
    sess.headers.update({"Accept": "application/json", "Content-Type": "application/json"})

    # Create Standard Change
    print("Creating Standard Change from template,", dict(url=sc_url), flush=True)
    r = sess.post(sc_url, data=json.dumps(sc_payload), timeout=120, verify=a.sn_verify_ssl)
    if r.status_code not in (200, 201):
        print(f"Create failed, status {r.status_code}, body {r.text}", file=sys.stderr)
        sys.exit(1)
    sc_out = r.json()

    # Extract identifiers robustly
    def pick(path, *alts):
        v = path
        if v is None and alts:
            for alt in alts:
                if alt is not None:
                    v = alt
                    break
        return v

    res = sc_out.get("result", sc_out)
    sys_id = pick(
        (res.get("sys_id", {}) if isinstance(res.get("sys_id"), dict) else res.get("sys_id")),
    )
    if isinstance(sys_id, dict):
        sys_id = sys_id.get("value")
    number = pick(
        (res.get("number", {}) if isinstance(res.get("number"), dict) else res.get("number")),
    )
    if isinstance(number, dict):
        number = number.get("value")
    cr_sys_id = (sys_id or "").strip().replace(" ", "")
    cr_number = (number or "").strip()

    print("Created change,", dict(number=cr_number, sys_id=cr_sys_id, planned_start=start_date, planned_end=end_date), flush=True)

    # Optional wait for planned start
    if a.wait_for_planned_start:
        wait_seconds = max(start_epoch - now_epoch + 5, 0)
        if wait_seconds > 0:
            print(f"Waiting {wait_seconds} seconds for planned start", flush=True)
            time.sleep(wait_seconds)
        else:
            print("No wait needed, planned start already reached", flush=True)

    # Move to Implementing, numeric first then text
    cr_table_url = f"https://{a.sn_host}/api/now/table/change_request/{cr_sys_id}"

    def patch_state(state_value):
        body = {"state": state_value}
        rr = sess.patch(cr_table_url, data=json.dumps(body), timeout=120, verify=a.sn_verify_ssl)
        return rr

    numeric_try = patch_state(-2)
    numeric_ok = False
    try:
        numeric_ok = numeric_try.status_code == 200 and "implement" in str(
            numeric_try.json().get("result", {}).get("state", "")
        ).lower()
    except Exception:
        numeric_ok = False

    if not numeric_ok:
        text_try = patch_state("Implementing")
        if text_try.status_code != 200:
            print(f"Failed to move to Implementing, status {text_try.status_code}, body {text_try.text}", file=sys.stderr)
            sys.exit(1)

    # Verify implementing by state or stage
    verify_url = f"https://{a.sn_host}/api/now/table/change_request/{cr_sys_id}?sysparm_display_value=true&sysparm_fields=number,state,stage"
    vr = sess.get(verify_url, timeout=60, verify=a.sn_verify_ssl)
    if vr.status_code != 200:
        print(f"Verify fetch failed, status {vr.status_code}, body {vr.text}", file=sys.stderr)
        sys.exit(1)
    vj = vr.json().get("result", {})
    state_obj = vj.get("state")
    if not isinstance(state_obj, dict):
        state_obj = {"value": vj.get("state", ""), "display_value": vj.get("state", "")}
    stage_obj = vj.get("stage")
    if not isinstance(stage_obj, dict):
        stage_obj = {"value": vj.get("stage", ""), "display_value": vj.get("stage", "")}
    state_disp = str(state_obj.get("display_value", "")).lower()
    state_val = str(state_obj.get("value", ""))
    stage_disp = str(stage_obj.get("display_value", "")).lower()

    if not ("implement" in state_disp or "implement" in stage_disp or state_val == "-2"):
        print("Not Implementing yet, increase pause or check workflow", file=sys.stderr)
        sys.exit(1)

    print("Change is Implementing", flush=True)

    # Persist identifiers to Morpheus Cypher, only if Morpheus URL and token are provided
    if not a.morpheus_url or not a.morpheus_token:
        print("Skipping Cypher persistence, missing MORPHEUS_URL or MORPHEUS_BEARER_TOKEN", flush=True)
        return

    instance_name = a.instance_name or "unknown"
    cypher_key = f"{a.cypher_prefix}/{sanitize_key(instance_name)}"
    cy_headers = {
        "Authorization": f"Bearer {a.morpheus_token}",
        "Accept": "application/json",
        "Content-Type": "application/json",
    }

    # Store a flat JSON under value, no json in json
    cy_put_url = f"{a.morpheus_url}/api/cypher/secret/{requests.utils.quote(cypher_key, safe='')}"
    payload = {"value": {"number": cr_number, "sys_id": cr_sys_id}}

    print("Persisting identifiers to Cypher,", dict(key=cypher_key), flush=True)
    cy_put = requests.put(cy_put_url, headers=cy_headers, data=json.dumps(payload), timeout=60, verify=a.appliance_validate_certs)
    if cy_put.status_code not in (200, 201, 204):
        print(f"Cypher PUT failed, status {cy_put.status_code}, body {cy_put.text}", file=sys.stderr)
        sys.exit(1)

    # Verify read back
    cy_get_url = f"{a.morpheus_url}/api/cypher/secret/{requests.utils.quote(cypher_key, safe='')}"
    cy_get = requests.get(cy_get_url, headers=cy_headers, timeout=60, verify=a.appliance_validate_certs)
    if cy_get.status_code != 200:
        print(f"Cypher GET failed, status {cy_get.status_code}, body {cy_get.text}", file=sys.stderr)
        sys.exit(1)
    cy_value = cy_get.json().get("data", {}).get("value", {})
    print("Stored refs,", dict(cypher_key=cypher_key, value=cy_value), flush=True)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("Interrupted", file=sys.stderr)
        sys.exit(130)

STEP 1:
#!/usr/bin/env python3
import os
import json

def getenv(name, default=""):
    return os.getenv(name, default)

vars_dict = {
    "SN_HOST": getenv("SN_HOST"),
    "SN_USERNAME": getenv("SN_USERNAME"),
    "SN_PASSWORD_present": bool(getenv("SN_PASSWORD")),  # do not print secrets
    "SN_VERIFY_SSL": getenv("SN_VERIFY_SSL", "true"),

    "MORPHEUS_URL": getenv("MORPHEUS_URL", ""),
    "MORPHEUS_BEARER_TOKEN_present": bool(getenv("MORPHEUS_BEARER_TOKEN")),  # do not print secrets
    "MORPHEUS_INSTANCE_NAME": getenv("MORPHEUS_INSTANCE_NAME", ""),
    "MORPHEUS_INSTANCE_CONTEXT": getenv("MORPHEUS_INSTANCE_CONTEXT", ""),
    "MORPHEUS_SERVER_PLATFORM": getenv("MORPHEUS_SERVER_PLATFORM", ""),

    "SN_RHEL_TEMPLATE_SYS_ID": getenv("SN_RHEL_TEMPLATE_SYS_ID", ""),
    "SN_WINDOWS_TEMPLATE_SYS_ID": getenv("SN_WINDOWS_TEMPLATE_SYS_ID", ""),
}

print(json.dumps(vars_dict, indent=2))
  
# .env for step 1
export SN_HOST="dev12345.service-now.com"
export SN_USERNAME="api.user"
export SN_PASSWORD="supersecret"
export SN_VERIFY_SSL="true"

export MORPHEUS_URL="https://morpheus.mycompany.com"
export MORPHEUS_BEARER_TOKEN="yourapitoken"
export MORPHEUS_INSTANCE_NAME="myvm01"
export MORPHEUS_INSTANCE_CONTEXT="production"
export MORPHEUS_SERVER_PLATFORM="rhel"

export SN_RHEL_TEMPLATE_SYS_ID="11111rhel1111111111111111111111111"
export SN_WINDOWS_TEMPLATE_SYS_ID="22222222222222222222222222222222"

  
python3 -m venv venv
source venv/bin/activate
pip install requests
source .env
python step1_vars.py

STEP 2:

#!/usr/bin/env python3
import os
import sys
import json
import socket
import requests

SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = os.getenv("SN_VERIFY_SSL", "true").strip().lower() in {"1", "true", "yes", "on"}

def fail(msg):
    print(json.dumps({"ok": False, "error": msg}), file=sys.stderr)
    sys.exit(1)

if not SN_HOST:
    fail("SN_HOST is missing")
if not SN_USERNAME or not SN_PASSWORD:
    fail("SN_USERNAME or SN_PASSWORD is missing")

base_url = f"https://{SN_HOST}"
test_url = f"{base_url}/api/now/table/sys_user?sysparm_limit=1"

# Step 2a, DNS check
try:
    ip = socket.gethostbyname(SN_HOST)
    print(json.dumps({"dns_ok": True, "resolved_ip": ip}))
except Exception as e:
    fail(f"DNS resolution failed, {e}")

# Step 2b, HTTPS GET with basic auth
try:
    r = requests.get(
        test_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json"},
        timeout=20,
        verify=SN_VERIFY_SSL,
    )
    result = {
        "http_ok": r.status_code == 200,
        "status": r.status_code,
    }
    # Show a tiny snippet if JSON
    try:
        j = r.json()
        result["json_keys"] = list(j.keys())
    except Exception:
        result["body_snippet"] = r.text[:200]
    print(json.dumps(result))
    if r.status_code == 401:
        fail("Authentication failed, check SN_USERNAME or SN_PASSWORD")
    if r.status_code >= 400:
        fail(f"HTTP error, status {r.status_code}")
except requests.exceptions.SSLError as e:
    fail(f"TLS verify failed, set SN_VERIFY_SSL=false for a test, error {e}")
except requests.exceptions.ConnectTimeout:
    fail("Connection timeout, check network or firewall")
except Exception as e:
    fail(f"Request failed, {e}")

  
source venv/bin/activate
source .env
python step2_probe_snow.py

STEP 3:

#!/usr/bin/env python3
import os
import json
from datetime import datetime, timedelta, timezone

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

# Inputs
START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)     # seconds from now
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)      # three hours default

# Compute in UTC
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt   = start_dt + timedelta(seconds=DURATION_SEC)

# Formats that match your Ansible playbook
def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

out = {
    "now_utc": fmt(now_utc),
    "start_date": fmt(start_dt),
    "end_date": fmt(end_dt),
    "start_epoch": int(start_dt.timestamp()),
    "now_epoch": int(now_utc.timestamp()),
    "wait_seconds_suggested": max(int(start_dt.timestamp()) - int(now_utc.timestamp()) + 5, 0)
}

print(json.dumps(out, indent=2))


export SN_START_OFFSET_SEC=30
export SN_DURATION_SEC=7200

STEP 4:

#!/usr/bin/env python3
import os
import sys
import json
from datetime import datetime, timedelta, timezone
import requests

def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

def fail(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)

# Required ServiceNow settings
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

# Standard Change template selection
# simplest path, set one template id, for example export SN_TEMPLATE_SYS_ID="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# optional: provide both and pick by platform, for example MORPHEUS_SERVER_PLATFORM=rhel or windows
SN_TEMPLATE_SYS_ID = os.getenv("SN_TEMPLATE_SYS_ID", "").strip()
SN_RHEL_TEMPLATE_SYS_ID = os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "").strip()
SN_WINDOWS_TEMPLATE_SYS_ID = os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "").strip()
SERVER_PLATFORM = os.getenv("MORPHEUS_SERVER_PLATFORM", "").strip().lower()

if not SN_TEMPLATE_SYS_ID:
    if SERVER_PLATFORM == "rhel" and SN_RHEL_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_RHEL_TEMPLATE_SYS_ID
    elif SERVER_PLATFORM == "windows" and SN_WINDOWS_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID

SC_API_VERSION = os.getenv("SN_SC_API_VERSION", "v1").strip()

# Change payload inputs
REQUESTED_BY = os.getenv("SN_REQUESTED_BY", "user1")
ASSIGNED_TO  = os.getenv("SN_ASSIGNED_TO", "user1")
CMDB_CI      = os.getenv("SN_CMDB_CI", "fhdkjd7sishs9ss9s9sjksksd")
U_ENVIRONMENT = os.getenv("MORPHEUS_INSTANCE_CONTEXT", os.getenv("SN_U_ENVIRONMENT", "production"))
U_WH0ST_ROLE = os.getenv("SN_U_WH0ST_ROLE", "ds8ssksjfhfh7f6fd5d4s")
CATEGORY     = os.getenv("SN_CATEGORY", "Other")
U_REASON     = os.getenv("SN_REASON", "Maintenance")

# Window inputs
START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)

# Validate must haves
if not SN_HOST:
    fail("SN_HOST is missing")
if not SN_USERNAME or not SN_PASSWORD:
    fail("SN_USERNAME or SN_PASSWORD is missing")
if not SN_TEMPLATE_SYS_ID:
    fail("No template sys_id, set SN_TEMPLATE_SYS_ID or SN_RHEL_TEMPLATE_SYS_ID or SN_WINDOWS_TEMPLATE_SYS_ID")

# Compute window
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt = start_dt + timedelta(seconds=DURATION_SEC)
start_date = fmt(start_dt)
end_date = fmt(end_dt)

# Build request
base = f"https://{SN_HOST}"
create_url = f"{base}/api/sn_chg_rest/{SC_API_VERSION}/change/standard/{SN_TEMPLATE_SYS_ID}"
payload = {
    "requested_by": REQUESTED_BY,
    "assigned_to": ASSIGNED_TO,
    "cmdb_ci": CMDB_CI,
    "u_environment": U_ENVIRONMENT,
    "u_whost_role": U_WH0ST_ROLE,
    "start_date": start_date,
    "end_date": end_date,
    "category": CATEGORY,
    "u_reason_for_change": U_REASON,
}

try:
    r = requests.post(
        create_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=120,
        verify=SN_VERIFY_SSL,
    )
except requests.exceptions.SSLError as e:
    fail(f"TLS error connecting to ServiceNow, set SN_VERIFY_SSL=false for a quick test, then fix trust, error {e}")
except requests.exceptions.ConnectTimeout:
    fail("Connection timeout to ServiceNow")
except Exception as e:
    fail(f"Request failed, {e}")

if r.status_code not in (200, 201):
    fail("Create Standard Change failed", status=r.status_code, body=r.text)

# Parse identifiers safely
try:
    body = r.json()
except Exception:
    fail("Response was not JSON", status=r.status_code, body=r.text)

res = body.get("result", body)
sys_id = res.get("sys_id")
number = res.get("number")
if isinstance(sys_id, dict):
    sys_id = sys_id.get("value")
if isinstance(number, dict):
    number = number.get("value")

sys_id = (sys_id or "").strip().replace(" ", "")
number = (number or "").strip()

if not sys_id or not number:
    fail("Could not extract sys_id or number from response", status=r.status_code, body=json.dumps(body)[:400])

out = {
    "ok": True,
    "http_status": r.status_code,
    "number": number,
    "sys_id": sys_id,
    "planned_start": start_date,
    "planned_end": end_date,
    "nav_url": f"{base}/nav_to.do?uri=change_request.do?sys_id={sys_id}",
}
print(json.dumps(out, indent=2))


# required
export SN_HOST="dev12345.service-now.com"
export SN_USERNAME="api.user"
export SN_PASSWORD="supersecret"

# pick one approach for the template id
export SN_TEMPLATE_SYS_ID="PUT_YOUR_TEMPLATE_SYS_ID_HERE"
# or use platform based selection
# export MORPHEUS_SERVER_PLATFORM="rhel"
# export SN_RHEL_TEMPLATE_SYS_ID="11111rhel1111111111111111111111111"
# export SN_WINDOWS_TEMPLATE_SYS_ID="22222222222222222222222222222222"

# optional
export SN_VERIFY_SSL="true"
export MORPHEUS_INSTANCE_CONTEXT="production"
export SN_START_OFFSET_SEC=15
export SN_DURATION_SEC=10800
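
Then run the create script (assumed saved as step4_create_change.py); on success it prints the new change reference, for example:

python step4_create_change.py
{
  "ok": true,
  "http_status": 201,
  "number": "CHG0031234",
  "sys_id": "1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d",
  ...
}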


import os
from datetime import datetime, timedelta, timezone
import json

# Step 4 variables setup (replace with your values or env vars)
sn_host = os.environ.get("SN_HOST", "your-instance.service-now.com")
sn_username = os.environ.get("SN_USERNAME", "user")
sn_password = os.environ.get("SN_PASSWORD", "pass")
sn_verify_ssl = os.environ.get("SN_VERIFY_SSL", "true").lower() == "true"

sc_api_version = "v1"
sc_template_sys_id = os.environ.get("SN_TEMPLATE_SYS_ID", "PUT_YOUR_TEMPLATE_SYS_ID_HERE")

requested_by = os.environ.get("SN_REQUESTED_BY", "user1")
assigned_to = os.environ.get("SN_ASSIGNED_TO", "user1")
cmdb_ci = os.environ.get("SN_CMDB_CI", "default_ci_sys_id")
u_environment = os.environ.get("SN_U_ENVIRONMENT", "production")
u_whost_role = os.environ.get("SN_U_WH0ST_ROLE", "default_role_sys_id")
category = os.environ.get("SN_CATEGORY", "Other")
u_reason_for_change = os.environ.get("SN_REASON", "Maintenance")

start_offset_sec = int(os.environ.get("SN_START_OFFSET_SEC", "15"))
duration_sec = int(os.environ.get("SN_DURATION_SEC", "10800"))

# Compute planned start and end times
start_date = (datetime.now(timezone.utc) + timedelta(seconds=start_offset_sec)).strftime("%Y-%m-%d %H:%M:%S")
end_date = (datetime.now(timezone.utc) + timedelta(seconds=start_offset_sec + duration_sec)).strftime("%Y-%m-%d %H:%M:%S")

# Build API URL
sc_url = f"https://{sn_host}/api/sn_chg_rest/{sc_api_version}/change/standard/{sc_template_sys_id}"

# Build payload
sc_payload = {
    "requested_by": requested_by,
    "assigned_to": assigned_to,
    "cmdb_ci": cmdb_ci,
    "u_environment": u_environment,
    "u_whost_role": u_whost_role,
    "start_date": start_date,
    "end_date": end_date,
    "category": category,
    "u_reason_for_change": u_reason_for_change
}

# === Dump all variables for inspection ===
print(json.dumps({
    "sn_host": sn_host,
    "sn_username": sn_username,
    "sn_verify_ssl": sn_verify_ssl,
    "sc_url": sc_url,
    "sc_payload": sc_payload
}, indent=2))

Checks:

import sys, json
from importlib.util import find_spec  # pkgutil.find_loader is deprecated in newer Pythons
out = {
  "executable": sys.executable,
  "version": sys.version,
  "has_requests": find_spec("requests") is not None,
}
print(json.dumps(out, indent=2))

Proxy checks in Morpheus:

echo "whoami:" && whoami
echo "pip path:" && command -v pip || true
echo "python path:" && command -v python || true

echo "proxy env:"
env | grep -i proxy || echo "no proxy env found"

echo "pip config debug:"
python -m pip config debug || pip config debug || true

echo "pip version:"
python -m pip --version || pip --version || true

echo "connectivity checks via proxy:"
python - <<'PY'
import os, sys, socket, ssl
print("HTTPS_PROXY:", os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy"))
try:
    import urllib.request
    opener = urllib.request.build_opener(urllib.request.ProxyHandler())
    with opener.open("https://pypi.org/simple", timeout=20) as r:
        print("urllib ok, status", r.status)
except Exception as e:
    print("urllib error:", e)
PY

echo "pip verbose install:"
python -m pip install -vvv --timeout 30 requests
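
If the proxy env turns out to be empty but one is required, export it before retrying (values are placeholders for your environment):

export HTTPS_PROXY="http://proxy.mycompany.com:8080"
export HTTP_PROXY="http://proxy.mycompany.com:8080"
export NO_PROXY="localhost,127.0.0.1,morpheus.mycompany.com"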

addition to step 4:

# --- Step 5: Move the change to Implementing ---
cr_table_url = f"{base}/api/now/table/change_request/{sys_id}"

# First try numeric state (-2); numeric state codes vary by change model, in the base ServiceNow model Implement is commonly -1, so adjust to match your instance
numeric_body = {"state": -2}
try:
    r2 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(numeric_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Failed to PATCH change_request state, {e}")

numeric_ok = r2.status_code == 200
if numeric_ok:
    try:
        j = r2.json()
        st = j.get("result", {}).get("state", "")
        if isinstance(st, dict):
            st = st.get("value", "")
        if "implement" in str(st).lower() or str(st) == "-2":
            print(json.dumps({"ok": True, "step": "set_state_numeric", "state": st}))
        else:
            numeric_ok = False
    except Exception:
        numeric_ok = False

# Fallback to textual state if numeric didn’t stick
if not numeric_ok:
    text_body = {"state": "Implementing"}
    r3 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(text_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
    try:
        j = r3.json()
    except Exception:
        fail("Could not parse Implementing PATCH response", status=r3.status_code, body=r3.text)
    st = j.get("result", {}).get("state", "")
    if isinstance(st, dict):
        st = st.get("value", "")
    print(json.dumps({"ok": r3.status_code == 200, "step": "set_state_text", "state": st}))
    if r3.status_code != 200:
        fail("Could not move change to Implementing", status=r3.status_code, body=r3.text)

# --- Step 5b: Poll until the change is Implementing ---
import time
verify_url = f"{base}/api/now/table/change_request/{sys_id}?sysparm_display_value=true&sysparm_fields=number,state,stage"

for attempt in range(12):  # up to 60s total
    r = requests.get(
        verify_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json"},
        timeout=30,
        verify=SN_VERIFY_SSL,
    )
    if r.status_code != 200:
        fail("Failed to read back change record", status=r.status_code, body=r.text)

    res = r.json().get("result", {})
    state_val = res.get("state")
    if isinstance(state_val, dict):
        state_disp = state_val.get("display_value", "").lower()
        state_raw = state_val.get("value", "")
    else:
        state_disp = str(state_val).lower()
        state_raw = str(state_val)

    print(json.dumps({"poll": attempt, "state_display": state_disp, "state_value": state_raw}))

    if "implement" in state_disp or state_raw == "-2":
        print(json.dumps({"ok": True, "verified": True, "state": state_disp}))
        break

    time.sleep(5)
else:
    fail("Change never reached Implementing within timeout")

Step 6 - full script with [untested] Cypher posting of the change number and sys_id

#!/usr/bin/env python3
import os
import sys
import json
import time
import requests
from datetime import datetime, timedelta, timezone

# -------------------------
# Utility functions
# -------------------------
def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

def fail(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)

# -------------------------
# Config from environment
# -------------------------
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

# Standard Change template selection
SN_TEMPLATE_SYS_ID = os.getenv("SN_TEMPLATE_SYS_ID", "").strip()
SN_RHEL_TEMPLATE_SYS_ID = os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "").strip()
SN_WINDOWS_TEMPLATE_SYS_ID = os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "").strip()
SERVER_PLATFORM = os.getenv("MORPHEUS_SERVER_PLATFORM", "").strip().lower()

if not SN_TEMPLATE_SYS_ID:
    if SERVER_PLATFORM == "rhel" and SN_RHEL_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_RHEL_TEMPLATE_SYS_ID
    elif SERVER_PLATFORM == "windows" and SN_WINDOWS_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID

SC_API_VERSION = os.getenv("SN_SC_API_VERSION", "v1").strip()

# Change payload inputs
REQUESTED_BY = os.getenv("SN_REQUESTED_BY", "user1")
ASSIGNED_TO  = os.getenv("SN_ASSIGNED_TO", "user1")
CMDB_CI      = os.getenv("SN_CMDB_CI", "fhdkjd7sishs9ss9ssksksd")
U_ENVIRONMENT = os.getenv("MORPHEUS_INSTANCE_CONTEXT", os.getenv("SN_U_ENVIRONMENT", "production"))
U_WH0ST_ROLE = os.getenv("SN_U_WH0ST_ROLE", "ds8ssksjfhfh7f6fd5d4s")
CATEGORY     = os.getenv("SN_CATEGORY", "Other")
U_REASON     = os.getenv("SN_REASON", "Maintenance")

# Window inputs
START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)

# Morpheus appliance config
MORPHEUS_URL = os.getenv("MORPHEUS_URL", "https://morpheus.mycompany.com").rstrip("/")
MORPHEUS_API_TOKEN = os.getenv("MORPHEUS_API_TOKEN", "").strip()
CYPHER_PREFIX = os.getenv("MORPHEUS_CYPHER_PREFIX", "servicenow/changerefs")
INSTANCE_NAME = os.getenv("MORPHEUS_INSTANCE_NAME", "unknown-instance")

# -------------------------
# Validations
# -------------------------
if not SN_HOST:
    fail("SN_HOST is missing")
if not SN_USERNAME or not SN_PASSWORD:
    fail("SN_USERNAME or SN_PASSWORD is missing")
if not SN_TEMPLATE_SYS_ID:
    fail("No template sys_id, set SN_TEMPLATE_SYS_ID or SN_RHEL_TEMPLATE_SYS_ID or SN_WINDOWS_TEMPLATE_SYS_ID")

# -------------------------
# Step 4: Create Standard Change
# -------------------------
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt = start_dt + timedelta(seconds=DURATION_SEC)
start_date = fmt(start_dt)
end_date = fmt(end_dt)

base = f"https://{SN_HOST}"
create_url = f"{base}/api/sn_chg_rest/{SC_API_VERSION}/change/standard/{SN_TEMPLATE_SYS_ID}"
payload = {
    "requested_by": REQUESTED_BY,
    "assigned_to": ASSIGNED_TO,
    "cmdb_ci": CMDB_CI,
    "u_environment": U_ENVIRONMENT,
    "u_whost_role": U_WH0ST_ROLE,
    "start_date": start_date,
    "end_date": end_date,
    "category": CATEGORY,
    "u_reason_for_change": U_REASON,
}

try:
    r = requests.post(
        create_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=120,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Create Standard Change request failed, {e}")

if r.status_code not in (200, 201):
    fail("Create Standard Change failed", status=r.status_code, body=r.text)

try:
    body = r.json()
except Exception:
    fail("Response was not JSON", status=r.status_code, body=r.text)

res = body.get("result", body)
sys_id = res.get("sys_id")
number = res.get("number")
if isinstance(sys_id, dict):
    sys_id = sys_id.get("value")
if isinstance(number, dict):
    number = number.get("value")

sys_id = (sys_id or "").strip().replace(" ", "")
number = (number or "").strip()
if not sys_id or not number:
    fail("Could not extract sys_id or number", status=r.status_code, body=json.dumps(body)[:400])

print(json.dumps({
    "ok": True,
    "step": "create_change",
    "number": number,
    "sys_id": sys_id,
    "planned_start": start_date,
    "planned_end": end_date,
    "nav_url": f"{base}/nav_to.do?uri=change_request.do?sys_id={sys_id}"
}, indent=2))

# -------------------------
# Step 5: Move to Implementing
# -------------------------
cr_table_url = f"{base}/api/now/table/change_request/{sys_id}"

# First try numeric state (-2)
numeric_body = {"state": -2}
try:
    r2 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(numeric_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Failed to PATCH change_request state, {e}")

numeric_ok = r2.status_code == 200
if numeric_ok:
    try:
        j = r2.json()
        st = j.get("result", {}).get("state", "")
        if isinstance(st, dict):
            st = st.get("value", "")
        if "implement" in str(st).lower() or str(st) == "-2":
            print(json.dumps({"ok": True, "step": "set_state_numeric", "state": st}, indent=2))
        else:
            numeric_ok = False
    except Exception:
        numeric_ok = False

# Fallback to textual state if numeric didn’t stick
if not numeric_ok:
    text_body = {"state": "Implementing"}
    r3 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(text_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
    try:
        j = r3.json()
    except Exception:
        fail("Could not parse Implementing PATCH response", status=r3.status_code, body=r3.text)
    st = j.get("result", {}).get("state", "")
    if isinstance(st, dict):
        st = st.get("value", "")
    print(json.dumps({"ok": r3.status_code == 200, "step": "set_state_text", "state": st}, indent=2))
    if r3.status_code != 200:
        fail("Could not move change to Implementing", status=r3.status_code, body=r3.text)

# Optional pause to allow SN to update
time.sleep(10)

# -------------------------
# Step 6: Persist to Morpheus Cypher
# -------------------------
safe_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in INSTANCE_NAME)
cypher_key = f"{CYPHER_PREFIX}/{safe_name}"
cypher_url = f"{MORPHEUS_URL}/api/cypher/secret/{cypher_key}"

if not MORPHEUS_API_TOKEN:
    fail("MORPHEUS_API_TOKEN missing, set it in environment or Cypher")

cypher_payload = {
    "value": {
        "number": number,
        "sys_id": sys_id
    }
}

try:
    r = requests.put(
        cypher_url,
        headers={
            "Authorization": f"Bearer {MORPHEUS_API_TOKEN}",
            "Accept": "application/json",
            "Content-Type": "application/json"
        },
        data=json.dumps(cypher_payload),
        timeout=30,
        verify=False   # set to True when TLS trust is clean
    )
except Exception as e:
    fail(f"Failed to PUT to Morpheus Cypher, {e}")

if r.status_code not in (200, 201, 204):
    fail("Could not persist to Morpheus Cypher", status=r.status_code, body=r.text)

print(json.dumps({
    "ok": True,
    "step": "persist_to_cypher",
    "cypher_key": cypher_key,
    "stored": cypher_payload["value"],
    "status": r.status_code
}, indent=2))
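
To read the stored reference back later, for example from a teardown task, here is a minimal sketch, assuming the same /api/cypher/secret/<key> path accepts GET with the bearer token:

#!/usr/bin/env python3
import os
import json
import requests

MORPHEUS_URL = os.getenv("MORPHEUS_URL", "https://morpheus.mycompany.com").rstrip("/")
MORPHEUS_API_TOKEN = os.getenv("MORPHEUS_API_TOKEN", "")
CYPHER_KEY = os.getenv("CYPHER_KEY", "servicenow/changerefs/web01")  # key written in Step 6, placeholder value

# Bypass any proxy for the internal Morpheus call, mirroring the PUT above
r = requests.get(
    f"{MORPHEUS_URL}/api/cypher/secret/{CYPHER_KEY}",
    headers={"Authorization": f"Bearer {MORPHEUS_API_TOKEN}", "Accept": "application/json"},
    timeout=30,
    verify=False,  # set to True when TLS trust is clean
    proxies={"http": None, "https": None},
)
print(json.dumps({"status": r.status_code, "body_snippet": r.text[:400]}, indent=2))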

Force Proxy:

#!/usr/bin/env python3
import os
import sys
import json
import time
import requests
from urllib.parse import urlparse
from datetime import datetime, timedelta, timezone

# -------------------------
# Utility functions
# -------------------------
def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

def fail(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)

# -------------------------
# Config from environment
# -------------------------
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

# Optional, force proxy for ServiceNow
SN_PROXY_URL = os.getenv("SN_PROXY_URL", "").strip()

# Standard Change template selection
SN_TEMPLATE_SYS_ID = os.getenv("SN_TEMPLATE_SYS_ID", "").strip()
SN_RHEL_TEMPLATE_SYS_ID = os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "").strip()
SN_WINDOWS_TEMPLATE_SYS_ID = os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "").strip()
SERVER_PLATFORM = os.getenv("MORPHEUS_SERVER_PLATFORM", "").strip().lower()

if not SN_TEMPLATE_SYS_ID:
    if SERVER_PLATFORM == "rhel" and SN_RHEL_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_RHEL_TEMPLATE_SYS_ID
    elif SERVER_PLATFORM == "windows" and SN_WINDOWS_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID

SC_API_VERSION = os.getenv("SN_SC_API_VERSION", "v1").strip()

# Change payload inputs
REQUESTED_BY = os.getenv("SN_REQUESTED_BY", "user1")
ASSIGNED_TO  = os.getenv("SN_ASSIGNED_TO", "user1")
CMDB_CI      = os.getenv("SN_CMDB_CI", "fhdkjd7sishs9ss9ssksksd")
U_ENVIRONMENT = os.getenv("MORPHEUS_INSTANCE_CONTEXT", os.getenv("SN_U_ENVIRONMENT", "production"))
U_WH0ST_ROLE = os.getenv("SN_U_WH0ST_ROLE", "ds8ssksjfhfh7f6fd5d4s")
CATEGORY     = os.getenv("SN_CATEGORY", "Other")
U_REASON     = os.getenv("SN_REASON", "Maintenance")

# Window inputs
START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)

# Morpheus appliance config
MORPHEUS_URL = os.getenv("MORPHEUS_URL", "https://morpheus.mycompany.com").rstrip("/")
MORPHEUS_API_TOKEN = os.getenv("MORPHEUS_API_TOKEN", "").strip()
CYPHER_PREFIX = os.getenv("MORPHEUS_CYPHER_PREFIX", "servicenow/changerefs")
INSTANCE_NAME = os.getenv("MORPHEUS_INSTANCE_NAME", "unknown-instance")

# -------------------------
# Validations
# -------------------------
if not SN_HOST:
    fail("SN_HOST is missing")
if not SN_USERNAME or not SN_PASSWORD:
    fail("SN_USERNAME or SN_PASSWORD is missing")
if not SN_TEMPLATE_SYS_ID:
    fail("No template sys_id, set SN_TEMPLATE_SYS_ID or SN_RHEL_TEMPLATE_SYS_ID or SN_WINDOWS_TEMPLATE_SYS_ID")

# -------------------------
# Sessions
# -------------------------
# ServiceNow session, force proxy if provided, ignore environment proxies
sn = requests.Session()
sn.trust_env = False
if SN_PROXY_URL:
    sn.proxies = {"http": SN_PROXY_URL, "https": SN_PROXY_URL}

# Morpheus session, always bypass proxy
morph = requests.Session()
morph.trust_env = False
morph.proxies = {"http": None, "https": None}

# Optional, add Morpheus host to NO_PROXY for belt and braces
if MORPHEUS_URL:
    host = urlparse(MORPHEUS_URL).hostname or ""
    if host:
        existing = os.getenv("NO_PROXY", "")
        extra = ",".join([host, f"{host}:443", "localhost", "127.0.0.1"])
        os.environ["NO_PROXY"] = ",".join([p for p in [existing, extra] if p])
        os.environ["no_proxy"] = os.environ["NO_PROXY"]

# -------------------------
# Step 4, Create Standard Change
# -------------------------
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt = start_dt + timedelta(seconds=DURATION_SEC)
start_date = fmt(start_dt)
end_date = fmt(end_dt)

base = f"https://{SN_HOST}"
create_url = f"{base}/api/sn_chg_rest/{SC_API_VERSION}/change/standard/{SN_TEMPLATE_SYS_ID}"
payload = {
    "requested_by": REQUESTED_BY,
    "assigned_to": ASSIGNED_TO,
    "cmdb_ci": CMDB_CI,
    "u_environment": U_ENVIRONMENT,
    "u_whost_role": U_WH0ST_ROLE,
    "start_date": start_date,
    "end_date": end_date,
    "category": CATEGORY,
    "u_reason_for_change": U_REASON,
}

try:
    r = sn.post(
        create_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=120,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Create Standard Change request failed, {e}")

if r.status_code not in (200, 201):
    fail("Create Standard Change failed", status=r.status_code, body=r.text)

try:
    body = r.json()
except Exception:
    fail("Response was not JSON", status=r.status_code, body=r.text)

res = body.get("result", body)
sys_id = res.get("sys_id")
number = res.get("number")
if isinstance(sys_id, dict):
    sys_id = sys_id.get("value")
if isinstance(number, dict):
    number = number.get("value")

sys_id = (sys_id or "").strip().replace(" ", "")
number = (number or "").strip()
if not sys_id or not number:
    fail("Could not extract sys_id or number", status=r.status_code, body=json.dumps(body)[:400])

print(json.dumps({
    "ok": True,
    "step": "create_change",
    "number": number,
    "sys_id": sys_id,
    "planned_start": start_date,
    "planned_end": end_date,
    "nav_url": f"{base}/nav_to.do?uri=change_request.do?sys_id={sys_id}"
}, indent=2))

# -------------------------
# Step 5, Move to Implementing
# -------------------------
cr_table_url = f"{base}/api/now/table/change_request/{sys_id}"

# First try numeric state, minus 2
numeric_body = {"state": -2}
try:
    r2 = sn.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(numeric_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Failed to PATCH change_request state, {e}")

numeric_ok = r2.status_code == 200
if numeric_ok:
    try:
        j = r2.json()
        st = j.get("result", {}).get("state", "")
        if isinstance(st, dict):
            st = st.get("value", "")
        if "implement" in str(st).lower() or str(st) == "-2":
            print(json.dumps({"ok": True, "step": "set_state_numeric", "state": st}, indent=2))
        else:
            numeric_ok = False
    except Exception:
        numeric_ok = False

# Fallback to textual state if numeric did not stick
if not numeric_ok:
    text_body = {"state": "Implementing"}
    r3 = sn.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(text_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
    try:
        j = r3.json()
    except Exception:
        fail("Could not parse Implementing PATCH response", status=r3.status_code, body=r3.text)
    st = j.get("result", {}).get("state", "")
    if isinstance(st, dict):
        st = st.get("value", "")
    print(json.dumps({"ok": r3.status_code == 200, "step": "set_state_text", "state": st}, indent=2))
    if r3.status_code != 200:
        fail("Could not move change to Implementing", status=r3.status_code, body=r3.text)

# Optional pause to allow SN to update
time.sleep(10)

# -------------------------
# Step 6, Persist to Morpheus Cypher
# -------------------------

# Build Cypher key that definitely includes the instance/server name
safe_instance_name = (INSTANCE_NAME or "unknown").strip()
safe_instance_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in safe_instance_name)

# Construct the full key path explicitly
cypher_key = f"{CYPHER_PREFIX}/{safe_instance_name}"

print(json.dumps({
    "debug": True,
    "computed_cypher_key": cypher_key
}, indent=2))

# Example final key: servicenow/changerefs/web01
cypher_url = f"{MORPHEUS_URL}/api/cypher/secret/{cypher_key}"

if not MORPHEUS_API_TOKEN:
    fail("MORPHEUS_API_TOKEN missing, set it in environment or Cypher")

# Only store number and sys_id
cypher_payload = {
    "value": {
        "number": number,
        "sys_id": sys_id
    }
}

# Disable proxy for Morpheus internal connections
morph_session = requests.Session()
morph_session.trust_env = False  # ignore system proxy

try:
    r = morph_session.put(
        cypher_url,
        headers={
            "Authorization": f"Bearer {MORPHEUS_API_TOKEN}",
            "Accept": "application/json",
            "Content-Type": "application/json"
        },
        data=json.dumps(cypher_payload),
        timeout=30,
        verify=False   # set True after certs are valid
    )
except Exception as e:
    fail(f"Failed to PUT to Morpheus Cypher, {e}")

if r.status_code not in (200, 201, 204):
    fail("Could not persist to Morpheus Cypher", status=r.status_code, body=r.text)

print(json.dumps({
    "ok": True,
    "step": "persist_to_cypher",
    "cypher_key": cypher_key,
    "stored": cypher_payload["value"],
    "status": r.status_code
}, indent=2))
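
To force the ServiceNow traffic through a specific proxy, set SN_PROXY_URL before running this version (value is a placeholder):

export SN_PROXY_URL="http://proxy.mycompany.com:8080"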


Final version?!?!

#!/usr/bin/env python3
import os
import sys
import json
import time
import requests
from datetime import datetime, timedelta, timezone

# -------------------------
# Utility functions
# -------------------------
def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

def fail(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)

# -------------------------
# Config from environment
# -------------------------
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

# Standard Change template selection
SN_TEMPLATE_SYS_ID = os.getenv("SN_TEMPLATE_SYS_ID", "").strip()
SN_RHEL_TEMPLATE_SYS_ID = os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "").strip()
SN_WINDOWS_TEMPLATE_SYS_ID = os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "").strip()
SERVER_PLATFORM = os.getenv("MORPHEUS_SERVER_PLATFORM", "").strip().lower()

if not SN_TEMPLATE_SYS_ID:
    if SERVER_PLATFORM == "rhel" and SN_RHEL_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_RHEL_TEMPLATE_SYS_ID
    elif SERVER_PLATFORM == "windows" and SN_WINDOWS_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID

SC_API_VERSION = os.getenv("SN_SC_API_VERSION", "v1").strip()

# Change payload inputs
REQUESTED_BY = os.getenv("SN_REQUESTED_BY", "user1")
ASSIGNED_TO  = os.getenv("SN_ASSIGNED_TO", "user1")
CMDB_CI      = os.getenv("SN_CMDB_CI", "fhdkjd7sishs9ss9ssksksd")
U_ENVIRONMENT = os.getenv("MORPHEUS_INSTANCE_CONTEXT", os.getenv("SN_U_ENVIRONMENT", "production"))
U_WH0ST_ROLE = os.getenv("SN_U_WH0ST_ROLE", "ds8ssksjfhfh7f6fd5d4s")
CATEGORY     = os.getenv("SN_CATEGORY", "Other")
U_REASON     = os.getenv("SN_REASON", "Maintenance")

# Window inputs
START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)

# Morpheus appliance config
MORPHEUS_URL = os.getenv("MORPHEUS_URL", "https://morpheus.mycompany.com").rstrip("/")
MORPHEUS_API_TOKEN = os.getenv("MORPHEUS_API_TOKEN", "").strip()
CYPHER_PREFIX = os.getenv("MORPHEUS_CYPHER_PREFIX", "servicenow/changerefs")
INSTANCE_NAME = os.getenv("MORPHEUS_INSTANCE_NAME", "unknown-instance")

# -------------------------
# Validations
# -------------------------
if not SN_HOST:
    fail("SN_HOST is missing")
if not SN_USERNAME or not SN_PASSWORD:
    fail("SN_USERNAME or SN_PASSWORD is missing")
if not SN_TEMPLATE_SYS_ID:
    fail("No template sys_id, set SN_TEMPLATE_SYS_ID or SN_RHEL_TEMPLATE_SYS_ID or SN_WINDOWS_TEMPLATE_SYS_ID")

# -------------------------
# Step 4, Create Standard Change
# -------------------------
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt = start_dt + timedelta(seconds=DURATION_SEC)
start_date = fmt(start_dt)
end_date = fmt(end_dt)

base = f"https://{SN_HOST}"
create_url = f"{base}/api/sn_chg_rest/{SC_API_VERSION}/change/standard/{SN_TEMPLATE_SYS_ID}"
payload = {
    "requested_by": REQUESTED_BY,
    "assigned_to": ASSIGNED_TO,
    "cmdb_ci": CMDB_CI,
    "u_environment": U_ENVIRONMENT,
    "u_whost_role": U_WH0ST_ROLE,
    "start_date": start_date,
    "end_date": end_date,
    "category": CATEGORY,
    "u_reason_for_change": U_REASON,
}

try:
    r = requests.post(
        create_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=120,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Create Standard Change request failed, {e}")

if r.status_code not in (200, 201):
    fail("Create Standard Change failed", status=r.status_code, body=r.text)

try:
    body = r.json()
except Exception:
    fail("Response was not JSON", status=r.status_code, body=r.text)

res = body.get("result", body)
sys_id = res.get("sys_id")
number = res.get("number")
if isinstance(sys_id, dict):
    sys_id = sys_id.get("value")
if isinstance(number, dict):
    number = number.get("value")

sys_id = (sys_id or "").strip().replace(" ", "")
number = (number or "").strip()
if not sys_id or not number:
    fail("Could not extract sys_id or number", status=r.status_code, body=json.dumps(body)[:400])

print(json.dumps({
    "ok": True,
    "step": "create_change",
    "number": number,
    "sys_id": sys_id,
    "planned_start": start_date,
    "planned_end": end_date,
    "nav_url": f"{base}/nav_to.do?uri=change_request.do?sys_id={sys_id}"
}, indent=2))

# -------------------------
# Step 5, Move to Implementing
# -------------------------
cr_table_url = f"{base}/api/now/table/change_request/{sys_id}"

# First try numeric state, minus 2
numeric_body = {"state": -2}
try:
    r2 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(numeric_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Failed to PATCH change_request state, {e}")

numeric_ok = r2.status_code == 200
if numeric_ok:
    try:
        j = r2.json()
        st = j.get("result", {}).get("state", "")
        if isinstance(st, dict):
            st = st.get("value", "")
        if "implement" in str(st).lower() or str(st) == "-2":
            print(json.dumps({"ok": True, "step": "set_state_numeric", "state": st}, indent=2))
        else:
            numeric_ok = False
    except Exception:
        numeric_ok = False

# Fallback to textual state if numeric did not stick
if not numeric_ok:
    text_body = {"state": "Implementing"}
    r3 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(text_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
    try:
        j = r3.json()
    except Exception:
        fail("Could not parse Implementing PATCH response", status=r3.status_code, body=r3.text)
    st = j.get("result", {}).get("state", "")
    if isinstance(st, dict):
        st = st.get("value", "")
    print(json.dumps({"ok": r3.status_code == 200, "step": "set_state_text", "state": st}, indent=2))
    if r3.status_code != 200:
        fail("Could not move change to Implementing", status=r3.status_code, body=r3.text)

# Optional pause to allow SN to update
time.sleep(10)

# -------------------------
# Step 6, Persist to Morpheus Cypher, no proxy
# -------------------------
# Build a unique key: servicenow/changerefs/<servername>
safe_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in INSTANCE_NAME)
cypher_key = f"{CYPHER_PREFIX}/{safe_name}"

# API path is /api/cypher/secret/<key>, do not prefix secret into cypher_key
cypher_url = f"{MORPHEUS_URL}/api/cypher/secret/{cypher_key}"

if not MORPHEUS_API_TOKEN:
    fail("MORPHEUS_API_TOKEN missing, set it in environment or Cypher")

# Store only number and sys_id
cypher_payload = {
    "value": {
        "number": number,
        "sys_id": sys_id
    }
}

# Ensure these internal calls bypass any proxy
NO_PROXY_KW = {"proxies": {"http": None, "https": None}}

try:
    r = requests.put(
        cypher_url,
        headers={
            "Authorization": f"Bearer {MORPHEUS_API_TOKEN}",
            "Accept": "application/json",
            "Content-Type": "application/json"
        },
        data=json.dumps(cypher_payload),
        timeout=30,
        verify=False,  # set to True when TLS trust is clean
        **NO_PROXY_KW
    )
except Exception as e:
    fail(f"Failed to PUT to Morpheus Cypher, {e}")

if r.status_code not in (200, 201, 204):
    fail("Could not persist to Morpheus Cypher", status=r.status_code, body=r.text)

print(json.dumps({
    "ok": True,
    "step": "persist_to_cypher",
    "cypher_key": f"secret/{cypher_key}",  # how it appears in the UI
    "stored": cypher_payload["value"],
    "status": r.status_code
}, indent=2))
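
This version still reads its ServiceNow and Morpheus settings from the environment, so something like the following needs to be in place before it runs (values are placeholders):

export SN_HOST="dev12345.service-now.com"
export SN_USERNAME="api.user"
export SN_PASSWORD="supersecret"
export SN_TEMPLATE_SYS_ID="PUT_YOUR_TEMPLATE_SYS_ID_HERE"
export MORPHEUS_URL="https://morpheus.mycompany.com"
export MORPHEUS_API_TOKEN="YOUR_MORPHEUS_API_TOKEN"
export MORPHEUS_INSTANCE_NAME="web01"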

Variables to change:

MORPHEUS_URL = get_morpheus_value("applianceUrl", os.getenv("MORPHEUS_URL", "https://morpheus.mycompany.com")).rstrip("/")
MORPHEUS_API_TOKEN = get_morpheus_value("apiAccessToken", os.getenv("MORPHEUS_API_TOKEN", "")).strip()
SERVER_PLATFORM = get_morpheus_value("instance.platform", os.getenv("MORPHEUS_SERVER_PLATFORM", "")).strip().lower()
U_ENVIRONMENT = get_morpheus_value("instance.context", os.getenv("MORPHEUS_INSTANCE_CONTEXT", os.getenv("SN_U_ENVIRONMENT", "production")))
INSTANCE_NAME = get_morpheus_value("instance.name", os.getenv("MORPHEUS_INSTANCE_NAME", "unknown-instance"))
# -------------------------
# Morpheus context loader
# -------------------------
MORPHEUS_CTX = {}
try:
    MORPHEUS_CTX = json.loads(sys.stdin.read())
except Exception:
    MORPHEUS_CTX = {}

def get_morpheus_value(path, default=""):
    """Fetch nested values from Morpheus context JSON (e.g., instance.name)"""
    try:
        val = MORPHEUS_CTX
        for p in path.split("."):
            if isinstance(val, dict):
                val = val.get(p)
            else:
                return default
        return val if val is not None else default
    except Exception:
        return default
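
The loader expects the Morpheus context JSON on stdin, so for a local test the payload can simply be redirected in (filenames are placeholders):

python3 your_task_script.py < morpheus_ctx.json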

morpheus_ctx_probe.py


#!/usr/bin/env python3
import sys, json, socket, os
from datetime import datetime

def safe(s, default=""):
    return s if isinstance(s, str) else default

def g(ctx, path, default=None):
    cur = ctx
    for p in path.split("."):
        if isinstance(cur, dict) and p in cur:
            cur = cur[p]
        else:
            return default
    return cur

def redact(tok, keep=6):
    if not isinstance(tok, str) or len(tok) <= keep:
        return tok
    return tok[:keep] + "...redacted..."

def main():
    raw = sys.stdin.read()
    ctx = {}
    try:
        ctx = json.loads(raw) if raw.strip() else {}
    except Exception as e:
        print(json.dumps({"ok": False, "error": f"could not parse Morpheus input, {e}"}))
        sys.exit(1)

    inst = g(ctx, "instance", {}) or {}
    srv  = g(ctx, "server", {}) or {}
    job  = g(ctx, "job", {}) or {}
    user = g(ctx, "user", {}) or {}

    # common fields
    instance_name   = safe(inst.get("name"), safe(inst.get("hostname"), ""))
    instance_id     = inst.get("id")
    instance_ctx    = safe(inst.get("context"), "")
    instance_type   = safe(g(inst, "instanceType.code", ""), "")
    platform        = safe(inst.get("platform"), safe(srv.get("platform"), ""))  # often rhel or windows
    cloud_name      = safe(g(inst, "cloud.name", ""), "")
    plan_name       = safe(g(inst, "plan.name", ""), "")
    networks        = g(inst, "interfaces", []) or []
    # the dotted getter cannot index into lists, so pull the first interface IP explicitly
    first_if        = networks[0] if networks and isinstance(networks[0], dict) else {}
    primary_ip      = safe(first_if.get("ipAddress"), safe(srv.get("ipAddress"), ""))
    tags            = g(inst, "tags", []) or []

    # appliance info
    appliance_url   = safe(ctx.get("applianceUrl"), os.getenv("MORPHEUS_URL", ""))
    api_token       = ctx.get("apiAccessToken") or os.getenv("MORPHEUS_API_TOKEN", "")
    api_token_red   = redact(api_token)

    # build a handy cypher key suggestion
    safe_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in (instance_name or socket.gethostname()))
    cypher_key_suggestion = f"servicenow/changerefs/{safe_name}"

    out = {
        "ok": True,
        "timestamp": datetime.utcnow().isoformat(timespec="seconds") + "Z",
        "morpheus_ctx_present": bool(ctx),
        "job": {
            "id": job.get("id"),
            "name": safe(job.get("name"), "")
        },
        "user": {
            "id": user.get("id"),
            "username": safe(user.get("username"), "")
        },
        "instance": {
            "id": instance_id,
            "name": instance_name or "missing",
            "hostname": safe(inst.get("hostname"), ""),
            "context": instance_ctx,
            "platform": platform.lower() if platform else "",
            "type_code": instance_type,
            "cloud_name": cloud_name,
            "plan_name": plan_name,
            "primary_ip": primary_ip,
            "tags": tags,
            "network_count": len(networks)
        },
        "server": {
            "id": srv.get("id"),
            "name": safe(srv.get("name"), ""),
            "platform": safe(srv.get("platform"), ""),
            "ip": safe(srv.get("ipAddress"), "")
        },
        "appliance": {
            "url": appliance_url,
            "api_token_present": bool(api_token),
            "api_token_preview": api_token_red
        },
        "helpers": {
            "suggested_cypher_key": cypher_key_suggestion
        }
    }

    print(json.dumps(out, indent=2))

if __name__ == "__main__":
    main()

Local run:

echo '{"instance":{"id":1,"name":"web01","context":"production","platform":"rhel","hostname":"web01","instanceType":{"code":"rhel"},"cloud":{"name":"Prod Cloud"},"plan":{"name":"Small"}},"applianceUrl":"https://morpheus.example.com","apiAccessToken":"abcdef1234567890"}'
| python3 morpheus_ctx_probe.py

final??

#!/usr/bin/env python3
import os
import sys
import json
import time
import requests
from datetime import datetime, timedelta, timezone

# -------------------------
# Utility functions
# -------------------------
def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

def fail(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)


# -------------------------
# Pull from Morpheus-provided variables
# -------------------------
# The 'morpheus' variable is automatically injected by Morpheus

INSTANCE_NAME = morpheus['instance']['name']
SERVER_PLATFORM = morpheus['instance']['platform'].lower()
MORPHEUS_URL = morpheus['applianceUrl'].rstrip("/")
MORPHEUS_API_TOKEN = morpheus['apiAccessToken']
CYPHER_PREFIX = "servicenow/changerefs"

# -------------------------
# ServiceNow Config (from environment or static values)
# -------------------------
SN_HOST = os.getenv("SN_HOST", "").strip()
SN_USERNAME = os.getenv("SN_USERNAME", "").strip()
SN_PASSWORD = os.getenv("SN_PASSWORD", "").strip()
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

SN_TEMPLATE_SYS_ID = os.getenv("SN_TEMPLATE_SYS_ID", "").strip()
SN_RHEL_TEMPLATE_SYS_ID = os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "").strip()
SN_WINDOWS_TEMPLATE_SYS_ID = os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "").strip()

if not SN_TEMPLATE_SYS_ID:
    if SERVER_PLATFORM == "rhel" and SN_RHEL_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_RHEL_TEMPLATE_SYS_ID
    elif SERVER_PLATFORM == "windows" and SN_WINDOWS_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID

SC_API_VERSION = os.getenv("SN_SC_API_VERSION", "v1").strip()

REQUESTED_BY = os.getenv("SN_REQUESTED_BY", "user1")
ASSIGNED_TO  = os.getenv("SN_ASSIGNED_TO", "user1")
CMDB_CI      = os.getenv("SN_CMDB_CI", "default_ci_value")
U_ENVIRONMENT = morpheus['instance']['context'] or os.getenv("SN_U_ENVIRONMENT", "production")
U_WH0ST_ROLE = os.getenv("SN_U_WH0ST_ROLE", "default_role")
CATEGORY     = os.getenv("SN_CATEGORY", "Other")
U_REASON     = os.getenv("SN_REASON", "Maintenance")

START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)

# -------------------------
# Validations
# -------------------------
if not SN_HOST:
    fail("SN_HOST is missing")
if not SN_USERNAME or not SN_PASSWORD:
    fail("SN_USERNAME or SN_PASSWORD is missing")
if not SN_TEMPLATE_SYS_ID:
    fail("No template sys_id set for platform")

# -------------------------
# Step 4: Create Standard Change
# -------------------------
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt = start_dt + timedelta(seconds=DURATION_SEC)
start_date = fmt(start_dt)
end_date = fmt(end_dt)

base = f"https://{SN_HOST}"
create_url = f"{base}/api/sn_chg_rest/{SC_API_VERSION}/change/standard/{SN_TEMPLATE_SYS_ID}"
payload = {
    "requested_by": REQUESTED_BY,
    "assigned_to": ASSIGNED_TO,
    "cmdb_ci": CMDB_CI,
    "u_environment": U_ENVIRONMENT,
    "u_whost_role": U_WH0ST_ROLE,
    "start_date": start_date,
    "end_date": end_date,
    "category": CATEGORY,
    "u_reason_for_change": U_REASON,
}

try:
    r = requests.post(
        create_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=120,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Create Standard Change request failed, {e}")

if r.status_code not in (200, 201):
    fail("Create Standard Change failed", status=r.status_code, body=r.text)

try:
    body = r.json()
except Exception:
    fail("Response was not JSON", status=r.status_code, body=r.text)

res = body.get("result", body)
sys_id = res.get("sys_id")
number = res.get("number")
if isinstance(sys_id, dict):
    sys_id = sys_id.get("value")
if isinstance(number, dict):
    number = number.get("value")

sys_id = (sys_id or "").strip()
number = (number or "").strip()
if not sys_id or not number:
    fail("Could not extract sys_id or number", status=r.status_code, body=json.dumps(body)[:400])

print(json.dumps({
    "ok": True,
    "step": "create_change",
    "number": number,
    "sys_id": sys_id,
    "planned_start": start_date,
    "planned_end": end_date,
    "nav_url": f"{base}/nav_to.do?uri=change_request.do?sys_id={sys_id}"
}, indent=2))


# -------------------------
# Step 5: Move to Implementing
# -------------------------
cr_table_url = f"{base}/api/now/table/change_request/{sys_id}"

# First try numeric state (-2)
numeric_body = {"state": -2}
try:
    r2 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(numeric_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
except Exception as e:
    fail(f"Failed to PATCH change_request state, {e}")

numeric_ok = r2.status_code == 200
if numeric_ok:
    try:
        j = r2.json()
        st = j.get("result", {}).get("state", "")
        if isinstance(st, dict):
            st = st.get("value", "")
        if "implement" in str(st).lower() or str(st) == "-2":
            print(json.dumps({"ok": True, "step": "set_state_numeric", "state": st}, indent=2))
        else:
            numeric_ok = False
    except Exception:
        numeric_ok = False

if not numeric_ok:
    text_body = {"state": "Implementing"}
    r3 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(text_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
    )
    try:
        j = r3.json()
    except Exception:
        fail("Could not parse Implementing PATCH response", status=r3.status_code, body=r3.text)
    st = j.get("result", {}).get("state", "")
    if isinstance(st, dict):
        st = st.get("value", "")
    print(json.dumps({"ok": r3.status_code == 200, "step": "set_state_text", "state": st}, indent=2))
    if r3.status_code != 200:
        fail("Could not move change to Implementing", status=r3.status_code, body=r3.text)

time.sleep(10)

# -------------------------
# Step 6: Persist to Morpheus Cypher (no proxy)
# -------------------------
safe_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in INSTANCE_NAME)
cypher_key = f"secret/{CYPHER_PREFIX}/{safe_name}"
cypher_url = f"{MORPHEUS_URL}/api/cypher/secret/{CYPHER_PREFIX}/{safe_name}"

cypher_payload = {
    "value": {
        "number": number,
        "sys_id": sys_id
    }
}

NO_PROXY_KW = {"proxies": {"http": None, "https": None}}

try:
    r = requests.put(
        cypher_url,
        headers={
            "Authorization": f"Bearer {MORPHEUS_API_TOKEN}",
            "Accept": "application/json",
            "Content-Type": "application/json"
        },
        data=json.dumps(cypher_payload),
        timeout=30,
        verify=False,
        **NO_PROXY_KW
    )
except Exception as e:
    fail(f"Failed to PUT to Morpheus Cypher, {e}")

if r.status_code not in (200, 201, 204):
    fail("Could not persist to Morpheus Cypher", status=r.status_code, body=r.text)

print(json.dumps({
    "ok": True,
    "step": "persist_to_cypher",
    "cypher_key": cypher_key,
    "stored": cypher_payload["value"],
    "status": r.status_code
}, indent=2))

morpheus variables:

#!/usr/bin/env python3
import json

# Morpheus automatically injects a dict named "morpheus" into Python tasks.
# This script displays just the relevant variables you might want to use in your integrations.

def safe_get(d, *keys, default=""):
    """Walk nested keys safely."""
    cur = d
    for k in keys:
        if not isinstance(cur, dict) or k not in cur:
            return default
        cur = cur[k]
    return cur

try:
    data = {
        "instance": {
            "name": safe_get(morpheus, "instance", "name"),
            "hostname": safe_get(morpheus, "instance", "hostname"),
            "platform": safe_get(morpheus, "instance", "platform"),
            "context": safe_get(morpheus, "instance", "context"),
            "instanceType_code": safe_get(morpheus, "instance", "instanceType", "code"),
            "instanceType_name": safe_get(morpheus, "instance", "instanceType", "name"),
        },
        "server": {
            "id": safe_get(morpheus, "server", "id"),
            "name": safe_get(morpheus, "server", "name"),
            "platform": safe_get(morpheus, "server", "platform"),
        },
        "appliance": {
            "url": safe_get(morpheus, "applianceUrl"),
            "api_token_present": bool(safe_get(morpheus, "apiAccessToken")),
        },
        "user": {
            "username": safe_get(morpheus, "user", "username"),
            "email": safe_get(morpheus, "user", "email"),
        },
    }

    print(json.dumps({
        "ok": True,
        "morpheus_variables": data
    }, indent=2))

except Exception as e:
    print(json.dumps({
        "ok": False,
        "error": str(e)
    }, indent=2))

other way:

#!/usr/bin/env python3
import os, sys, json, platform

def jprint(obj):
    print(json.dumps(obj, indent=2))

# Try to read the Morpheus map that tasks usually receive
m = {}
try:
    # In Morpheus Python tasks, "morpheus" is injected as a global
    m = globals().get("morpheus", {}) or {}
    if not isinstance(m, dict):
        m = {}
except Exception:
    m = {}

# Convenience getters that never raise
def g(path, default=""):
    cur = m
    try:
        for part in path.split("."):
            if isinstance(cur, dict) and part in cur:
                cur = cur[part]
            else:
                return default
        return cur if cur is not None else default
    except Exception:
        return default

# Raw candidates
raw_instance_name = g("instance.name") or os.getenv("MORPHEUS_INSTANCE_NAME", "")
raw_instance_platforms = [
    g("instance.platform"),
    g("server.platform"),
    g("instance.osType"),
    g("instance.instanceType.code"),
    g("instance.instanceType.name"),
    os.getenv("MORPHEUS_INSTANCE_PLATFORM", ""),
    os.getenv("MORPHEUS_SERVER_PLATFORM", ""),
]

# Resolve name
instance_name = (raw_instance_name or os.getenv("MORPHEUS_SERVER_NAME", "") or "unknown-instance")

# Resolve platform, try the list in order
plat = next((p for p in raw_instance_platforms if p), "")
plat = str(plat).strip().lower()

# Optional normalization, convert common strings to a simple bucket
def normalize_platform(p):
    p = (p or "").lower()
    if not p:
        return ""
    if "win" in p:
        return "windows"
    if "rhel" in p or "red hat" in p or "linux" in p or "ubuntu" in p or "debian" in p or "sles" in p:
        return "rhel"
    return p

resolved_platform = normalize_platform(plat)

# Final fallback, detect the OS running this task
if not resolved_platform:
    sys_os = platform.system().lower()
    if "win" in sys_os:
        resolved_platform = "windows"
    elif "linux" in sys_os:
        resolved_platform = "rhel"
    else:
        resolved_platform = "unknown-platform"

# Output both raw and resolved for easy debugging
out = {
    "ok": True,
    "raw_values": {
        "morpheus.instance.name": raw_instance_name or None,
        "morpheus.instance.platform": g("instance.platform") or None,
        "morpheus.server.platform": g("server.platform") or None,
        "morpheus.instance.osType": g("instance.osType") or None,
        "morpheus.instanceType.code": g("instance.instanceType.code") or None,
        "morpheus.instanceType.name": g("instance.instanceType.name") or None,
        "env.MORPHEUS_INSTANCE_NAME": os.getenv("MORPHEUS_INSTANCE_NAME"),
        "env.MORPHEUS_INSTANCE_PLATFORM": os.getenv("MORPHEUS_INSTANCE_PLATFORM"),
        "env.MORPHEUS_SERVER_PLATFORM": os.getenv("MORPHEUS_SERVER_PLATFORM"),
    },
    "resolved": {
        "instance_name_final": instance_name,
        "instance_platform_final": resolved_platform,
    }
}

jprint(out)
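
On a RHEL instance the resolved block of the output typically looks like this (values are illustrative):

"resolved": {
  "instance_name_final": "web01",
  "instance_platform_final": "rhel"
}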

Back to Step 6:

#!/usr/bin/env python3
import os
import sys
import json
import time
import requests
from datetime import datetime, timedelta, timezone

# -------------------------
# Utility functions
# -------------------------
def env_bool(name, default=True):
    v = os.getenv(name)
    if v is None:
        return default
    return str(v).strip().lower() in {"1", "true", "yes", "on"}

def env_int(name, default):
    try:
        return int(os.getenv(name, str(default)).strip())
    except Exception:
        return default

def fmt(dt):
    return dt.strftime("%Y-%m-%d %H:%M:%S")

def fail(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)

# -------------------------
# Configuration (hardcoded or environment)
# -------------------------
SN_HOST = "yourinstance.service-now.com"
SN_USERNAME = "admin"
SN_PASSWORD = "password"
SN_VERIFY_SSL = env_bool("SN_VERIFY_SSL", True)

SN_TEMPLATE_SYS_ID = "template_sys_id_here"
SC_API_VERSION = "v1"

REQUESTED_BY = "user1"
ASSIGNED_TO  = "user1"
CMDB_CI      = "cmdb_ci_ref_here"
U_ENVIRONMENT = "production"
U_WH0ST_ROLE = "ops"
CATEGORY     = "Maintenance"
U_REASON     = "System patching"

START_OFFSET_SEC = env_int("SN_START_OFFSET_SEC", 15)
DURATION_SEC     = env_int("SN_DURATION_SEC", 10800)

MORPHEUS_URL = "https://morpheus.mycompany.com"
MORPHEUS_API_TOKEN = "YOUR_MORPHEUS_API_TOKEN"
CYPHER_PREFIX = "servicenow/changerefs"
INSTANCE_NAME = "web01"

# -------------------------
# Proxy settings
# -------------------------
PROXY = {
    "http": "http://proxy.mycompany.com:8080",
    "https": "http://proxy.mycompany.com:8080"
}
NO_PROXY = {"proxies": {"http": None, "https": None}}

# -------------------------
# Step 4: Create Standard Change (via proxy)
# -------------------------
now_utc = datetime.now(timezone.utc)
start_dt = now_utc + timedelta(seconds=START_OFFSET_SEC)
end_dt = start_dt + timedelta(seconds=DURATION_SEC)
start_date = fmt(start_dt)
end_date = fmt(end_dt)

base = f"https://{SN_HOST}"
create_url = f"{base}/api/sn_chg_rest/{SC_API_VERSION}/change/standard/{SN_TEMPLATE_SYS_ID}"
payload = {
    "requested_by": REQUESTED_BY,
    "assigned_to": ASSIGNED_TO,
    "cmdb_ci": CMDB_CI,
    "u_environment": U_ENVIRONMENT,
    "u_whost_role": U_WH0ST_ROLE,
    "start_date": start_date,
    "end_date": end_date,
    "category": CATEGORY,
    "u_reason_for_change": U_REASON,
}

try:
    r = requests.post(
        create_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=120,
        verify=SN_VERIFY_SSL,
        proxies=PROXY,   # <-- use proxy for ServiceNow
    )
except Exception as e:
    fail(f"Create Standard Change request failed, {e}")

if r.status_code not in (200, 201):
    fail("Create Standard Change failed", status=r.status_code, body=r.text)

try:
    body = r.json()
except Exception:
    fail("Response was not JSON", status=r.status_code, body=r.text)

res = body.get("result", body)
sys_id = res.get("sys_id")
number = res.get("number")
if isinstance(sys_id, dict):
    sys_id = sys_id.get("value")
if isinstance(number, dict):
    number = number.get("value")

sys_id = (sys_id or "").strip()
number = (number or "").strip()
if not sys_id or not number:
    fail("Could not extract sys_id or number", status=r.status_code, body=json.dumps(body)[:400])

print(json.dumps({
    "ok": True,
    "step": "create_change",
    "number": number,
    "sys_id": sys_id,
    "planned_start": start_date,
    "planned_end": end_date,
    "nav_url": f"{base}/nav_to.do?uri=change_request.do?sys_id={sys_id}"
}, indent=2))

# -------------------------
# Step 5: Move to Implementing
# -------------------------
cr_table_url = f"{base}/api/now/table/change_request/{sys_id}"

numeric_body = {"state": -2}
try:
    r2 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(numeric_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
        proxies=PROXY,   # <-- proxy here too
    )
except Exception as e:
    fail(f"Failed to PATCH change_request state, {e}")

numeric_ok = r2.status_code == 200
if numeric_ok:
    try:
        j = r2.json()
        st = j.get("result", {}).get("state", "")
        if isinstance(st, dict):
            st = st.get("value", "")
        if "implement" in str(st).lower() or str(st) == "-2":
            print(json.dumps({"ok": True, "step": "set_state_numeric", "state": st}, indent=2))
        else:
            numeric_ok = False
    except Exception:
        numeric_ok = False

if not numeric_ok:
    text_body = {"state": "Implementing"}
    r3 = requests.patch(
        cr_table_url,
        auth=(SN_USERNAME, SN_PASSWORD),
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        data=json.dumps(text_body),
        timeout=60,
        verify=SN_VERIFY_SSL,
        proxies=PROXY,
    )
    try:
        j = r3.json()
    except Exception:
        fail("Could not parse Implementing PATCH response", status=r3.status_code, body=r3.text)
    st = j.get("result", {}).get("state", "")
    if isinstance(st, dict):
        st = st.get("value", "")
    print(json.dumps({"ok": r3.status_code == 200, "step": "set_state_text", "state": st}, indent=2))
    if r3.status_code != 200:
        fail("Could not move change to Implementing", status=r3.status_code, body=r3.text)

time.sleep(10)

# -------------------------
# Step 6: Persist to Morpheus Cypher (no proxy)
# -------------------------
safe_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in INSTANCE_NAME)
cypher_key = f"secret/{CYPHER_PREFIX}/{safe_name}"
cypher_url = f"{MORPHEUS_URL}/api/cypher/secret/{CYPHER_PREFIX}/{safe_name}"

cypher_payload = {
    "value": {
        "number": number,
        "sys_id": sys_id
    }
}

try:
    r = requests.put(
        cypher_url,
        headers={
            "Authorization": f"Bearer {MORPHEUS_API_TOKEN}",
            "Accept": "application/json",
            "Content-Type": "application/json"
        },
        data=json.dumps(cypher_payload),
        timeout=30,
        verify=False,
        **NO_PROXY
    )
except Exception as e:
    fail(f"Failed to PUT to Morpheus Cypher, {e}")

if r.status_code not in (200, 201, 204):
    fail("Could not persist to Morpheus Cypher", status=r.status_code, body=r.text)

print(json.dumps({
    "ok": True,
    "step": "persist_to_cypher",
    "cypher_key": cypher_key,
    "stored": cypher_payload["value"],
    "status": r.status_code
}, indent=2))
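
quick read-back check (a minimal sketch meant to be appended to the script above; it reuses cypher_url, MORPHEUS_API_TOKEN and NO_PROXY, and assumes the Cypher API returns the stored value on a plain GET of the same key):

try:
    chk = requests.get(
        cypher_url,
        headers={
            "Authorization": f"Bearer {MORPHEUS_API_TOKEN}",
            "Accept": "application/json"
        },
        timeout=30,
        verify=False,
        **NO_PROXY
    )
    # A 200 with our number/sys_id in the body means the write is visible
    print(json.dumps({
        "ok": chk.status_code == 200,
        "step": "verify_cypher_readback",
        "status": chk.status_code,
        "body_snippet": chk.text[:200]
    }, indent=2))
except Exception as e:
    print(json.dumps({"ok": False, "step": "verify_cypher_readback", "error": f"{e}"}, indent=2))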

show all the variables:

# -------------------------
# Print Configuration Summary (for debugging)
# -------------------------
print("\n=== CONFIGURATION SUMMARY ===")
print(json.dumps({
    "ServiceNow": {
        "SN_HOST": SN_HOST,
        "SN_USERNAME": SN_USERNAME,
        "SN_VERIFY_SSL": SN_VERIFY_SSL,
        "SN_TEMPLATE_SYS_ID": SN_TEMPLATE_SYS_ID,
        "SC_API_VERSION": SC_API_VERSION,
        "REQUESTED_BY": REQUESTED_BY,
        "ASSIGNED_TO": ASSIGNED_TO,
        "CMDB_CI": CMDB_CI,
        "U_ENVIRONMENT": U_ENVIRONMENT,
        "U_WH0ST_ROLE": U_WH0ST_ROLE,
        "CATEGORY": CATEGORY,
        "U_REASON": U_REASON,
        "START_OFFSET_SEC": START_OFFSET_SEC,
        "DURATION_SEC": DURATION_SEC
    },
    "Morpheus": {
        "MORPHEUS_URL": MORPHEUS_URL,
        "MORPHEUS_API_TOKEN_SET": bool(MORPHEUS_API_TOKEN),
        "CYPHER_PREFIX": CYPHER_PREFIX,
        "INSTANCE_NAME": INSTANCE_NAME
    },
    "Proxy": {
        "Proxy_HTTP": PROXY["http"],
        "Proxy_HTTPS": PROXY["https"],
        "No_Proxy_Keys": list(NO_PROXY["proxies"].keys())
    }
}, indent=2))
print("=== END CONFIGURATION SUMMARY ===\n")

does cypher work??

#!/usr/bin/env python3
import json
from morpheuscypher import Cypher

# Instantiate the Cypher client using the injected Morpheus context
client = Cypher(morpheus=morpheus)

# --- READ example ---
secret_path = "secret/snowdev"
try:
    secret_value = client.get(secret_path)
    print(json.dumps({
        "ok": True,
        "read_secret": secret_path,
        "value": secret_value
    }, indent=2))
except Exception as e:
    print(json.dumps({"ok": False, "error": f"Failed to read secret: {e}"}))

# --- WRITE example ---
instance_name = morpheus.get("instance", {}).get("name", "unknown").replace(" ", "_")
cypher_path = f"secret/servicenow/changerefs/{instance_name}"
payload = {"number": "CHG000333", "sys_id": "0f49377gcugcy4iyive"}

try:
    client.write(cypher_path, payload)
    print(json.dumps({
        "ok": True,
        "wrote_secret": cypher_path,
        "payload": payload
    }, indent=2))
except Exception as e:
    print(json.dumps({"ok": False, "error": f"Failed to write secret: {e}"}))

getting the same variables 3 different ways:

#!/usr/bin/env python3
import os
import json

result = {
    "via_morpheus_context": {},
    "via_env_vars": {},
    "via_fallback": {}
}

# --- 1. Morpheus Context Object (if injected automatically) ---
try:
    instance_name = morpheus.get('instance', {}).get('name', '')
    instance_platform = morpheus.get('instance', {}).get('platform', '')
    instance_ostype = morpheus.get('instance', {}).get('osType', '')
    result["via_morpheus_context"] = {
        "instance.name": instance_name,
        "instance.platform": instance_platform,
        "instance.osType": instance_ostype
    }
except Exception as e:
    result["via_morpheus_context"] = {"error": str(e)}

# --- 2. Environment Variables (sometimes populated by Morpheus) ---
result["via_env_vars"] = {
    "MORPHEUS_INSTANCE_NAME": os.getenv("MORPHEUS_INSTANCE_NAME"),
    "MORPHEUS_INSTANCE_PLATFORM": os.getenv("MORPHEUS_INSTANCE_PLATFORM"),
    "MORPHEUS_INSTANCE_OSTYPE": os.getenv("MORPHEUS_INSTANCE_OSTYPE"),
    "MORPHEUS_SERVER_PLATFORM": os.getenv("MORPHEUS_SERVER_PLATFORM")
}

# --- 3. Fallback Attempt (different key paths Morpheus sometimes uses) ---
try:
    instance_type_code = morpheus.get('instance', {}).get('instanceType', {}).get('code', '')
    instance_type_name = morpheus.get('instance', {}).get('instanceType', {}).get('name', '')
    server_platform = morpheus.get('server', {}).get('platform', '')
    result["via_fallback"] = {
        "instance.instanceType.code": instance_type_code,
        "instance.instanceType.name": instance_type_name,
        "server.platform": server_platform
    }
except Exception as e:
    result["via_fallback"] = {"error": str(e)}

# --- Output ---
print(json.dumps(result, indent=2))
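
in practice the three sources rarely all agree, so you want the first non-empty value; a minimal sketch of that merge, appended to the script above (the order of preference is my guess, not a Morpheus rule):

def first_non_empty(*values):
    # Return the first value that is not None or empty string
    for v in values:
        if v:
            return v
    return ""

resolved = {
    "instance_name": first_non_empty(
        result["via_morpheus_context"].get("instance.name"),
        result["via_env_vars"].get("MORPHEUS_INSTANCE_NAME"),
    ),
    "platform": first_non_empty(
        result["via_morpheus_context"].get("instance.platform"),
        result["via_env_vars"].get("MORPHEUS_INSTANCE_PLATFORM"),
        result["via_fallback"].get("server.platform"),
        result["via_env_vars"].get("MORPHEUS_SERVER_PLATFORM"),
    ),
}
print(json.dumps({"resolved": resolved}, indent=2))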

another test:

#!/usr/bin/env python3
import requests, json

instance_id = morpheus['instance']['id']
appliance_url = morpheus['morpheus']['applianceUrl'].rstrip("/")
api_token = morpheus['morpheus']['apiAccessToken']

# Call Morpheus API for full instance details
url = f"{appliance_url}/api/instances/{instance_id}?details=true"
headers = {
    "Authorization": f"Bearer {api_token}",
    "Accept": "application/json"
}

response = requests.get(url, headers=headers, verify=False)
if response.status_code != 200:
    print(f"Failed to fetch instance details: {response.status_code}")
    print(response.text)
else:
    data = response.json()
    instance = data.get("instance", {})
    platform = instance.get("platform")
    ostype = instance.get("osType")
    print(json.dumps({
        "instance_name": instance.get("name"),
        "platform": platform,
        "osType": ostype
    }, indent=2))
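
on some builds instance.platform comes back empty and the value only lives on the attached server records; a minimal sketch of that fallback, meant to be appended to the script above (the "servers" list is the same one the later morpheus_instance_info.py script reads):

# If the instance-level fields came back empty, try the first attached server
# (only makes sense when the call above succeeded)
if response.status_code == 200:
    servers = instance.get("servers") or []
    server = servers[0] if servers else {}
    platform = platform or server.get("platform")
    ostype = ostype or server.get("osType")
    print(json.dumps({
        "server_fallback_available": bool(server),
        "platform": platform,
        "osType": ostype
    }, indent=2))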

more tests:

#!/usr/bin/env python3
import os
import sys
import json
import requests

# Config
MORPHEUS_URL = os.getenv("MORPHEUS_URL", "https://morpheus.mycompany.com").rstrip("/")
MORPHEUS_TOKEN = os.getenv("MORPHEUS_API_TOKEN")
INSTANCE_ID = os.getenv("INSTANCE_ID") or (sys.argv[1] if len(sys.argv) > 1 else None)

if not MORPHEUS_TOKEN or not INSTANCE_ID:
    print("Usage: MORPHEUS_API_TOKEN=<token> python3 morpheus_instance_simple.py <instance_id>")
    sys.exit(1)

url = f"{MORPHEUS_URL}/api/instances/{INSTANCE_ID}?details=true"
headers = {
    "Authorization": f"Bearer {MORPHEUS_TOKEN}",
    "Accept": "application/json"
}

# Call the API
resp = requests.get(url, headers=headers, verify=False)
if resp.status_code != 200:
    print(f"Error {resp.status_code}: {resp.text}")
    sys.exit(1)

data = resp.json().get("instance", {})

# Extract a few key fields
out = {
    "id": data.get("id"),
    "name": data.get("name"),
    "hostname": data.get("hostname"),
    "platform": data.get("platform"),
    "osType": data.get("osType"),
    "instanceType": (data.get("instanceType") or {}).get("name")
}

print(json.dumps(out, indent=2))

more:

#!/usr/bin/env python3
"""
morpheus_instance_info.py

Query Morpheus API for an instance by ID and print its key details.
Works outside Morpheus (command line on Linux).

Usage:
  export MORPHEUS_URL="https://morpheus.mycompany.com"
  export MORPHEUS_API_TOKEN="YOUR_API_TOKEN"
  python3 morpheus_instance_info.py 12345

Or pass arguments directly:
  python3 morpheus_instance_info.py --url https://morpheus.mycompany.com --token YOUR_TOKEN --id 12345

Options:
  --no-proxy    Bypass proxy for Morpheus connection
  --verify false  Disable TLS verification (for testing)
  --raw         Print full JSON from API (for debugging)
"""

import os
import sys
import json
import argparse
import requests

# -------------- Helpers --------------
def die(msg, status=None, body=None):
    out = {"ok": False, "error": msg}
    if status is not None:
        out["status"] = status
    if body is not None:
        out["body_snippet"] = body[:400]
    print(json.dumps(out, indent=2), file=sys.stderr)
    sys.exit(1)

def parse_bool(v, default=True):
    if v is None:
        return default
    return str(v).strip().lower() in ("1", "true", "yes", "on")

# -------------- Main --------------
def main():
    p = argparse.ArgumentParser(description="Fetch Morpheus instance details by ID")
    p.add_argument("--url", help="Morpheus base URL (e.g. https://morpheus.example.com)", default=os.getenv("MORPHEUS_URL"))
    p.add_argument("--token", help="Morpheus API bearer token", default=os.getenv("MORPHEUS_API_TOKEN"))
    p.add_argument("--id", "-i", help="Instance ID (required)", type=int, required=True)
    p.add_argument("--verify", help="Verify TLS (true/false). Default true", default=os.getenv("MORPHEUS_VERIFY_SSL", "true"))
    p.add_argument("--no-proxy", help="Bypass proxy for this request", action="store_true")
    p.add_argument("--raw", help="Print raw JSON response", action="store_true")
    args = p.parse_args()

    if not args.url:
        die("Missing Morpheus URL. Set --url or MORPHEUS_URL environment variable.")
    if not args.token:
        die("Missing Morpheus API token. Set --token or MORPHEUS_API_TOKEN environment variable.")

    base = args.url.rstrip("/")
    inst_id = args.id
    endpoint = f"{base}/api/instances/{inst_id}?details=true"

    headers = {
        "Authorization": f"Bearer {args.token}",
        "Accept": "application/json"
    }

    verify = parse_bool(args.verify, True)
    request_kwargs = {"headers": headers, "timeout": 30, "verify": verify}
    if args.no_proxy:
        request_kwargs["proxies"] = {"http": None, "https": None}

    # --- API call ---
    try:
        resp = requests.get(endpoint, **request_kwargs)
    except requests.exceptions.SSLError as e:
        die("TLS/SSL error connecting to Morpheus (set --verify false for a quick test): " + str(e))
    except Exception as e:
        die("Failed to call Morpheus API: " + str(e))

    if resp.status_code != 200:
        die("Morpheus API returned non-200", status=resp.status_code, body=resp.text)

    try:
        data = resp.json()
    except Exception:
        die("Response not JSON", status=resp.status_code, body=resp.text)

    if args.raw:
        print(json.dumps(data, indent=2))
        return

    # --- Extract instance info ---
    inst = data.get("instance") or data  # some versions wrap in "instance"
    servers = inst.get("servers") or []
    server_info = servers[0] if servers else {}

    out = {
        "ok": True,
        "id": inst.get("id") or inst_id,
        "name": inst.get("name"),
        "hostname": inst.get("hostname") or server_info.get("hostname"),
        "platform": inst.get("platform") or server_info.get("platform"),
        "osType": inst.get("osType") or server_info.get("osType"),
        "instanceType": (inst.get("instanceType") or {}).get("name"),
    }

    print(json.dumps(out, indent=2))

if __name__ == "__main__":
    main()

updated morpheus variables:

# -------------------------
# Morpheus context + Cypher
# -------------------------
# "morpheus" is injected by Morpheus in Python tasks
ctx = morpheus

# Core Morpheus values
INSTANCE_NAME = ctx['instance']['name']                                   # required (works in your env)
SERVER_PLATFORM = (
    ctx.get('instance', {}).get('server', {}).get('platform', '')         # preferred in v8.x
    or ctx.get('server', {}).get('platform', '')                          # fallback (some task contexts)
).lower()

MORPHEUS_URL = ctx.get('applianceUrl', '').rstrip('/')                    # e.g. https://morpheus.myco.com
# Morpheus already injects a scoped API token for tasks – use it for Cypher writes
MORPHEUS_API_TOKEN = ctx.get('morpheus', {}).get('apiAccessToken', '') or ctx.get('apiAccessToken', '')

# Cypher client (set ssl_verify=True once trust is clean)
cy = Cypher(morpheus=ctx, ssl_verify=False)
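
with the client in place, the change reference written in Step 6 can be pulled straight back; a minimal sketch, assuming the secret/servicenow/changerefs/<name> key layout used earlier and that the client exposes get() for reads:

import json

# Read back the change reference stored by Step 6, using the same key layout
safe_name = "".join(c if c.isalnum() or c in "._-" else "_" for c in INSTANCE_NAME)
change_ref = cy.get(f"secret/servicenow/changerefs/{safe_name}")
print(json.dumps({"cypher_change_ref": change_ref}, indent=2))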

new ansible Cypher call:

- name: Resolve appliance URL from Morpheus (fallback to static)
  ansible.builtin.set_fact:
    _appliance_url: >-
      {{
        morpheus['applianceUrl'] | default(appliance_url) | string | trim
      }}

- name: Resolve Morpheus API token and execution lease
  ansible.builtin.set_fact:
    _api_token: "{{ morpheus['apiAccessToken'] | default('') | trim }}"
    _exec_lease: >-
      {{
        morpheus['executionLease'] | default(lookup('env','MORPHEUS_EXECUTION_LEASE') | default('', true)) | trim
      }}

- name: Build Authorization header (prefer Bearer, else Execution lease)
  ansible.builtin.set_fact:
    _auth_header: >-
      {{
        ('Bearer ' ~ _api_token) if (_api_token | length > 0)
        else ('Execution ' ~ _exec_lease)
      }}

- name: Fail if we have neither token nor lease
  ansible.builtin.assert:
    that:
      - _auth_header is search('^(Bearer|Execution) ')
    fail_msg: "No Morpheus API credential available (apiAccessToken nor executionLease)."

inspecting the injected Morpheus context:

- name: Show what Morpheus passed in
  debug:
    var: morpheus

- name: Save the full Morpheus context to a file
  ansible.builtin.copy:
    content: "{{ morpheus | to_nice_json }}"
    dest: /tmp/morpheus_context.json
    mode: '0600'

- name: Save only the instance object
  ansible.builtin.copy:
    content: "{{ morpheus.instance | to_nice_json }}"
    dest: /tmp/morpheus_instance.json
    mode: '0600'

# Optional, compute a best effort primary IP
- name: Derive primary ip
  ansible.builtin.set_fact:
    primary_ip: >-
      {{ morpheus.instance.connectionInfo.ipAddress
         | default(morpheus.instance.interfaces | map(attribute='ipAddress') | first, true)
         | default(morpheus.instance.externalIp, true)
         | default('n a') }}

- name: Use some fields
  debug:
    msg:
      - "Instance name, {{ morpheus.instance.name }}"
      - "Instance id, {{ morpheus.instance.id }}"
      - "Cloud, {{ morpheus.instance.cloud.name | default('n a') }}"
      - "Primary ip, {{ primary_ip }}"


the next iteration:

---
# tasks/morpheus_facts.yml
# Build and write a Morpheus local facts file
# Replaces the old JSON export method

- name: Ensure local facts directory exists
  become: true
  ansible.builtin.file:
    path: /etc/ansible/facts.d
    state: directory
    owner: root
    group: root
    mode: '0755'

# Optional: Uncomment this to inspect the full structure Morpheus injects
# - name: Save morpheus instance JSON for debugging
#   ansible.builtin.copy:
#     content: "{{ morpheus.instance | to_nice_json }}"
#     dest: /tmp/morpheus_instance.json
#     mode: '0600'

- name: Build Morpheus local fact values (with smart fallbacks)
  ansible.builtin.set_fact:
    mf_instance_id: "{{ morpheus.instance.id | default(morpheus.instance.instanceId, true) | default('n_a') }}"
    mf_hostname: "{{ morpheus.instance.hostname | default(morpheus.instance.serverName, true) | default('n_a') }}"
    mf_name: "{{ morpheus.instance.name | default(morpheus.instance.instanceName, true) | default('n_a') }}"
    mf_source_template: >-
      {{
        (
          morpheus.instance.containerDetails
          | default([], true)
          | map(attribute='server')
          | map(attribute='sourceImage')
          | map(attribute='name')
          | list
          | first
        )
        | default(morpheus.instance.sourceImageName, true)
        | default(morpheus.instance.imageName, true)
        | default('n_a', true)
      }}
    mf_layout: "{{ morpheus.instance.layout.name | default(morpheus.instance.layoutName, true) | default('n_a', true) }}"
    mf_instance_type: "{{ morpheus.instance.instanceType.code | default(morpheus.instance.instanceTypeCode, true) | default(morpheus.instance.typeCode, true) | default('n_a', true) }}"
    mf_requested_by: "{{ morpheus.instance.createdBy.username | default(morpheus.instance.createdByUsername, true) | default(morpheus.instance.ownerUsername, true) | default('n_a', true) }}"
    mf_created_date: "{{ morpheus.instance.dateCreated | default(morpheus.instance.createDate, true) | default('n_a') }}"
    mf_group: "{{ morpheus.instance.group.name | default(morpheus.instance.groupName, true) | default('n_a', true) }}"
    mf_cloud: "{{ morpheus.instance.cloud.name | default(morpheus.instance.cloudName, true) | default('n_a', true) }}"
    mf_site: "{{ morpheus.instance.site.name | default(morpheus.instance.siteName, true) | default('n_a', true) }}"
    mf_environment: "{{ morpheus.instance.instanceContext | default(morpheus.instance.environment, true) | default('n_a') }}"

- name: Write morpheus.fact file
  become: true
  ansible.builtin.copy:
    dest: /etc/ansible/facts.d/morpheus.fact
    mode: '0644'
    content: |
      [defaults]
      instance_id={{ mf_instance_id }}
      hostname={{ mf_hostname }}
      name={{ mf_name }}
      source_template={{ mf_source_template }}
      layout={{ mf_layout }}
      instance_type={{ mf_instance_type }}
      requested_by={{ mf_requested_by }}
      created_date={{ mf_created_date }}
      group={{ mf_group }}
      cloud={{ mf_cloud }}
      site={{ mf_site }}
      environment={{ mf_environment }}

- name: Refresh Ansible local facts
  become: true
  ansible.builtin.setup:
    filter: ansible_local

pick the ServiceNow change template by platform:

if not SN_TEMPLATE_SYS_ID:
    platform = (SERVER_PLATFORM or "").lower()

    if platform in ("rhel", "wprhel", "wpoel") and SN_RHEL_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_RHEL_TEMPLATE_SYS_ID
    elif platform in ("windows", "wpwin") and SN_WINDOWS_TEMPLATE_SYS_ID:
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID
    else:
        # Optional: fallback to Windows or leave blank if no match
        SN_TEMPLATE_SYS_ID = SN_WINDOWS_TEMPLATE_SYS_ID or ""
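
for completeness, a minimal sketch of where those per-platform template IDs might come from; the environment-variable sourcing is an assumption (only the variable names are taken from the snippet above), and these lines would sit before the selection block:

import os

# Hypothetical sourcing: one env var per platform, empty string if unset
SN_TEMPLATE_SYS_ID         = os.getenv("SN_TEMPLATE_SYS_ID", "").strip()
SN_RHEL_TEMPLATE_SYS_ID    = os.getenv("SN_RHEL_TEMPLATE_SYS_ID", "").strip()
SN_WINDOWS_TEMPLATE_SYS_ID = os.getenv("SN_WINDOWS_TEMPLATE_SYS_ID", "").strip()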

store facts:

---
- name: Save Morpheus facts to local Facter file
  hosts: all
  gather_facts: false
  become: true  # needed to write under /etc
  vars:
    _facts_dir: "/etc/facter/facts.d"
    _facts_file: "/etc/facter/facts.d/morpheus.json"

  tasks:
    - name: Ensure Facter facts directory exists
      ansible.builtin.file:
        path: "{{ _facts_dir }}"
        state: directory
        mode: "0755"

    # Build a trimmed/safe payload from the injected `morpheus` variable.
    # (Add/remove keys as you like; nothing here calls the Morpheus API.)
    - name: Build facts payload from Morpheus injected context
      ansible.builtin.set_fact:
        morpheus_facts_payload:
          instance: "{{ morpheus.instance | default({}) }}"
          server: "{{ morpheus.server | default({}) }}"
          instanceName: "{{ morpheus.instanceName | default('') }}"
          applianceUrl: "{{ morpheus.applianceUrl | default('') }}"
          account: "{{ morpheus.account | default({}) }}"
          customOptions: "{{ morpheus.customOptions | default({}) }}"
          # timestamp for traceability
          capturedAtUtc: "{{ lookup('pipe','date -u +%Y-%m-%dT%H:%M:%SZ') }}"

    - name: Write Morpheus facts to Facter JSON
      ansible.builtin.copy:
        dest: "{{ _facts_file }}"
        content: "{{ morpheus_facts_payload | to_nice_json }}"
        mode: "0644"

    - name: Show where facts were written
      ansible.builtin.debug:
        msg:
          path: "{{ _facts_file }}"
          keys: "{{ morpheus_facts_payload.keys() | list }}"
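
downstream jobs can read that file straight back; a minimal Python sketch, assuming the /etc/facter/facts.d/morpheus.json path from the play above:

#!/usr/bin/env python3
import json

# Load the facts file written by the play and print a couple of fields
with open("/etc/facter/facts.d/morpheus.json") as f:
    facts = json.load(f)

print(json.dumps({
    "instance_name": facts.get("instance", {}).get("name"),
    "appliance_url": facts.get("applianceUrl"),
    "captured_at": facts.get("capturedAtUtc")
}, indent=2))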
