semgrep on cilium master (2020-10-13)
Note: the output below has been truncated.
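Every finding visible in this excerpt comes from two go.lang.security.audit rules: one reflect-makefunc hit and a long run of database.string-formatted-query hits. The latter fire on fmt.Sprintf-built command strings passed to Exec-style test helpers (vm.Exec, kubectl.Exec, vm.ExecContext); the formatted strings are cilium/docker/kubectl command lines rather than SQL, so each hit calls for a manual audit of how the command is constructed rather than a SQL-injection fix. For orientation, a minimal self-contained Go sketch of the flagged pattern follows; the runner type is hypothetical and merely stands in for the Cilium test helpers, it is not their actual API.

// Sketch of the code shape the string-formatted-query rule matches:
// a fmt.Sprintf-built string handed to an Exec-style method.
package main

import (
	"fmt"
	"os/exec"
)

// runner is a hypothetical stand-in for the test helpers (vm, kubectl)
// that appear in the findings below.
type runner struct{}

// Exec runs a command line through the shell, mirroring how the helpers
// execute formatted command strings.
func (r *runner) Exec(cmdline string) ([]byte, error) {
	return exec.Command("sh", "-c", cmdline).CombinedOutput()
}

func main() {
	vm := &runner{}
	label := "id.httpd1" // in the real tests this comes from fixtures, not user input

	// This is the pattern semgrep flags; "echo" keeps the example harmless.
	out, err := vm.Exec(fmt.Sprintf("echo cilium policy trace -s %s", label))
	if err != nil {
		fmt.Println("exec failed:", err)
		return
	}
	fmt.Printf("%s", out)
}

The raw semgrep JSON results follow.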
{ | |
"results": [ | |
{ | |
"check_id": "go.lang.security.audit.reflect-makefunc.reflect-makefunc", | |
"path": "test/ginkgo-ext/scopes.go", | |
"start": { | |
"line": 564, | |
"col": 7 | |
}, | |
"end": { | |
"line": 564, | |
"col": 44 | |
}, | |
"extra": { | |
"message": "'reflect.MakeFunc' detected. This will sidestep protections that are\nnormally afforded by Go's type system. Audit this call and be sure that \nuser input cannot be used to affect the code generated by MakeFunc; \notherwise, you will have a serious security vulnerability.\n", | |
"metavars": {}, | |
"metadata": { | |
"owasp": "A8: Insecure Deserialization", | |
"cwe": "CWE-913: Improper Control of Dynamically-Managed Code Resources" | |
}, | |
"severity": "WARNING", | |
"lines": "\tv := reflect.MakeFunc(fn.Type(), template)" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/Policies.go", | |
"start": { | |
"line": 583, | |
"col": 21 | |
}, | |
"end": { | |
"line": 583, | |
"col": 90 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 583, | |
"col": 33, | |
"offset": 23701 | |
}, | |
"end": { | |
"line": 583, | |
"col": 40, | |
"offset": 23708 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 583, | |
"col": 21, | |
"offset": 23689 | |
}, | |
"end": { | |
"line": 583, | |
"col": 23, | |
"offset": 23691 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 100 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tgetIpv4Prefix := vm.Exec(fmt.Sprintf(`expr %s : '\\([0-9]*\\.[0-9]*\\.\\)'`, ipv4Address)).SingleOut()" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/Policies.go", | |
"start": { | |
"line": 585, | |
"col": 27 | |
}, | |
"end": { | |
"line": 585, | |
"col": 96 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 585, | |
"col": 39, | |
"offset": 23864 | |
}, | |
"end": { | |
"line": 585, | |
"col": 46, | |
"offset": 23871 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 585, | |
"col": 27, | |
"offset": 23852 | |
}, | |
"end": { | |
"line": 585, | |
"col": 29, | |
"offset": 23854 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 100 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tgetIpv4PrefixExcept := vm.Exec(fmt.Sprintf(`expr %s : '\\([0-9]*\\.[0-9]*\\.\\)'`, ipv4Address)).SingleOut()" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/Policies.go", | |
"start": { | |
"line": 1893, | |
"col": 10 | |
}, | |
"end": { | |
"line": 1893, | |
"col": 95 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1893, | |
"col": 22, | |
"offset": 75408 | |
}, | |
"end": { | |
"line": 1893, | |
"col": 29, | |
"offset": 75415 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1893, | |
"col": 10, | |
"offset": 75396 | |
}, | |
"end": { | |
"line": 1893, | |
"col": 12, | |
"offset": 75398 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 189 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres := vm.Exec(fmt.Sprintf(`cilium policy trace -s %s -d %s/TCP`, httpd2Label, httpd1Label))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/Policies.go", | |
"start": { | |
"line": 1955, | |
"col": 9 | |
}, | |
"end": { | |
"line": 1957, | |
"col": 52 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1955, | |
"col": 21, | |
"offset": 78454 | |
}, | |
"end": { | |
"line": 1955, | |
"col": 28, | |
"offset": 78461 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1955, | |
"col": 9, | |
"offset": 78442 | |
}, | |
"end": { | |
"line": 1955, | |
"col": 11, | |
"offset": 78444 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 189 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres = vm.Exec(fmt.Sprintf(\n\t\t\t`cilium policy trace --src-identity %d --dst-identity %d`,\n\t\t\thttpd2SecurityIdentity, httpd1SecurityIdentity))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/Policies.go", | |
"start": { | |
"line": 1961, | |
"col": 9 | |
}, | |
"end": { | |
"line": 1963, | |
"col": 40 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1961, | |
"col": 21, | |
"offset": 78757 | |
}, | |
"end": { | |
"line": 1961, | |
"col": 28, | |
"offset": 78764 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1961, | |
"col": 9, | |
"offset": 78745 | |
}, | |
"end": { | |
"line": 1961, | |
"col": 11, | |
"offset": 78747 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 189 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres = vm.Exec(fmt.Sprintf(\n\t\t\t`cilium policy trace --src-endpoint %s --dst-endpoint %s`,\n\t\t\thttpd2EndpointID, httpd1EndpointID))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/Policies.go", | |
"start": { | |
"line": 1997, | |
"col": 9 | |
}, | |
"end": { | |
"line": 1997, | |
"col": 124 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1997, | |
"col": 21, | |
"offset": 80796 | |
}, | |
"end": { | |
"line": 1997, | |
"col": 28, | |
"offset": 80803 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1997, | |
"col": 9, | |
"offset": 80784 | |
}, | |
"end": { | |
"line": 1997, | |
"col": 11, | |
"offset": 80786 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 189 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres = vm.Exec(fmt.Sprintf(`cilium policy trace --src-endpoint %s --dst-endpoint %s`, httpd2EndpointID, httpd1EndpointID))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/chaos.go", | |
"start": { | |
"line": 205, | |
"col": 3 | |
}, | |
"end": { | |
"line": 213, | |
"col": 4 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$OBJ": { | |
"start": { | |
"line": 212, | |
"col": 4, | |
"offset": 7427 | |
}, | |
"end": { | |
"line": 212, | |
"col": 6, | |
"offset": 7429 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 11 | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 211, | |
"col": 14, | |
"offset": 7368 | |
}, | |
"end": { | |
"line": 211, | |
"col": 30, | |
"offset": 7384 | |
}, | |
"abstract_content": "helpers.CurlFail", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "19ec0a067b2d0820a03d1346d273e21d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 211, | |
"col": 4, | |
"offset": 7358 | |
}, | |
"end": { | |
"line": 211, | |
"col": 10, | |
"offset": 7364 | |
}, | |
"abstract_content": "action", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "f315c53420b44ee65ec4e93f28d4fe2c" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 205, | |
"col": 3, | |
"offset": 7100 | |
}, | |
"end": { | |
"line": 205, | |
"col": 9, | |
"offset": 7106 | |
}, | |
"abstract_content": "prefix", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "aff6c58386b5dafc1132d061afe8af4d" | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tprefix := \"http://127.0.0.1:8500/v1/kv/cilium/state/identities/v1/id\"\n\t\tidentities, err := vm.GetEndpointsIdentityIds()\n\t\tExpect(err).To(BeNil(), \"Cannot get identities\")\n\n\t\tBy(\"Deleting identities from kvstore\")\n\t\tfor _, identityID := range identities {\n\t\t\taction := helpers.CurlFail(\"%s/%s -X DELETE\", prefix, identityID)\n\t\t\tvm.Exec(action).ExpectSuccess(\"Key %s cannot be deleted correctly\", identityID)\n\t\t}" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/chaos.go", | |
"start": { | |
"line": 205, | |
"col": 3 | |
}, | |
"end": { | |
"line": 230, | |
"col": 4 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$OBJ": { | |
"start": { | |
"line": 229, | |
"col": 4, | |
"offset": 8084 | |
}, | |
"end": { | |
"line": 229, | |
"col": 6, | |
"offset": 8086 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 11 | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 228, | |
"col": 14, | |
"offset": 8035 | |
}, | |
"end": { | |
"line": 228, | |
"col": 30, | |
"offset": 8051 | |
}, | |
"abstract_content": "helpers.CurlFail", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "19ec0a067b2d0820a03d1346d273e21d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 228, | |
"col": 4, | |
"offset": 8025 | |
}, | |
"end": { | |
"line": 228, | |
"col": 10, | |
"offset": 8031 | |
}, | |
"abstract_content": "action", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "f315c53420b44ee65ec4e93f28d4fe2c" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 205, | |
"col": 3, | |
"offset": 7100 | |
}, | |
"end": { | |
"line": 205, | |
"col": 9, | |
"offset": 7106 | |
}, | |
"abstract_content": "prefix", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "aff6c58386b5dafc1132d061afe8af4d" | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tprefix := \"http://127.0.0.1:8500/v1/kv/cilium/state/identities/v1/id\"\n\t\tidentities, err := vm.GetEndpointsIdentityIds()\n\t\tExpect(err).To(BeNil(), \"Cannot get identities\")\n\n\t\tBy(\"Deleting identities from kvstore\")\n\t\tfor _, identityID := range identities {\n\t\t\taction := helpers.CurlFail(\"%s/%s -X DELETE\", prefix, identityID)\n\t\t\tvm.Exec(action).ExpectSuccess(\"Key %s cannot be deleted correctly\", identityID)\n\t\t}\n\n\t\tnewidentities, err := vm.GetEndpointsIdentityIds()\n\t\tExpect(err).To(BeNil(), \"Cannot get identities after delete keys\")\n\n\t\tExpect(newidentities).To(Equal(identities),\n\t\t\t\"Identities are not the same after delete keys from kvstore\")\n\n\t\tBy(\"Checking that identities were restored correctly after deletion\")\n\t\tfor _, identityID := range newidentities {\n\t\t\tid, err := identity.ParseNumericIdentity(identityID)\n\t\t\tExpect(err).To(BeNil(), \"Cannot parse identity\")\n\t\t\tif id.IsReservedIdentity() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taction := helpers.CurlFail(\"%s/%s\", prefix, identityID)\n\t\t\tvm.Exec(action).ExpectSuccess(\"Key %s was not restored correctly\", identityID)\n\t\t}" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/chaos.go", | |
"start": { | |
"line": 240, | |
"col": 3 | |
}, | |
"end": { | |
"line": 263, | |
"col": 4 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$OBJ": { | |
"start": { | |
"line": 262, | |
"col": 4, | |
"offset": 9266 | |
}, | |
"end": { | |
"line": 262, | |
"col": 6, | |
"offset": 9268 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 11 | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 261, | |
"col": 14, | |
"offset": 9207 | |
}, | |
"end": { | |
"line": 261, | |
"col": 30, | |
"offset": 9223 | |
}, | |
"abstract_content": "helpers.CurlFail", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "19ec0a067b2d0820a03d1346d273e21d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 261, | |
"col": 4, | |
"offset": 9197 | |
}, | |
"end": { | |
"line": 261, | |
"col": 10, | |
"offset": 9203 | |
}, | |
"abstract_content": "action", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "f315c53420b44ee65ec4e93f28d4fe2c" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 240, | |
"col": 3, | |
"offset": 8630 | |
}, | |
"end": { | |
"line": 240, | |
"col": 9, | |
"offset": 8636 | |
}, | |
"abstract_content": "prefix", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "aff6c58386b5dafc1132d061afe8af4d" | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tprefix := \"http://127.0.0.1:8500/v1/kv/cilium/state/identities/v1/id\"\n\n\t\tBy(\"Installing CIDR policy\")\n\t\tpolicy := `\n\t\t[{\n\t\t\t\"endpointSelector\": {\"matchLabels\":{\"test\":\"\"}},\n\t\t\t\"egress\":\n\t\t\t[{\n\t\t\t\t\"toCIDR\": [\n\t\t\t\t\t\"10.10.10.10/32\"\n\t\t\t\t]\n\t\t\t}]\n\t\t}]\n\t\t`\n\t\t_, err := vm.PolicyRenderAndImport(policy)\n\t\tExpect(err).To(BeNil(), \"Unable to import policy: %s\", err)\n\n\t\tCIDRIdentities := vm.Exec(fmt.Sprintf(`cilium identity list -o json| %s`, jqFilter))\n\t\tCIDRIdentities.ExpectSuccess(\"Cannot get cidr identities\")\n\n\t\tfor _, identityID := range CIDRIdentities.ByLines() {\n\t\t\taction := helpers.CurlFail(\"%s/%s -X DELETE\", prefix, identityID)\n\t\t\tvm.Exec(action).ExpectSuccess(\"Key %s cannot be deleted correctly\", identityID)\n\t\t}" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/chaos.go", | |
"start": { | |
"line": 257, | |
"col": 21 | |
}, | |
"end": { | |
"line": 257, | |
"col": 87 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 257, | |
"col": 33, | |
"offset": 9021 | |
}, | |
"end": { | |
"line": 257, | |
"col": 40, | |
"offset": 9028 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 257, | |
"col": 21, | |
"offset": 9009 | |
}, | |
"end": { | |
"line": 257, | |
"col": 23, | |
"offset": 9011 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 11 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tCIDRIdentities := vm.Exec(fmt.Sprintf(`cilium identity list -o json| %s`, jqFilter))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/chaos.go", | |
"start": { | |
"line": 265, | |
"col": 24 | |
}, | |
"end": { | |
"line": 265, | |
"col": 90 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 265, | |
"col": 36, | |
"offset": 9386 | |
}, | |
"end": { | |
"line": 265, | |
"col": 43, | |
"offset": 9393 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 265, | |
"col": 24, | |
"offset": 9374 | |
}, | |
"end": { | |
"line": 265, | |
"col": 26, | |
"offset": 9376 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 11 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tnewCIDRIdentities := vm.Exec(fmt.Sprintf(`cilium identity list -o json| %s`, jqFilter))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/fqdn.go", | |
"start": { | |
"line": 178, | |
"col": 10 | |
}, | |
"end": { | |
"line": 178, | |
"col": 86 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 178, | |
"col": 22, | |
"offset": 5008 | |
}, | |
"end": { | |
"line": 178, | |
"col": 29, | |
"offset": 5015 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 178, | |
"col": 10, | |
"offset": 4996 | |
}, | |
"end": { | |
"line": 178, | |
"col": 12, | |
"offset": 4998 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 73 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres := vm.Exec(fmt.Sprintf(\"docker network create %s\", helpers.WorldDockerNetwork))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/fqdn.go", | |
"start": { | |
"line": 221, | |
"col": 4 | |
}, | |
"end": { | |
"line": 222, | |
"col": 47 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 221, | |
"col": 16, | |
"offset": 7111 | |
}, | |
"end": { | |
"line": 221, | |
"col": 23, | |
"offset": 7118 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 221, | |
"col": 4, | |
"offset": 7099 | |
}, | |
"end": { | |
"line": 221, | |
"col": 6, | |
"offset": 7101 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 73 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tvm.Exec(fmt.Sprintf(\"mv %s /data/%s\",\n\t\t\t\tfilepath.Join(vm.BasePath(), file), file)).ExpectSuccess(" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/fqdn.go", | |
"start": { | |
"line": 262, | |
"col": 3 | |
}, | |
"end": { | |
"line": 262, | |
"col": 76 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 262, | |
"col": 15, | |
"offset": 8516 | |
}, | |
"end": { | |
"line": 262, | |
"col": 22, | |
"offset": 8523 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 262, | |
"col": 3, | |
"offset": 8504 | |
}, | |
"end": { | |
"line": 262, | |
"col": 5, | |
"offset": 8506 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 73 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tvm.Exec(fmt.Sprintf(\"docker network rm %s\", helpers.WorldDockerNetwork))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/fqdn.go", | |
"start": { | |
"line": 281, | |
"col": 15 | |
}, | |
"end": { | |
"line": 284, | |
"col": 24 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$X": { | |
"start": { | |
"line": 284, | |
"col": 5, | |
"offset": 9066 | |
}, | |
"end": { | |
"line": 284, | |
"col": 23, | |
"offset": 9084 | |
}, | |
"abstract_content": "`| sed 's/ \\// /'`", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "6888bce5d6d49055f37a6aae63fe3f7f" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 281, | |
"col": 15, | |
"offset": 8916 | |
}, | |
"end": { | |
"line": 281, | |
"col": 17, | |
"offset": 8918 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 73 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tGinkgoPrint(vm.Exec(\n\t\t\t`docker ps -q | xargs -n 1 docker inspect --format ` +\n\t\t\t\t`'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} {{ .Name }}'` +\n\t\t\t\t`| sed 's/ \\// /'`).Stdout())" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/fqdn.go", | |
"start": { | |
"line": 292, | |
"col": 11 | |
}, | |
"end": { | |
"line": 292, | |
"col": 81 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 292, | |
"col": 23, | |
"offset": 9546 | |
}, | |
"end": { | |
"line": 292, | |
"col": 30, | |
"offset": 9553 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 292, | |
"col": 11, | |
"offset": 9534 | |
}, | |
"end": { | |
"line": 292, | |
"col": 13, | |
"offset": 9536 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 73 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres := vm.Exec(fmt.Sprintf(`cilium policy selectors -o json | %s`, jqfilter))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/kafka.go", | |
"start": { | |
"line": 40, | |
"col": 3 | |
}, | |
"end": { | |
"line": 95, | |
"col": 15 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$OBJ": { | |
"start": { | |
"line": 95, | |
"col": 3, | |
"offset": 2985 | |
}, | |
"end": { | |
"line": 95, | |
"col": 5, | |
"offset": 2987 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 24 | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 91, | |
"col": 10, | |
"offset": 2824 | |
}, | |
"end": { | |
"line": 91, | |
"col": 21, | |
"offset": 2835 | |
}, | |
"abstract_content": "fmt.Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 91, | |
"col": 3, | |
"offset": 2817 | |
}, | |
"end": { | |
"line": 91, | |
"col": 6, | |
"offset": 2820 | |
}, | |
"abstract_content": "cmd", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "9c5f8e71f4c15ad3de1edd0dc264f25a" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 40, | |
"col": 3, | |
"offset": 1170 | |
}, | |
"end": { | |
"line": 40, | |
"col": 9, | |
"offset": 1176 | |
}, | |
"abstract_content": "client", | |
"unique_id": { | |
"type": "id", | |
"value": "client", | |
"kind": "Local", | |
"sid": 32 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tclient = \"client\"\n\t)\n\n\tcontainers := func(mode string) {\n\n\t\timages := map[string]string{\n\t\t\t\"zook\": constants.ZookeeperImage,\n\t\t\t\"client\": constants.KafkaClientImage,\n\t\t}\n\n\t\tswitch mode {\n\t\tcase \"create\":\n\t\t\tfor k, v := range images {\n\t\t\t\tvm.ContainerCreate(k, v, helpers.CiliumDockerNetwork, fmt.Sprintf(\"-l id.%s\", k))\n\t\t\t}\n\t\t\tzook, err := vm.ContainerInspectNet(\"zook\")\n\t\t\tExpect(err).Should(BeNil())\n\n\t\t\tvm.ContainerCreate(\"kafka\", constants.KafkaImage, helpers.CiliumDockerNetwork, fmt.Sprintf(\n\t\t\t\t\"-l id.kafka -e KAFKA_ZOOKEEPER_CONNECT=%s:2181 -e KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS=60000 -e KAFKA_LISTENERS=PLAINTEXT://:9092 -e KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=60000\", zook[\"IPv4\"]))\n\n\t\tcase \"delete\":\n\t\t\tfor k := range images {\n\t\t\t\tvm.ContainerRm(k)\n\t\t\t}\n\t\t\tvm.ContainerRm(\"kafka\")\n\t\t}\n\t}\n\n\tcreateTopicCmd := func(topic string) string {\n\t\treturn fmt.Sprintf(\"/opt/kafka/bin/kafka-topics.sh --create --zookeeper zook:2181 \"+\n\t\t\t\"--replication-factor 1 --partitions 1 --topic %s\", topic)\n\t}\n\n\tcreateTopic := func(topic string) {\n\t\tlogger.Infof(\"Creating new kafka topic %s\", topic)\n\t\tres := vm.ContainerExec(client, createTopicCmd(topic))\n\t\tres.ExpectSuccess(\"Unable to create topic %s\", topic)\n\t}\n\n\tconsumerCmd := func(topic string, maxMsg int) string {\n\t\treturn fmt.Sprintf(\"/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server \"+\n\t\t\t\"kafka:9092 --topic %s --max-messages %d --timeout-ms 300000 --from-beginning\",\n\t\t\ttopic, maxMsg)\n\t}\n\n\tconsumer := func(topic string, maxMsg int) *helpers.CmdRes {\n\t\treturn vm.ContainerExec(client, consumerCmd(topic, maxMsg))\n\t}\n\n\tproducer := func(topic string, message string) {\n\t\tcmd := fmt.Sprintf(\n\t\t\t\"echo %s | docker exec -i %s /opt/kafka/bin/kafka-console-producer.sh \"+\n\t\t\t\t\"--broker-list kafka:9092 --topic %s\",\n\t\t\tmessage, client, topic)\n\t\tvm.Exec(cmd)" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/runtime/privileged_tests.go", | |
"start": { | |
"line": 56, | |
"col": 10 | |
}, | |
"end": { | |
"line": 56, | |
"col": 84 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 56, | |
"col": 34, | |
"offset": 1597 | |
}, | |
"end": { | |
"line": 56, | |
"col": 41, | |
"offset": 1604 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$CTX": { | |
"start": { | |
"line": 56, | |
"col": 25, | |
"offset": 1588 | |
}, | |
"end": { | |
"line": 56, | |
"col": 28, | |
"offset": 1591 | |
}, | |
"abstract_content": "ctx", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "0d8a965eeebc3732ff486151ae1ed06f" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 56, | |
"col": 10, | |
"offset": 1573 | |
}, | |
"end": { | |
"line": 56, | |
"col": 12, | |
"offset": 1575 | |
}, | |
"abstract_content": "vm", | |
"unique_id": { | |
"type": "id", | |
"value": "vm", | |
"kind": "Local", | |
"sid": 3 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres := vm.ExecContext(ctx, fmt.Sprintf(\"sudo make -C %s tests-privileged\", path))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/policygen/models.go", | |
"start": { | |
"line": 361, | |
"col": 3 | |
}, | |
"end": { | |
"line": 361, | |
"col": 74 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 361, | |
"col": 16, | |
"offset": 10786 | |
}, | |
"end": { | |
"line": 361, | |
"col": 23, | |
"offset": 10793 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 361, | |
"col": 3, | |
"offset": 10773 | |
}, | |
"end": { | |
"line": 361, | |
"col": 6, | |
"offset": 10776 | |
}, | |
"abstract_content": "kub", | |
"unique_id": { | |
"type": "id", | |
"value": "kub", | |
"kind": "Param", | |
"sid": 30 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tkub.Exec(fmt.Sprintf(\"%s delete cnp %s\", helpers.KubectlCmd, t.Prefix))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/policygen/models.go", | |
"start": { | |
"line": 820, | |
"col": 9 | |
}, | |
"end": { | |
"line": 820, | |
"col": 93 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 820, | |
"col": 22, | |
"offset": 23638 | |
}, | |
"end": { | |
"line": 820, | |
"col": 29, | |
"offset": 23645 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 820, | |
"col": 9, | |
"offset": 23625 | |
}, | |
"end": { | |
"line": 820, | |
"col": 12, | |
"offset": 23628 | |
}, | |
"abstract_content": "kub", | |
"unique_id": { | |
"type": "id", | |
"value": "kub", | |
"kind": "Param", | |
"sid": 70 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\tres := kub.Exec(fmt.Sprintf(\"cat %s > %s\", strings.Join(manifests, \" \"), completeManifest))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/policygen/models.go", | |
"start": { | |
"line": 823, | |
"col": 8 | |
}, | |
"end": { | |
"line": 823, | |
"col": 85 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 823, | |
"col": 21, | |
"offset": 23752 | |
}, | |
"end": { | |
"line": 823, | |
"col": 28, | |
"offset": 23759 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 823, | |
"col": 8, | |
"offset": 23739 | |
}, | |
"end": { | |
"line": 823, | |
"col": 11, | |
"offset": 23742 | |
}, | |
"abstract_content": "kub", | |
"unique_id": { | |
"type": "id", | |
"value": "kub", | |
"kind": "Param", | |
"sid": 70 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\tres = kub.Exec(fmt.Sprintf(\"%s apply -f %s\", helpers.KubectlCmd, completeManifest))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Chaos.go", | |
"start": { | |
"line": 132, | |
"col": 11 | |
}, | |
"end": { | |
"line": 133, | |
"col": 50 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 132, | |
"col": 28, | |
"offset": 4781 | |
}, | |
"end": { | |
"line": 132, | |
"col": 35, | |
"offset": 4788 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 132, | |
"col": 11, | |
"offset": 4764 | |
}, | |
"end": { | |
"line": 132, | |
"col": 18, | |
"offset": 4771 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 15 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres := kubectl.Exec(fmt.Sprintf(\"%s -n %s delete pods -l k8s-app=cilium\",\n\t\t\t\thelpers.KubectlCmd, helpers.CiliumNamespace))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Chaos.go", | |
"start": { | |
"line": 211, | |
"col": 11 | |
}, | |
"end": { | |
"line": 213, | |
"col": 64 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 211, | |
"col": 28, | |
"offset": 7152 | |
}, | |
"end": { | |
"line": 211, | |
"col": 35, | |
"offset": 7159 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 211, | |
"col": 11, | |
"offset": 7135 | |
}, | |
"end": { | |
"line": 211, | |
"col": 18, | |
"offset": 7142 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 15 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres := kubectl.Exec(fmt.Sprintf(\n\t\t\t\t\"%s -n %s delete pods -l %s\",\n\t\t\t\thelpers.KubectlCmd, helpers.CiliumNamespace, ciliumFilter))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Nightly.go", | |
"start": { | |
"line": 103, | |
"col": 10 | |
}, | |
"end": { | |
"line": 103, | |
"col": 107 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 103, | |
"col": 39, | |
"offset": 2983 | |
}, | |
"end": { | |
"line": 103, | |
"col": 46, | |
"offset": 2990 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$CTX": { | |
"start": { | |
"line": 103, | |
"col": 30, | |
"offset": 2974 | |
}, | |
"end": { | |
"line": 103, | |
"col": 33, | |
"offset": 2977 | |
}, | |
"abstract_content": "ctx", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "0d8a965eeebc3732ff486151ae1ed06f" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 103, | |
"col": 10, | |
"offset": 2954 | |
}, | |
"end": { | |
"line": 103, | |
"col": 17, | |
"offset": 2961 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 26 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tres := kubectl.ExecContext(ctx, fmt.Sprintf(\"%s apply -f %s\", helpers.KubectlCmd, vagrantManifestPath))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Nightly.go", | |
"start": { | |
"line": 325, | |
"col": 4 | |
}, | |
"end": { | |
"line": 326, | |
"col": 80 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 325, | |
"col": 21, | |
"offset": 10607 | |
}, | |
"end": { | |
"line": 325, | |
"col": 28, | |
"offset": 10614 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 325, | |
"col": 4, | |
"offset": 10590 | |
}, | |
"end": { | |
"line": 325, | |
"col": 11, | |
"offset": 10597 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 26 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tkubectl.Exec(fmt.Sprintf(\n\t\t\t\t\"%s delete --all cnp -n %s\", helpers.KubectlCmd, helpers.DefaultNamespace))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Nightly.go", | |
"start": { | |
"line": 399, | |
"col": 4 | |
}, | |
"end": { | |
"line": 400, | |
"col": 89 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 399, | |
"col": 21, | |
"offset": 12855 | |
}, | |
"end": { | |
"line": 399, | |
"col": 28, | |
"offset": 12862 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 399, | |
"col": 4, | |
"offset": 12838 | |
}, | |
"end": { | |
"line": 399, | |
"col": 11, | |
"offset": 12845 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 66 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tkubectl.Exec(fmt.Sprintf(\n\t\t\t\t\"%s delete --all pods,svc,cnp -n %s\", helpers.KubectlCmd, helpers.DefaultNamespace))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1106, | |
"col": 12 | |
}, | |
"end": { | |
"line": 1106, | |
"col": 115 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1106, | |
"col": 29, | |
"offset": 45122 | |
}, | |
"end": { | |
"line": 1106, | |
"col": 36, | |
"offset": 45129 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1106, | |
"col": 12, | |
"offset": 45105 | |
}, | |
"end": { | |
"line": 1106, | |
"col": 19, | |
"offset": 45112 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\t\tExpect(kubectl.Exec(fmt.Sprintf(\"%s get pod -n %s %s -o json\", helpers.KubectlCmd, namespaceForTest, app1Pod)).Unmarshal(&app1PodModel)).To(BeNil())" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1124, | |
"col": 5 | |
}, | |
"end": { | |
"line": 1124, | |
"col": 151 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1124, | |
"col": 22, | |
"offset": 45911 | |
}, | |
"end": { | |
"line": 1124, | |
"col": 29, | |
"offset": 45918 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1124, | |
"col": 5, | |
"offset": 45894 | |
}, | |
"end": { | |
"line": 1124, | |
"col": 12, | |
"offset": 45901 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\t\tkubectl.Exec(fmt.Sprintf(\"%s annotate pod %s -n %s %s-\", helpers.KubectlCmd, appPods[helpers.App1], namespaceForTest, annotation.ProxyVisibility))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1125, | |
"col": 5 | |
}, | |
"end": { | |
"line": 1125, | |
"col": 151 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1125, | |
"col": 22, | |
"offset": 46062 | |
}, | |
"end": { | |
"line": 1125, | |
"col": 29, | |
"offset": 46069 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1125, | |
"col": 5, | |
"offset": 46045 | |
}, | |
"end": { | |
"line": 1125, | |
"col": 12, | |
"offset": 46052 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\t\tkubectl.Exec(fmt.Sprintf(\"%s annotate pod %s -n %s %s-\", helpers.KubectlCmd, appPods[helpers.App2], namespaceForTest, annotation.ProxyVisibility))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1224, | |
"col": 12 | |
}, | |
"end": { | |
"line": 1224, | |
"col": 162 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1224, | |
"col": 29, | |
"offset": 49909 | |
}, | |
"end": { | |
"line": 1224, | |
"col": 36, | |
"offset": 49916 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1224, | |
"col": 12, | |
"offset": 49892 | |
}, | |
"end": { | |
"line": 1224, | |
"col": 19, | |
"offset": 49899 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\t\tres := kubectl.Exec(fmt.Sprintf(\"%s annotate pod %s -n %s %s=\\\"%s\\\"\", helpers.KubectlCmd, podToAnnotate, namespaceForTest, annotation.ProxyVisibility, anno))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1231, | |
"col": 5 | |
}, | |
"end": { | |
"line": 1231, | |
"col": 143 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1231, | |
"col": 22, | |
"offset": 50337 | |
}, | |
"end": { | |
"line": 1231, | |
"col": 29, | |
"offset": 50344 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1231, | |
"col": 5, | |
"offset": 50320 | |
}, | |
"end": { | |
"line": 1231, | |
"col": 12, | |
"offset": 50327 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\t\tkubectl.Exec(fmt.Sprintf(\"%s annotate pod %s -n %s %s-\", helpers.KubectlCmd, podToAnnotate, namespaceForTest, annotation.ProxyVisibility)).ExpectSuccess()" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1249, | |
"col": 12 | |
}, | |
"end": { | |
"line": 1249, | |
"col": 169 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1249, | |
"col": 29, | |
"offset": 51187 | |
}, | |
"end": { | |
"line": 1249, | |
"col": 36, | |
"offset": 51194 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1249, | |
"col": 12, | |
"offset": 51170 | |
}, | |
"end": { | |
"line": 1249, | |
"col": 19, | |
"offset": 51177 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\t\tres := kubectl.Exec(fmt.Sprintf(\"%s annotate pod %s -n %s %s=\\\"<Ingress/80/TCP/HTTP>\\\"\", helpers.KubectlCmd, app1Pod, namespaceForTest, annotation.ProxyVisibility))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 1890, | |
"col": 10 | |
}, | |
"end": { | |
"line": 1890, | |
"col": 96 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 1890, | |
"col": 27, | |
"offset": 76881 | |
}, | |
"end": { | |
"line": 1890, | |
"col": 34, | |
"offset": 76888 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1890, | |
"col": 10, | |
"offset": 76864 | |
}, | |
"end": { | |
"line": 1890, | |
"col": 17, | |
"offset": 76871 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres = kubectl.Exec(fmt.Sprintf(\"kubectl label namespaces/%s nslabel=%s\", secondNS, nsLabel))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 2099, | |
"col": 10 | |
}, | |
"end": { | |
"line": 2099, | |
"col": 92 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 2099, | |
"col": 27, | |
"offset": 85394 | |
}, | |
"end": { | |
"line": 2099, | |
"col": 34, | |
"offset": 85401 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 2099, | |
"col": 10, | |
"offset": 85377 | |
}, | |
"end": { | |
"line": 2099, | |
"col": 17, | |
"offset": 85384 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres = kubectl.Exec(fmt.Sprintf(\"kubectl label namespaces/%[1]s nslabel=%[1]s\", firstNS))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Policies.go", | |
"start": { | |
"line": 2106, | |
"col": 10 | |
}, | |
"end": { | |
"line": 2106, | |
"col": 93 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 2106, | |
"col": 27, | |
"offset": 85687 | |
}, | |
"end": { | |
"line": 2106, | |
"col": 34, | |
"offset": 85694 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 2106, | |
"col": 10, | |
"offset": 85670 | |
}, | |
"end": { | |
"line": 2106, | |
"col": 17, | |
"offset": 85677 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 191 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres = kubectl.Exec(fmt.Sprintf(\"kubectl label namespaces/%[1]s nslabel=%[1]s\", secondNS))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/PoliciesNightly.go", | |
"start": { | |
"line": 51, | |
"col": 3 | |
}, | |
"end": { | |
"line": 53, | |
"col": 50 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 51, | |
"col": 20, | |
"offset": 1411 | |
}, | |
"end": { | |
"line": 51, | |
"col": 27, | |
"offset": 1418 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 51, | |
"col": 3, | |
"offset": 1394 | |
}, | |
"end": { | |
"line": 51, | |
"col": 10, | |
"offset": 1401 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 7 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\tkubectl.Exec(fmt.Sprintf(\n\t\t\t\"%s delete pods,svc,cnp -n %s -l test=policygen\",\n\t\t\thelpers.KubectlCmd, helpers.DefaultNamespace))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Services.go", | |
"start": { | |
"line": 373, | |
"col": 11 | |
}, | |
"end": { | |
"line": 373, | |
"col": 123 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 373, | |
"col": 28, | |
"offset": 14865 | |
}, | |
"end": { | |
"line": 373, | |
"col": 35, | |
"offset": 14872 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 373, | |
"col": 11, | |
"offset": 14848 | |
}, | |
"end": { | |
"line": 373, | |
"col": 18, | |
"offset": 14855 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 261 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres := kubectl.Exec(fmt.Sprintf(\"kubectl label services/%s %s=%s\", echoServiceName, serviceProxyLabelName, \"dummy-lb\"))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/k8sT/Services.go", | |
"start": { | |
"line": 399, | |
"col": 10 | |
}, | |
"end": { | |
"line": 399, | |
"col": 108 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$P": { | |
"start": { | |
"line": 399, | |
"col": 27, | |
"offset": 15877 | |
}, | |
"end": { | |
"line": 399, | |
"col": 34, | |
"offset": 15884 | |
}, | |
"abstract_content": "Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "529959cbba8055b67088b91945d02f52" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 399, | |
"col": 10, | |
"offset": 15860 | |
}, | |
"end": { | |
"line": 399, | |
"col": 17, | |
"offset": 15867 | |
}, | |
"abstract_content": "kubectl", | |
"unique_id": { | |
"type": "id", | |
"value": "kubectl", | |
"kind": "Local", | |
"sid": 261 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\t\t\tres = kubectl.Exec(fmt.Sprintf(\"kubectl label services/%s %s-\", echoServiceName, serviceProxyLabelName))" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/hubble.go", | |
"start": { | |
"line": 25, | |
"col": 2 | |
}, | |
"end": { | |
"line": 39, | |
"col": 2 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$OBJ": { | |
"start": { | |
"line": 38, | |
"col": 9, | |
"offset": 1209 | |
}, | |
"end": { | |
"line": 38, | |
"col": 10, | |
"offset": 1210 | |
}, | |
"abstract_content": "s", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "8d526582e2b7ef0b8539f6fefd265a8e" | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 36, | |
"col": 15, | |
"offset": 1113 | |
}, | |
"end": { | |
"line": 36, | |
"col": 26, | |
"offset": 1124 | |
}, | |
"abstract_content": "fmt.Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 36, | |
"col": 2, | |
"offset": 1100 | |
}, | |
"end": { | |
"line": 36, | |
"col": 11, | |
"offset": 1109 | |
}, | |
"abstract_content": "hubbleCmd", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "05c50857bc175106b83eb49ad1f536b7" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 25, | |
"col": 2, | |
"offset": 728 | |
}, | |
"end": { | |
"line": 25, | |
"col": 12, | |
"offset": 738 | |
}, | |
"abstract_content": "hubbleSock", | |
"unique_id": { | |
"type": "id", | |
"value": "hubbleSock", | |
"kind": "Global", | |
"sid": 1 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\thubbleSock = \"unix:///var/run/cilium/hubble.sock\"\n)\n\n// HubbleObserve runs `hubble observe --output=json <args>`. JSON output is\n// enabled such that CmdRes.FilterLines may be used to grep for specific events\n// in the output.\nfunc (s *SSHMeta) HubbleObserve(args ...string) *CmdRes {\n\targsCoalesced := \"\"\n\tif len(args) > 0 {\n\t\targsCoalesced = strings.Join(args, \" \")\n\t}\n\thubbleCmd := fmt.Sprintf(\"hubble observe --server=%q --output=json %s\",\n\t\thubbleSock, argsCoalesced)\n\treturn s.Exec(hubbleCmd)\n}" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/hubble.go", | |
"start": { | |
"line": 32, | |
"col": 2 | |
}, | |
"end": { | |
"line": 38, | |
"col": 26 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$OBJ": { | |
"start": { | |
"line": 38, | |
"col": 9, | |
"offset": 1209 | |
}, | |
"end": { | |
"line": 38, | |
"col": 10, | |
"offset": 1210 | |
}, | |
"abstract_content": "s", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "8d526582e2b7ef0b8539f6fefd265a8e" | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 36, | |
"col": 15, | |
"offset": 1113 | |
}, | |
"end": { | |
"line": 36, | |
"col": 26, | |
"offset": 1124 | |
}, | |
"abstract_content": "fmt.Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 36, | |
"col": 2, | |
"offset": 1100 | |
}, | |
"end": { | |
"line": 36, | |
"col": 11, | |
"offset": 1109 | |
}, | |
"abstract_content": "hubbleCmd", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "05c50857bc175106b83eb49ad1f536b7" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 32, | |
"col": 2, | |
"offset": 1014 | |
}, | |
"end": { | |
"line": 32, | |
"col": 15, | |
"offset": 1027 | |
}, | |
"abstract_content": "argsCoalesced", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ea5185163c122ab2e6dd79e710de228a" | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\targsCoalesced := \"\"\n\tif len(args) > 0 {\n\t\targsCoalesced = strings.Join(args, \" \")\n\t}\n\thubbleCmd := fmt.Sprintf(\"hubble observe --server=%q --output=json %s\",\n\t\thubbleSock, argsCoalesced)\n\treturn s.Exec(hubbleCmd)" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/kubectl.go", | |
"start": { | |
"line": 49, | |
"col": 2 | |
}, | |
"end": { | |
"line": 1481, | |
"col": 2 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$CTX": { | |
"start": { | |
"line": 1480, | |
"col": 25, | |
"offset": 53196 | |
}, | |
"end": { | |
"line": 1480, | |
"col": 28, | |
"offset": 53199 | |
}, | |
"abstract_content": "ctx", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "0d8a965eeebc3732ff486151ae1ed06f" | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 1480, | |
"col": 9, | |
"offset": 53180 | |
}, | |
"end": { | |
"line": 1480, | |
"col": 12, | |
"offset": 53183 | |
}, | |
"abstract_content": "kub", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b" | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 1457, | |
"col": 9, | |
"offset": 52579 | |
}, | |
"end": { | |
"line": 1457, | |
"col": 20, | |
"offset": 52590 | |
}, | |
"abstract_content": "fmt.Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 1457, | |
"col": 2, | |
"offset": 52572 | |
}, | |
"end": { | |
"line": 1457, | |
"col": 5, | |
"offset": 52575 | |
}, | |
"abstract_content": "cmd", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "9c5f8e71f4c15ad3de1edd0dc264f25a" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 49, | |
"col": 2, | |
"offset": 1261 | |
}, | |
"end": { | |
"line": 49, | |
"col": 12, | |
"offset": 1271 | |
}, | |
"abstract_content": "KubectlCmd", | |
"unique_id": { | |
"type": "id", | |
"value": "KubectlCmd", | |
"kind": "Global", | |
"sid": 16 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 3278,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": {
"$CTX": {
"start": {
"line": 3273,
"col": 26,
"offset": 116631
},
"end": {
"line": 3273,
"col": 29,
"offset": 116634
},
"abstract_content": "ctx",
"unique_id": {
"type": "id",
"value": "ctx",
"kind": "Param",
"sid": 430
}
},
"$OBJ": {
"start": {
"line": 3273,
"col": 10,
"offset": 116615
},
"end": {
"line": 3273,
"col": 13,
"offset": 116618
},
"abstract_content": "kub",
"unique_id": {
"type": "AST",
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
}
},
"$FXN": {
"start": {
"line": 3270,
"col": 10,
"offset": 116514
},
"end": {
"line": 3270,
"col": 21,
"offset": 116525
},
"abstract_content": "fmt.Sprintf",
"unique_id": {
"type": "AST",
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
}
},
"$OTHER": {
"start": {
"line": 3270,
"col": 3,
"offset": 116507
},
"end": {
"line": 3270,
"col": 6,
"offset": 116510
},
"abstract_content": "cmd",
"unique_id": {
"type": "AST",
"md5sum": "9c5f8e71f4c15ad3de1edd0dc264f25a"
}
},
"$QUERY": {
"start": {
"line": 49,
"col": 2,
"offset": 1261
},
"end": {
"line": 49,
"col": 12,
"offset": 1271
},
"abstract_content": "KubectlCmd",
"unique_id": {
"type": "id",
"value": "KubectlCmd",
"kind": "Global",
"sid": 16
}
}
},
"metadata": {
"owasp": "A1: Injection",
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
"source-rule-url": "https://github.com/securego/gosec"
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
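// Editor's note: a minimal, self-contained sketch of the background-reporting
// pattern that BackgroundReport and PprofReport implement above: a goroutine
// driven by a time.Ticker runs a callback periodically until its context is
// cancelled. The callback body and interval below are placeholders, not the
// values used by the cilium helpers.
package main

import (
	"context"
	"fmt"
	"time"
)

// startBackgroundReport runs report once per interval until cancel is called.
func startBackgroundReport(interval time.Duration, report func()) context.CancelFunc {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				report()
			}
		}
	}()
	return cancel
}

func main() {
	cancel := startBackgroundReport(500*time.Millisecond, func() {
		fmt.Println("collecting state from cilium pods...") // placeholder work
	})
	time.Sleep(2 * time.Second)
	cancel()
}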
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
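// Editor's note: a minimal, self-contained sketch of the poll-until-true
// pattern that waitForNPods and WithTimeout implement above, combined with the
// same readiness checks as checkRunning/checkReady. The helper names below
// (pollUntil, countReady) are illustrative assumptions, not part of the cilium
// test/helpers API.
package main

import (
	"errors"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
)

// pollUntil calls body once per tick until it returns true or the timeout
// elapses, mirroring the WithTimeout/TimeoutConfig behaviour used above.
func pollUntil(body func() bool, timeout, tick time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if body() {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(tick)
	}
}

// countReady counts pods that are running, not terminating, and have all
// containers ready.
func countReady(pods []v1.Pod) int {
	ready := 0
	for _, pod := range pods {
		if pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {
			continue
		}
		ok := true
		for _, c := range pod.Status.ContainerStatuses {
			if !c.Ready {
				ok = false
				break
			}
		}
		if ok {
			ready++
		}
	}
	return ready
}

func main() {
	// Hypothetical usage: wait up to 5 seconds for at least 3 ready pods.
	pods := []v1.Pod{} // in the real helpers this comes from `kubectl get pods -o json`
	err := pollUntil(func() bool { return countReady(pods) >= 3 }, 5*time.Second, time.Second)
	fmt.Println(err)
}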
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}\n\n// ApplyDefault applies give filepath with other options set to default\nfunc (kub *Kubectl) ApplyDefault(filePath string) *CmdRes {\n\treturn kub.Apply(ApplyOptions{FilePath: filePath})\n}\n\n// Create creates the Kubernetes kanifest located at path filepath.\nfunc (kub *Kubectl) Create(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"creating %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s create -f %s\", KubectlCmd, filePath))\n}\n\n// CreateResource is a wrapper around `kubernetes create <resource>\n// <resourceName>.\nfunc (kub *Kubectl) CreateResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating resource %s with name %s\", resource, resourceName))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create %s %s\", resource, resourceName))\n}\n\n// DeleteResource is a wrapper 
around `kubernetes delete <resource>\n// resourceName>.\nfunc (kub *Kubectl) DeleteResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"deleting resource %s with name %s\", resource, resourceName))\n\treturn kub.Exec(fmt.Sprintf(\"kubectl delete %s %s\", resource, resourceName))\n}\n\n// DeleteInNamespace deletes the Kubernetes manifest at path filepath in a\n// particular namespace\nfunc (kub *Kubectl) DeleteInNamespace(namespace, filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s in namespace %s\", filePath, namespace)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s -n %s delete -f %s\", KubectlCmd, namespace, filePath))\n}\n\n// Delete deletes the Kubernetes manifest at path filepath.\nfunc (kub *Kubectl) Delete(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// DeleteAndWait deletes the Kubernetes manifest at path filePath and wait\n// for the associated resources to be gone.\n// If ignoreNotFound parameter is true we don't error if the resource to be\n// deleted is not found in the cluster.\nfunc (kub *Kubectl) DeleteAndWait(filePath string, ignoreNotFound bool) *CmdRes {\n\tkub.Logger().Debugf(\"waiting for resources in %q to be deleted\", filePath)\n\tvar ignoreOpt string\n\tif ignoreNotFound {\n\t\tignoreOpt = \"--ignore-not-found\"\n\t}\n\treturn kub.ExecMiddle(\n\t\tfmt.Sprintf(\"%s delete -f %s --wait %s\", KubectlCmd, filePath, ignoreOpt))\n}\n\n// DeleteLong deletes the Kubernetes manifest at path filepath with longer timeout.\nfunc (kub *Kubectl) DeleteLong(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// PodsHaveCiliumIdentity validates that all pods matching th podSelector have\n// a CiliumEndpoint resource mirroring it and an identity is assigned to it. If\n// any pods do not match this criteria, an error is returned.\nfunc (kub *Kubectl) PodsHaveCiliumIdentity(namespace, podSelector string) error {\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o json\", KubectlCmd, namespace, podSelector))\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve pods for selector %s: %s\", podSelector, res.OutputPrettyPrint())\n\t}\n\n\tpodList := &v1.PodList{}\n\terr := res.Unmarshal(podList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal pods for selector %s: %s\", podSelector, err)\n\t}\n\n\tfor _, pod := range podList.Items {\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ep == nil {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumEndpoint\", namespace, pod.Name)\n\t\t}\n\n\t\tif ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumIdentity\", namespace, pod.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeploymentIsReady validate that a deployment has at least one replica and\n// that all replicas are:\n// - up-to-date\n// - ready\n//\n// If the above condition is not met, an error is returned. 
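// Editor's note: an illustrative, standalone sketch of the readiness check
// performed by the DeploymentIsReady helper: shell out to kubectl, unmarshal
// the JSON into the official appsv1 types, and compare the status counters.
// The deployment and namespace names in main are placeholders.
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"

	appsv1 "k8s.io/api/apps/v1"
)

func deploymentIsReady(namespace, name string) (int, error) {
	out, err := exec.Command("kubectl", "-n", namespace, "get", "deployment", name, "-o", "json").Output()
	if err != nil {
		return 0, fmt.Errorf("unable to retrieve deployment %s/%s: %w", namespace, name, err)
	}
	var d appsv1.Deployment
	if err := json.Unmarshal(out, &d); err != nil {
		return 0, fmt.Errorf("unable to unmarshal deployment %s/%s: %w", namespace, name, err)
	}
	s := d.Status
	switch {
	case s.Replicas == 0:
		return 0, fmt.Errorf("replicas count is zero")
	case s.AvailableReplicas != s.Replicas:
		return 0, fmt.Errorf("only %d of %d replicas are available", s.AvailableReplicas, s.Replicas)
	case s.ReadyReplicas != s.Replicas:
		return 0, fmt.Errorf("only %d of %d replicas are ready", s.ReadyReplicas, s.Replicas)
	case s.UpdatedReplicas != s.Replicas:
		return 0, fmt.Errorf("only %d of %d replicas are up-to-date", s.UpdatedReplicas, s.Replicas)
	}
	return int(s.Replicas), nil
}

func main() {
	n, err := deploymentIsReady("kube-system", "coredns")
	fmt.Println(n, err)
}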
If all replicas are\n// ready, then the number of replicas is returned.\nfunc (kub *Kubectl) DeploymentIsReady(namespace, deployment string) (int, error) {\n\tfullName := namespace + \"/\" + deployment\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get deployment %s -o json\", KubectlCmd, namespace, deployment))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve deployment %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.Deployment{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal deployment %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.Replicas == 0 {\n\t\treturn 0, fmt.Errorf(\"replicas count is zero\")\n\t}\n\n\tif d.Status.AvailableReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are available\", d.Status.AvailableReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.ReadyReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are ready\", d.Status.ReadyReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.UpdatedReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are up-to-date\", d.Status.UpdatedReplicas, d.Status.Replicas)\n\t}\n\n\treturn int(d.Status.Replicas), nil\n}\n\nfunc (kub *Kubectl) GetService(namespace, service string) (*v1.Service, error) {\n\tfullName := namespace + \"/\" + service\n\tres := kub.Get(namespace, \"service \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to retrieve service %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tvar serviceObj v1.Service\n\terr := res.Unmarshal(&serviceObj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal service %s: %s\", fullName, err)\n\t}\n\n\treturn &serviceObj, nil\n}\n\nfunc absoluteServiceName(namespace, service string) string {\n\tfullServiceName := service + \".\" + namespace\n\n\tif !strings.HasSuffix(fullServiceName, ServiceSuffix) {\n\t\tfullServiceName = fullServiceName + \".\" + ServiceSuffix\n\t}\n\n\treturn fullServiceName\n}\n\nfunc (kub *Kubectl) KubernetesDNSCanResolve(namespace, service string) error {\n\tserviceToResolve := absoluteServiceName(namespace, service)\n\n\tkubeDnsService, err := kub.GetService(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(kubeDnsService.Spec.Ports) == 0 {\n\t\treturn fmt.Errorf(\"kube-dns service has no ports defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\tdefer cancel()\n\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tcmd := fmt.Sprintf(\"dig +short %s @%s | grep -v -e '^;'\", serviceToResolve, kubeDnsService.Spec.ClusterIP)\n\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\tif res.err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve service name %s with DND server %s by running '%s' Cilium pod: %s\",\n\t\t\tserviceToResolve, kubeDnsService.Spec.ClusterIP, cmd, res.OutputPrettyPrint())\n\t}\n\tif net.ParseIP(res.SingleOut()) == nil {\n\t\treturn fmt.Errorf(\"dig did not return an IP: %s\", res.SingleOut())\n\t}\n\n\tdestinationService, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the destination service is headless, there is no ClusterIP, the\n\t// IP returned by the dig is the IP of one of the pods.\n\tif destinationService.Spec.ClusterIP == v1.ClusterIPNone {\n\t\tcmd := fmt.Sprintf(\"dig +tcp %s @%s\", serviceToResolve, 
kubeDnsService.Spec.ClusterIP)\n\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to resolve service name %s by running '%s': %s\",\n\t\t\t\tserviceToResolve, cmd, res.OutputPrettyPrint())\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(res.SingleOut(), destinationService.Spec.ClusterIP) {\n\t\treturn fmt.Errorf(\"IP returned '%s' does not match the ClusterIP '%s' of the destination service\",\n\t\t\tres.SingleOut(), destinationService.Spec.ClusterIP)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) validateServicePlumbingInCiliumPod(fullName, ciliumPod string, serviceObj *v1.Service, endpointsObj v1.Endpoints) error {\n\tjq := \"jq -r '[ .[].status.realized | select(.\\\"frontend-address\\\".ip==\\\"\" + serviceObj.Spec.ClusterIP + \"\\\") | . ] '\"\n\tcmd := \"cilium service list -o json | \" + jq\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn fmt.Errorf(\"ClusterIP %s not found in service list of cilium pod %s\",\n\t\t\tserviceObj.Spec.ClusterIP, ciliumPod)\n\t}\n\n\tvar realizedServices []models.ServiceSpec\n\terr := res.Unmarshal(&realizedServices)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal service spec '%s': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tcmd = \"cilium bpf lb list -o json\"\n\tres = kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar lbMap map[string][]string\n\terr = res.Unmarshal(&lbMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal cilium bpf lb list output: %s\", err)\n\t}\n\n\tfor _, port := range serviceObj.Spec.Ports {\n\t\tvar foundPort *v1.ServicePort\n\t\tfor _, realizedService := range realizedServices {\n\t\t\tif compareServicePortToFrontEnd(&port, realizedService.FrontendAddress) {\n\t\t\t\tfoundPort = &port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundPort == nil {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t\tlKey := serviceAddressKey(serviceObj.Spec.ClusterIP, fmt.Sprintf(\"%d\", port.Port), string(port.Protocol), \"\")\n\t\tif _, ok := lbMap[lKey]; !ok {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium bpf lb list of pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t}\n\n\tfor _, subset := range endpointsObj.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tfor _, port := range subset.Ports {\n\t\t\t\tfoundBackend, foundBackendLB := false, false\n\t\t\t\tfor _, realizedService := range realizedServices {\n\t\t\t\t\tfrontEnd := realizedService.FrontendAddress\n\t\t\t\t\tlbKey := serviceAddressKey(frontEnd.IP, fmt.Sprintf(\"%d\", frontEnd.Port), string(frontEnd.Protocol), \"\")\n\t\t\t\t\tlb := lbMap[lbKey]\n\t\t\t\t\tfor _, backAddr := range realizedService.BackendAddresses {\n\t\t\t\t\t\tif addr.IP == *backAddr.IP && uint16(port.Port) == backAddr.Port &&\n\t\t\t\t\t\t\tcompareProto(string(port.Protocol), backAddr.Protocol) {\n\t\t\t\t\t\t\tfoundBackend = true\n\t\t\t\t\t\t\tfor _, backend := range lb {\n\t\t\t\t\t\t\t\tif strings.Contains(backend, 
net.JoinHostPort(*backAddr.IP, fmt.Sprintf(\"%d\", port.Port))) {\n\t\t\t\t\t\t\t\t\tfoundBackendLB = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundBackend {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\n\t\t\t\tif !foundBackendLB {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in datapath of cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateServicePlumbing ensures that a service in a namespace successfully\n// plumbed by all Cilium pods in the cluster:\n// - The service and endpoints are found in `cilium service list`\n// - The service and endpoints are found in `cilium bpf lb list`\nfunc (kub *Kubectl) ValidateServicePlumbing(namespace, service string) error {\n\tfullName := namespace + \"/\" + service\n\n\tserviceObj, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceObj == nil {\n\t\treturn fmt.Errorf(\"%s service not found\", fullName)\n\t}\n\n\tres := kub.Get(namespace, \"endpoints \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve endpoints %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tif serviceObj.Spec.ClusterIP == v1.ClusterIPNone {\n\t\treturn nil\n\t}\n\n\tvar endpointsObj v1.Endpoints\n\terr = res.Unmarshal(&endpointsObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal endpoints %s: %s\", fullName, err)\n\t}\n\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg, _ := errgroup.WithContext(context.TODO())\n\tfor _, ciliumPod := range ciliumPods {\n\t\tciliumPod := ciliumPod\n\t\tg.Go(func() error {\n\t\t\tvar err error\n\t\t\t// The plumbing of Kubernetes services typically lags\n\t\t\t// behind a little bit if Cilium was just restarted.\n\t\t\t// Give this a thight timeout to avoid always failing.\n\t\t\ttimeoutErr := RepeatUntilTrue(func() bool {\n\t\t\t\terr = kub.validateServicePlumbingInCiliumPod(fullName, ciliumPod, serviceObj, endpointsObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tginkgoext.By(\"Checking service %s plumbing in cilium pod %s: %s\", fullName, ciliumPod, err)\n\t\t\t\t}\n\t\t\t\treturn err == nil\n\t\t\t}, &TimeoutConfig{Timeout: 5 * time.Second, Ticker: 1 * time.Second})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if timeoutErr != nil {\n\t\t\t\treturn timeoutErr\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ValidateKubernetesDNS validates that the Kubernetes DNS server has been\n// deployed correctly and can resolve DNS names. The following validations are\n// done:\n// - The Kuberentes DNS deployment has at least one replica\n// - All replicas are up-to-date and ready\n// - All pods matching the deployment are represented by a CiliumEndpoint with an identity\n// - The kube-system/kube-dns service is correctly pumbed in all Cilium agents\n// - The service \"default/kubernetes\" can be resolved via the KubernetesDNS\n// and the IP returned matches the ClusterIP in the service\nfunc (kub *Kubectl) ValidateKubernetesDNS() error {\n\t// The deployment is always validated first and not in parallel. 
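// Editor's note: a hedged sketch of the fan-out pattern ValidateServicePlumbing
// uses above: one errgroup goroutine per Cilium pod, each retrying its check a
// few times before giving up, since service plumbing can lag right after a
// restart. checkPod and the pod names are placeholders, not the real per-pod
// validation.
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func checkPod(pod string) error {
	// Placeholder for a per-pod validation such as comparing
	// `cilium service list` output against the Kubernetes service spec.
	return nil
}

func validateAll(pods []string) error {
	g, _ := errgroup.WithContext(context.TODO())
	for _, pod := range pods {
		pod := pod // capture loop variable for the goroutine
		g.Go(func() error {
			var err error
			for i := 0; i < 5; i++ {
				if err = checkPod(pod); err == nil {
					return nil
				}
				time.Sleep(time.Second)
			}
			return fmt.Errorf("pod %s: %w", pod, err)
		})
	}
	return g.Wait()
}

func main() {
	fmt.Println(validateAll([]string{"cilium-abc12", "cilium-def34"}))
}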
There\n\t// is no point in validating correct plumbing if the DNS is not even up\n\t// and running.\n\tginkgoext.By(\"Checking if deployment is ready\")\n\t_, err := kub.DeploymentIsReady(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\t_, err = kub.DeploymentIsReady(KubeSystemNamespace, \"coredns\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\terrQueue = make(chan error, 3)\n\t)\n\twg.Add(3)\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if pods have identity\")\n\t\tif err := kub.PodsHaveCiliumIdentity(KubeSystemNamespace, kubeDNSLabel); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if DNS can resolve\")\n\t\tif err := kub.KubernetesDNSCanResolve(\"default\", \"kubernetes\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if kube-dns service is plumbed correctly\")\n\t\tif err := kub.ValidateServicePlumbing(KubeSystemNamespace, \"kube-dns\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errQueue:\n\t\treturn err\n\tdefault:\n\t}\n\n\treturn nil\n}\n\n// RestartUnmanagedPodsInNamespace restarts all pods in a namespace which are:\n// * not host networking\n// * not managed by Cilium already\nfunc (kub *Kubectl) RestartUnmanagedPodsInNamespace(namespace string, excludePodPrefix ...string) {\n\tpodList := &v1.PodList{}\n\tcmd := KubectlCmd + \" -n \" + namespace + \" get pods -o json\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to retrieve all pods to restart unmanaged pods with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\tif err := res.Unmarshal(podList); err != nil {\n\t\tginkgoext.Failf(\"Unable to unmarshal podlist: %s\", err)\n\t}\n\niteratePods:\n\tfor _, pod := range podList.Items {\n\t\tif pod.Spec.HostNetwork || pod.DeletionTimestamp != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, prefix := range excludePodPrefix {\n\t\t\tif strings.HasPrefix(pod.Name, prefix) {\n\t\t\t\tcontinue iteratePods\n\t\t\t}\n\t\t}\n\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil || ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\tginkgoext.By(\"Restarting unmanaged pod %s/%s\", namespace, pod.Name)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete pod \" + pod.Name\n\t\t\tres = kub.Exec(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.Failf(\"Unable to restart unmanaged pod with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// RedeployKubernetesDnsIfNecessary validates if the Kubernetes DNS is\n// functional and re-deploys it if it is not and then waits for it to deploy\n// successfully and become operational. 
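// Editor's note: an illustrative sketch of the concurrency pattern used in
// ValidateKubernetesDNS above: run several independent checks in parallel,
// push failures into a buffered channel sized to the number of checks, and
// report the first error (if any) once all goroutines have finished. The
// check functions here are stand-ins for the real validations.
package main

import (
	"errors"
	"fmt"
	"sync"
)

func runChecks(checks ...func() error) error {
	var (
		wg       sync.WaitGroup
		errQueue = make(chan error, len(checks)) // buffered so no sender blocks
	)
	wg.Add(len(checks))
	for _, check := range checks {
		check := check
		go func() {
			defer wg.Done()
			if err := check(); err != nil {
				errQueue <- err
			}
		}()
	}
	wg.Wait()

	select {
	case err := <-errQueue:
		return err
	default:
		return nil
	}
}

func main() {
	err := runChecks(
		func() error { return nil },                       // e.g. pods have an identity
		func() error { return nil },                       // e.g. DNS can resolve
		func() error { return errors.New("not plumbed") }, // e.g. kube-dns service plumbing
	)
	fmt.Println(err)
}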
See ValidateKubernetesDNS() for the\n// list of conditions that must be met for Kubernetes DNS to be considered\n// operational.\nfunc (kub *Kubectl) RedeployKubernetesDnsIfNecessary() {\n\tginkgoext.By(\"Validating if Kubernetes DNS is deployed\")\n\terr := kub.ValidateKubernetesDNS()\n\tif err == nil {\n\t\tginkgoext.By(\"Kubernetes DNS is up and operational\")\n\t\treturn\n\t} else {\n\t\tginkgoext.By(\"Kubernetes DNS is not ready: %s\", err)\n\t}\n\n\tginkgoext.By(\"Restarting Kubernetes DNS (-l %s)\", kubeDNSLabel)\n\tres := kub.DeleteResource(\"pod\", \"-n \"+KubeSystemNamespace+\" -l \"+kubeDNSLabel)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to delete DNS pods: %s\", res.OutputPrettyPrint())\n\t}\n\n\tginkgoext.By(\"Waiting for Kubernetes DNS to become operational\")\n\terr = RepeatUntilTrueDefaultTimeout(func() bool {\n\t\terr := kub.ValidateKubernetesDNS()\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Kubernetes DNS is not ready yet: %s\", err)\n\t\t}\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s -l %s\", KubectlCmd, KubeSystemNamespace, kubeDNSLabel))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\n\t\tginkgoext.Fail(\"Kubernetes DNS did not become ready in time\")\n\t}\n}\n\n// WaitKubeDNS waits until the kubeDNS pods are ready. In case of exceeding the\n// default timeout it returns an error.\nfunc (kub *Kubectl) WaitKubeDNS() error {\n\treturn kub.WaitforPods(KubeSystemNamespace, fmt.Sprintf(\"-l %s\", kubeDNSLabel), DNSHelperTimeout)\n}\n\n// WaitForKubeDNSEntry waits until the given DNS entry exists in the kube-dns\n// service. If the container is not ready after timeout it returns an error. The\n// name's format query should be `${name}.${namespace}`. If `svc.cluster.local`\n// is not present, it appends to the given name and it checks the service's FQDN.\nfunc (kub *Kubectl) WaitForKubeDNSEntry(serviceName, serviceNamespace string) error {\n\tlogger := kub.Logger().WithFields(logrus.Fields{\"serviceName\": serviceName, \"serviceNamespace\": serviceNamespace})\n\n\tserviceNameWithNamespace := fmt.Sprintf(\"%s.%s\", serviceName, serviceNamespace)\n\tif !strings.HasSuffix(serviceNameWithNamespace, ServiceSuffix) {\n\t\tserviceNameWithNamespace = fmt.Sprintf(\"%s.%s\", serviceNameWithNamespace, ServiceSuffix)\n\t}\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tdigCMD := \"dig +short %s @%s | grep -v -e '^;'\"\n\n\t// If it fails we want to know if it's because of connection cannot be\n\t// established or DNS does not exist.\n\tdigCMDFallback := \"dig +tcp %s @%s\"\n\n\tdnsClusterIP, _, err := kub.GetServiceHostPort(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"cannot get kube-dns service IP\")\n\t\treturn err\n\t}\n\n\tbody := func() bool {\n\t\tserviceIP, _, err := kub.GetServiceHostPort(serviceNamespace, serviceName)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"cannot get service IP for service %s\", serviceNameWithNamespace)\n\t\t\treturn false\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\t\tdefer cancel()\n\t\t// ClusterIPNone denotes that this service is headless; there is no\n\t\t// service IP for this service, and thus the IP returned by `dig` is\n\t\t// an IP of the pod itself, not ClusterIPNone, which is what Kubernetes\n\t\t// shows as the IP for the service for headless services.\n\t\tif serviceIP == v1.ClusterIPNone {\n\t\t\tres := kub.ExecInFirstPod(ctx, 
LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\t\tif res.err != nil {\n\t\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\n\t\t\treturn res.WasSuccessful()\n\t\t}\n\t\tlog.Debugf(\"service is not headless; checking whether IP retrieved from DNS matches the IP for the service stored in Kubernetes\")\n\n\t\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\tif res.err != nil {\n\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\treturn false\n\t\t}\n\t\tserviceIPFromDNS := res.SingleOut()\n\t\tif !govalidator.IsIP(serviceIPFromDNS) {\n\t\t\tlogger.Debugf(\"output of dig (%s) did not return an IP\", serviceIPFromDNS)\n\t\t\treturn false\n\t\t}\n\n\t\t// Due to lag between new IPs for the same service being synced between // kube-apiserver and DNS, check if the IP for the service that is\n\t\t// stored in K8s matches the IP of the service cached in DNS. These\n\t\t// can be different, because some tests use the same service names.\n\t\t// Wait accordingly for services to match, and for resolving the service\n\t\t// name to resolve via DNS.\n\t\tif !strings.Contains(serviceIPFromDNS, serviceIP) {\n\t\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) does not match the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\t\t\treturn false\n\t\t}\n\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) matches the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\treturn true\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"DNS '%s' is not ready after timeout\", serviceNameWithNamespace),\n\t\t&TimeoutConfig{Timeout: DNSHelperTimeout})\n}\n\n// WaitCleanAllTerminatingPods waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. In case of excedding the\n// given timeout (in seconds) it returns an error\n\nfunc (kub *Kubectl) WaitCleanAllTerminatingPods(timeout time.Duration) error {\n\treturn kub.WaitCleanAllTerminatingPodsInNs(\"\", timeout)\n}\n\n// WaitCleanAllTerminatingPodsInNs waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. 
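// Editor's note: a standalone sketch of the dig-based check that
// KubernetesDNSCanResolve and WaitForKubeDNSEntry perform above (dig +short
// against the kube-dns ClusterIP, then verify the answer parses as an IP).
// In the real helpers the command runs inside a log-gatherer pod; here it
// runs locally, which assumes dig is installed, and the DNS server address in
// main is a placeholder.
package main

import (
	"fmt"
	"net"
	"os/exec"
	"strings"
)

func resolveViaServer(fqdn, server string) (string, error) {
	out, err := exec.Command("dig", "+short", fqdn, "@"+server).Output()
	if err != nil {
		return "", fmt.Errorf("dig failed for %s via %s: %w", fqdn, server, err)
	}
	// dig may return several lines; take the first non-comment one.
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, ";") {
			continue
		}
		if net.ParseIP(line) == nil {
			return "", fmt.Errorf("dig did not return an IP: %q", line)
		}
		return line, nil
	}
	return "", fmt.Errorf("no answer for %s", fqdn)
}

func main() {
	ip, err := resolveViaServer("kubernetes.default.svc.cluster.local", "10.96.0.10")
	fmt.Println(ip, err)
}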
In case of excedding the\n// given timeout (in seconds) it returns an error\nfunc (kub *Kubectl) WaitCleanAllTerminatingPodsInNs(ns string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\twhere := ns\n\t\tif where == \"\" {\n\t\t\twhere = \"--all-namespaces\"\n\t\t} else {\n\t\t\twhere = \"-n \" + where\n\t\t}\n\t\tres := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get pods %s -o jsonpath='{.items[*].metadata.deletionTimestamp}'\",\n\t\t\tKubectlCmd, where))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\n\t\tif res.Stdout() == \"\" {\n\t\t\t// Output is empty so no terminating containers\n\t\t\treturn true\n\t\t}\n\n\t\tpodsTerminating := len(strings.Split(res.Stdout(), \" \"))\n\t\tkub.Logger().WithField(\"Terminating pods\", podsTerminating).Info(\"List of pods terminating\")\n\t\tif podsTerminating > 0 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tbody,\n\t\t\"Pods are still not deleted after a timeout\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\n// DeployPatchStdIn deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatchStdIn(original, patch string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local --dry-run -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch %s --local --dry-run`,\n\t\tKubectlCmd, original, patch))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local -o yaml`,\n\t\t\tKubectlCmd, original, patch),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// DeployPatch deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatch(original, patchFileName string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local --dry-run`,\n\t\tKubectlCmd, original, patchFileName))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patchFileName),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// Patch patches the given object with the given patch (string).\nfunc (kub *Kubectl) Patch(namespace, objType, objName, patch string) *CmdRes {\n\tginkgoext.By(\"Patching %s %s in namespace %s\", 
objType, objName, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s patch %s %s --patch %q\",\n\t\tKubectlCmd, namespace, objType, objName, patch))\n}\n\nfunc addIfNotOverwritten(options map[string]string, field, value string) map[string]string {\n\tif _, ok := options[field]; !ok {\n\t\toptions[field] = value\n\t}\n\treturn options\n}\n\nfunc (kub *Kubectl) overwriteHelmOptions(options map[string]string) error {\n\tif integration := GetCurrentIntegration(); integration != \"\" {\n\t\toverrides := helmOverrides[integration]\n\t\tfor key, value := range overrides {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\n\t}\n\tfor key, value := range defaultHelmOptions {\n\t\toptions = addIfNotOverwritten(options, key, value)\n\t}\n\n\t// Do not schedule cilium-agent on the NO_CILIUM_ON_NODE node\n\tif node := GetNodeWithoutCilium(); node != \"\" {\n\t\topts := map[string]string{\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"cilium.io/ci-node\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"NotIn\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": node,\n\t\t}\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif !RunsWithKubeProxy() {\n\t\tnodeIP, err := kub.GetNodeIPByLabel(K8s1, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot retrieve Node IP for k8s1: %s\", err)\n\t\t}\n\n\t\topts := map[string]string{\n\t\t\t\"kubeProxyReplacement\": \"strict\",\n\t\t\t\"k8sServiceHost\": nodeIP,\n\t\t\t\"k8sServicePort\": \"6443\",\n\t\t}\n\n\t\tif RunsOnNetNextOr419Kernel() {\n\t\t\topts[\"bpf.masquerade\"] = \"true\"\n\t\t}\n\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif RunsWithHostFirewall() {\n\t\taddIfNotOverwritten(options, \"hostFirewall\", \"true\")\n\t}\n\n\tif !RunsWithKubeProxy() || options[\"hostFirewall\"] == \"true\" {\n\t\t// Set devices\n\t\tprivateIface, err := kub.GetPrivateIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultIface, err := kub.GetDefaultIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdevices := fmt.Sprintf(`'{%s,%s}'`, privateIface, defaultIface)\n\t\taddIfNotOverwritten(options, \"devices\", devices)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) generateCiliumYaml(options map[string]string, filename string) error {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO GH-8753: Use helm rendering library instead of shelling out to\n\t// helm template\n\thelmTemplate := kub.GetFilePath(HelmTemplate)\n\tres := kub.HelmTemplate(helmTemplate, CiliumNamespace, filename, options)\n\tif !res.WasSuccessful() {\n\t\t// If the helm template generation is not successful remove the empty\n\t\t// manifest file.\n\t\t_ = os.Remove(filename)\n\t\treturn res.GetErr(\"Unable to generate YAML\")\n\t}\n\n\treturn nil\n}\n\n// GetPrivateIface returns an interface name of a netdev which has InternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPrivateIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have InternalIP\", 
K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\n// GetPublicIface returns an interface name of a netdev which has ExternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPublicIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, true)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have ExternalIP\", K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\nfunc (kub *Kubectl) waitToDelete(name, label string) error {\n\tvar (\n\t\tpods []string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\n\tstatus := 1\n\tfor status > 0 {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"timed out waiting to delete %s: pods still remaining: %s\", name, pods)\n\t\tdefault:\n\t\t}\n\n\t\tpods, err = kub.GetPodNamesContext(ctx, CiliumNamespace, label)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus = len(pods)\n\t\tkub.Logger().Infof(\"%s pods terminating '%d' err='%v' pods='%v'\", name, status, err, pods)\n\t\tif status == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\n// GetDefaultIface returns an interface name which is used by a default route.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetDefaultIface() (string, error) {\n\tcmd := `ip -o r | grep default | grep -o 'dev [a-zA-Z0-9]*' | cut -d' ' -f2 | head -n1`\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), K8s1, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve default iface: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\nfunc (kub *Kubectl) DeleteCiliumDS() error {\n\t// Do not assert on success in AfterEach intentionally to avoid\n\t// incomplete teardown.\n\tginkgoext.By(\"DeleteCiliumDS(namespace=%q)\", CiliumNamespace)\n\t_ = kub.DeleteResource(\"ds\", fmt.Sprintf(\"-n %s cilium\", CiliumNamespace))\n\treturn kub.waitToDelete(\"Cilium\", CiliumAgentLabel)\n}\n\nfunc (kub *Kubectl) DeleteHubbleRelay(ns string) error {\n\tginkgoext.By(\"DeleteHubbleRelay(namespace=%q)\", ns)\n\t_ = kub.DeleteResource(\"deployment\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\t_ = kub.DeleteResource(\"service\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\treturn kub.waitToDelete(\"HubbleRelay\", HubbleRelayLabel)\n}\n\n// CiliumInstall installs Cilium with the provided Helm options.\nfunc (kub *Kubectl) CiliumInstall(filename string, options map[string]string) error {\n\t// If the file does not exist, create it so that the command `kubectl delete -f <filename>`\n\t// does not fail because there is no file.\n\t_ = kub.ExecContextShort(context.TODO(), fmt.Sprintf(\"[[ ! -f %s ]] && echo '---' >> %s\", filename, filename))\n\n\t// First try to remove any existing cilium install. 
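// Editor's note: a small sketch of the option-merging idiom used by
// addIfNotOverwritten/overwriteHelmOptions above (defaults only fill keys the
// caller did not set) and of turning the final map into `helm --set` flags as
// the RunHelm helper in this file does. The default values shown are examples,
// not the CI defaults.
package main

import (
	"fmt"
	"sort"
)

func addIfNotOverwritten(options map[string]string, field, value string) map[string]string {
	if _, ok := options[field]; !ok {
		options[field] = value
	}
	return options
}

func setFlags(options map[string]string) []string {
	keys := make([]string, 0, len(options))
	for k := range options {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic ordering for reproducible commands
	args := make([]string, 0, 2*len(keys))
	for _, k := range keys {
		args = append(args, "--set", fmt.Sprintf("%s=%s", k, options[k]))
	}
	return args
}

func main() {
	opts := map[string]string{"kubeProxyReplacement": "strict"}
	addIfNotOverwritten(opts, "kubeProxyReplacement", "disabled") // kept as "strict"
	addIfNotOverwritten(opts, "debug.enabled", "true")            // default applied
	fmt.Println(setFlags(opts))
}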
This is done by removing resources\n\t// from the file we generate cilium install manifest to.\n\tres := kub.DeleteAndWait(filename, true)\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to delete existing cilium YAML\")\n\t}\n\n\tif err := kub.generateCiliumYaml(options, filename); err != nil {\n\t\treturn err\n\t}\n\n\tres = kub.Apply(ApplyOptions{FilePath: filename, Force: true, Namespace: CiliumNamespace})\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to apply YAML\")\n\t}\n\n\treturn nil\n}\n\n// convertOptionsToLegacyOptions maps current helm values to old helm Values\n// TODO: When Cilium 1.10 branch is created, remove this function\nfunc (kub *Kubectl) convertOptionsToLegacyOptions(options map[string]string) map[string]string {\n\n\tresult := make(map[string]string)\n\n\tlegacyMappings := map[string]string{\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\",\n\t\t\"bpf.preallocateMaps\": \"global.bpf.preallocateMaps\",\n\t\t\"bpf.masquerade\": \"config.bpfMasquerade\",\n\t\t\"cleanState\": \"global.cleanState\",\n\t\t\"cni.binPath\": \"global.cni.binPath\",\n\t\t\"cni.chainingMode\": \"global.cni.chainingMode\",\n\t\t\"cni.confPath\": \"global.cni.confPath\",\n\t\t\"cni.customConf\": \"global.cni.customConf\",\n\t\t\"daemon.runPath\": \"global.daemon.runPath\",\n\t\t\"debug.enabled\": \"global.debug.enabled\",\n\t\t\"devices\": \"global.devices\", // Override \"eth0 eth0\\neth0\"\n\t\t\"enableCnpStatusUpdates\": \"config.enableCnpStatusUpdates\",\n\t\t\"etcd.leaseTTL\": \"global.etcd.leaseTTL\",\n\t\t\"externalIPs.enabled\": \"global.externalIPs.enabled\",\n\t\t\"flannel.enabled\": \"global.flannel.enabled\",\n\t\t\"gke.enabled\": \"global.gke.enabled\",\n\t\t\"hostFirewall\": \"global.hostFirewall\",\n\t\t\"hostPort.enabled\": \"global.hostPort.enabled\",\n\t\t\"hostServices.enabled\": \"global.hostServices.enabled\",\n\t\t\"hubble.enabled\": \"global.hubble.enabled\",\n\t\t\"hubble.listenAddress\": \"global.hubble.listenAddress\",\n\t\t\"hubble.relay.image.repository\": \"hubble-relay.image.repository\",\n\t\t\"hubble.relay.image.tag\": \"hubble-relay.image.tag\",\n\t\t\"image.tag\": \"global.tag\",\n\t\t\"ipam.mode\": \"config.ipam\",\n\t\t\"ipv4.enabled\": \"global.ipv4.enabled\",\n\t\t\"ipv6.enabled\": \"global.ipv6.enabled\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"global.k8s.requireIPv4PodCIDR\",\n\t\t\"k8sServiceHost\": \"global.k8sServiceHost\",\n\t\t\"k8sServicePort\": \"global.k8sServicePort\",\n\t\t\"kubeProxyReplacement\": \"global.kubeProxyReplacement\",\n\t\t\"logSystemLoad\": \"global.logSystemLoad\",\n\t\t\"masquerade\": \"global.masquerade\",\n\t\t\"nativeRoutingCIDR\": \"global.nativeRoutingCIDR\",\n\t\t\"nodeinit.enabled\": \"global.nodeinit.enabled\",\n\t\t\"nodeinit.reconfigureKubelet\": 
\"global.nodeinit.reconfigureKubelet\",\n\t\t\"nodeinit.removeCbrBridge\": \"global.nodeinit.removeCbrBridge\",\n\t\t\"nodeinit.restartPods\": \"globalnodeinit.restartPods\",\n\t\t\"nodePort.enabled\": \"global.nodePort.enabled\",\n\t\t\"nodePort.mode\": \"global.nodePort.mode\",\n\t\t\"operator.enabled\": \"operator.enabled\",\n\t\t\"pprof.enabled\": \"global.pprof.enabled\",\n\t\t\"sessionAffinity\": \"config.sessionAffinity\",\n\t\t\"sleepAfterInit\": \"agent.sleepAfterInit\",\n\t\t\"tunnel\": \"global.tunnel\",\n\t}\n\n\tfor newKey, v := range options {\n\t\tif oldKey, ok := legacyMappings[newKey]; ok {\n\t\t\tresult[oldKey] = v\n\t\t} else if !ok {\n\t\t\tif newKey == \"image.repository\" {\n\t\t\t\tresult[\"agent.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if newKey == \"operator.image.repository\" {\n\t\t\t\tif options[\"eni\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-aws:\" + options[\"image.tag\"]\n\t\t\t\t} else if options[\"azure.enabled\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-azure:\" + options[\"image.tag\"]\n\t\t\t\t} else {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-generic:\" + options[\"image.tag\"]\n\t\t\t\t}\n\t\t\t} else if newKey == \"preflight.image.repository\" {\n\t\t\t\tresult[\"preflight.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if strings.HasSuffix(newKey, \".tag\") {\n\t\t\t\t// Already handled in the if statement above\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Warningf(\"Skipping option %s\", newKey)\n\t\t\t}\n\t\t}\n\t}\n\tresult[\"ci.kubeCacheMutationDetector\"] = \"true\"\n\treturn result\n}\n\n// RunHelm runs the helm command with the given options.\nfunc (kub *Kubectl) RunHelm(action, repo, helmName, version, namespace string, options map[string]string) (*CmdRes, error) {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptionsString := \"\"\n\n\t//TODO: In 1.10 dev cycle, remove this\n\tif version == \"1.8-dev\" {\n\t\toptions = kub.convertOptionsToLegacyOptions(options)\n\t}\n\n\tfor k, v := range options {\n\t\toptionsString += fmt.Sprintf(\" --set %s=%s \", k, v)\n\t}\n\n\treturn kub.ExecMiddle(fmt.Sprintf(\"helm %s %s %s \"+\n\t\t\"--version=%s \"+\n\t\t\"--namespace=%s \"+\n\t\t\"%s\", action, helmName, repo, version, namespace, optionsString)), nil\n}\n\n// GetCiliumPods returns a list of all Cilium pods in the specified namespace,\n// and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPods() ([]string, error) {\n\treturn kub.GetPodNames(CiliumNamespace, \"k8s-app=cilium\")\n}\n\n// GetCiliumPodsContext returns a list of all Cilium pods in the specified\n// namespace, and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPodsContext(ctx context.Context, namespace string) ([]string, error) {\n\treturn kub.GetPodNamesContext(ctx, namespace, \"k8s-app=cilium\")\n}\n\n// CiliumEndpointsList returns the result of `cilium endpoint list` from the\n// specified pod.\nfunc (kub *Kubectl) CiliumEndpointsList(ctx context.Context, pod string) *CmdRes {\n\treturn kub.CiliumExecContext(ctx, pod, \"cilium endpoint list -o json\")\n}\n\n// CiliumEndpointsStatus returns a mapping of a pod name to it is corresponding\n// endpoint's status\nfunc (kub *Kubectl) CiliumEndpointsStatus(pod string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.state}{\"\\n\"}{end}`\n\tctx, cancel := 
context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint list -o jsonpath='%s'\", filter)).KVOutput()\n}\n\n// CiliumEndpointIPv6 returns the IPv6 address of each endpoint which matches\n// the given endpoint selector.\nfunc (kub *Kubectl) CiliumEndpointIPv6(pod string, endpoint string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.networking.addressing[*].ipv6}{\"\\n\"}{end}`\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint get %s -o jsonpath='%s'\", endpoint, filter)).KVOutput()\n}\n\n// CiliumEndpointWaitReady waits until all endpoints managed by all Cilium pod\n// are ready. Returns an error if the Cilium pods cannot be retrieved via\n// Kubernetes, or endpoints are not ready after a specified timeout\nfunc (kub *Kubectl) CiliumEndpointWaitReady() error {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot get Cilium pods\")\n\t\treturn err\n\t}\n\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tvar wg sync.WaitGroup\n\t\tqueue := make(chan bool, len(ciliumPods))\n\t\tendpointsReady := func(pod string) {\n\t\t\tvalid := false\n\t\t\tdefer func() {\n\t\t\t\tqueue <- valid\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tlogCtx := kub.Logger().WithField(\"pod\", pod)\n\t\t\tstatus, err := kub.CiliumEndpointsList(ctx, pod).Filter(`{range [*]}{.status.state}{\"=\"}{.status.identity.id}{\"\\n\"}{end}`)\n\t\t\tif err != nil {\n\t\t\t\tlogCtx.WithError(err).Errorf(\"cannot get endpoints states on Cilium pod\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttotal := 0\n\t\t\tinvalid := 0\n\t\t\tfor _, line := range strings.Split(status.String(), \"\\n\") {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// each line is like status=identityID.\n\t\t\t\t// IdentityID is needed because the reserved:init identity\n\t\t\t\t// means that the pod is not ready to accept traffic.\n\t\t\t\ttotal++\n\t\t\t\tvals := strings.Split(line, \"=\")\n\t\t\t\tif len(vals) != 2 {\n\t\t\t\t\tlogCtx.Errorf(\"Endpoint list does not have a correct output '%s'\", line)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif vals[0] != \"ready\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t\t// Consider an endpoint with reserved identity 5 (reserved:init) as not ready.\n\t\t\t\tif vals[1] == \"5\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogCtx.WithFields(logrus.Fields{\n\t\t\t\t\"total\": total,\n\t\t\t\t\"invalid\": invalid,\n\t\t\t}).Info(\"Waiting for cilium endpoints to be ready\")\n\n\t\t\tif invalid != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalid = true\n\t\t}\n\t\twg.Add(len(ciliumPods))\n\t\tfor _, pod := range ciliumPods {\n\t\t\tgo endpointsReady(pod)\n\t\t}\n\n\t\twg.Wait()\n\t\tclose(queue)\n\n\t\tfor status := range queue {\n\t\t\tif status == false {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\terr = WithContext(ctx, body, 1*time.Second)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tcallback := func() string {\n\t\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\t\tdefer cancel()\n\n\t\tvar errorMessage string\n\t\tfor _, pod := range ciliumPods {\n\t\t\tvar endpoints []models.Endpoint\n\t\t\tcmdRes := kub.CiliumEndpointsList(ctx, 
pod)\n\t\t\tif !cmdRes.WasSuccessful() {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to get endpoint list: %s\",\n\t\t\t\t\tpod, cmdRes.err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := cmdRes.Unmarshal(&endpoints)\n\t\t\tif err != nil {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to parse endpoint list: %s\",\n\t\t\t\t\tpod, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ep := range endpoints {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\tEndpoint: %d \\tIdentity: %d\\t State: %s\\n\",\n\t\t\t\t\tpod, ep.ID, ep.Status.Identity.ID, ep.Status.State)\n\t\t\t}\n\t\t}\n\t\treturn errorMessage\n\t}\n\treturn NewSSHMetaError(err.Error(), callback)\n}\n\n// WaitForCEPIdentity waits for a particular CEP to have an identity present.\nfunc (kub *Kubectl) WaitForCEPIdentity(ns, podName string) error {\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tep, err := kub.GetCiliumEndpoint(ns, podName)\n\t\tif err != nil || ep == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ep.Identity == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn ep.Identity.ID != 0, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\treturn WithContext(ctx, body, 1*time.Second)\n}\n\n// CiliumExecContext runs cmd in the specified Cilium pod with the given context.\nfunc (kub *Kubectl) CiliumExecContext(ctx context.Context, pod string, cmd string) *CmdRes {\n\tlimitTimes := 5\n\texecute := func() *CmdRes {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, CiliumNamespace, pod, cmd)\n\t\treturn kub.ExecContext(ctx, command)\n\t}\n\tvar res *CmdRes\n\t// Sometimes Kubectl returns 126 exit code, It use to happen in Nightly\n\t// tests when a lot of exec are in place (Cgroups issue). 
The upstream\n\t// changes did not fix the isse, and we need to make this workaround to\n\t// avoid Kubectl issue.\n\t// https://github.com/openshift/origin/issues/16246\n\tfor i := 0; i < limitTimes; i++ {\n\t\tres = execute()\n\t\tif res.GetExitCode() != 126 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\treturn res\n}\n\n// CiliumExecMustSucceed runs cmd in the specified Cilium pod.\n// it causes a test failure if the command was not successful.\nfunc (kub *Kubectl) CiliumExecMustSucceed(ctx context.Context, pod, cmd string, optionalDescription ...interface{}) *CmdRes {\n\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\tif !res.WasSuccessful() {\n\t\tres.SendToLog(false)\n\t}\n\tgomega.ExpectWithOffset(1, res).Should(\n\t\tCMDSuccess(), optionalDescription...)\n\treturn res\n}\n\n// CiliumExecUntilMatch executes the specified command repeatedly for the\n// specified Cilium pod until the given substring is present in stdout.\n// If the timeout is reached it will return an error.\nfunc (kub *Kubectl) CiliumExecUntilMatch(pod, cmd, substr string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\t\treturn strings.Contains(res.Stdout(), substr)\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"%s is not in the output after timeout\", substr),\n\t\t&TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// WaitForCiliumInitContainerToFinish waits for all Cilium init containers to\n// finish\nfunc (kub *Kubectl) WaitForCiliumInitContainerToFinish() error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(CiliumNamespace, \"-l k8s-app=cilium\").Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\t\tfor _, pod := range podList.Items {\n\t\t\tfor _, v := range pod.Status.InitContainerStatuses {\n\t\t\t\tif v.State.Terminated != nil && (v.State.Terminated.Reason != \"Completed\" || v.State.Terminated.ExitCode != 0) {\n\t\t\t\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\t\t\t\"podName\": pod.Name,\n\t\t\t\t\t\t\"currentState\": v.State.String(),\n\t\t\t\t\t}).Infof(\"Cilium Init container not completed\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\treturn WithTimeout(body, \"Cilium Init Container was not able to initialize or had a successful run\", &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumNodesWait waits until all nodes in the Kubernetes cluster are annotated\n// with Cilium annotations. Its runtime is bounded by a maximum of `HelperTimeout`.\n// When a node is annotated with said annotations, it indicates\n// that the tunnels in the nodes are set up and that cross-node traffic can be\n// tested. 
Returns an error if the timeout is exceeded for waiting for the nodes\n// to be annotated.\nfunc (kub *Kubectl) CiliumNodesWait() (bool, error) {\n\tbody := func() bool {\n\t\tfilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.metadata.annotations.io\\.cilium\\.network\\.ipv4-pod-cidr}{\"\\n\"}{end}`\n\t\tdata := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get nodes -o jsonpath='%s'\", KubectlCmd, filter))\n\t\tif !data.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\t\tresult := data.KVOutput()\n\t\tignoreNode := GetNodeWithoutCilium()\n\t\tfor k, v := range result {\n\t\t\tif k == ignoreNode {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == \"\" {\n\t\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' does not have Cilium metadata\", k)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' IPv4 address: '%v'\", k, v)\n\t\t}\n\t\treturn true\n\t}\n\terr := WithTimeout(body, \"Kubernetes node does not have cilium metadata\", &TimeoutConfig{Timeout: HelperTimeout})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// LoadedPolicyInFirstAgent returns the policy as loaded in the first cilium\n// agent that is found in the cluster\nfunc (kub *Kubectl) LoadedPolicyInFirstAgent() (string, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve cilium pods: %s\", err)\n\t}\n\tfor _, pod := range pods {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get\")\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot execute cilium policy get: %s\", res.Stdout())\n\t\t} else {\n\t\t\treturn res.CombineOutput().String(), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no running cilium pods\")\n}\n\n// WaitPolicyDeleted waits for policy policyName to be deleted from the\n// cilium-agent running in pod. Returns an error if policyName was unable to\n// be deleted after some amount of time.\nfunc (kub *Kubectl) WaitPolicyDeleted(pod string, policyName string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyName))\n\n\t\t// `cilium policy get <policy name>` fails if the policy is not loaded,\n\t\t// which is the condition we want.\n\t\treturn !res.WasSuccessful()\n\t}\n\n\treturn WithTimeout(body, fmt.Sprintf(\"Policy %s was not deleted in time\", policyName), &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumIsPolicyLoaded returns true if the policy is loaded in the given\n// cilium Pod. 
it returns false in case that the policy is not in place\nfunc (kub *Kubectl) CiliumIsPolicyLoaded(pod string, policyCmd string) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyCmd))\n\treturn res.WasSuccessful()\n}\n\n// CiliumPolicyRevision returns the policy revision in the specified Cilium pod.\n// Returns an error if the policy revision cannot be retrieved.\nfunc (kub *Kubectl) CiliumPolicyRevision(pod string) (int, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get -o json\")\n\tif !res.WasSuccessful() {\n\t\treturn -1, fmt.Errorf(\"cannot get the revision %s\", res.Stdout())\n\t}\n\n\trevision, err := res.Filter(\"{.revision}\")\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"cannot get revision from json: %s\", err)\n\t}\n\n\trevi, err := strconv.Atoi(strings.Trim(revision.String(), \"\\n\"))\n\tif err != nil {\n\t\tkub.Logger().Errorf(\"revision on pod '%s' is not valid '%s'\", pod, res.CombineOutput())\n\t\treturn -1, err\n\t}\n\treturn revi, nil\n}\n\n// ResourceLifeCycleAction represents an action performed upon objects in\n// Kubernetes.\ntype ResourceLifeCycleAction string\n\nfunc (kub *Kubectl) getPodRevisions() (map[string]int, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods\")\n\t\treturn nil, fmt.Errorf(\"Cannot get cilium pods: %s\", err)\n\t}\n\n\trevisions := make(map[string]int)\n\tfor _, pod := range pods {\n\t\trevision, err := kub.CiliumPolicyRevision(pod)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pod policy revision\")\n\t\t\treturn nil, fmt.Errorf(\"Cannot retrieve cilium pod %s policy revision: %s\", pod, err)\n\t\t}\n\t\trevisions[pod] = revision\n\t}\n\treturn revisions, nil\n}\n\nfunc (kub *Kubectl) waitNextPolicyRevisions(podRevisions map[string]int, mustHavePolicy bool, timeout time.Duration) error {\n\tnpFilter := fmt.Sprintf(\n\t\t`{range .items[*]}{\"%s=\"}{.metadata.name}{\" %s=\"}{.metadata.namespace}{\"\\n\"}{end}`,\n\t\tKubectlPolicyNameLabel, KubectlPolicyNameSpaceLabel)\n\n\tknpBody := func() bool {\n\t\tknp := kub.ExecShort(fmt.Sprintf(\"%s get --all-namespaces netpol -o jsonpath='%s'\",\n\t\t\tKubectlCmd, npFilter))\n\t\tresult := knp.ByLines()\n\t\tif len(result) == 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, item := range result {\n\t\t\tfor ciliumPod, revision := range podRevisions {\n\t\t\t\tif mustHavePolicy {\n\t\t\t\t\tif !kub.CiliumIsPolicyLoaded(ciliumPod, item) {\n\t\t\t\t\t\tkub.Logger().Infof(\"Policy '%s' is not ready on Cilium pod '%s'\", item, ciliumPod)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tdesiredRevision := revision + 1\n\t\t\t\tres := kub.CiliumExecContext(ctx, ciliumPod, fmt.Sprintf(\"cilium policy wait %d --max-wait-time %d\", desiredRevision, int(ShortCommandTimeout.Seconds())))\n\t\t\t\tif res.GetExitCode() != 0 {\n\t\t\t\t\tkub.Logger().Infof(\"Failed to wait for policy revision %d on pod %s\", desiredRevision, ciliumPod)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tknpBody,\n\t\t\"Timed out while waiting for CNP to be applied on all 
PODs\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\nfunc getPolicyEnforcingJqFilter(numNodes int) string {\n\t// Test filter: https://jqplay.org/s/EgNzc06Cgn\n\treturn fmt.Sprintf(\n\t\t`[.items[]|{name:.metadata.name, enforcing: (.status|if has(\"nodes\") then .nodes |to_entries|map_values(.value.enforcing) + [(.|length >= %d)]|all else true end)|tostring, status: has(\"status\")|tostring}]`,\n\t\tnumNodes)\n}\n\n// CiliumPolicyAction performs the specified action in Kubernetes for the policy\n// stored in path filepath and waits up until timeout seconds for the policy\n// to be applied in all Cilium endpoints. Returns an error if the policy is not\n// imported before the timeout is\n// exceeded.\nfunc (kub *Kubectl) CiliumPolicyAction(namespace, filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath, namespace); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tcmds := map[string]string{\n\t\t\t\t\"CNP\": fmt.Sprintf(\"%s get cnp --all-namespaces -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t\t\"CCNP\": fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t}\n\n\t\t\tfor ctx, cmd := range cmds {\n\t\t\t\tvar data []map[string]string\n\n\t\t\t\tres := kub.ExecShort(cmd)\n\t\t\t\tif !res.WasSuccessful() {\n\t\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Errorf(\"cannot get %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\terr := res.Unmarshal(&data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot unmarshal json for %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tfor _, item := range data {\n\t\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\t\tkub.Logger().Errorf(\"%s policy '%s' is not enforcing yet\", ctx, item[\"name\"])\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\t}\n\n\t\terr = WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting for policies to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumClusterwidePolicyAction applies a clusterwide policy action as described in action argument. 
It\n// then wait till timeout Duration for the policy to be applied to all the cilium endpoints.\nfunc (kub *Kubectl) CiliumClusterwidePolicyAction(filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tvar data []map[string]string\n\t\t\tcmd := fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\",\n\t\t\t\tKubectlCmd, jqFilter)\n\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Error(\"cannot get ccnp status\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\terr := res.Unmarshal(&data)\n\t\t\tif err != nil {\n\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot unmarshal json\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tfor _, item := range data {\n\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\tkub.Logger().Errorf(\"Clusterwide policy '%s' is not enforcing yet\", item[\"name\"])\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\terr := WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting CCNP to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumReport report the cilium pod to the log and appends the logs for the\n// given commands.\nfunc (kub *Kubectl) CiliumReport(commands ...string) {\n\tif config.CiliumTestConfig.SkipLogGathering {\n\t\tginkgoext.GinkgoPrint(\"Skipped gathering logs (-cilium.skipLogs=true)\\n\")\n\t\treturn\n\t}\n\n\t// Log gathering for Cilium should take at most 10 minutes. 
This ensures that\n\t// the CiliumReport stage doesn't cause the entire CI to hang.\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)\n\tdefer cancel()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.GatherLogs(ctx)\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.DumpCiliumCommandOutput(ctx, CiliumNamespace)\n\t}()\n\n\tkub.CiliumCheckReport(ctx)\n\n\tpods, err := kub.GetCiliumPodsContext(ctx, CiliumNamespace)\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods on ReportDump\")\n\t}\n\tres := kub.ExecContextShort(ctx, fmt.Sprintf(\"%s get pods -o wide --all-namespaces\", KubectlCmd))\n\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\n\tresults := make([]*CmdRes, 0, len(pods)*len(commands))\n\tginkgoext.GinkgoPrint(\"Fetching command output from pods %s\", pods)\n\tfor _, pod := range pods {\n\t\tfor _, cmd := range commands {\n\t\t\tres = kub.ExecPodCmdBackground(ctx, CiliumNamespace, pod, cmd, ExecOptions{SkipLog: true})\n\t\t\tresults = append(results, res)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tfor _, res := range results {\n\t\tres.WaitUntilFinish()\n\t\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\t}\n}\n\n// CiliumCheckReport prints a few checks on the Junit output to provide more\n// context to users. The list of checks that prints are the following:\n// - Number of Kubernetes and Cilium policies installed.\n// - Policy enforcement status by endpoint.\n// - Controller, health, kvstore status.\nfunc (kub *Kubectl) CiliumCheckReport(ctx context.Context) {\n\tpods, _ := kub.GetCiliumPods()\n\tfmt.Fprintf(CheckLogs, \"Cilium pods: %v\\n\", pods)\n\n\tvar policiesFilter = `{range .items[*]}{.metadata.namespace}{\"::\"}{.metadata.name}{\" \"}{end}`\n\tnetpols := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get netpol -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"Netpols loaded: %v\\n\", netpols.GetStdOut())\n\n\tcnp := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cnp -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"CiliumNetworkPolicies loaded: %v\\n\", cnp.GetStdOut())\n\n\tcepFilter := `{range .items[*]}{.metadata.name}{\"=\"}{.status.policy.ingress.enforcing}{\":\"}{.status.policy.egress.enforcing}{\"\\n\"}{end}`\n\tcepStatus := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cep -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, cepFilter))\n\n\tfmt.Fprintf(CheckLogs, \"Endpoint Policy Enforcement:\\n\")\n\n\ttable := tabwriter.NewWriter(CheckLogs, 5, 0, 3, ' ', 0)\n\tfmt.Fprintf(table, \"Pod\\tIngress\\tEgress\\n\")\n\tfor pod, policy := range cepStatus.KVOutput() {\n\t\tdata := strings.SplitN(policy, \":\", 2)\n\t\tif len(data) != 2 {\n\t\t\tdata[0] = \"invalid value\"\n\t\t\tdata[1] = \"invalid value\"\n\t\t}\n\t\tfmt.Fprintf(table, \"%s\\t%s\\t%s\\n\", pod, data[0], data[1])\n\t}\n\ttable.Flush()\n\n\tvar controllersFilter = `{range .controllers[*]}{.name}{\"=\"}{.status.consecutive-failure-count}::{.status.last-failure-msg}{\"\\n\"}{end}`\n\tvar failedControllers string\n\tfor _, pod := range pods {\n\t\tvar prefix = \"\"\n\t\tstatus := kub.CiliumExecContext(ctx, pod, \"cilium status --all-controllers -o json\")\n\t\tresult, err := status.Filter(controllersFilter)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"Cannot filter controller status output\")\n\t\t\tcontinue\n\t\t}\n\t\tvar total = 0\n\t\tvar failed = 0\n\t\tfor name, data := range 
result.KVOutput() {\n\t\t\ttotal++\n\t\t\tstatus := strings.SplitN(data, \"::\", 2)\n\t\t\tif len(status) != 2 {\n\t\t\t\t// Just make sure that the the len of the output is 2 to not\n\t\t\t\t// fail on index error in the following lines.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif status[0] != \"\" {\n\t\t\t\tfailed++\n\t\t\t\tprefix = \"⚠️ \"\n\t\t\t\tfailedControllers += fmt.Sprintf(\"controller %s failure '%s'\\n\", name, status[1])\n\t\t\t}\n\t\t}\n\t\tstatusFilter := `Status: {.cilium.state} Health: {.cluster.ciliumHealth.state}` +\n\t\t\t` Nodes \"{.cluster.nodes[*].name}\" ContinerRuntime: {.container-runtime.state}` +\n\t\t\t` Kubernetes: {.kubernetes.state} KVstore: {.kvstore.state}`\n\t\tdata, _ := status.Filter(statusFilter)\n\t\tfmt.Fprintf(CheckLogs, \"%sCilium agent '%s': %s Controllers: Total %d Failed %d\\n\",\n\t\t\tprefix, pod, data, total, failed)\n\t\tif failedControllers != \"\" {\n\t\t\tfmt.Fprintf(CheckLogs, \"Failed controllers:\\n %s\", failedControllers)\n\t\t}\n\t}\n}\n\n// ValidateNoErrorsInLogs checks that cilium logs since the given duration (By\n// default `CurrentGinkgoTestDescription().Duration`) do not contain any of the\n// known-bad messages (e.g., `deadlocks` or `segmentation faults`). In case of\n// any of these messages, it'll mark the test as failed.\nfunc (kub *Kubectl) ValidateNoErrorsInLogs(duration time.Duration) {\n\tblacklist := GetBadLogMessages()\n\tkub.ValidateListOfErrorsInLogs(duration, blacklist)\n}\n\n// ValidateListOfErrorsInLogs is similar to ValidateNoErrorsInLogs, but\n// takes a blacklist of bad log messages instead of using the default list.\nfunc (kub *Kubectl) ValidateListOfErrorsInLogs(duration time.Duration, blacklist map[string][]string) {\n\tif kub == nil {\n\t\t// if `kub` is nil, this is run after the test failed while setting up `kub` and we are unable to gather logs\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tapps := map[string]string{\n\t\t\"k8s-app=cilium\": CiliumTestLog,\n\t\t\"k8s-app=hubble-relay\": HubbleRelayTestLog,\n\t\t\"io.cilium/app=operator\": CiliumOperatorTestLog,\n\t}\n\n\twg := sync.WaitGroup{}\n\twg.Add(len(apps))\n\tfor app, file := range apps {\n\t\tgo func(app, file string) {\n\t\t\tvar logs string\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s logs --tail=-1 --timestamps=true -l %s --since=%vs\",\n\t\t\t\tKubectlCmd, CiliumNamespace, app, duration.Seconds())\n\t\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s --previous\", cmd), ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tres = kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// Keep the cilium logs for the given test in a separate file.\n\t\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot create report directory\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = ioutil.WriteFile(\n\t\t\t\t\tfmt.Sprintf(\"%s/%s\", testPath, file),\n\t\t\t\t\t[]byte(logs), LogPerm)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot create %s\", CiliumTestLog)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfailIfContainsBadLogMsg(logs, app, blacklist)\n\n\t\t\tfmt.Fprint(CheckLogs, logutils.LogErrorsSummary(logs))\n\t\t}(app, file)\n\t}\n\n\twg.Wait()\n}\n\n// GatherCiliumCoreDumps copies core dumps if are present in the /tmp folder\n// 
into the test report folder for further analysis.\nfunc (kub *Kubectl) GatherCiliumCoreDumps(ctx context.Context, ciliumPod string) {\n\tlog := kub.Logger().WithField(\"pod\", ciliumPod)\n\n\tcores := kub.CiliumExecContext(ctx, ciliumPod, \"ls /tmp/ | grep core\")\n\tif !cores.WasSuccessful() {\n\t\tlog.Debug(\"There is no core dumps in the pod\")\n\t\treturn\n\t}\n\n\ttestPath, err := CreateReportDirectory()\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\treturn\n\t}\n\tresultPath := filepath.Join(kub.BasePath(), testPath)\n\n\tfor _, core := range cores.ByLines() {\n\t\tdst := filepath.Join(resultPath, core)\n\t\tsrc := filepath.Join(\"/tmp/\", core)\n\t\tcmd := fmt.Sprintf(\"%s -n %s cp %s:%s %s\",\n\t\t\tKubectlCmd, CiliumNamespace,\n\t\t\tciliumPod, src, dst)\n\t\tres := kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.WithField(\"output\", res.CombineOutput()).Error(\"Cannot get core from pod\")\n\t\t}\n\t}\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 3424,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": {
"$CTX": {
"start": {
"line": 3357,
"col": 26,
"offset": 119860
},
"end": {
"line": 3357,
"col": 29,
"offset": 119863
},
"abstract_content": "ctx",
"unique_id": {
"type": "id",
"value": "ctx",
"kind": "Param",
"sid": 452
}
},
"$OBJ": {
"start": {
"line": 3357,
"col": 10,
"offset": 119844
},
"end": {
"line": 3357,
"col": 13,
"offset": 119847
},
"abstract_content": "kub",
"unique_id": {
"type": "AST",
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
}
},
"$FXN": {
"start": {
"line": 3355,
"col": 17,
"offset": 119751
},
"end": {
"line": 3355,
"col": 28,
"offset": 119762
},
"abstract_content": "fmt.Sprintf",
"unique_id": {
"type": "AST",
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
}
},
"$OTHER": {
"start": {
"line": 3355,
"col": 3,
"offset": 119737
},
"end": {
"line": 3355,
"col": 13,
"offset": 119747
},
"abstract_content": "bugtoolCmd",
"unique_id": {
"type": "AST",
"md5sum": "ad6efd712fbb55aeadd79bd2b3356795"
}
},
"$QUERY": {
"start": {
"line": 49,
"col": 2,
"offset": 1261
},
"end": {
"line": 49,
"col": 12,
"offset": 1271
},
"abstract_content": "KubectlCmd",
"unique_id": {
"type": "id",
"value": "KubectlCmd",
"kind": "Global",
"sid": 16
}
}
},
"metadata": {
"owasp": "A1: Injection",
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
"source-rule-url": "https://github.com/securego/gosec"
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
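waitForNPods above wraps its readiness check in a closure and hands it to WithTimeout together with a TimeoutConfig. WithTimeout is defined elsewhere in the harness; what follows is only a rough standalone sketch of the retry-until-true-or-deadline pattern it provides, with an assumed fixed interval rather than the real helper's configuration:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil re-evaluates cond on a fixed interval until it returns true or
// the timeout elapses. Illustrative only; the real WithTimeout helper takes a
// TimeoutConfig and lives elsewhere in the test framework.
func pollUntil(cond func() bool, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	err := pollUntil(func() bool { return time.Since(start) > time.Second },
		100*time.Millisecond, 5*time.Second)
	fmt.Println(err) // <nil>
}
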
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}\n\n// ApplyDefault applies give filepath with other options set to default\nfunc (kub *Kubectl) ApplyDefault(filePath string) *CmdRes {\n\treturn kub.Apply(ApplyOptions{FilePath: filePath})\n}\n\n// Create creates the Kubernetes kanifest located at path filepath.\nfunc (kub *Kubectl) Create(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"creating %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s create -f %s\", KubectlCmd, filePath))\n}\n\n// CreateResource is a wrapper around `kubernetes create <resource>\n// <resourceName>.\nfunc (kub *Kubectl) CreateResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating resource %s with name %s\", resource, resourceName))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create %s %s\", resource, resourceName))\n}\n\n// DeleteResource is a wrapper 
around `kubernetes delete <resource>\n// resourceName>.\nfunc (kub *Kubectl) DeleteResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"deleting resource %s with name %s\", resource, resourceName))\n\treturn kub.Exec(fmt.Sprintf(\"kubectl delete %s %s\", resource, resourceName))\n}\n\n// DeleteInNamespace deletes the Kubernetes manifest at path filepath in a\n// particular namespace\nfunc (kub *Kubectl) DeleteInNamespace(namespace, filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s in namespace %s\", filePath, namespace)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s -n %s delete -f %s\", KubectlCmd, namespace, filePath))\n}\n\n// Delete deletes the Kubernetes manifest at path filepath.\nfunc (kub *Kubectl) Delete(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// DeleteAndWait deletes the Kubernetes manifest at path filePath and wait\n// for the associated resources to be gone.\n// If ignoreNotFound parameter is true we don't error if the resource to be\n// deleted is not found in the cluster.\nfunc (kub *Kubectl) DeleteAndWait(filePath string, ignoreNotFound bool) *CmdRes {\n\tkub.Logger().Debugf(\"waiting for resources in %q to be deleted\", filePath)\n\tvar ignoreOpt string\n\tif ignoreNotFound {\n\t\tignoreOpt = \"--ignore-not-found\"\n\t}\n\treturn kub.ExecMiddle(\n\t\tfmt.Sprintf(\"%s delete -f %s --wait %s\", KubectlCmd, filePath, ignoreOpt))\n}\n\n// DeleteLong deletes the Kubernetes manifest at path filepath with longer timeout.\nfunc (kub *Kubectl) DeleteLong(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// PodsHaveCiliumIdentity validates that all pods matching th podSelector have\n// a CiliumEndpoint resource mirroring it and an identity is assigned to it. If\n// any pods do not match this criteria, an error is returned.\nfunc (kub *Kubectl) PodsHaveCiliumIdentity(namespace, podSelector string) error {\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o json\", KubectlCmd, namespace, podSelector))\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve pods for selector %s: %s\", podSelector, res.OutputPrettyPrint())\n\t}\n\n\tpodList := &v1.PodList{}\n\terr := res.Unmarshal(podList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal pods for selector %s: %s\", podSelector, err)\n\t}\n\n\tfor _, pod := range podList.Items {\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ep == nil {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumEndpoint\", namespace, pod.Name)\n\t\t}\n\n\t\tif ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumIdentity\", namespace, pod.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeploymentIsReady validate that a deployment has at least one replica and\n// that all replicas are:\n// - up-to-date\n// - ready\n//\n// If the above condition is not met, an error is returned. 
If all replicas are\n// ready, then the number of replicas is returned.\nfunc (kub *Kubectl) DeploymentIsReady(namespace, deployment string) (int, error) {\n\tfullName := namespace + \"/\" + deployment\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get deployment %s -o json\", KubectlCmd, namespace, deployment))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve deployment %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.Deployment{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal deployment %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.Replicas == 0 {\n\t\treturn 0, fmt.Errorf(\"replicas count is zero\")\n\t}\n\n\tif d.Status.AvailableReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are available\", d.Status.AvailableReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.ReadyReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are ready\", d.Status.ReadyReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.UpdatedReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are up-to-date\", d.Status.UpdatedReplicas, d.Status.Replicas)\n\t}\n\n\treturn int(d.Status.Replicas), nil\n}\n\nfunc (kub *Kubectl) GetService(namespace, service string) (*v1.Service, error) {\n\tfullName := namespace + \"/\" + service\n\tres := kub.Get(namespace, \"service \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to retrieve service %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tvar serviceObj v1.Service\n\terr := res.Unmarshal(&serviceObj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal service %s: %s\", fullName, err)\n\t}\n\n\treturn &serviceObj, nil\n}\n\nfunc absoluteServiceName(namespace, service string) string {\n\tfullServiceName := service + \".\" + namespace\n\n\tif !strings.HasSuffix(fullServiceName, ServiceSuffix) {\n\t\tfullServiceName = fullServiceName + \".\" + ServiceSuffix\n\t}\n\n\treturn fullServiceName\n}\n\nfunc (kub *Kubectl) KubernetesDNSCanResolve(namespace, service string) error {\n\tserviceToResolve := absoluteServiceName(namespace, service)\n\n\tkubeDnsService, err := kub.GetService(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(kubeDnsService.Spec.Ports) == 0 {\n\t\treturn fmt.Errorf(\"kube-dns service has no ports defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\tdefer cancel()\n\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tcmd := fmt.Sprintf(\"dig +short %s @%s | grep -v -e '^;'\", serviceToResolve, kubeDnsService.Spec.ClusterIP)\n\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\tif res.err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve service name %s with DND server %s by running '%s' Cilium pod: %s\",\n\t\t\tserviceToResolve, kubeDnsService.Spec.ClusterIP, cmd, res.OutputPrettyPrint())\n\t}\n\tif net.ParseIP(res.SingleOut()) == nil {\n\t\treturn fmt.Errorf(\"dig did not return an IP: %s\", res.SingleOut())\n\t}\n\n\tdestinationService, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the destination service is headless, there is no ClusterIP, the\n\t// IP returned by the dig is the IP of one of the pods.\n\tif destinationService.Spec.ClusterIP == v1.ClusterIPNone {\n\t\tcmd := fmt.Sprintf(\"dig +tcp %s @%s\", serviceToResolve, 
kubeDnsService.Spec.ClusterIP)\n\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to resolve service name %s by running '%s': %s\",\n\t\t\t\tserviceToResolve, cmd, res.OutputPrettyPrint())\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(res.SingleOut(), destinationService.Spec.ClusterIP) {\n\t\treturn fmt.Errorf(\"IP returned '%s' does not match the ClusterIP '%s' of the destination service\",\n\t\t\tres.SingleOut(), destinationService.Spec.ClusterIP)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) validateServicePlumbingInCiliumPod(fullName, ciliumPod string, serviceObj *v1.Service, endpointsObj v1.Endpoints) error {\n\tjq := \"jq -r '[ .[].status.realized | select(.\\\"frontend-address\\\".ip==\\\"\" + serviceObj.Spec.ClusterIP + \"\\\") | . ] '\"\n\tcmd := \"cilium service list -o json | \" + jq\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn fmt.Errorf(\"ClusterIP %s not found in service list of cilium pod %s\",\n\t\t\tserviceObj.Spec.ClusterIP, ciliumPod)\n\t}\n\n\tvar realizedServices []models.ServiceSpec\n\terr := res.Unmarshal(&realizedServices)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal service spec '%s': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tcmd = \"cilium bpf lb list -o json\"\n\tres = kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar lbMap map[string][]string\n\terr = res.Unmarshal(&lbMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal cilium bpf lb list output: %s\", err)\n\t}\n\n\tfor _, port := range serviceObj.Spec.Ports {\n\t\tvar foundPort *v1.ServicePort\n\t\tfor _, realizedService := range realizedServices {\n\t\t\tif compareServicePortToFrontEnd(&port, realizedService.FrontendAddress) {\n\t\t\t\tfoundPort = &port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundPort == nil {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t\tlKey := serviceAddressKey(serviceObj.Spec.ClusterIP, fmt.Sprintf(\"%d\", port.Port), string(port.Protocol), \"\")\n\t\tif _, ok := lbMap[lKey]; !ok {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium bpf lb list of pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t}\n\n\tfor _, subset := range endpointsObj.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tfor _, port := range subset.Ports {\n\t\t\t\tfoundBackend, foundBackendLB := false, false\n\t\t\t\tfor _, realizedService := range realizedServices {\n\t\t\t\t\tfrontEnd := realizedService.FrontendAddress\n\t\t\t\t\tlbKey := serviceAddressKey(frontEnd.IP, fmt.Sprintf(\"%d\", frontEnd.Port), string(frontEnd.Protocol), \"\")\n\t\t\t\t\tlb := lbMap[lbKey]\n\t\t\t\t\tfor _, backAddr := range realizedService.BackendAddresses {\n\t\t\t\t\t\tif addr.IP == *backAddr.IP && uint16(port.Port) == backAddr.Port &&\n\t\t\t\t\t\t\tcompareProto(string(port.Protocol), backAddr.Protocol) {\n\t\t\t\t\t\t\tfoundBackend = true\n\t\t\t\t\t\t\tfor _, backend := range lb {\n\t\t\t\t\t\t\t\tif strings.Contains(backend, 
net.JoinHostPort(*backAddr.IP, fmt.Sprintf(\"%d\", port.Port))) {\n\t\t\t\t\t\t\t\t\tfoundBackendLB = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundBackend {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\n\t\t\t\tif !foundBackendLB {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in datapath of cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateServicePlumbing ensures that a service in a namespace successfully\n// plumbed by all Cilium pods in the cluster:\n// - The service and endpoints are found in `cilium service list`\n// - The service and endpoints are found in `cilium bpf lb list`\nfunc (kub *Kubectl) ValidateServicePlumbing(namespace, service string) error {\n\tfullName := namespace + \"/\" + service\n\n\tserviceObj, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceObj == nil {\n\t\treturn fmt.Errorf(\"%s service not found\", fullName)\n\t}\n\n\tres := kub.Get(namespace, \"endpoints \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve endpoints %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tif serviceObj.Spec.ClusterIP == v1.ClusterIPNone {\n\t\treturn nil\n\t}\n\n\tvar endpointsObj v1.Endpoints\n\terr = res.Unmarshal(&endpointsObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal endpoints %s: %s\", fullName, err)\n\t}\n\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg, _ := errgroup.WithContext(context.TODO())\n\tfor _, ciliumPod := range ciliumPods {\n\t\tciliumPod := ciliumPod\n\t\tg.Go(func() error {\n\t\t\tvar err error\n\t\t\t// The plumbing of Kubernetes services typically lags\n\t\t\t// behind a little bit if Cilium was just restarted.\n\t\t\t// Give this a thight timeout to avoid always failing.\n\t\t\ttimeoutErr := RepeatUntilTrue(func() bool {\n\t\t\t\terr = kub.validateServicePlumbingInCiliumPod(fullName, ciliumPod, serviceObj, endpointsObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tginkgoext.By(\"Checking service %s plumbing in cilium pod %s: %s\", fullName, ciliumPod, err)\n\t\t\t\t}\n\t\t\t\treturn err == nil\n\t\t\t}, &TimeoutConfig{Timeout: 5 * time.Second, Ticker: 1 * time.Second})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if timeoutErr != nil {\n\t\t\t\treturn timeoutErr\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ValidateKubernetesDNS validates that the Kubernetes DNS server has been\n// deployed correctly and can resolve DNS names. The following validations are\n// done:\n// - The Kuberentes DNS deployment has at least one replica\n// - All replicas are up-to-date and ready\n// - All pods matching the deployment are represented by a CiliumEndpoint with an identity\n// - The kube-system/kube-dns service is correctly pumbed in all Cilium agents\n// - The service \"default/kubernetes\" can be resolved via the KubernetesDNS\n// and the IP returned matches the ClusterIP in the service\nfunc (kub *Kubectl) ValidateKubernetesDNS() error {\n\t// The deployment is always validated first and not in parallel. 
There\n\t// is no point in validating correct plumbing if the DNS is not even up\n\t// and running.\n\tginkgoext.By(\"Checking if deployment is ready\")\n\t_, err := kub.DeploymentIsReady(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\t_, err = kub.DeploymentIsReady(KubeSystemNamespace, \"coredns\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\terrQueue = make(chan error, 3)\n\t)\n\twg.Add(3)\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if pods have identity\")\n\t\tif err := kub.PodsHaveCiliumIdentity(KubeSystemNamespace, kubeDNSLabel); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if DNS can resolve\")\n\t\tif err := kub.KubernetesDNSCanResolve(\"default\", \"kubernetes\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if kube-dns service is plumbed correctly\")\n\t\tif err := kub.ValidateServicePlumbing(KubeSystemNamespace, \"kube-dns\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errQueue:\n\t\treturn err\n\tdefault:\n\t}\n\n\treturn nil\n}\n\n// RestartUnmanagedPodsInNamespace restarts all pods in a namespace which are:\n// * not host networking\n// * not managed by Cilium already\nfunc (kub *Kubectl) RestartUnmanagedPodsInNamespace(namespace string, excludePodPrefix ...string) {\n\tpodList := &v1.PodList{}\n\tcmd := KubectlCmd + \" -n \" + namespace + \" get pods -o json\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to retrieve all pods to restart unmanaged pods with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\tif err := res.Unmarshal(podList); err != nil {\n\t\tginkgoext.Failf(\"Unable to unmarshal podlist: %s\", err)\n\t}\n\niteratePods:\n\tfor _, pod := range podList.Items {\n\t\tif pod.Spec.HostNetwork || pod.DeletionTimestamp != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, prefix := range excludePodPrefix {\n\t\t\tif strings.HasPrefix(pod.Name, prefix) {\n\t\t\t\tcontinue iteratePods\n\t\t\t}\n\t\t}\n\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil || ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\tginkgoext.By(\"Restarting unmanaged pod %s/%s\", namespace, pod.Name)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete pod \" + pod.Name\n\t\t\tres = kub.Exec(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.Failf(\"Unable to restart unmanaged pod with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// RedeployKubernetesDnsIfNecessary validates if the Kubernetes DNS is\n// functional and re-deploys it if it is not and then waits for it to deploy\n// successfully and become operational. 
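ValidateKubernetesDNS above fans out its three checks with a WaitGroup and a buffered error channel, then drains the channel for the first failure. The same shape can be expressed with golang.org/x/sync/errgroup, which this file already uses for service-plumbing validation; the check functions below are stand-ins, not the real validators:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Stand-ins for the three independent checks (pod identity, DNS
	// resolution, service plumbing) run concurrently by ValidateKubernetesDNS.
	checks := []func() error{
		func() error { return nil },
		func() error { return nil },
		func() error { return fmt.Errorf("kube-dns service not plumbed") },
	}

	var g errgroup.Group
	for _, check := range checks {
		check := check // capture the loop variable (pre-Go 1.22 semantics)
		g.Go(check)
	}

	// Wait blocks until all goroutines finish and returns the first error.
	fmt.Println(g.Wait()) // kube-dns service not plumbed
}
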
See ValidateKubernetesDNS() for the\n// list of conditions that must be met for Kubernetes DNS to be considered\n// operational.\nfunc (kub *Kubectl) RedeployKubernetesDnsIfNecessary() {\n\tginkgoext.By(\"Validating if Kubernetes DNS is deployed\")\n\terr := kub.ValidateKubernetesDNS()\n\tif err == nil {\n\t\tginkgoext.By(\"Kubernetes DNS is up and operational\")\n\t\treturn\n\t} else {\n\t\tginkgoext.By(\"Kubernetes DNS is not ready: %s\", err)\n\t}\n\n\tginkgoext.By(\"Restarting Kubernetes DNS (-l %s)\", kubeDNSLabel)\n\tres := kub.DeleteResource(\"pod\", \"-n \"+KubeSystemNamespace+\" -l \"+kubeDNSLabel)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to delete DNS pods: %s\", res.OutputPrettyPrint())\n\t}\n\n\tginkgoext.By(\"Waiting for Kubernetes DNS to become operational\")\n\terr = RepeatUntilTrueDefaultTimeout(func() bool {\n\t\terr := kub.ValidateKubernetesDNS()\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Kubernetes DNS is not ready yet: %s\", err)\n\t\t}\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s -l %s\", KubectlCmd, KubeSystemNamespace, kubeDNSLabel))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\n\t\tginkgoext.Fail(\"Kubernetes DNS did not become ready in time\")\n\t}\n}\n\n// WaitKubeDNS waits until the kubeDNS pods are ready. In case of exceeding the\n// default timeout it returns an error.\nfunc (kub *Kubectl) WaitKubeDNS() error {\n\treturn kub.WaitforPods(KubeSystemNamespace, fmt.Sprintf(\"-l %s\", kubeDNSLabel), DNSHelperTimeout)\n}\n\n// WaitForKubeDNSEntry waits until the given DNS entry exists in the kube-dns\n// service. If the container is not ready after timeout it returns an error. The\n// name's format query should be `${name}.${namespace}`. If `svc.cluster.local`\n// is not present, it appends to the given name and it checks the service's FQDN.\nfunc (kub *Kubectl) WaitForKubeDNSEntry(serviceName, serviceNamespace string) error {\n\tlogger := kub.Logger().WithFields(logrus.Fields{\"serviceName\": serviceName, \"serviceNamespace\": serviceNamespace})\n\n\tserviceNameWithNamespace := fmt.Sprintf(\"%s.%s\", serviceName, serviceNamespace)\n\tif !strings.HasSuffix(serviceNameWithNamespace, ServiceSuffix) {\n\t\tserviceNameWithNamespace = fmt.Sprintf(\"%s.%s\", serviceNameWithNamespace, ServiceSuffix)\n\t}\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tdigCMD := \"dig +short %s @%s | grep -v -e '^;'\"\n\n\t// If it fails we want to know if it's because of connection cannot be\n\t// established or DNS does not exist.\n\tdigCMDFallback := \"dig +tcp %s @%s\"\n\n\tdnsClusterIP, _, err := kub.GetServiceHostPort(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"cannot get kube-dns service IP\")\n\t\treturn err\n\t}\n\n\tbody := func() bool {\n\t\tserviceIP, _, err := kub.GetServiceHostPort(serviceNamespace, serviceName)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"cannot get service IP for service %s\", serviceNameWithNamespace)\n\t\t\treturn false\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\t\tdefer cancel()\n\t\t// ClusterIPNone denotes that this service is headless; there is no\n\t\t// service IP for this service, and thus the IP returned by `dig` is\n\t\t// an IP of the pod itself, not ClusterIPNone, which is what Kubernetes\n\t\t// shows as the IP for the service for headless services.\n\t\tif serviceIP == v1.ClusterIPNone {\n\t\t\tres := kub.ExecInFirstPod(ctx, 
LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\t\tif res.err != nil {\n\t\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\n\t\t\treturn res.WasSuccessful()\n\t\t}\n\t\tlog.Debugf(\"service is not headless; checking whether IP retrieved from DNS matches the IP for the service stored in Kubernetes\")\n\n\t\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\tif res.err != nil {\n\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\treturn false\n\t\t}\n\t\tserviceIPFromDNS := res.SingleOut()\n\t\tif !govalidator.IsIP(serviceIPFromDNS) {\n\t\t\tlogger.Debugf(\"output of dig (%s) did not return an IP\", serviceIPFromDNS)\n\t\t\treturn false\n\t\t}\n\n\t\t// Due to lag between new IPs for the same service being synced between // kube-apiserver and DNS, check if the IP for the service that is\n\t\t// stored in K8s matches the IP of the service cached in DNS. These\n\t\t// can be different, because some tests use the same service names.\n\t\t// Wait accordingly for services to match, and for resolving the service\n\t\t// name to resolve via DNS.\n\t\tif !strings.Contains(serviceIPFromDNS, serviceIP) {\n\t\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) does not match the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\t\t\treturn false\n\t\t}\n\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) matches the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\treturn true\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"DNS '%s' is not ready after timeout\", serviceNameWithNamespace),\n\t\t&TimeoutConfig{Timeout: DNSHelperTimeout})\n}\n\n// WaitCleanAllTerminatingPods waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. In case of excedding the\n// given timeout (in seconds) it returns an error\n\nfunc (kub *Kubectl) WaitCleanAllTerminatingPods(timeout time.Duration) error {\n\treturn kub.WaitCleanAllTerminatingPodsInNs(\"\", timeout)\n}\n\n// WaitCleanAllTerminatingPodsInNs waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. 
In case of excedding the\n// given timeout (in seconds) it returns an error\nfunc (kub *Kubectl) WaitCleanAllTerminatingPodsInNs(ns string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\twhere := ns\n\t\tif where == \"\" {\n\t\t\twhere = \"--all-namespaces\"\n\t\t} else {\n\t\t\twhere = \"-n \" + where\n\t\t}\n\t\tres := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get pods %s -o jsonpath='{.items[*].metadata.deletionTimestamp}'\",\n\t\t\tKubectlCmd, where))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\n\t\tif res.Stdout() == \"\" {\n\t\t\t// Output is empty so no terminating containers\n\t\t\treturn true\n\t\t}\n\n\t\tpodsTerminating := len(strings.Split(res.Stdout(), \" \"))\n\t\tkub.Logger().WithField(\"Terminating pods\", podsTerminating).Info(\"List of pods terminating\")\n\t\tif podsTerminating > 0 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tbody,\n\t\t\"Pods are still not deleted after a timeout\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\n// DeployPatchStdIn deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatchStdIn(original, patch string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local --dry-run -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch %s --local --dry-run`,\n\t\tKubectlCmd, original, patch))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local -o yaml`,\n\t\t\tKubectlCmd, original, patch),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// DeployPatch deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatch(original, patchFileName string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local --dry-run`,\n\t\tKubectlCmd, original, patchFileName))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patchFileName),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// Patch patches the given object with the given patch (string).\nfunc (kub *Kubectl) Patch(namespace, objType, objName, patch string) *CmdRes {\n\tginkgoext.By(\"Patching %s %s in namespace %s\", 
objType, objName, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s patch %s %s --patch %q\",\n\t\tKubectlCmd, namespace, objType, objName, patch))\n}\n\nfunc addIfNotOverwritten(options map[string]string, field, value string) map[string]string {\n\tif _, ok := options[field]; !ok {\n\t\toptions[field] = value\n\t}\n\treturn options\n}\n\nfunc (kub *Kubectl) overwriteHelmOptions(options map[string]string) error {\n\tif integration := GetCurrentIntegration(); integration != \"\" {\n\t\toverrides := helmOverrides[integration]\n\t\tfor key, value := range overrides {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\n\t}\n\tfor key, value := range defaultHelmOptions {\n\t\toptions = addIfNotOverwritten(options, key, value)\n\t}\n\n\t// Do not schedule cilium-agent on the NO_CILIUM_ON_NODE node\n\tif node := GetNodeWithoutCilium(); node != \"\" {\n\t\topts := map[string]string{\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"cilium.io/ci-node\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"NotIn\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": node,\n\t\t}\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif !RunsWithKubeProxy() {\n\t\tnodeIP, err := kub.GetNodeIPByLabel(K8s1, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot retrieve Node IP for k8s1: %s\", err)\n\t\t}\n\n\t\topts := map[string]string{\n\t\t\t\"kubeProxyReplacement\": \"strict\",\n\t\t\t\"k8sServiceHost\": nodeIP,\n\t\t\t\"k8sServicePort\": \"6443\",\n\t\t}\n\n\t\tif RunsOnNetNextOr419Kernel() {\n\t\t\topts[\"bpf.masquerade\"] = \"true\"\n\t\t}\n\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif RunsWithHostFirewall() {\n\t\taddIfNotOverwritten(options, \"hostFirewall\", \"true\")\n\t}\n\n\tif !RunsWithKubeProxy() || options[\"hostFirewall\"] == \"true\" {\n\t\t// Set devices\n\t\tprivateIface, err := kub.GetPrivateIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultIface, err := kub.GetDefaultIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdevices := fmt.Sprintf(`'{%s,%s}'`, privateIface, defaultIface)\n\t\taddIfNotOverwritten(options, \"devices\", devices)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) generateCiliumYaml(options map[string]string, filename string) error {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO GH-8753: Use helm rendering library instead of shelling out to\n\t// helm template\n\thelmTemplate := kub.GetFilePath(HelmTemplate)\n\tres := kub.HelmTemplate(helmTemplate, CiliumNamespace, filename, options)\n\tif !res.WasSuccessful() {\n\t\t// If the helm template generation is not successful remove the empty\n\t\t// manifest file.\n\t\t_ = os.Remove(filename)\n\t\treturn res.GetErr(\"Unable to generate YAML\")\n\t}\n\n\treturn nil\n}\n\n// GetPrivateIface returns an interface name of a netdev which has InternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPrivateIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have InternalIP\", 
K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\n// GetPublicIface returns an interface name of a netdev which has ExternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPublicIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, true)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have ExternalIP\", K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\nfunc (kub *Kubectl) waitToDelete(name, label string) error {\n\tvar (\n\t\tpods []string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\n\tstatus := 1\n\tfor status > 0 {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"timed out waiting to delete %s: pods still remaining: %s\", name, pods)\n\t\tdefault:\n\t\t}\n\n\t\tpods, err = kub.GetPodNamesContext(ctx, CiliumNamespace, label)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus = len(pods)\n\t\tkub.Logger().Infof(\"%s pods terminating '%d' err='%v' pods='%v'\", name, status, err, pods)\n\t\tif status == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\n// GetDefaultIface returns an interface name which is used by a default route.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetDefaultIface() (string, error) {\n\tcmd := `ip -o r | grep default | grep -o 'dev [a-zA-Z0-9]*' | cut -d' ' -f2 | head -n1`\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), K8s1, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve default iface: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\nfunc (kub *Kubectl) DeleteCiliumDS() error {\n\t// Do not assert on success in AfterEach intentionally to avoid\n\t// incomplete teardown.\n\tginkgoext.By(\"DeleteCiliumDS(namespace=%q)\", CiliumNamespace)\n\t_ = kub.DeleteResource(\"ds\", fmt.Sprintf(\"-n %s cilium\", CiliumNamespace))\n\treturn kub.waitToDelete(\"Cilium\", CiliumAgentLabel)\n}\n\nfunc (kub *Kubectl) DeleteHubbleRelay(ns string) error {\n\tginkgoext.By(\"DeleteHubbleRelay(namespace=%q)\", ns)\n\t_ = kub.DeleteResource(\"deployment\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\t_ = kub.DeleteResource(\"service\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\treturn kub.waitToDelete(\"HubbleRelay\", HubbleRelayLabel)\n}\n\n// CiliumInstall installs Cilium with the provided Helm options.\nfunc (kub *Kubectl) CiliumInstall(filename string, options map[string]string) error {\n\t// If the file does not exist, create it so that the command `kubectl delete -f <filename>`\n\t// does not fail because there is no file.\n\t_ = kub.ExecContextShort(context.TODO(), fmt.Sprintf(\"[[ ! -f %s ]] && echo '---' >> %s\", filename, filename))\n\n\t// First try to remove any existing cilium install. 
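GetDefaultIface above extracts the default route's device by piping `ip -o r` through grep and cut inside the host network namespace. Purely to illustrate what that pipeline pulls out, here is an equivalent parse of the same output in plain Go (the sample route text is made up); the harness keeps the shell form so it can run remotely via ExecInHostNetNSByLabel:

package main

import (
	"fmt"
	"strings"
)

// defaultIface returns the "dev" field of the first default route in
// `ip -o r` output, i.e. what the grep/cut pipeline in GetDefaultIface
// extracts.
func defaultIface(ipRouteOutput string) string {
	for _, line := range strings.Split(ipRouteOutput, "\n") {
		if !strings.HasPrefix(line, "default ") {
			continue
		}
		fields := strings.Fields(line)
		for i := 0; i < len(fields)-1; i++ {
			if fields[i] == "dev" {
				return fields[i+1]
			}
		}
	}
	return ""
}

func main() {
	out := "default via 192.168.33.1 dev enp0s8 proto dhcp metric 100\n" +
		"10.0.2.0/24 dev enp0s3 proto kernel scope link src 10.0.2.15\n"
	fmt.Println(defaultIface(out)) // enp0s8
}
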
This is done by removing resources\n\t// from the file we generate cilium install manifest to.\n\tres := kub.DeleteAndWait(filename, true)\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to delete existing cilium YAML\")\n\t}\n\n\tif err := kub.generateCiliumYaml(options, filename); err != nil {\n\t\treturn err\n\t}\n\n\tres = kub.Apply(ApplyOptions{FilePath: filename, Force: true, Namespace: CiliumNamespace})\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to apply YAML\")\n\t}\n\n\treturn nil\n}\n\n// convertOptionsToLegacyOptions maps current helm values to old helm Values\n// TODO: When Cilium 1.10 branch is created, remove this function\nfunc (kub *Kubectl) convertOptionsToLegacyOptions(options map[string]string) map[string]string {\n\n\tresult := make(map[string]string)\n\n\tlegacyMappings := map[string]string{\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\",\n\t\t\"bpf.preallocateMaps\": \"global.bpf.preallocateMaps\",\n\t\t\"bpf.masquerade\": \"config.bpfMasquerade\",\n\t\t\"cleanState\": \"global.cleanState\",\n\t\t\"cni.binPath\": \"global.cni.binPath\",\n\t\t\"cni.chainingMode\": \"global.cni.chainingMode\",\n\t\t\"cni.confPath\": \"global.cni.confPath\",\n\t\t\"cni.customConf\": \"global.cni.customConf\",\n\t\t\"daemon.runPath\": \"global.daemon.runPath\",\n\t\t\"debug.enabled\": \"global.debug.enabled\",\n\t\t\"devices\": \"global.devices\", // Override \"eth0 eth0\\neth0\"\n\t\t\"enableCnpStatusUpdates\": \"config.enableCnpStatusUpdates\",\n\t\t\"etcd.leaseTTL\": \"global.etcd.leaseTTL\",\n\t\t\"externalIPs.enabled\": \"global.externalIPs.enabled\",\n\t\t\"flannel.enabled\": \"global.flannel.enabled\",\n\t\t\"gke.enabled\": \"global.gke.enabled\",\n\t\t\"hostFirewall\": \"global.hostFirewall\",\n\t\t\"hostPort.enabled\": \"global.hostPort.enabled\",\n\t\t\"hostServices.enabled\": \"global.hostServices.enabled\",\n\t\t\"hubble.enabled\": \"global.hubble.enabled\",\n\t\t\"hubble.listenAddress\": \"global.hubble.listenAddress\",\n\t\t\"hubble.relay.image.repository\": \"hubble-relay.image.repository\",\n\t\t\"hubble.relay.image.tag\": \"hubble-relay.image.tag\",\n\t\t\"image.tag\": \"global.tag\",\n\t\t\"ipam.mode\": \"config.ipam\",\n\t\t\"ipv4.enabled\": \"global.ipv4.enabled\",\n\t\t\"ipv6.enabled\": \"global.ipv6.enabled\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"global.k8s.requireIPv4PodCIDR\",\n\t\t\"k8sServiceHost\": \"global.k8sServiceHost\",\n\t\t\"k8sServicePort\": \"global.k8sServicePort\",\n\t\t\"kubeProxyReplacement\": \"global.kubeProxyReplacement\",\n\t\t\"logSystemLoad\": \"global.logSystemLoad\",\n\t\t\"masquerade\": \"global.masquerade\",\n\t\t\"nativeRoutingCIDR\": \"global.nativeRoutingCIDR\",\n\t\t\"nodeinit.enabled\": \"global.nodeinit.enabled\",\n\t\t\"nodeinit.reconfigureKubelet\": 
\"global.nodeinit.reconfigureKubelet\",\n\t\t\"nodeinit.removeCbrBridge\": \"global.nodeinit.removeCbrBridge\",\n\t\t\"nodeinit.restartPods\": \"globalnodeinit.restartPods\",\n\t\t\"nodePort.enabled\": \"global.nodePort.enabled\",\n\t\t\"nodePort.mode\": \"global.nodePort.mode\",\n\t\t\"operator.enabled\": \"operator.enabled\",\n\t\t\"pprof.enabled\": \"global.pprof.enabled\",\n\t\t\"sessionAffinity\": \"config.sessionAffinity\",\n\t\t\"sleepAfterInit\": \"agent.sleepAfterInit\",\n\t\t\"tunnel\": \"global.tunnel\",\n\t}\n\n\tfor newKey, v := range options {\n\t\tif oldKey, ok := legacyMappings[newKey]; ok {\n\t\t\tresult[oldKey] = v\n\t\t} else if !ok {\n\t\t\tif newKey == \"image.repository\" {\n\t\t\t\tresult[\"agent.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if newKey == \"operator.image.repository\" {\n\t\t\t\tif options[\"eni\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-aws:\" + options[\"image.tag\"]\n\t\t\t\t} else if options[\"azure.enabled\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-azure:\" + options[\"image.tag\"]\n\t\t\t\t} else {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-generic:\" + options[\"image.tag\"]\n\t\t\t\t}\n\t\t\t} else if newKey == \"preflight.image.repository\" {\n\t\t\t\tresult[\"preflight.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if strings.HasSuffix(newKey, \".tag\") {\n\t\t\t\t// Already handled in the if statement above\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Warningf(\"Skipping option %s\", newKey)\n\t\t\t}\n\t\t}\n\t}\n\tresult[\"ci.kubeCacheMutationDetector\"] = \"true\"\n\treturn result\n}\n\n// RunHelm runs the helm command with the given options.\nfunc (kub *Kubectl) RunHelm(action, repo, helmName, version, namespace string, options map[string]string) (*CmdRes, error) {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptionsString := \"\"\n\n\t//TODO: In 1.10 dev cycle, remove this\n\tif version == \"1.8-dev\" {\n\t\toptions = kub.convertOptionsToLegacyOptions(options)\n\t}\n\n\tfor k, v := range options {\n\t\toptionsString += fmt.Sprintf(\" --set %s=%s \", k, v)\n\t}\n\n\treturn kub.ExecMiddle(fmt.Sprintf(\"helm %s %s %s \"+\n\t\t\"--version=%s \"+\n\t\t\"--namespace=%s \"+\n\t\t\"%s\", action, helmName, repo, version, namespace, optionsString)), nil\n}\n\n// GetCiliumPods returns a list of all Cilium pods in the specified namespace,\n// and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPods() ([]string, error) {\n\treturn kub.GetPodNames(CiliumNamespace, \"k8s-app=cilium\")\n}\n\n// GetCiliumPodsContext returns a list of all Cilium pods in the specified\n// namespace, and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPodsContext(ctx context.Context, namespace string) ([]string, error) {\n\treturn kub.GetPodNamesContext(ctx, namespace, \"k8s-app=cilium\")\n}\n\n// CiliumEndpointsList returns the result of `cilium endpoint list` from the\n// specified pod.\nfunc (kub *Kubectl) CiliumEndpointsList(ctx context.Context, pod string) *CmdRes {\n\treturn kub.CiliumExecContext(ctx, pod, \"cilium endpoint list -o json\")\n}\n\n// CiliumEndpointsStatus returns a mapping of a pod name to it is corresponding\n// endpoint's status\nfunc (kub *Kubectl) CiliumEndpointsStatus(pod string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.state}{\"\\n\"}{end}`\n\tctx, cancel := 
context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint list -o jsonpath='%s'\", filter)).KVOutput()\n}\n\n// CiliumEndpointIPv6 returns the IPv6 address of each endpoint which matches\n// the given endpoint selector.\nfunc (kub *Kubectl) CiliumEndpointIPv6(pod string, endpoint string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.networking.addressing[*].ipv6}{\"\\n\"}{end}`\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint get %s -o jsonpath='%s'\", endpoint, filter)).KVOutput()\n}\n\n// CiliumEndpointWaitReady waits until all endpoints managed by all Cilium pod\n// are ready. Returns an error if the Cilium pods cannot be retrieved via\n// Kubernetes, or endpoints are not ready after a specified timeout\nfunc (kub *Kubectl) CiliumEndpointWaitReady() error {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot get Cilium pods\")\n\t\treturn err\n\t}\n\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tvar wg sync.WaitGroup\n\t\tqueue := make(chan bool, len(ciliumPods))\n\t\tendpointsReady := func(pod string) {\n\t\t\tvalid := false\n\t\t\tdefer func() {\n\t\t\t\tqueue <- valid\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tlogCtx := kub.Logger().WithField(\"pod\", pod)\n\t\t\tstatus, err := kub.CiliumEndpointsList(ctx, pod).Filter(`{range [*]}{.status.state}{\"=\"}{.status.identity.id}{\"\\n\"}{end}`)\n\t\t\tif err != nil {\n\t\t\t\tlogCtx.WithError(err).Errorf(\"cannot get endpoints states on Cilium pod\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttotal := 0\n\t\t\tinvalid := 0\n\t\t\tfor _, line := range strings.Split(status.String(), \"\\n\") {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// each line is like status=identityID.\n\t\t\t\t// IdentityID is needed because the reserved:init identity\n\t\t\t\t// means that the pod is not ready to accept traffic.\n\t\t\t\ttotal++\n\t\t\t\tvals := strings.Split(line, \"=\")\n\t\t\t\tif len(vals) != 2 {\n\t\t\t\t\tlogCtx.Errorf(\"Endpoint list does not have a correct output '%s'\", line)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif vals[0] != \"ready\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t\t// Consider an endpoint with reserved identity 5 (reserved:init) as not ready.\n\t\t\t\tif vals[1] == \"5\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogCtx.WithFields(logrus.Fields{\n\t\t\t\t\"total\": total,\n\t\t\t\t\"invalid\": invalid,\n\t\t\t}).Info(\"Waiting for cilium endpoints to be ready\")\n\n\t\t\tif invalid != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalid = true\n\t\t}\n\t\twg.Add(len(ciliumPods))\n\t\tfor _, pod := range ciliumPods {\n\t\t\tgo endpointsReady(pod)\n\t\t}\n\n\t\twg.Wait()\n\t\tclose(queue)\n\n\t\tfor status := range queue {\n\t\t\tif status == false {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\terr = WithContext(ctx, body, 1*time.Second)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tcallback := func() string {\n\t\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\t\tdefer cancel()\n\n\t\tvar errorMessage string\n\t\tfor _, pod := range ciliumPods {\n\t\t\tvar endpoints []models.Endpoint\n\t\t\tcmdRes := kub.CiliumEndpointsList(ctx, 
pod)\n\t\t\tif !cmdRes.WasSuccessful() {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to get endpoint list: %s\",\n\t\t\t\t\tpod, cmdRes.err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := cmdRes.Unmarshal(&endpoints)\n\t\t\tif err != nil {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to parse endpoint list: %s\",\n\t\t\t\t\tpod, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ep := range endpoints {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\tEndpoint: %d \\tIdentity: %d\\t State: %s\\n\",\n\t\t\t\t\tpod, ep.ID, ep.Status.Identity.ID, ep.Status.State)\n\t\t\t}\n\t\t}\n\t\treturn errorMessage\n\t}\n\treturn NewSSHMetaError(err.Error(), callback)\n}\n\n// WaitForCEPIdentity waits for a particular CEP to have an identity present.\nfunc (kub *Kubectl) WaitForCEPIdentity(ns, podName string) error {\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tep, err := kub.GetCiliumEndpoint(ns, podName)\n\t\tif err != nil || ep == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ep.Identity == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn ep.Identity.ID != 0, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\treturn WithContext(ctx, body, 1*time.Second)\n}\n\n// CiliumExecContext runs cmd in the specified Cilium pod with the given context.\nfunc (kub *Kubectl) CiliumExecContext(ctx context.Context, pod string, cmd string) *CmdRes {\n\tlimitTimes := 5\n\texecute := func() *CmdRes {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, CiliumNamespace, pod, cmd)\n\t\treturn kub.ExecContext(ctx, command)\n\t}\n\tvar res *CmdRes\n\t// Sometimes Kubectl returns 126 exit code, It use to happen in Nightly\n\t// tests when a lot of exec are in place (Cgroups issue). 
The upstream\n\t// changes did not fix the isse, and we need to make this workaround to\n\t// avoid Kubectl issue.\n\t// https://github.com/openshift/origin/issues/16246\n\tfor i := 0; i < limitTimes; i++ {\n\t\tres = execute()\n\t\tif res.GetExitCode() != 126 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\treturn res\n}\n\n// CiliumExecMustSucceed runs cmd in the specified Cilium pod.\n// it causes a test failure if the command was not successful.\nfunc (kub *Kubectl) CiliumExecMustSucceed(ctx context.Context, pod, cmd string, optionalDescription ...interface{}) *CmdRes {\n\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\tif !res.WasSuccessful() {\n\t\tres.SendToLog(false)\n\t}\n\tgomega.ExpectWithOffset(1, res).Should(\n\t\tCMDSuccess(), optionalDescription...)\n\treturn res\n}\n\n// CiliumExecUntilMatch executes the specified command repeatedly for the\n// specified Cilium pod until the given substring is present in stdout.\n// If the timeout is reached it will return an error.\nfunc (kub *Kubectl) CiliumExecUntilMatch(pod, cmd, substr string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\t\treturn strings.Contains(res.Stdout(), substr)\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"%s is not in the output after timeout\", substr),\n\t\t&TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// WaitForCiliumInitContainerToFinish waits for all Cilium init containers to\n// finish\nfunc (kub *Kubectl) WaitForCiliumInitContainerToFinish() error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(CiliumNamespace, \"-l k8s-app=cilium\").Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\t\tfor _, pod := range podList.Items {\n\t\t\tfor _, v := range pod.Status.InitContainerStatuses {\n\t\t\t\tif v.State.Terminated != nil && (v.State.Terminated.Reason != \"Completed\" || v.State.Terminated.ExitCode != 0) {\n\t\t\t\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\t\t\t\"podName\": pod.Name,\n\t\t\t\t\t\t\"currentState\": v.State.String(),\n\t\t\t\t\t}).Infof(\"Cilium Init container not completed\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\treturn WithTimeout(body, \"Cilium Init Container was not able to initialize or had a successful run\", &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumNodesWait waits until all nodes in the Kubernetes cluster are annotated\n// with Cilium annotations. Its runtime is bounded by a maximum of `HelperTimeout`.\n// When a node is annotated with said annotations, it indicates\n// that the tunnels in the nodes are set up and that cross-node traffic can be\n// tested. 
Returns an error if the timeout is exceeded for waiting for the nodes\n// to be annotated.\nfunc (kub *Kubectl) CiliumNodesWait() (bool, error) {\n\tbody := func() bool {\n\t\tfilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.metadata.annotations.io\\.cilium\\.network\\.ipv4-pod-cidr}{\"\\n\"}{end}`\n\t\tdata := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get nodes -o jsonpath='%s'\", KubectlCmd, filter))\n\t\tif !data.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\t\tresult := data.KVOutput()\n\t\tignoreNode := GetNodeWithoutCilium()\n\t\tfor k, v := range result {\n\t\t\tif k == ignoreNode {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == \"\" {\n\t\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' does not have Cilium metadata\", k)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' IPv4 address: '%v'\", k, v)\n\t\t}\n\t\treturn true\n\t}\n\terr := WithTimeout(body, \"Kubernetes node does not have cilium metadata\", &TimeoutConfig{Timeout: HelperTimeout})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// LoadedPolicyInFirstAgent returns the policy as loaded in the first cilium\n// agent that is found in the cluster\nfunc (kub *Kubectl) LoadedPolicyInFirstAgent() (string, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve cilium pods: %s\", err)\n\t}\n\tfor _, pod := range pods {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get\")\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot execute cilium policy get: %s\", res.Stdout())\n\t\t} else {\n\t\t\treturn res.CombineOutput().String(), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no running cilium pods\")\n}\n\n// WaitPolicyDeleted waits for policy policyName to be deleted from the\n// cilium-agent running in pod. Returns an error if policyName was unable to\n// be deleted after some amount of time.\nfunc (kub *Kubectl) WaitPolicyDeleted(pod string, policyName string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyName))\n\n\t\t// `cilium policy get <policy name>` fails if the policy is not loaded,\n\t\t// which is the condition we want.\n\t\treturn !res.WasSuccessful()\n\t}\n\n\treturn WithTimeout(body, fmt.Sprintf(\"Policy %s was not deleted in time\", policyName), &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumIsPolicyLoaded returns true if the policy is loaded in the given\n// cilium Pod. 
it returns false in case that the policy is not in place\nfunc (kub *Kubectl) CiliumIsPolicyLoaded(pod string, policyCmd string) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyCmd))\n\treturn res.WasSuccessful()\n}\n\n// CiliumPolicyRevision returns the policy revision in the specified Cilium pod.\n// Returns an error if the policy revision cannot be retrieved.\nfunc (kub *Kubectl) CiliumPolicyRevision(pod string) (int, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get -o json\")\n\tif !res.WasSuccessful() {\n\t\treturn -1, fmt.Errorf(\"cannot get the revision %s\", res.Stdout())\n\t}\n\n\trevision, err := res.Filter(\"{.revision}\")\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"cannot get revision from json: %s\", err)\n\t}\n\n\trevi, err := strconv.Atoi(strings.Trim(revision.String(), \"\\n\"))\n\tif err != nil {\n\t\tkub.Logger().Errorf(\"revision on pod '%s' is not valid '%s'\", pod, res.CombineOutput())\n\t\treturn -1, err\n\t}\n\treturn revi, nil\n}\n\n// ResourceLifeCycleAction represents an action performed upon objects in\n// Kubernetes.\ntype ResourceLifeCycleAction string\n\nfunc (kub *Kubectl) getPodRevisions() (map[string]int, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods\")\n\t\treturn nil, fmt.Errorf(\"Cannot get cilium pods: %s\", err)\n\t}\n\n\trevisions := make(map[string]int)\n\tfor _, pod := range pods {\n\t\trevision, err := kub.CiliumPolicyRevision(pod)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pod policy revision\")\n\t\t\treturn nil, fmt.Errorf(\"Cannot retrieve cilium pod %s policy revision: %s\", pod, err)\n\t\t}\n\t\trevisions[pod] = revision\n\t}\n\treturn revisions, nil\n}\n\nfunc (kub *Kubectl) waitNextPolicyRevisions(podRevisions map[string]int, mustHavePolicy bool, timeout time.Duration) error {\n\tnpFilter := fmt.Sprintf(\n\t\t`{range .items[*]}{\"%s=\"}{.metadata.name}{\" %s=\"}{.metadata.namespace}{\"\\n\"}{end}`,\n\t\tKubectlPolicyNameLabel, KubectlPolicyNameSpaceLabel)\n\n\tknpBody := func() bool {\n\t\tknp := kub.ExecShort(fmt.Sprintf(\"%s get --all-namespaces netpol -o jsonpath='%s'\",\n\t\t\tKubectlCmd, npFilter))\n\t\tresult := knp.ByLines()\n\t\tif len(result) == 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, item := range result {\n\t\t\tfor ciliumPod, revision := range podRevisions {\n\t\t\t\tif mustHavePolicy {\n\t\t\t\t\tif !kub.CiliumIsPolicyLoaded(ciliumPod, item) {\n\t\t\t\t\t\tkub.Logger().Infof(\"Policy '%s' is not ready on Cilium pod '%s'\", item, ciliumPod)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tdesiredRevision := revision + 1\n\t\t\t\tres := kub.CiliumExecContext(ctx, ciliumPod, fmt.Sprintf(\"cilium policy wait %d --max-wait-time %d\", desiredRevision, int(ShortCommandTimeout.Seconds())))\n\t\t\t\tif res.GetExitCode() != 0 {\n\t\t\t\t\tkub.Logger().Infof(\"Failed to wait for policy revision %d on pod %s\", desiredRevision, ciliumPod)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tknpBody,\n\t\t\"Timed out while waiting for CNP to be applied on all 
PODs\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\nfunc getPolicyEnforcingJqFilter(numNodes int) string {\n\t// Test filter: https://jqplay.org/s/EgNzc06Cgn\n\treturn fmt.Sprintf(\n\t\t`[.items[]|{name:.metadata.name, enforcing: (.status|if has(\"nodes\") then .nodes |to_entries|map_values(.value.enforcing) + [(.|length >= %d)]|all else true end)|tostring, status: has(\"status\")|tostring}]`,\n\t\tnumNodes)\n}\n\n// CiliumPolicyAction performs the specified action in Kubernetes for the policy\n// stored in path filepath and waits up until timeout seconds for the policy\n// to be applied in all Cilium endpoints. Returns an error if the policy is not\n// imported before the timeout is\n// exceeded.\nfunc (kub *Kubectl) CiliumPolicyAction(namespace, filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath, namespace); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tcmds := map[string]string{\n\t\t\t\t\"CNP\": fmt.Sprintf(\"%s get cnp --all-namespaces -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t\t\"CCNP\": fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t}\n\n\t\t\tfor ctx, cmd := range cmds {\n\t\t\t\tvar data []map[string]string\n\n\t\t\t\tres := kub.ExecShort(cmd)\n\t\t\t\tif !res.WasSuccessful() {\n\t\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Errorf(\"cannot get %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\terr := res.Unmarshal(&data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot unmarshal json for %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tfor _, item := range data {\n\t\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\t\tkub.Logger().Errorf(\"%s policy '%s' is not enforcing yet\", ctx, item[\"name\"])\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\t}\n\n\t\terr = WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting for policies to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumClusterwidePolicyAction applies a clusterwide policy action as described in action argument. 
It\n// then wait till timeout Duration for the policy to be applied to all the cilium endpoints.\nfunc (kub *Kubectl) CiliumClusterwidePolicyAction(filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tvar data []map[string]string\n\t\t\tcmd := fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\",\n\t\t\t\tKubectlCmd, jqFilter)\n\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Error(\"cannot get ccnp status\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\terr := res.Unmarshal(&data)\n\t\t\tif err != nil {\n\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot unmarshal json\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tfor _, item := range data {\n\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\tkub.Logger().Errorf(\"Clusterwide policy '%s' is not enforcing yet\", item[\"name\"])\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\terr := WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting CCNP to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumReport report the cilium pod to the log and appends the logs for the\n// given commands.\nfunc (kub *Kubectl) CiliumReport(commands ...string) {\n\tif config.CiliumTestConfig.SkipLogGathering {\n\t\tginkgoext.GinkgoPrint(\"Skipped gathering logs (-cilium.skipLogs=true)\\n\")\n\t\treturn\n\t}\n\n\t// Log gathering for Cilium should take at most 10 minutes. 
This ensures that\n\t// the CiliumReport stage doesn't cause the entire CI to hang.\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)\n\tdefer cancel()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.GatherLogs(ctx)\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.DumpCiliumCommandOutput(ctx, CiliumNamespace)\n\t}()\n\n\tkub.CiliumCheckReport(ctx)\n\n\tpods, err := kub.GetCiliumPodsContext(ctx, CiliumNamespace)\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods on ReportDump\")\n\t}\n\tres := kub.ExecContextShort(ctx, fmt.Sprintf(\"%s get pods -o wide --all-namespaces\", KubectlCmd))\n\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\n\tresults := make([]*CmdRes, 0, len(pods)*len(commands))\n\tginkgoext.GinkgoPrint(\"Fetching command output from pods %s\", pods)\n\tfor _, pod := range pods {\n\t\tfor _, cmd := range commands {\n\t\t\tres = kub.ExecPodCmdBackground(ctx, CiliumNamespace, pod, cmd, ExecOptions{SkipLog: true})\n\t\t\tresults = append(results, res)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tfor _, res := range results {\n\t\tres.WaitUntilFinish()\n\t\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\t}\n}\n\n// CiliumCheckReport prints a few checks on the Junit output to provide more\n// context to users. The list of checks that prints are the following:\n// - Number of Kubernetes and Cilium policies installed.\n// - Policy enforcement status by endpoint.\n// - Controller, health, kvstore status.\nfunc (kub *Kubectl) CiliumCheckReport(ctx context.Context) {\n\tpods, _ := kub.GetCiliumPods()\n\tfmt.Fprintf(CheckLogs, \"Cilium pods: %v\\n\", pods)\n\n\tvar policiesFilter = `{range .items[*]}{.metadata.namespace}{\"::\"}{.metadata.name}{\" \"}{end}`\n\tnetpols := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get netpol -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"Netpols loaded: %v\\n\", netpols.GetStdOut())\n\n\tcnp := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cnp -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"CiliumNetworkPolicies loaded: %v\\n\", cnp.GetStdOut())\n\n\tcepFilter := `{range .items[*]}{.metadata.name}{\"=\"}{.status.policy.ingress.enforcing}{\":\"}{.status.policy.egress.enforcing}{\"\\n\"}{end}`\n\tcepStatus := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cep -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, cepFilter))\n\n\tfmt.Fprintf(CheckLogs, \"Endpoint Policy Enforcement:\\n\")\n\n\ttable := tabwriter.NewWriter(CheckLogs, 5, 0, 3, ' ', 0)\n\tfmt.Fprintf(table, \"Pod\\tIngress\\tEgress\\n\")\n\tfor pod, policy := range cepStatus.KVOutput() {\n\t\tdata := strings.SplitN(policy, \":\", 2)\n\t\tif len(data) != 2 {\n\t\t\tdata[0] = \"invalid value\"\n\t\t\tdata[1] = \"invalid value\"\n\t\t}\n\t\tfmt.Fprintf(table, \"%s\\t%s\\t%s\\n\", pod, data[0], data[1])\n\t}\n\ttable.Flush()\n\n\tvar controllersFilter = `{range .controllers[*]}{.name}{\"=\"}{.status.consecutive-failure-count}::{.status.last-failure-msg}{\"\\n\"}{end}`\n\tvar failedControllers string\n\tfor _, pod := range pods {\n\t\tvar prefix = \"\"\n\t\tstatus := kub.CiliumExecContext(ctx, pod, \"cilium status --all-controllers -o json\")\n\t\tresult, err := status.Filter(controllersFilter)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"Cannot filter controller status output\")\n\t\t\tcontinue\n\t\t}\n\t\tvar total = 0\n\t\tvar failed = 0\n\t\tfor name, data := range 
result.KVOutput() {\n\t\t\ttotal++\n\t\t\tstatus := strings.SplitN(data, \"::\", 2)\n\t\t\tif len(status) != 2 {\n\t\t\t\t// Just make sure that the the len of the output is 2 to not\n\t\t\t\t// fail on index error in the following lines.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif status[0] != \"\" {\n\t\t\t\tfailed++\n\t\t\t\tprefix = \"⚠️ \"\n\t\t\t\tfailedControllers += fmt.Sprintf(\"controller %s failure '%s'\\n\", name, status[1])\n\t\t\t}\n\t\t}\n\t\tstatusFilter := `Status: {.cilium.state} Health: {.cluster.ciliumHealth.state}` +\n\t\t\t` Nodes \"{.cluster.nodes[*].name}\" ContinerRuntime: {.container-runtime.state}` +\n\t\t\t` Kubernetes: {.kubernetes.state} KVstore: {.kvstore.state}`\n\t\tdata, _ := status.Filter(statusFilter)\n\t\tfmt.Fprintf(CheckLogs, \"%sCilium agent '%s': %s Controllers: Total %d Failed %d\\n\",\n\t\t\tprefix, pod, data, total, failed)\n\t\tif failedControllers != \"\" {\n\t\t\tfmt.Fprintf(CheckLogs, \"Failed controllers:\\n %s\", failedControllers)\n\t\t}\n\t}\n}\n\n// ValidateNoErrorsInLogs checks that cilium logs since the given duration (By\n// default `CurrentGinkgoTestDescription().Duration`) do not contain any of the\n// known-bad messages (e.g., `deadlocks` or `segmentation faults`). In case of\n// any of these messages, it'll mark the test as failed.\nfunc (kub *Kubectl) ValidateNoErrorsInLogs(duration time.Duration) {\n\tblacklist := GetBadLogMessages()\n\tkub.ValidateListOfErrorsInLogs(duration, blacklist)\n}\n\n// ValidateListOfErrorsInLogs is similar to ValidateNoErrorsInLogs, but\n// takes a blacklist of bad log messages instead of using the default list.\nfunc (kub *Kubectl) ValidateListOfErrorsInLogs(duration time.Duration, blacklist map[string][]string) {\n\tif kub == nil {\n\t\t// if `kub` is nil, this is run after the test failed while setting up `kub` and we are unable to gather logs\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tapps := map[string]string{\n\t\t\"k8s-app=cilium\": CiliumTestLog,\n\t\t\"k8s-app=hubble-relay\": HubbleRelayTestLog,\n\t\t\"io.cilium/app=operator\": CiliumOperatorTestLog,\n\t}\n\n\twg := sync.WaitGroup{}\n\twg.Add(len(apps))\n\tfor app, file := range apps {\n\t\tgo func(app, file string) {\n\t\t\tvar logs string\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s logs --tail=-1 --timestamps=true -l %s --since=%vs\",\n\t\t\t\tKubectlCmd, CiliumNamespace, app, duration.Seconds())\n\t\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s --previous\", cmd), ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tres = kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// Keep the cilium logs for the given test in a separate file.\n\t\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot create report directory\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = ioutil.WriteFile(\n\t\t\t\t\tfmt.Sprintf(\"%s/%s\", testPath, file),\n\t\t\t\t\t[]byte(logs), LogPerm)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot create %s\", CiliumTestLog)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfailIfContainsBadLogMsg(logs, app, blacklist)\n\n\t\t\tfmt.Fprint(CheckLogs, logutils.LogErrorsSummary(logs))\n\t\t}(app, file)\n\t}\n\n\twg.Wait()\n}\n\n// GatherCiliumCoreDumps copies core dumps if are present in the /tmp folder\n// 
into the test report folder for further analysis.\nfunc (kub *Kubectl) GatherCiliumCoreDumps(ctx context.Context, ciliumPod string) {\n\tlog := kub.Logger().WithField(\"pod\", ciliumPod)\n\n\tcores := kub.CiliumExecContext(ctx, ciliumPod, \"ls /tmp/ | grep core\")\n\tif !cores.WasSuccessful() {\n\t\tlog.Debug(\"There is no core dumps in the pod\")\n\t\treturn\n\t}\n\n\ttestPath, err := CreateReportDirectory()\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\treturn\n\t}\n\tresultPath := filepath.Join(kub.BasePath(), testPath)\n\n\tfor _, core := range cores.ByLines() {\n\t\tdst := filepath.Join(resultPath, core)\n\t\tsrc := filepath.Join(\"/tmp/\", core)\n\t\tcmd := fmt.Sprintf(\"%s -n %s cp %s:%s %s\",\n\t\t\tKubectlCmd, CiliumNamespace,\n\t\t\tciliumPod, src, dst)\n\t\tres := kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.WithField(\"output\", res.CombineOutput()).Error(\"Cannot get core from pod\")\n\t\t}\n\t}\n}\n\n// ExecInFirstPod runs given command in one pod that matches given selector and namespace\n// An error is returned if no pods can be found\nfunc (kub *Kubectl) ExecInFirstPod(ctx context.Context, namespace, selector, cmd string, options ...ExecOptions) *CmdRes {\n\tnames, err := kub.GetPodNamesContext(ctx, namespace, selector)\n\tif err != nil {\n\t\treturn &CmdRes{err: err}\n\t}\n\tif len(names) == 0 {\n\t\treturn &CmdRes{err: fmt.Errorf(\"Cannot find pods matching %s to execute %s\", selector, cmd)}\n\t}\n\n\tname := names[0]\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, name, cmd)\n\treturn kub.ExecContext(ctx, command)\n}\n\n// ExecInPods runs given command on all pods in given namespace that match selector and returns map pod-name->CmdRes\nfunc (kub *Kubectl) ExecInPods(ctx context.Context, namespace, selector, cmd string, options ...ExecOptions) (results map[string]*CmdRes, err error) {\n\tnames, err := kub.GetPodNamesContext(ctx, namespace, selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults = make(map[string]*CmdRes)\n\tfor _, name := range names {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, name, cmd)\n\t\tresults[name] = kub.ExecContext(ctx, command)\n\t}\n\n\treturn results, nil\n}\n\n// ExecInHostNetNS runs given command in a pod running in a host network namespace\nfunc (kub *Kubectl) ExecInHostNetNS(ctx context.Context, node, cmd string) *CmdRes {\n\t// This is a hack, as we execute the given cmd in the log-gathering pod\n\t// which runs in the host netns. Also, the log-gathering pods lack some\n\t// packages, e.g. 
iproute2.\n\tselector := fmt.Sprintf(\"%s --field-selector spec.nodeName=%s\",\n\t\tlogGathererSelector(true), node)\n\n\treturn kub.ExecInFirstPod(ctx, LogGathererNamespace, selector, cmd)\n}\n\n// ExecInHostNetNSByLabel runs given command in a pod running in a host network namespace.\n// The pod's node is identified by the given label.\nfunc (kub *Kubectl) ExecInHostNetNSByLabel(ctx context.Context, label, cmd string) (string, error) {\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Cannot get node by label %s\", label)\n\t}\n\n\tres := kub.ExecInHostNetNS(ctx, nodeName, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"Failed to exec %q cmd on %q node: %s\", cmd, nodeName, res.GetErr(\"\"))\n\t}\n\n\treturn res.Stdout(), nil\n}\n\n// DumpCiliumCommandOutput runs a variety of commands (CiliumKubCLICommands) and writes the results to\n// TestResultsPath\nfunc (kub *Kubectl) DumpCiliumCommandOutput(ctx context.Context, namespace string) {\n\ttestPath, err := CreateReportDirectory()\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\treturn\n\t}\n\n\tReportOnPod := func(pod string) {\n\t\tlogger := kub.Logger().WithField(\"CiliumPod\", pod)\n\n\t\tlogsPath := filepath.Join(kub.BasePath(), testPath)\n\n\t\t// Get bugtool output. Since bugtool output is dumped in the pod's filesystem,\n\t\t// copy it over with `kubectl cp`.\n\t\tbugtoolCmd := fmt.Sprintf(\"%s exec -n %s %s -- %s\",\n\t\t\tKubectlCmd, namespace, pod, CiliumBugtool)\n\t\tres := kub.ExecContext(ctx, bugtoolCmd, ExecOptions{SkipLog: true})\n\t\tif !res.WasSuccessful() {\n\t\t\tlogger.Errorf(\"%s failed: %s\", bugtoolCmd, res.CombineOutput().String())\n\t\t\treturn\n\t\t}\n\t\t// Default output directory is /tmp for bugtool.\n\t\tres = kub.ExecContext(ctx, fmt.Sprintf(\"%s exec -n %s %s -- ls /tmp/\", KubectlCmd, namespace, pod))\n\t\ttmpList := res.ByLines()\n\t\tfor _, line := range tmpList {\n\t\t\t// Only copy over bugtool output to directory.\n\t\t\tif !strings.Contains(line, CiliumBugtool) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tres = kub.ExecContext(ctx, fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s /tmp/%[4]s\",\n\t\t\t\tKubectlCmd, namespace, pod, line),\n\t\t\t\tExecOptions{SkipLog: true})\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tlogger.Errorf(\"'%s' failed: %s\", res.GetCmd(), res.CombineOutput())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tarchiveName := filepath.Join(logsPath, fmt.Sprintf(\"bugtool-%s\", pod))\n\t\t\tres = kub.ExecContext(ctx, fmt.Sprintf(\"mkdir -p %q\", archiveName))\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tlogger.WithField(\"cmd\", res.GetCmd()).Errorf(\n\t\t\t\t\t\"cannot create bugtool archive folder: %s\", res.CombineOutput())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcmd := fmt.Sprintf(\"tar -xf /tmp/%s -C %q --strip-components=1\", line, archiveName)\n\t\t\tres = kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tlogger.WithField(\"cmd\", cmd).Errorf(\n\t\t\t\t\t\"Cannot untar bugtool output: %s\", res.CombineOutput())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//Remove bugtool artifact, so it'll be not used if any other fail test\n\t\t\t_ = kub.ExecPodCmdBackground(ctx, namespace, pod, fmt.Sprintf(\"rm /tmp/%s\", line))\n\t\t}\n\n\t}\n\n\tpods, err := kub.GetCiliumPodsContext(ctx, namespace)\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods on ReportDump\")\n\t\treturn\n\t}\n\n\tkub.reportMapContext(ctx, testPath, 
ciliumKubCLICommands, namespace, CiliumSelector)\n\n\t// Finally, get kvstore output - this is best effort; we do this last\n\t// because if connectivity to the kvstore is broken from a cilium pod,\n\t// we don't want the context above to timeout and as a result, get none\n\t// of the other logs from the tests.\n\n\t// Use a shorter context for kvstore-related commands to avoid having\n\t// further log-gathering fail as well if the first Cilium pod fails to\n\t// gather kvstore logs.\n\tkvstoreCmdCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\tkub.reportMapContext(kvstoreCmdCtx, testPath, ciliumKubCLICommandsKVStore, namespace, CiliumSelector)\n\n\tfor _, pod := range pods {\n\t\tReportOnPod(pod)\n\t\tkub.GatherCiliumCoreDumps(ctx, pod)\n\t}\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 756,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": { | |
"$CTX": { | |
"start": { | |
"line": 755, | |
"col": 25, | |
"offset": 26552 | |
}, | |
"end": { | |
"line": 755, | |
"col": 28, | |
"offset": 26555 | |
}, | |
"abstract_content": "ctx", | |
"unique_id": { | |
"type": "id", | |
"value": "ctx", | |
"kind": "Param", | |
"sid": 96 | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 755, | |
"col": 9, | |
"offset": 26536 | |
}, | |
"end": { | |
"line": 755, | |
"col": 12, | |
"offset": 26539 | |
}, | |
"abstract_content": "kub", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b" | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 754, | |
"col": 13, | |
"offset": 26457 | |
}, | |
"end": { | |
"line": 754, | |
"col": 24, | |
"offset": 26468 | |
}, | |
"abstract_content": "fmt.Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 754, | |
"col": 2, | |
"offset": 26446 | |
}, | |
"end": { | |
"line": 754, | |
"col": 9, | |
"offset": 26453 | |
}, | |
"abstract_content": "command", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "22de25c79fec71b1caca4adfb91b6622" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 49, | |
"col": 2, | |
"offset": 1261 | |
}, | |
"end": { | |
"line": 49, | |
"col": 12, | |
"offset": 1271 | |
}, | |
"abstract_content": "KubectlCmd", | |
"unique_id": { | |
"type": "id", | |
"value": "KubectlCmd", | |
"kind": "Global", | |
"sid": 16 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}" | |
      }
    },
    {
      "check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
      "path": "test/helpers/kubectl.go",
      "start": {
        "line": 49,
        "col": 2
      },
      "end": {
        "line": 2692,
        "col": 2
      },
      "extra": {
        "message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
        "metavars": {
          "$CTX": {
            "start": {
              "line": 2676,
              "col": 26,
              "offset": 96047
            },
            "end": {
              "line": 2676,
              "col": 29,
              "offset": 96050
            },
            "abstract_content": "ctx",
            "unique_id": {
              "type": "id",
              "value": "ctx",
              "kind": "Param",
              "sid": 352
            }
          },
          "$OBJ": {
            "start": {
              "line": 2676,
              "col": 10,
              "offset": 96031
            },
            "end": {
              "line": 2676,
              "col": 13,
              "offset": 96034
            },
            "abstract_content": "kub",
            "unique_id": {
              "type": "AST",
              "md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
            }
          },
          "$FXN": {
            "start": {
              "line": 2675,
              "col": 14,
              "offset": 95945
            },
            "end": {
              "line": 2675,
              "col": 25,
              "offset": 95956
            },
            "abstract_content": "fmt.Sprintf",
            "unique_id": {
              "type": "AST",
              "md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
            }
          },
          "$OTHER": {
            "start": {
              "line": 2675,
              "col": 3,
              "offset": 95934
            },
            "end": {
              "line": 2675,
              "col": 10,
              "offset": 95941
            },
            "abstract_content": "command",
            "unique_id": {
              "type": "AST",
              "md5sum": "22de25c79fec71b1caca4adfb91b6622"
            }
          },
          "$QUERY": {
            "start": {
              "line": 49,
              "col": 2,
              "offset": 1261
            },
            "end": {
              "line": 49,
              "col": 12,
              "offset": 1271
            },
            "abstract_content": "KubectlCmd",
            "unique_id": {
              "type": "id",
              "value": "KubectlCmd",
              "kind": "Global",
              "sid": 16
            }
          }
        },
        "metadata": {
          "owasp": "A1: Injection",
          "cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
          "source-rule-url": "https://github.com/securego/gosec"
        },
        "severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}\n\n// ApplyDefault applies give filepath with other options set to default\nfunc (kub *Kubectl) ApplyDefault(filePath string) *CmdRes {\n\treturn kub.Apply(ApplyOptions{FilePath: filePath})\n}\n\n// Create creates the Kubernetes kanifest located at path filepath.\nfunc (kub *Kubectl) Create(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"creating %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s create -f %s\", KubectlCmd, filePath))\n}\n\n// CreateResource is a wrapper around `kubernetes create <resource>\n// <resourceName>.\nfunc (kub *Kubectl) CreateResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating resource %s with name %s\", resource, resourceName))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create %s %s\", resource, resourceName))\n}\n\n// DeleteResource is a wrapper 
around `kubernetes delete <resource>\n// resourceName>.\nfunc (kub *Kubectl) DeleteResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"deleting resource %s with name %s\", resource, resourceName))\n\treturn kub.Exec(fmt.Sprintf(\"kubectl delete %s %s\", resource, resourceName))\n}\n\n// DeleteInNamespace deletes the Kubernetes manifest at path filepath in a\n// particular namespace\nfunc (kub *Kubectl) DeleteInNamespace(namespace, filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s in namespace %s\", filePath, namespace)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s -n %s delete -f %s\", KubectlCmd, namespace, filePath))\n}\n\n// Delete deletes the Kubernetes manifest at path filepath.\nfunc (kub *Kubectl) Delete(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// DeleteAndWait deletes the Kubernetes manifest at path filePath and wait\n// for the associated resources to be gone.\n// If ignoreNotFound parameter is true we don't error if the resource to be\n// deleted is not found in the cluster.\nfunc (kub *Kubectl) DeleteAndWait(filePath string, ignoreNotFound bool) *CmdRes {\n\tkub.Logger().Debugf(\"waiting for resources in %q to be deleted\", filePath)\n\tvar ignoreOpt string\n\tif ignoreNotFound {\n\t\tignoreOpt = \"--ignore-not-found\"\n\t}\n\treturn kub.ExecMiddle(\n\t\tfmt.Sprintf(\"%s delete -f %s --wait %s\", KubectlCmd, filePath, ignoreOpt))\n}\n\n// DeleteLong deletes the Kubernetes manifest at path filepath with longer timeout.\nfunc (kub *Kubectl) DeleteLong(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// PodsHaveCiliumIdentity validates that all pods matching th podSelector have\n// a CiliumEndpoint resource mirroring it and an identity is assigned to it. If\n// any pods do not match this criteria, an error is returned.\nfunc (kub *Kubectl) PodsHaveCiliumIdentity(namespace, podSelector string) error {\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o json\", KubectlCmd, namespace, podSelector))\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve pods for selector %s: %s\", podSelector, res.OutputPrettyPrint())\n\t}\n\n\tpodList := &v1.PodList{}\n\terr := res.Unmarshal(podList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal pods for selector %s: %s\", podSelector, err)\n\t}\n\n\tfor _, pod := range podList.Items {\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ep == nil {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumEndpoint\", namespace, pod.Name)\n\t\t}\n\n\t\tif ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumIdentity\", namespace, pod.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeploymentIsReady validate that a deployment has at least one replica and\n// that all replicas are:\n// - up-to-date\n// - ready\n//\n// If the above condition is not met, an error is returned. 
If all replicas are\n// ready, then the number of replicas is returned.\nfunc (kub *Kubectl) DeploymentIsReady(namespace, deployment string) (int, error) {\n\tfullName := namespace + \"/\" + deployment\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get deployment %s -o json\", KubectlCmd, namespace, deployment))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve deployment %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.Deployment{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal deployment %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.Replicas == 0 {\n\t\treturn 0, fmt.Errorf(\"replicas count is zero\")\n\t}\n\n\tif d.Status.AvailableReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are available\", d.Status.AvailableReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.ReadyReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are ready\", d.Status.ReadyReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.UpdatedReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are up-to-date\", d.Status.UpdatedReplicas, d.Status.Replicas)\n\t}\n\n\treturn int(d.Status.Replicas), nil\n}\n\nfunc (kub *Kubectl) GetService(namespace, service string) (*v1.Service, error) {\n\tfullName := namespace + \"/\" + service\n\tres := kub.Get(namespace, \"service \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to retrieve service %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tvar serviceObj v1.Service\n\terr := res.Unmarshal(&serviceObj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal service %s: %s\", fullName, err)\n\t}\n\n\treturn &serviceObj, nil\n}\n\nfunc absoluteServiceName(namespace, service string) string {\n\tfullServiceName := service + \".\" + namespace\n\n\tif !strings.HasSuffix(fullServiceName, ServiceSuffix) {\n\t\tfullServiceName = fullServiceName + \".\" + ServiceSuffix\n\t}\n\n\treturn fullServiceName\n}\n\nfunc (kub *Kubectl) KubernetesDNSCanResolve(namespace, service string) error {\n\tserviceToResolve := absoluteServiceName(namespace, service)\n\n\tkubeDnsService, err := kub.GetService(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(kubeDnsService.Spec.Ports) == 0 {\n\t\treturn fmt.Errorf(\"kube-dns service has no ports defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\tdefer cancel()\n\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tcmd := fmt.Sprintf(\"dig +short %s @%s | grep -v -e '^;'\", serviceToResolve, kubeDnsService.Spec.ClusterIP)\n\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\tif res.err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve service name %s with DND server %s by running '%s' Cilium pod: %s\",\n\t\t\tserviceToResolve, kubeDnsService.Spec.ClusterIP, cmd, res.OutputPrettyPrint())\n\t}\n\tif net.ParseIP(res.SingleOut()) == nil {\n\t\treturn fmt.Errorf(\"dig did not return an IP: %s\", res.SingleOut())\n\t}\n\n\tdestinationService, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the destination service is headless, there is no ClusterIP, the\n\t// IP returned by the dig is the IP of one of the pods.\n\tif destinationService.Spec.ClusterIP == v1.ClusterIPNone {\n\t\tcmd := fmt.Sprintf(\"dig +tcp %s @%s\", serviceToResolve, 
kubeDnsService.Spec.ClusterIP)\n\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to resolve service name %s by running '%s': %s\",\n\t\t\t\tserviceToResolve, cmd, res.OutputPrettyPrint())\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(res.SingleOut(), destinationService.Spec.ClusterIP) {\n\t\treturn fmt.Errorf(\"IP returned '%s' does not match the ClusterIP '%s' of the destination service\",\n\t\t\tres.SingleOut(), destinationService.Spec.ClusterIP)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) validateServicePlumbingInCiliumPod(fullName, ciliumPod string, serviceObj *v1.Service, endpointsObj v1.Endpoints) error {\n\tjq := \"jq -r '[ .[].status.realized | select(.\\\"frontend-address\\\".ip==\\\"\" + serviceObj.Spec.ClusterIP + \"\\\") | . ] '\"\n\tcmd := \"cilium service list -o json | \" + jq\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn fmt.Errorf(\"ClusterIP %s not found in service list of cilium pod %s\",\n\t\t\tserviceObj.Spec.ClusterIP, ciliumPod)\n\t}\n\n\tvar realizedServices []models.ServiceSpec\n\terr := res.Unmarshal(&realizedServices)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal service spec '%s': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tcmd = \"cilium bpf lb list -o json\"\n\tres = kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar lbMap map[string][]string\n\terr = res.Unmarshal(&lbMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal cilium bpf lb list output: %s\", err)\n\t}\n\n\tfor _, port := range serviceObj.Spec.Ports {\n\t\tvar foundPort *v1.ServicePort\n\t\tfor _, realizedService := range realizedServices {\n\t\t\tif compareServicePortToFrontEnd(&port, realizedService.FrontendAddress) {\n\t\t\t\tfoundPort = &port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundPort == nil {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t\tlKey := serviceAddressKey(serviceObj.Spec.ClusterIP, fmt.Sprintf(\"%d\", port.Port), string(port.Protocol), \"\")\n\t\tif _, ok := lbMap[lKey]; !ok {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium bpf lb list of pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t}\n\n\tfor _, subset := range endpointsObj.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tfor _, port := range subset.Ports {\n\t\t\t\tfoundBackend, foundBackendLB := false, false\n\t\t\t\tfor _, realizedService := range realizedServices {\n\t\t\t\t\tfrontEnd := realizedService.FrontendAddress\n\t\t\t\t\tlbKey := serviceAddressKey(frontEnd.IP, fmt.Sprintf(\"%d\", frontEnd.Port), string(frontEnd.Protocol), \"\")\n\t\t\t\t\tlb := lbMap[lbKey]\n\t\t\t\t\tfor _, backAddr := range realizedService.BackendAddresses {\n\t\t\t\t\t\tif addr.IP == *backAddr.IP && uint16(port.Port) == backAddr.Port &&\n\t\t\t\t\t\t\tcompareProto(string(port.Protocol), backAddr.Protocol) {\n\t\t\t\t\t\t\tfoundBackend = true\n\t\t\t\t\t\t\tfor _, backend := range lb {\n\t\t\t\t\t\t\t\tif strings.Contains(backend, 
net.JoinHostPort(*backAddr.IP, fmt.Sprintf(\"%d\", port.Port))) {\n\t\t\t\t\t\t\t\t\tfoundBackendLB = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundBackend {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\n\t\t\t\tif !foundBackendLB {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in datapath of cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateServicePlumbing ensures that a service in a namespace successfully\n// plumbed by all Cilium pods in the cluster:\n// - The service and endpoints are found in `cilium service list`\n// - The service and endpoints are found in `cilium bpf lb list`\nfunc (kub *Kubectl) ValidateServicePlumbing(namespace, service string) error {\n\tfullName := namespace + \"/\" + service\n\n\tserviceObj, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceObj == nil {\n\t\treturn fmt.Errorf(\"%s service not found\", fullName)\n\t}\n\n\tres := kub.Get(namespace, \"endpoints \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve endpoints %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tif serviceObj.Spec.ClusterIP == v1.ClusterIPNone {\n\t\treturn nil\n\t}\n\n\tvar endpointsObj v1.Endpoints\n\terr = res.Unmarshal(&endpointsObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal endpoints %s: %s\", fullName, err)\n\t}\n\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg, _ := errgroup.WithContext(context.TODO())\n\tfor _, ciliumPod := range ciliumPods {\n\t\tciliumPod := ciliumPod\n\t\tg.Go(func() error {\n\t\t\tvar err error\n\t\t\t// The plumbing of Kubernetes services typically lags\n\t\t\t// behind a little bit if Cilium was just restarted.\n\t\t\t// Give this a thight timeout to avoid always failing.\n\t\t\ttimeoutErr := RepeatUntilTrue(func() bool {\n\t\t\t\terr = kub.validateServicePlumbingInCiliumPod(fullName, ciliumPod, serviceObj, endpointsObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tginkgoext.By(\"Checking service %s plumbing in cilium pod %s: %s\", fullName, ciliumPod, err)\n\t\t\t\t}\n\t\t\t\treturn err == nil\n\t\t\t}, &TimeoutConfig{Timeout: 5 * time.Second, Ticker: 1 * time.Second})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if timeoutErr != nil {\n\t\t\t\treturn timeoutErr\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ValidateKubernetesDNS validates that the Kubernetes DNS server has been\n// deployed correctly and can resolve DNS names. The following validations are\n// done:\n// - The Kuberentes DNS deployment has at least one replica\n// - All replicas are up-to-date and ready\n// - All pods matching the deployment are represented by a CiliumEndpoint with an identity\n// - The kube-system/kube-dns service is correctly pumbed in all Cilium agents\n// - The service \"default/kubernetes\" can be resolved via the KubernetesDNS\n// and the IP returned matches the ClusterIP in the service\nfunc (kub *Kubectl) ValidateKubernetesDNS() error {\n\t// The deployment is always validated first and not in parallel. 
There\n\t// is no point in validating correct plumbing if the DNS is not even up\n\t// and running.\n\tginkgoext.By(\"Checking if deployment is ready\")\n\t_, err := kub.DeploymentIsReady(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\t_, err = kub.DeploymentIsReady(KubeSystemNamespace, \"coredns\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\terrQueue = make(chan error, 3)\n\t)\n\twg.Add(3)\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if pods have identity\")\n\t\tif err := kub.PodsHaveCiliumIdentity(KubeSystemNamespace, kubeDNSLabel); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if DNS can resolve\")\n\t\tif err := kub.KubernetesDNSCanResolve(\"default\", \"kubernetes\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if kube-dns service is plumbed correctly\")\n\t\tif err := kub.ValidateServicePlumbing(KubeSystemNamespace, \"kube-dns\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errQueue:\n\t\treturn err\n\tdefault:\n\t}\n\n\treturn nil\n}\n\n// RestartUnmanagedPodsInNamespace restarts all pods in a namespace which are:\n// * not host networking\n// * not managed by Cilium already\nfunc (kub *Kubectl) RestartUnmanagedPodsInNamespace(namespace string, excludePodPrefix ...string) {\n\tpodList := &v1.PodList{}\n\tcmd := KubectlCmd + \" -n \" + namespace + \" get pods -o json\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to retrieve all pods to restart unmanaged pods with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\tif err := res.Unmarshal(podList); err != nil {\n\t\tginkgoext.Failf(\"Unable to unmarshal podlist: %s\", err)\n\t}\n\niteratePods:\n\tfor _, pod := range podList.Items {\n\t\tif pod.Spec.HostNetwork || pod.DeletionTimestamp != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, prefix := range excludePodPrefix {\n\t\t\tif strings.HasPrefix(pod.Name, prefix) {\n\t\t\t\tcontinue iteratePods\n\t\t\t}\n\t\t}\n\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil || ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\tginkgoext.By(\"Restarting unmanaged pod %s/%s\", namespace, pod.Name)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete pod \" + pod.Name\n\t\t\tres = kub.Exec(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.Failf(\"Unable to restart unmanaged pod with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// RedeployKubernetesDnsIfNecessary validates if the Kubernetes DNS is\n// functional and re-deploys it if it is not and then waits for it to deploy\n// successfully and become operational. 
See ValidateKubernetesDNS() for the\n// list of conditions that must be met for Kubernetes DNS to be considered\n// operational.\nfunc (kub *Kubectl) RedeployKubernetesDnsIfNecessary() {\n\tginkgoext.By(\"Validating if Kubernetes DNS is deployed\")\n\terr := kub.ValidateKubernetesDNS()\n\tif err == nil {\n\t\tginkgoext.By(\"Kubernetes DNS is up and operational\")\n\t\treturn\n\t} else {\n\t\tginkgoext.By(\"Kubernetes DNS is not ready: %s\", err)\n\t}\n\n\tginkgoext.By(\"Restarting Kubernetes DNS (-l %s)\", kubeDNSLabel)\n\tres := kub.DeleteResource(\"pod\", \"-n \"+KubeSystemNamespace+\" -l \"+kubeDNSLabel)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to delete DNS pods: %s\", res.OutputPrettyPrint())\n\t}\n\n\tginkgoext.By(\"Waiting for Kubernetes DNS to become operational\")\n\terr = RepeatUntilTrueDefaultTimeout(func() bool {\n\t\terr := kub.ValidateKubernetesDNS()\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Kubernetes DNS is not ready yet: %s\", err)\n\t\t}\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s -l %s\", KubectlCmd, KubeSystemNamespace, kubeDNSLabel))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\n\t\tginkgoext.Fail(\"Kubernetes DNS did not become ready in time\")\n\t}\n}\n\n// WaitKubeDNS waits until the kubeDNS pods are ready. In case of exceeding the\n// default timeout it returns an error.\nfunc (kub *Kubectl) WaitKubeDNS() error {\n\treturn kub.WaitforPods(KubeSystemNamespace, fmt.Sprintf(\"-l %s\", kubeDNSLabel), DNSHelperTimeout)\n}\n\n// WaitForKubeDNSEntry waits until the given DNS entry exists in the kube-dns\n// service. If the container is not ready after timeout it returns an error. The\n// name's format query should be `${name}.${namespace}`. If `svc.cluster.local`\n// is not present, it appends to the given name and it checks the service's FQDN.\nfunc (kub *Kubectl) WaitForKubeDNSEntry(serviceName, serviceNamespace string) error {\n\tlogger := kub.Logger().WithFields(logrus.Fields{\"serviceName\": serviceName, \"serviceNamespace\": serviceNamespace})\n\n\tserviceNameWithNamespace := fmt.Sprintf(\"%s.%s\", serviceName, serviceNamespace)\n\tif !strings.HasSuffix(serviceNameWithNamespace, ServiceSuffix) {\n\t\tserviceNameWithNamespace = fmt.Sprintf(\"%s.%s\", serviceNameWithNamespace, ServiceSuffix)\n\t}\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tdigCMD := \"dig +short %s @%s | grep -v -e '^;'\"\n\n\t// If it fails we want to know if it's because of connection cannot be\n\t// established or DNS does not exist.\n\tdigCMDFallback := \"dig +tcp %s @%s\"\n\n\tdnsClusterIP, _, err := kub.GetServiceHostPort(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"cannot get kube-dns service IP\")\n\t\treturn err\n\t}\n\n\tbody := func() bool {\n\t\tserviceIP, _, err := kub.GetServiceHostPort(serviceNamespace, serviceName)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"cannot get service IP for service %s\", serviceNameWithNamespace)\n\t\t\treturn false\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\t\tdefer cancel()\n\t\t// ClusterIPNone denotes that this service is headless; there is no\n\t\t// service IP for this service, and thus the IP returned by `dig` is\n\t\t// an IP of the pod itself, not ClusterIPNone, which is what Kubernetes\n\t\t// shows as the IP for the service for headless services.\n\t\tif serviceIP == v1.ClusterIPNone {\n\t\t\tres := kub.ExecInFirstPod(ctx, 
LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\t\tif res.err != nil {\n\t\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\n\t\t\treturn res.WasSuccessful()\n\t\t}\n\t\tlog.Debugf(\"service is not headless; checking whether IP retrieved from DNS matches the IP for the service stored in Kubernetes\")\n\n\t\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\tif res.err != nil {\n\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\treturn false\n\t\t}\n\t\tserviceIPFromDNS := res.SingleOut()\n\t\tif !govalidator.IsIP(serviceIPFromDNS) {\n\t\t\tlogger.Debugf(\"output of dig (%s) did not return an IP\", serviceIPFromDNS)\n\t\t\treturn false\n\t\t}\n\n\t\t// Due to lag between new IPs for the same service being synced between // kube-apiserver and DNS, check if the IP for the service that is\n\t\t// stored in K8s matches the IP of the service cached in DNS. These\n\t\t// can be different, because some tests use the same service names.\n\t\t// Wait accordingly for services to match, and for resolving the service\n\t\t// name to resolve via DNS.\n\t\tif !strings.Contains(serviceIPFromDNS, serviceIP) {\n\t\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) does not match the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\t\t\treturn false\n\t\t}\n\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) matches the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\treturn true\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"DNS '%s' is not ready after timeout\", serviceNameWithNamespace),\n\t\t&TimeoutConfig{Timeout: DNSHelperTimeout})\n}\n\n// WaitCleanAllTerminatingPods waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. In case of excedding the\n// given timeout (in seconds) it returns an error\n\nfunc (kub *Kubectl) WaitCleanAllTerminatingPods(timeout time.Duration) error {\n\treturn kub.WaitCleanAllTerminatingPodsInNs(\"\", timeout)\n}\n\n// WaitCleanAllTerminatingPodsInNs waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. 
In case of excedding the\n// given timeout (in seconds) it returns an error\nfunc (kub *Kubectl) WaitCleanAllTerminatingPodsInNs(ns string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\twhere := ns\n\t\tif where == \"\" {\n\t\t\twhere = \"--all-namespaces\"\n\t\t} else {\n\t\t\twhere = \"-n \" + where\n\t\t}\n\t\tres := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get pods %s -o jsonpath='{.items[*].metadata.deletionTimestamp}'\",\n\t\t\tKubectlCmd, where))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\n\t\tif res.Stdout() == \"\" {\n\t\t\t// Output is empty so no terminating containers\n\t\t\treturn true\n\t\t}\n\n\t\tpodsTerminating := len(strings.Split(res.Stdout(), \" \"))\n\t\tkub.Logger().WithField(\"Terminating pods\", podsTerminating).Info(\"List of pods terminating\")\n\t\tif podsTerminating > 0 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tbody,\n\t\t\"Pods are still not deleted after a timeout\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\n// DeployPatchStdIn deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatchStdIn(original, patch string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local --dry-run -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch %s --local --dry-run`,\n\t\tKubectlCmd, original, patch))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local -o yaml`,\n\t\t\tKubectlCmd, original, patch),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// DeployPatch deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatch(original, patchFileName string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local --dry-run`,\n\t\tKubectlCmd, original, patchFileName))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patchFileName),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// Patch patches the given object with the given patch (string).\nfunc (kub *Kubectl) Patch(namespace, objType, objName, patch string) *CmdRes {\n\tginkgoext.By(\"Patching %s %s in namespace %s\", 
objType, objName, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s patch %s %s --patch %q\",\n\t\tKubectlCmd, namespace, objType, objName, patch))\n}\n\nfunc addIfNotOverwritten(options map[string]string, field, value string) map[string]string {\n\tif _, ok := options[field]; !ok {\n\t\toptions[field] = value\n\t}\n\treturn options\n}\n\nfunc (kub *Kubectl) overwriteHelmOptions(options map[string]string) error {\n\tif integration := GetCurrentIntegration(); integration != \"\" {\n\t\toverrides := helmOverrides[integration]\n\t\tfor key, value := range overrides {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\n\t}\n\tfor key, value := range defaultHelmOptions {\n\t\toptions = addIfNotOverwritten(options, key, value)\n\t}\n\n\t// Do not schedule cilium-agent on the NO_CILIUM_ON_NODE node\n\tif node := GetNodeWithoutCilium(); node != \"\" {\n\t\topts := map[string]string{\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"cilium.io/ci-node\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"NotIn\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": node,\n\t\t}\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif !RunsWithKubeProxy() {\n\t\tnodeIP, err := kub.GetNodeIPByLabel(K8s1, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot retrieve Node IP for k8s1: %s\", err)\n\t\t}\n\n\t\topts := map[string]string{\n\t\t\t\"kubeProxyReplacement\": \"strict\",\n\t\t\t\"k8sServiceHost\": nodeIP,\n\t\t\t\"k8sServicePort\": \"6443\",\n\t\t}\n\n\t\tif RunsOnNetNextOr419Kernel() {\n\t\t\topts[\"bpf.masquerade\"] = \"true\"\n\t\t}\n\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif RunsWithHostFirewall() {\n\t\taddIfNotOverwritten(options, \"hostFirewall\", \"true\")\n\t}\n\n\tif !RunsWithKubeProxy() || options[\"hostFirewall\"] == \"true\" {\n\t\t// Set devices\n\t\tprivateIface, err := kub.GetPrivateIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultIface, err := kub.GetDefaultIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdevices := fmt.Sprintf(`'{%s,%s}'`, privateIface, defaultIface)\n\t\taddIfNotOverwritten(options, \"devices\", devices)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) generateCiliumYaml(options map[string]string, filename string) error {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO GH-8753: Use helm rendering library instead of shelling out to\n\t// helm template\n\thelmTemplate := kub.GetFilePath(HelmTemplate)\n\tres := kub.HelmTemplate(helmTemplate, CiliumNamespace, filename, options)\n\tif !res.WasSuccessful() {\n\t\t// If the helm template generation is not successful remove the empty\n\t\t// manifest file.\n\t\t_ = os.Remove(filename)\n\t\treturn res.GetErr(\"Unable to generate YAML\")\n\t}\n\n\treturn nil\n}\n\n// GetPrivateIface returns an interface name of a netdev which has InternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPrivateIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have InternalIP\", 
K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\n// GetPublicIface returns an interface name of a netdev which has ExternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPublicIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, true)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have ExternalIP\", K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\nfunc (kub *Kubectl) waitToDelete(name, label string) error {\n\tvar (\n\t\tpods []string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\n\tstatus := 1\n\tfor status > 0 {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"timed out waiting to delete %s: pods still remaining: %s\", name, pods)\n\t\tdefault:\n\t\t}\n\n\t\tpods, err = kub.GetPodNamesContext(ctx, CiliumNamespace, label)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus = len(pods)\n\t\tkub.Logger().Infof(\"%s pods terminating '%d' err='%v' pods='%v'\", name, status, err, pods)\n\t\tif status == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\n// GetDefaultIface returns an interface name which is used by a default route.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetDefaultIface() (string, error) {\n\tcmd := `ip -o r | grep default | grep -o 'dev [a-zA-Z0-9]*' | cut -d' ' -f2 | head -n1`\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), K8s1, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve default iface: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\nfunc (kub *Kubectl) DeleteCiliumDS() error {\n\t// Do not assert on success in AfterEach intentionally to avoid\n\t// incomplete teardown.\n\tginkgoext.By(\"DeleteCiliumDS(namespace=%q)\", CiliumNamespace)\n\t_ = kub.DeleteResource(\"ds\", fmt.Sprintf(\"-n %s cilium\", CiliumNamespace))\n\treturn kub.waitToDelete(\"Cilium\", CiliumAgentLabel)\n}\n\nfunc (kub *Kubectl) DeleteHubbleRelay(ns string) error {\n\tginkgoext.By(\"DeleteHubbleRelay(namespace=%q)\", ns)\n\t_ = kub.DeleteResource(\"deployment\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\t_ = kub.DeleteResource(\"service\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\treturn kub.waitToDelete(\"HubbleRelay\", HubbleRelayLabel)\n}\n\n// CiliumInstall installs Cilium with the provided Helm options.\nfunc (kub *Kubectl) CiliumInstall(filename string, options map[string]string) error {\n\t// If the file does not exist, create it so that the command `kubectl delete -f <filename>`\n\t// does not fail because there is no file.\n\t_ = kub.ExecContextShort(context.TODO(), fmt.Sprintf(\"[[ ! -f %s ]] && echo '---' >> %s\", filename, filename))\n\n\t// First try to remove any existing cilium install. 
This is done by removing resources\n\t// from the file we generate cilium install manifest to.\n\tres := kub.DeleteAndWait(filename, true)\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to delete existing cilium YAML\")\n\t}\n\n\tif err := kub.generateCiliumYaml(options, filename); err != nil {\n\t\treturn err\n\t}\n\n\tres = kub.Apply(ApplyOptions{FilePath: filename, Force: true, Namespace: CiliumNamespace})\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to apply YAML\")\n\t}\n\n\treturn nil\n}\n\n// convertOptionsToLegacyOptions maps current helm values to old helm Values\n// TODO: When Cilium 1.10 branch is created, remove this function\nfunc (kub *Kubectl) convertOptionsToLegacyOptions(options map[string]string) map[string]string {\n\n\tresult := make(map[string]string)\n\n\tlegacyMappings := map[string]string{\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\",\n\t\t\"bpf.preallocateMaps\": \"global.bpf.preallocateMaps\",\n\t\t\"bpf.masquerade\": \"config.bpfMasquerade\",\n\t\t\"cleanState\": \"global.cleanState\",\n\t\t\"cni.binPath\": \"global.cni.binPath\",\n\t\t\"cni.chainingMode\": \"global.cni.chainingMode\",\n\t\t\"cni.confPath\": \"global.cni.confPath\",\n\t\t\"cni.customConf\": \"global.cni.customConf\",\n\t\t\"daemon.runPath\": \"global.daemon.runPath\",\n\t\t\"debug.enabled\": \"global.debug.enabled\",\n\t\t\"devices\": \"global.devices\", // Override \"eth0 eth0\\neth0\"\n\t\t\"enableCnpStatusUpdates\": \"config.enableCnpStatusUpdates\",\n\t\t\"etcd.leaseTTL\": \"global.etcd.leaseTTL\",\n\t\t\"externalIPs.enabled\": \"global.externalIPs.enabled\",\n\t\t\"flannel.enabled\": \"global.flannel.enabled\",\n\t\t\"gke.enabled\": \"global.gke.enabled\",\n\t\t\"hostFirewall\": \"global.hostFirewall\",\n\t\t\"hostPort.enabled\": \"global.hostPort.enabled\",\n\t\t\"hostServices.enabled\": \"global.hostServices.enabled\",\n\t\t\"hubble.enabled\": \"global.hubble.enabled\",\n\t\t\"hubble.listenAddress\": \"global.hubble.listenAddress\",\n\t\t\"hubble.relay.image.repository\": \"hubble-relay.image.repository\",\n\t\t\"hubble.relay.image.tag\": \"hubble-relay.image.tag\",\n\t\t\"image.tag\": \"global.tag\",\n\t\t\"ipam.mode\": \"config.ipam\",\n\t\t\"ipv4.enabled\": \"global.ipv4.enabled\",\n\t\t\"ipv6.enabled\": \"global.ipv6.enabled\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"global.k8s.requireIPv4PodCIDR\",\n\t\t\"k8sServiceHost\": \"global.k8sServiceHost\",\n\t\t\"k8sServicePort\": \"global.k8sServicePort\",\n\t\t\"kubeProxyReplacement\": \"global.kubeProxyReplacement\",\n\t\t\"logSystemLoad\": \"global.logSystemLoad\",\n\t\t\"masquerade\": \"global.masquerade\",\n\t\t\"nativeRoutingCIDR\": \"global.nativeRoutingCIDR\",\n\t\t\"nodeinit.enabled\": \"global.nodeinit.enabled\",\n\t\t\"nodeinit.reconfigureKubelet\": 
\"global.nodeinit.reconfigureKubelet\",\n\t\t\"nodeinit.removeCbrBridge\": \"global.nodeinit.removeCbrBridge\",\n\t\t\"nodeinit.restartPods\": \"globalnodeinit.restartPods\",\n\t\t\"nodePort.enabled\": \"global.nodePort.enabled\",\n\t\t\"nodePort.mode\": \"global.nodePort.mode\",\n\t\t\"operator.enabled\": \"operator.enabled\",\n\t\t\"pprof.enabled\": \"global.pprof.enabled\",\n\t\t\"sessionAffinity\": \"config.sessionAffinity\",\n\t\t\"sleepAfterInit\": \"agent.sleepAfterInit\",\n\t\t\"tunnel\": \"global.tunnel\",\n\t}\n\n\tfor newKey, v := range options {\n\t\tif oldKey, ok := legacyMappings[newKey]; ok {\n\t\t\tresult[oldKey] = v\n\t\t} else if !ok {\n\t\t\tif newKey == \"image.repository\" {\n\t\t\t\tresult[\"agent.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if newKey == \"operator.image.repository\" {\n\t\t\t\tif options[\"eni\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-aws:\" + options[\"image.tag\"]\n\t\t\t\t} else if options[\"azure.enabled\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-azure:\" + options[\"image.tag\"]\n\t\t\t\t} else {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-generic:\" + options[\"image.tag\"]\n\t\t\t\t}\n\t\t\t} else if newKey == \"preflight.image.repository\" {\n\t\t\t\tresult[\"preflight.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if strings.HasSuffix(newKey, \".tag\") {\n\t\t\t\t// Already handled in the if statement above\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Warningf(\"Skipping option %s\", newKey)\n\t\t\t}\n\t\t}\n\t}\n\tresult[\"ci.kubeCacheMutationDetector\"] = \"true\"\n\treturn result\n}\n\n// RunHelm runs the helm command with the given options.\nfunc (kub *Kubectl) RunHelm(action, repo, helmName, version, namespace string, options map[string]string) (*CmdRes, error) {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptionsString := \"\"\n\n\t//TODO: In 1.10 dev cycle, remove this\n\tif version == \"1.8-dev\" {\n\t\toptions = kub.convertOptionsToLegacyOptions(options)\n\t}\n\n\tfor k, v := range options {\n\t\toptionsString += fmt.Sprintf(\" --set %s=%s \", k, v)\n\t}\n\n\treturn kub.ExecMiddle(fmt.Sprintf(\"helm %s %s %s \"+\n\t\t\"--version=%s \"+\n\t\t\"--namespace=%s \"+\n\t\t\"%s\", action, helmName, repo, version, namespace, optionsString)), nil\n}\n\n// GetCiliumPods returns a list of all Cilium pods in the specified namespace,\n// and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPods() ([]string, error) {\n\treturn kub.GetPodNames(CiliumNamespace, \"k8s-app=cilium\")\n}\n\n// GetCiliumPodsContext returns a list of all Cilium pods in the specified\n// namespace, and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPodsContext(ctx context.Context, namespace string) ([]string, error) {\n\treturn kub.GetPodNamesContext(ctx, namespace, \"k8s-app=cilium\")\n}\n\n// CiliumEndpointsList returns the result of `cilium endpoint list` from the\n// specified pod.\nfunc (kub *Kubectl) CiliumEndpointsList(ctx context.Context, pod string) *CmdRes {\n\treturn kub.CiliumExecContext(ctx, pod, \"cilium endpoint list -o json\")\n}\n\n// CiliumEndpointsStatus returns a mapping of a pod name to it is corresponding\n// endpoint's status\nfunc (kub *Kubectl) CiliumEndpointsStatus(pod string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.state}{\"\\n\"}{end}`\n\tctx, cancel := 
context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint list -o jsonpath='%s'\", filter)).KVOutput()\n}\n\n// CiliumEndpointIPv6 returns the IPv6 address of each endpoint which matches\n// the given endpoint selector.\nfunc (kub *Kubectl) CiliumEndpointIPv6(pod string, endpoint string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.networking.addressing[*].ipv6}{\"\\n\"}{end}`\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint get %s -o jsonpath='%s'\", endpoint, filter)).KVOutput()\n}\n\n// CiliumEndpointWaitReady waits until all endpoints managed by all Cilium pod\n// are ready. Returns an error if the Cilium pods cannot be retrieved via\n// Kubernetes, or endpoints are not ready after a specified timeout\nfunc (kub *Kubectl) CiliumEndpointWaitReady() error {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot get Cilium pods\")\n\t\treturn err\n\t}\n\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tvar wg sync.WaitGroup\n\t\tqueue := make(chan bool, len(ciliumPods))\n\t\tendpointsReady := func(pod string) {\n\t\t\tvalid := false\n\t\t\tdefer func() {\n\t\t\t\tqueue <- valid\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tlogCtx := kub.Logger().WithField(\"pod\", pod)\n\t\t\tstatus, err := kub.CiliumEndpointsList(ctx, pod).Filter(`{range [*]}{.status.state}{\"=\"}{.status.identity.id}{\"\\n\"}{end}`)\n\t\t\tif err != nil {\n\t\t\t\tlogCtx.WithError(err).Errorf(\"cannot get endpoints states on Cilium pod\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttotal := 0\n\t\t\tinvalid := 0\n\t\t\tfor _, line := range strings.Split(status.String(), \"\\n\") {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// each line is like status=identityID.\n\t\t\t\t// IdentityID is needed because the reserved:init identity\n\t\t\t\t// means that the pod is not ready to accept traffic.\n\t\t\t\ttotal++\n\t\t\t\tvals := strings.Split(line, \"=\")\n\t\t\t\tif len(vals) != 2 {\n\t\t\t\t\tlogCtx.Errorf(\"Endpoint list does not have a correct output '%s'\", line)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif vals[0] != \"ready\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t\t// Consider an endpoint with reserved identity 5 (reserved:init) as not ready.\n\t\t\t\tif vals[1] == \"5\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogCtx.WithFields(logrus.Fields{\n\t\t\t\t\"total\": total,\n\t\t\t\t\"invalid\": invalid,\n\t\t\t}).Info(\"Waiting for cilium endpoints to be ready\")\n\n\t\t\tif invalid != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalid = true\n\t\t}\n\t\twg.Add(len(ciliumPods))\n\t\tfor _, pod := range ciliumPods {\n\t\t\tgo endpointsReady(pod)\n\t\t}\n\n\t\twg.Wait()\n\t\tclose(queue)\n\n\t\tfor status := range queue {\n\t\t\tif status == false {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\terr = WithContext(ctx, body, 1*time.Second)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tcallback := func() string {\n\t\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\t\tdefer cancel()\n\n\t\tvar errorMessage string\n\t\tfor _, pod := range ciliumPods {\n\t\t\tvar endpoints []models.Endpoint\n\t\t\tcmdRes := kub.CiliumEndpointsList(ctx, 
pod)\n\t\t\tif !cmdRes.WasSuccessful() {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to get endpoint list: %s\",\n\t\t\t\t\tpod, cmdRes.err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := cmdRes.Unmarshal(&endpoints)\n\t\t\tif err != nil {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to parse endpoint list: %s\",\n\t\t\t\t\tpod, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ep := range endpoints {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\tEndpoint: %d \\tIdentity: %d\\t State: %s\\n\",\n\t\t\t\t\tpod, ep.ID, ep.Status.Identity.ID, ep.Status.State)\n\t\t\t}\n\t\t}\n\t\treturn errorMessage\n\t}\n\treturn NewSSHMetaError(err.Error(), callback)\n}\n\n// WaitForCEPIdentity waits for a particular CEP to have an identity present.\nfunc (kub *Kubectl) WaitForCEPIdentity(ns, podName string) error {\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tep, err := kub.GetCiliumEndpoint(ns, podName)\n\t\tif err != nil || ep == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ep.Identity == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn ep.Identity.ID != 0, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\treturn WithContext(ctx, body, 1*time.Second)\n}\n\n// CiliumExecContext runs cmd in the specified Cilium pod with the given context.\nfunc (kub *Kubectl) CiliumExecContext(ctx context.Context, pod string, cmd string) *CmdRes {\n\tlimitTimes := 5\n\texecute := func() *CmdRes {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, CiliumNamespace, pod, cmd)\n\t\treturn kub.ExecContext(ctx, command)\n\t}\n\tvar res *CmdRes\n\t// Sometimes Kubectl returns 126 exit code, It use to happen in Nightly\n\t// tests when a lot of exec are in place (Cgroups issue). The upstream\n\t// changes did not fix the isse, and we need to make this workaround to\n\t// avoid Kubectl issue.\n\t// https://github.com/openshift/origin/issues/16246\n\tfor i := 0; i < limitTimes; i++ {\n\t\tres = execute()\n\t\tif res.GetExitCode() != 126 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\treturn res\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 3294,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": {
"$CTX": {
"start": {
"line": 3293,
"col": 25,
"offset": 117408
},
"end": {
"line": 3293,
"col": 28,
"offset": 117411
},
"abstract_content": "ctx",
"unique_id": {
"type": "id",
"value": "ctx",
"kind": "Param",
"sid": 434
}
},
"$OBJ": {
"start": {
"line": 3293,
"col": 9,
"offset": 117392
},
"end": {
"line": 3293,
"col": 12,
"offset": 117395
},
"abstract_content": "kub",
"unique_id": {
"type": "AST",
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
}
},
"$FXN": {
"start": {
"line": 3292,
"col": 13,
"offset": 117312
},
"end": {
"line": 3292,
"col": 24,
"offset": 117323
},
"abstract_content": "fmt.Sprintf",
"unique_id": {
"type": "AST",
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
}
},
"$OTHER": {
"start": {
"line": 3292,
"col": 2,
"offset": 117301
},
"end": {
"line": 3292,
"col": 9,
"offset": 117308
},
"abstract_content": "command",
"unique_id": {
"type": "AST",
"md5sum": "22de25c79fec71b1caca4adfb91b6622"
}
},
"$QUERY": {
"start": {
"line": 49,
"col": 2,
"offset": 1261
},
"end": {
"line": 49,
"col": 12,
"offset": 1271
},
"abstract_content": "KubectlCmd",
"unique_id": {
"type": "id",
"value": "KubectlCmd",
"kind": "Global",
"sid": 16
}
}
},
"metadata": {
"owasp": "A1: Injection",
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
"source-rule-url": "https://github.com/securego/gosec"
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}\n\n// ApplyDefault applies give filepath with other options set to default\nfunc (kub *Kubectl) ApplyDefault(filePath string) *CmdRes {\n\treturn kub.Apply(ApplyOptions{FilePath: filePath})\n}\n\n// Create creates the Kubernetes kanifest located at path filepath.\nfunc (kub *Kubectl) Create(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"creating %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s create -f %s\", KubectlCmd, filePath))\n}\n\n// CreateResource is a wrapper around `kubernetes create <resource>\n// <resourceName>.\nfunc (kub *Kubectl) CreateResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating resource %s with name %s\", resource, resourceName))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create %s %s\", resource, resourceName))\n}\n\n// DeleteResource is a wrapper 
around `kubernetes delete <resource>\n// resourceName>.\nfunc (kub *Kubectl) DeleteResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"deleting resource %s with name %s\", resource, resourceName))\n\treturn kub.Exec(fmt.Sprintf(\"kubectl delete %s %s\", resource, resourceName))\n}\n\n// DeleteInNamespace deletes the Kubernetes manifest at path filepath in a\n// particular namespace\nfunc (kub *Kubectl) DeleteInNamespace(namespace, filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s in namespace %s\", filePath, namespace)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s -n %s delete -f %s\", KubectlCmd, namespace, filePath))\n}\n\n// Delete deletes the Kubernetes manifest at path filepath.\nfunc (kub *Kubectl) Delete(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// DeleteAndWait deletes the Kubernetes manifest at path filePath and wait\n// for the associated resources to be gone.\n// If ignoreNotFound parameter is true we don't error if the resource to be\n// deleted is not found in the cluster.\nfunc (kub *Kubectl) DeleteAndWait(filePath string, ignoreNotFound bool) *CmdRes {\n\tkub.Logger().Debugf(\"waiting for resources in %q to be deleted\", filePath)\n\tvar ignoreOpt string\n\tif ignoreNotFound {\n\t\tignoreOpt = \"--ignore-not-found\"\n\t}\n\treturn kub.ExecMiddle(\n\t\tfmt.Sprintf(\"%s delete -f %s --wait %s\", KubectlCmd, filePath, ignoreOpt))\n}\n\n// DeleteLong deletes the Kubernetes manifest at path filepath with longer timeout.\nfunc (kub *Kubectl) DeleteLong(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// PodsHaveCiliumIdentity validates that all pods matching th podSelector have\n// a CiliumEndpoint resource mirroring it and an identity is assigned to it. If\n// any pods do not match this criteria, an error is returned.\nfunc (kub *Kubectl) PodsHaveCiliumIdentity(namespace, podSelector string) error {\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o json\", KubectlCmd, namespace, podSelector))\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve pods for selector %s: %s\", podSelector, res.OutputPrettyPrint())\n\t}\n\n\tpodList := &v1.PodList{}\n\terr := res.Unmarshal(podList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal pods for selector %s: %s\", podSelector, err)\n\t}\n\n\tfor _, pod := range podList.Items {\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ep == nil {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumEndpoint\", namespace, pod.Name)\n\t\t}\n\n\t\tif ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumIdentity\", namespace, pod.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeploymentIsReady validate that a deployment has at least one replica and\n// that all replicas are:\n// - up-to-date\n// - ready\n//\n// If the above condition is not met, an error is returned. 
If all replicas are\n// ready, then the number of replicas is returned.\nfunc (kub *Kubectl) DeploymentIsReady(namespace, deployment string) (int, error) {\n\tfullName := namespace + \"/\" + deployment\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get deployment %s -o json\", KubectlCmd, namespace, deployment))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve deployment %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.Deployment{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal deployment %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.Replicas == 0 {\n\t\treturn 0, fmt.Errorf(\"replicas count is zero\")\n\t}\n\n\tif d.Status.AvailableReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are available\", d.Status.AvailableReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.ReadyReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are ready\", d.Status.ReadyReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.UpdatedReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are up-to-date\", d.Status.UpdatedReplicas, d.Status.Replicas)\n\t}\n\n\treturn int(d.Status.Replicas), nil\n}\n\nfunc (kub *Kubectl) GetService(namespace, service string) (*v1.Service, error) {\n\tfullName := namespace + \"/\" + service\n\tres := kub.Get(namespace, \"service \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to retrieve service %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tvar serviceObj v1.Service\n\terr := res.Unmarshal(&serviceObj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal service %s: %s\", fullName, err)\n\t}\n\n\treturn &serviceObj, nil\n}\n\nfunc absoluteServiceName(namespace, service string) string {\n\tfullServiceName := service + \".\" + namespace\n\n\tif !strings.HasSuffix(fullServiceName, ServiceSuffix) {\n\t\tfullServiceName = fullServiceName + \".\" + ServiceSuffix\n\t}\n\n\treturn fullServiceName\n}\n\nfunc (kub *Kubectl) KubernetesDNSCanResolve(namespace, service string) error {\n\tserviceToResolve := absoluteServiceName(namespace, service)\n\n\tkubeDnsService, err := kub.GetService(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(kubeDnsService.Spec.Ports) == 0 {\n\t\treturn fmt.Errorf(\"kube-dns service has no ports defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\tdefer cancel()\n\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tcmd := fmt.Sprintf(\"dig +short %s @%s | grep -v -e '^;'\", serviceToResolve, kubeDnsService.Spec.ClusterIP)\n\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\tif res.err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve service name %s with DND server %s by running '%s' Cilium pod: %s\",\n\t\t\tserviceToResolve, kubeDnsService.Spec.ClusterIP, cmd, res.OutputPrettyPrint())\n\t}\n\tif net.ParseIP(res.SingleOut()) == nil {\n\t\treturn fmt.Errorf(\"dig did not return an IP: %s\", res.SingleOut())\n\t}\n\n\tdestinationService, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the destination service is headless, there is no ClusterIP, the\n\t// IP returned by the dig is the IP of one of the pods.\n\tif destinationService.Spec.ClusterIP == v1.ClusterIPNone {\n\t\tcmd := fmt.Sprintf(\"dig +tcp %s @%s\", serviceToResolve, 
kubeDnsService.Spec.ClusterIP)\n\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to resolve service name %s by running '%s': %s\",\n\t\t\t\tserviceToResolve, cmd, res.OutputPrettyPrint())\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(res.SingleOut(), destinationService.Spec.ClusterIP) {\n\t\treturn fmt.Errorf(\"IP returned '%s' does not match the ClusterIP '%s' of the destination service\",\n\t\t\tres.SingleOut(), destinationService.Spec.ClusterIP)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) validateServicePlumbingInCiliumPod(fullName, ciliumPod string, serviceObj *v1.Service, endpointsObj v1.Endpoints) error {\n\tjq := \"jq -r '[ .[].status.realized | select(.\\\"frontend-address\\\".ip==\\\"\" + serviceObj.Spec.ClusterIP + \"\\\") | . ] '\"\n\tcmd := \"cilium service list -o json | \" + jq\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn fmt.Errorf(\"ClusterIP %s not found in service list of cilium pod %s\",\n\t\t\tserviceObj.Spec.ClusterIP, ciliumPod)\n\t}\n\n\tvar realizedServices []models.ServiceSpec\n\terr := res.Unmarshal(&realizedServices)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal service spec '%s': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tcmd = \"cilium bpf lb list -o json\"\n\tres = kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar lbMap map[string][]string\n\terr = res.Unmarshal(&lbMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal cilium bpf lb list output: %s\", err)\n\t}\n\n\tfor _, port := range serviceObj.Spec.Ports {\n\t\tvar foundPort *v1.ServicePort\n\t\tfor _, realizedService := range realizedServices {\n\t\t\tif compareServicePortToFrontEnd(&port, realizedService.FrontendAddress) {\n\t\t\t\tfoundPort = &port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundPort == nil {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t\tlKey := serviceAddressKey(serviceObj.Spec.ClusterIP, fmt.Sprintf(\"%d\", port.Port), string(port.Protocol), \"\")\n\t\tif _, ok := lbMap[lKey]; !ok {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium bpf lb list of pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t}\n\n\tfor _, subset := range endpointsObj.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tfor _, port := range subset.Ports {\n\t\t\t\tfoundBackend, foundBackendLB := false, false\n\t\t\t\tfor _, realizedService := range realizedServices {\n\t\t\t\t\tfrontEnd := realizedService.FrontendAddress\n\t\t\t\t\tlbKey := serviceAddressKey(frontEnd.IP, fmt.Sprintf(\"%d\", frontEnd.Port), string(frontEnd.Protocol), \"\")\n\t\t\t\t\tlb := lbMap[lbKey]\n\t\t\t\t\tfor _, backAddr := range realizedService.BackendAddresses {\n\t\t\t\t\t\tif addr.IP == *backAddr.IP && uint16(port.Port) == backAddr.Port &&\n\t\t\t\t\t\t\tcompareProto(string(port.Protocol), backAddr.Protocol) {\n\t\t\t\t\t\t\tfoundBackend = true\n\t\t\t\t\t\t\tfor _, backend := range lb {\n\t\t\t\t\t\t\t\tif strings.Contains(backend, 
net.JoinHostPort(*backAddr.IP, fmt.Sprintf(\"%d\", port.Port))) {\n\t\t\t\t\t\t\t\t\tfoundBackendLB = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundBackend {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\n\t\t\t\tif !foundBackendLB {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in datapath of cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateServicePlumbing ensures that a service in a namespace successfully\n// plumbed by all Cilium pods in the cluster:\n// - The service and endpoints are found in `cilium service list`\n// - The service and endpoints are found in `cilium bpf lb list`\nfunc (kub *Kubectl) ValidateServicePlumbing(namespace, service string) error {\n\tfullName := namespace + \"/\" + service\n\n\tserviceObj, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceObj == nil {\n\t\treturn fmt.Errorf(\"%s service not found\", fullName)\n\t}\n\n\tres := kub.Get(namespace, \"endpoints \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve endpoints %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tif serviceObj.Spec.ClusterIP == v1.ClusterIPNone {\n\t\treturn nil\n\t}\n\n\tvar endpointsObj v1.Endpoints\n\terr = res.Unmarshal(&endpointsObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal endpoints %s: %s\", fullName, err)\n\t}\n\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg, _ := errgroup.WithContext(context.TODO())\n\tfor _, ciliumPod := range ciliumPods {\n\t\tciliumPod := ciliumPod\n\t\tg.Go(func() error {\n\t\t\tvar err error\n\t\t\t// The plumbing of Kubernetes services typically lags\n\t\t\t// behind a little bit if Cilium was just restarted.\n\t\t\t// Give this a thight timeout to avoid always failing.\n\t\t\ttimeoutErr := RepeatUntilTrue(func() bool {\n\t\t\t\terr = kub.validateServicePlumbingInCiliumPod(fullName, ciliumPod, serviceObj, endpointsObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tginkgoext.By(\"Checking service %s plumbing in cilium pod %s: %s\", fullName, ciliumPod, err)\n\t\t\t\t}\n\t\t\t\treturn err == nil\n\t\t\t}, &TimeoutConfig{Timeout: 5 * time.Second, Ticker: 1 * time.Second})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if timeoutErr != nil {\n\t\t\t\treturn timeoutErr\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ValidateKubernetesDNS validates that the Kubernetes DNS server has been\n// deployed correctly and can resolve DNS names. The following validations are\n// done:\n// - The Kuberentes DNS deployment has at least one replica\n// - All replicas are up-to-date and ready\n// - All pods matching the deployment are represented by a CiliumEndpoint with an identity\n// - The kube-system/kube-dns service is correctly pumbed in all Cilium agents\n// - The service \"default/kubernetes\" can be resolved via the KubernetesDNS\n// and the IP returned matches the ClusterIP in the service\nfunc (kub *Kubectl) ValidateKubernetesDNS() error {\n\t// The deployment is always validated first and not in parallel. 
There\n\t// is no point in validating correct plumbing if the DNS is not even up\n\t// and running.\n\tginkgoext.By(\"Checking if deployment is ready\")\n\t_, err := kub.DeploymentIsReady(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\t_, err = kub.DeploymentIsReady(KubeSystemNamespace, \"coredns\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\terrQueue = make(chan error, 3)\n\t)\n\twg.Add(3)\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if pods have identity\")\n\t\tif err := kub.PodsHaveCiliumIdentity(KubeSystemNamespace, kubeDNSLabel); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if DNS can resolve\")\n\t\tif err := kub.KubernetesDNSCanResolve(\"default\", \"kubernetes\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if kube-dns service is plumbed correctly\")\n\t\tif err := kub.ValidateServicePlumbing(KubeSystemNamespace, \"kube-dns\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errQueue:\n\t\treturn err\n\tdefault:\n\t}\n\n\treturn nil\n}\n\n// RestartUnmanagedPodsInNamespace restarts all pods in a namespace which are:\n// * not host networking\n// * not managed by Cilium already\nfunc (kub *Kubectl) RestartUnmanagedPodsInNamespace(namespace string, excludePodPrefix ...string) {\n\tpodList := &v1.PodList{}\n\tcmd := KubectlCmd + \" -n \" + namespace + \" get pods -o json\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to retrieve all pods to restart unmanaged pods with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\tif err := res.Unmarshal(podList); err != nil {\n\t\tginkgoext.Failf(\"Unable to unmarshal podlist: %s\", err)\n\t}\n\niteratePods:\n\tfor _, pod := range podList.Items {\n\t\tif pod.Spec.HostNetwork || pod.DeletionTimestamp != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, prefix := range excludePodPrefix {\n\t\t\tif strings.HasPrefix(pod.Name, prefix) {\n\t\t\t\tcontinue iteratePods\n\t\t\t}\n\t\t}\n\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil || ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\tginkgoext.By(\"Restarting unmanaged pod %s/%s\", namespace, pod.Name)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete pod \" + pod.Name\n\t\t\tres = kub.Exec(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.Failf(\"Unable to restart unmanaged pod with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// RedeployKubernetesDnsIfNecessary validates if the Kubernetes DNS is\n// functional and re-deploys it if it is not and then waits for it to deploy\n// successfully and become operational. 
See ValidateKubernetesDNS() for the\n// list of conditions that must be met for Kubernetes DNS to be considered\n// operational.\nfunc (kub *Kubectl) RedeployKubernetesDnsIfNecessary() {\n\tginkgoext.By(\"Validating if Kubernetes DNS is deployed\")\n\terr := kub.ValidateKubernetesDNS()\n\tif err == nil {\n\t\tginkgoext.By(\"Kubernetes DNS is up and operational\")\n\t\treturn\n\t} else {\n\t\tginkgoext.By(\"Kubernetes DNS is not ready: %s\", err)\n\t}\n\n\tginkgoext.By(\"Restarting Kubernetes DNS (-l %s)\", kubeDNSLabel)\n\tres := kub.DeleteResource(\"pod\", \"-n \"+KubeSystemNamespace+\" -l \"+kubeDNSLabel)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to delete DNS pods: %s\", res.OutputPrettyPrint())\n\t}\n\n\tginkgoext.By(\"Waiting for Kubernetes DNS to become operational\")\n\terr = RepeatUntilTrueDefaultTimeout(func() bool {\n\t\terr := kub.ValidateKubernetesDNS()\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Kubernetes DNS is not ready yet: %s\", err)\n\t\t}\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s -l %s\", KubectlCmd, KubeSystemNamespace, kubeDNSLabel))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\n\t\tginkgoext.Fail(\"Kubernetes DNS did not become ready in time\")\n\t}\n}\n\n// WaitKubeDNS waits until the kubeDNS pods are ready. In case of exceeding the\n// default timeout it returns an error.\nfunc (kub *Kubectl) WaitKubeDNS() error {\n\treturn kub.WaitforPods(KubeSystemNamespace, fmt.Sprintf(\"-l %s\", kubeDNSLabel), DNSHelperTimeout)\n}\n\n// WaitForKubeDNSEntry waits until the given DNS entry exists in the kube-dns\n// service. If the container is not ready after timeout it returns an error. The\n// name's format query should be `${name}.${namespace}`. If `svc.cluster.local`\n// is not present, it appends to the given name and it checks the service's FQDN.\nfunc (kub *Kubectl) WaitForKubeDNSEntry(serviceName, serviceNamespace string) error {\n\tlogger := kub.Logger().WithFields(logrus.Fields{\"serviceName\": serviceName, \"serviceNamespace\": serviceNamespace})\n\n\tserviceNameWithNamespace := fmt.Sprintf(\"%s.%s\", serviceName, serviceNamespace)\n\tif !strings.HasSuffix(serviceNameWithNamespace, ServiceSuffix) {\n\t\tserviceNameWithNamespace = fmt.Sprintf(\"%s.%s\", serviceNameWithNamespace, ServiceSuffix)\n\t}\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tdigCMD := \"dig +short %s @%s | grep -v -e '^;'\"\n\n\t// If it fails we want to know if it's because of connection cannot be\n\t// established or DNS does not exist.\n\tdigCMDFallback := \"dig +tcp %s @%s\"\n\n\tdnsClusterIP, _, err := kub.GetServiceHostPort(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"cannot get kube-dns service IP\")\n\t\treturn err\n\t}\n\n\tbody := func() bool {\n\t\tserviceIP, _, err := kub.GetServiceHostPort(serviceNamespace, serviceName)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"cannot get service IP for service %s\", serviceNameWithNamespace)\n\t\t\treturn false\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\t\tdefer cancel()\n\t\t// ClusterIPNone denotes that this service is headless; there is no\n\t\t// service IP for this service, and thus the IP returned by `dig` is\n\t\t// an IP of the pod itself, not ClusterIPNone, which is what Kubernetes\n\t\t// shows as the IP for the service for headless services.\n\t\tif serviceIP == v1.ClusterIPNone {\n\t\t\tres := kub.ExecInFirstPod(ctx, 
LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\t\tif res.err != nil {\n\t\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\n\t\t\treturn res.WasSuccessful()\n\t\t}\n\t\tlog.Debugf(\"service is not headless; checking whether IP retrieved from DNS matches the IP for the service stored in Kubernetes\")\n\n\t\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\tif res.err != nil {\n\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\treturn false\n\t\t}\n\t\tserviceIPFromDNS := res.SingleOut()\n\t\tif !govalidator.IsIP(serviceIPFromDNS) {\n\t\t\tlogger.Debugf(\"output of dig (%s) did not return an IP\", serviceIPFromDNS)\n\t\t\treturn false\n\t\t}\n\n\t\t// Due to lag between new IPs for the same service being synced between // kube-apiserver and DNS, check if the IP for the service that is\n\t\t// stored in K8s matches the IP of the service cached in DNS. These\n\t\t// can be different, because some tests use the same service names.\n\t\t// Wait accordingly for services to match, and for resolving the service\n\t\t// name to resolve via DNS.\n\t\tif !strings.Contains(serviceIPFromDNS, serviceIP) {\n\t\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) does not match the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\t\t\treturn false\n\t\t}\n\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) matches the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\treturn true\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"DNS '%s' is not ready after timeout\", serviceNameWithNamespace),\n\t\t&TimeoutConfig{Timeout: DNSHelperTimeout})\n}\n\n// WaitCleanAllTerminatingPods waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. In case of excedding the\n// given timeout (in seconds) it returns an error\n\nfunc (kub *Kubectl) WaitCleanAllTerminatingPods(timeout time.Duration) error {\n\treturn kub.WaitCleanAllTerminatingPodsInNs(\"\", timeout)\n}\n\n// WaitCleanAllTerminatingPodsInNs waits until all nodes that are in `Terminating`\n// state are deleted correctly in the platform. 
In case of excedding the\n// given timeout (in seconds) it returns an error\nfunc (kub *Kubectl) WaitCleanAllTerminatingPodsInNs(ns string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\twhere := ns\n\t\tif where == \"\" {\n\t\t\twhere = \"--all-namespaces\"\n\t\t} else {\n\t\t\twhere = \"-n \" + where\n\t\t}\n\t\tres := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get pods %s -o jsonpath='{.items[*].metadata.deletionTimestamp}'\",\n\t\t\tKubectlCmd, where))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\n\t\tif res.Stdout() == \"\" {\n\t\t\t// Output is empty so no terminating containers\n\t\t\treturn true\n\t\t}\n\n\t\tpodsTerminating := len(strings.Split(res.Stdout(), \" \"))\n\t\tkub.Logger().WithField(\"Terminating pods\", podsTerminating).Info(\"List of pods terminating\")\n\t\tif podsTerminating > 0 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tbody,\n\t\t\"Pods are still not deleted after a timeout\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\n// DeployPatchStdIn deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatchStdIn(original, patch string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local --dry-run -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch %s --local --dry-run`,\n\t\tKubectlCmd, original, patch))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local -o yaml`,\n\t\t\tKubectlCmd, original, patch),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// DeployPatch deploys the original kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatch(original, patchFileName string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest can not be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local --dry-run`,\n\t\tKubectlCmd, original, patchFileName))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patchFileName),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// Patch patches the given object with the given patch (string).\nfunc (kub *Kubectl) Patch(namespace, objType, objName, patch string) *CmdRes {\n\tginkgoext.By(\"Patching %s %s in namespace %s\", 
objType, objName, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s patch %s %s --patch %q\",\n\t\tKubectlCmd, namespace, objType, objName, patch))\n}\n\nfunc addIfNotOverwritten(options map[string]string, field, value string) map[string]string {\n\tif _, ok := options[field]; !ok {\n\t\toptions[field] = value\n\t}\n\treturn options\n}\n\nfunc (kub *Kubectl) overwriteHelmOptions(options map[string]string) error {\n\tif integration := GetCurrentIntegration(); integration != \"\" {\n\t\toverrides := helmOverrides[integration]\n\t\tfor key, value := range overrides {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\n\t}\n\tfor key, value := range defaultHelmOptions {\n\t\toptions = addIfNotOverwritten(options, key, value)\n\t}\n\n\t// Do not schedule cilium-agent on the NO_CILIUM_ON_NODE node\n\tif node := GetNodeWithoutCilium(); node != \"\" {\n\t\topts := map[string]string{\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"cilium.io/ci-node\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"NotIn\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": node,\n\t\t}\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif !RunsWithKubeProxy() {\n\t\tnodeIP, err := kub.GetNodeIPByLabel(K8s1, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot retrieve Node IP for k8s1: %s\", err)\n\t\t}\n\n\t\topts := map[string]string{\n\t\t\t\"kubeProxyReplacement\": \"strict\",\n\t\t\t\"k8sServiceHost\": nodeIP,\n\t\t\t\"k8sServicePort\": \"6443\",\n\t\t}\n\n\t\tif RunsOnNetNextOr419Kernel() {\n\t\t\topts[\"bpf.masquerade\"] = \"true\"\n\t\t}\n\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif RunsWithHostFirewall() {\n\t\taddIfNotOverwritten(options, \"hostFirewall\", \"true\")\n\t}\n\n\tif !RunsWithKubeProxy() || options[\"hostFirewall\"] == \"true\" {\n\t\t// Set devices\n\t\tprivateIface, err := kub.GetPrivateIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultIface, err := kub.GetDefaultIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdevices := fmt.Sprintf(`'{%s,%s}'`, privateIface, defaultIface)\n\t\taddIfNotOverwritten(options, \"devices\", devices)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) generateCiliumYaml(options map[string]string, filename string) error {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO GH-8753: Use helm rendering library instead of shelling out to\n\t// helm template\n\thelmTemplate := kub.GetFilePath(HelmTemplate)\n\tres := kub.HelmTemplate(helmTemplate, CiliumNamespace, filename, options)\n\tif !res.WasSuccessful() {\n\t\t// If the helm template generation is not successful remove the empty\n\t\t// manifest file.\n\t\t_ = os.Remove(filename)\n\t\treturn res.GetErr(\"Unable to generate YAML\")\n\t}\n\n\treturn nil\n}\n\n// GetPrivateIface returns an interface name of a netdev which has InternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPrivateIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have InternalIP\", 
K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\n// GetPublicIface returns an interface name of a netdev which has ExternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPublicIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, true)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have ExternalIP\", K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\nfunc (kub *Kubectl) waitToDelete(name, label string) error {\n\tvar (\n\t\tpods []string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\n\tstatus := 1\n\tfor status > 0 {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"timed out waiting to delete %s: pods still remaining: %s\", name, pods)\n\t\tdefault:\n\t\t}\n\n\t\tpods, err = kub.GetPodNamesContext(ctx, CiliumNamespace, label)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus = len(pods)\n\t\tkub.Logger().Infof(\"%s pods terminating '%d' err='%v' pods='%v'\", name, status, err, pods)\n\t\tif status == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\n// GetDefaultIface returns an interface name which is used by a default route.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetDefaultIface() (string, error) {\n\tcmd := `ip -o r | grep default | grep -o 'dev [a-zA-Z0-9]*' | cut -d' ' -f2 | head -n1`\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), K8s1, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve default iface: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\nfunc (kub *Kubectl) DeleteCiliumDS() error {\n\t// Do not assert on success in AfterEach intentionally to avoid\n\t// incomplete teardown.\n\tginkgoext.By(\"DeleteCiliumDS(namespace=%q)\", CiliumNamespace)\n\t_ = kub.DeleteResource(\"ds\", fmt.Sprintf(\"-n %s cilium\", CiliumNamespace))\n\treturn kub.waitToDelete(\"Cilium\", CiliumAgentLabel)\n}\n\nfunc (kub *Kubectl) DeleteHubbleRelay(ns string) error {\n\tginkgoext.By(\"DeleteHubbleRelay(namespace=%q)\", ns)\n\t_ = kub.DeleteResource(\"deployment\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\t_ = kub.DeleteResource(\"service\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\treturn kub.waitToDelete(\"HubbleRelay\", HubbleRelayLabel)\n}\n\n// CiliumInstall installs Cilium with the provided Helm options.\nfunc (kub *Kubectl) CiliumInstall(filename string, options map[string]string) error {\n\t// If the file does not exist, create it so that the command `kubectl delete -f <filename>`\n\t// does not fail because there is no file.\n\t_ = kub.ExecContextShort(context.TODO(), fmt.Sprintf(\"[[ ! -f %s ]] && echo '---' >> %s\", filename, filename))\n\n\t// First try to remove any existing cilium install. 
This is done by removing resources\n\t// from the file we generate cilium install manifest to.\n\tres := kub.DeleteAndWait(filename, true)\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to delete existing cilium YAML\")\n\t}\n\n\tif err := kub.generateCiliumYaml(options, filename); err != nil {\n\t\treturn err\n\t}\n\n\tres = kub.Apply(ApplyOptions{FilePath: filename, Force: true, Namespace: CiliumNamespace})\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to apply YAML\")\n\t}\n\n\treturn nil\n}\n\n// convertOptionsToLegacyOptions maps current helm values to old helm Values\n// TODO: When Cilium 1.10 branch is created, remove this function\nfunc (kub *Kubectl) convertOptionsToLegacyOptions(options map[string]string) map[string]string {\n\n\tresult := make(map[string]string)\n\n\tlegacyMappings := map[string]string{\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\",\n\t\t\"bpf.preallocateMaps\": \"global.bpf.preallocateMaps\",\n\t\t\"bpf.masquerade\": \"config.bpfMasquerade\",\n\t\t\"cleanState\": \"global.cleanState\",\n\t\t\"cni.binPath\": \"global.cni.binPath\",\n\t\t\"cni.chainingMode\": \"global.cni.chainingMode\",\n\t\t\"cni.confPath\": \"global.cni.confPath\",\n\t\t\"cni.customConf\": \"global.cni.customConf\",\n\t\t\"daemon.runPath\": \"global.daemon.runPath\",\n\t\t\"debug.enabled\": \"global.debug.enabled\",\n\t\t\"devices\": \"global.devices\", // Override \"eth0 eth0\\neth0\"\n\t\t\"enableCnpStatusUpdates\": \"config.enableCnpStatusUpdates\",\n\t\t\"etcd.leaseTTL\": \"global.etcd.leaseTTL\",\n\t\t\"externalIPs.enabled\": \"global.externalIPs.enabled\",\n\t\t\"flannel.enabled\": \"global.flannel.enabled\",\n\t\t\"gke.enabled\": \"global.gke.enabled\",\n\t\t\"hostFirewall\": \"global.hostFirewall\",\n\t\t\"hostPort.enabled\": \"global.hostPort.enabled\",\n\t\t\"hostServices.enabled\": \"global.hostServices.enabled\",\n\t\t\"hubble.enabled\": \"global.hubble.enabled\",\n\t\t\"hubble.listenAddress\": \"global.hubble.listenAddress\",\n\t\t\"hubble.relay.image.repository\": \"hubble-relay.image.repository\",\n\t\t\"hubble.relay.image.tag\": \"hubble-relay.image.tag\",\n\t\t\"image.tag\": \"global.tag\",\n\t\t\"ipam.mode\": \"config.ipam\",\n\t\t\"ipv4.enabled\": \"global.ipv4.enabled\",\n\t\t\"ipv6.enabled\": \"global.ipv6.enabled\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"global.k8s.requireIPv4PodCIDR\",\n\t\t\"k8sServiceHost\": \"global.k8sServiceHost\",\n\t\t\"k8sServicePort\": \"global.k8sServicePort\",\n\t\t\"kubeProxyReplacement\": \"global.kubeProxyReplacement\",\n\t\t\"logSystemLoad\": \"global.logSystemLoad\",\n\t\t\"masquerade\": \"global.masquerade\",\n\t\t\"nativeRoutingCIDR\": \"global.nativeRoutingCIDR\",\n\t\t\"nodeinit.enabled\": \"global.nodeinit.enabled\",\n\t\t\"nodeinit.reconfigureKubelet\": 
\"global.nodeinit.reconfigureKubelet\",\n\t\t\"nodeinit.removeCbrBridge\": \"global.nodeinit.removeCbrBridge\",\n\t\t\"nodeinit.restartPods\": \"globalnodeinit.restartPods\",\n\t\t\"nodePort.enabled\": \"global.nodePort.enabled\",\n\t\t\"nodePort.mode\": \"global.nodePort.mode\",\n\t\t\"operator.enabled\": \"operator.enabled\",\n\t\t\"pprof.enabled\": \"global.pprof.enabled\",\n\t\t\"sessionAffinity\": \"config.sessionAffinity\",\n\t\t\"sleepAfterInit\": \"agent.sleepAfterInit\",\n\t\t\"tunnel\": \"global.tunnel\",\n\t}\n\n\tfor newKey, v := range options {\n\t\tif oldKey, ok := legacyMappings[newKey]; ok {\n\t\t\tresult[oldKey] = v\n\t\t} else if !ok {\n\t\t\tif newKey == \"image.repository\" {\n\t\t\t\tresult[\"agent.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if newKey == \"operator.image.repository\" {\n\t\t\t\tif options[\"eni\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-aws:\" + options[\"image.tag\"]\n\t\t\t\t} else if options[\"azure.enabled\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-azure:\" + options[\"image.tag\"]\n\t\t\t\t} else {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-generic:\" + options[\"image.tag\"]\n\t\t\t\t}\n\t\t\t} else if newKey == \"preflight.image.repository\" {\n\t\t\t\tresult[\"preflight.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if strings.HasSuffix(newKey, \".tag\") {\n\t\t\t\t// Already handled in the if statement above\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Warningf(\"Skipping option %s\", newKey)\n\t\t\t}\n\t\t}\n\t}\n\tresult[\"ci.kubeCacheMutationDetector\"] = \"true\"\n\treturn result\n}\n\n// RunHelm runs the helm command with the given options.\nfunc (kub *Kubectl) RunHelm(action, repo, helmName, version, namespace string, options map[string]string) (*CmdRes, error) {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptionsString := \"\"\n\n\t//TODO: In 1.10 dev cycle, remove this\n\tif version == \"1.8-dev\" {\n\t\toptions = kub.convertOptionsToLegacyOptions(options)\n\t}\n\n\tfor k, v := range options {\n\t\toptionsString += fmt.Sprintf(\" --set %s=%s \", k, v)\n\t}\n\n\treturn kub.ExecMiddle(fmt.Sprintf(\"helm %s %s %s \"+\n\t\t\"--version=%s \"+\n\t\t\"--namespace=%s \"+\n\t\t\"%s\", action, helmName, repo, version, namespace, optionsString)), nil\n}\n\n// GetCiliumPods returns a list of all Cilium pods in the specified namespace,\n// and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPods() ([]string, error) {\n\treturn kub.GetPodNames(CiliumNamespace, \"k8s-app=cilium\")\n}\n\n// GetCiliumPodsContext returns a list of all Cilium pods in the specified\n// namespace, and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPodsContext(ctx context.Context, namespace string) ([]string, error) {\n\treturn kub.GetPodNamesContext(ctx, namespace, \"k8s-app=cilium\")\n}\n\n// CiliumEndpointsList returns the result of `cilium endpoint list` from the\n// specified pod.\nfunc (kub *Kubectl) CiliumEndpointsList(ctx context.Context, pod string) *CmdRes {\n\treturn kub.CiliumExecContext(ctx, pod, \"cilium endpoint list -o json\")\n}\n\n// CiliumEndpointsStatus returns a mapping of a pod name to it is corresponding\n// endpoint's status\nfunc (kub *Kubectl) CiliumEndpointsStatus(pod string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.state}{\"\\n\"}{end}`\n\tctx, cancel := 
context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint list -o jsonpath='%s'\", filter)).KVOutput()\n}\n\n// CiliumEndpointIPv6 returns the IPv6 address of each endpoint which matches\n// the given endpoint selector.\nfunc (kub *Kubectl) CiliumEndpointIPv6(pod string, endpoint string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.networking.addressing[*].ipv6}{\"\\n\"}{end}`\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint get %s -o jsonpath='%s'\", endpoint, filter)).KVOutput()\n}\n\n// CiliumEndpointWaitReady waits until all endpoints managed by all Cilium pod\n// are ready. Returns an error if the Cilium pods cannot be retrieved via\n// Kubernetes, or endpoints are not ready after a specified timeout\nfunc (kub *Kubectl) CiliumEndpointWaitReady() error {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot get Cilium pods\")\n\t\treturn err\n\t}\n\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tvar wg sync.WaitGroup\n\t\tqueue := make(chan bool, len(ciliumPods))\n\t\tendpointsReady := func(pod string) {\n\t\t\tvalid := false\n\t\t\tdefer func() {\n\t\t\t\tqueue <- valid\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tlogCtx := kub.Logger().WithField(\"pod\", pod)\n\t\t\tstatus, err := kub.CiliumEndpointsList(ctx, pod).Filter(`{range [*]}{.status.state}{\"=\"}{.status.identity.id}{\"\\n\"}{end}`)\n\t\t\tif err != nil {\n\t\t\t\tlogCtx.WithError(err).Errorf(\"cannot get endpoints states on Cilium pod\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttotal := 0\n\t\t\tinvalid := 0\n\t\t\tfor _, line := range strings.Split(status.String(), \"\\n\") {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// each line is like status=identityID.\n\t\t\t\t// IdentityID is needed because the reserved:init identity\n\t\t\t\t// means that the pod is not ready to accept traffic.\n\t\t\t\ttotal++\n\t\t\t\tvals := strings.Split(line, \"=\")\n\t\t\t\tif len(vals) != 2 {\n\t\t\t\t\tlogCtx.Errorf(\"Endpoint list does not have a correct output '%s'\", line)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif vals[0] != \"ready\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t\t// Consider an endpoint with reserved identity 5 (reserved:init) as not ready.\n\t\t\t\tif vals[1] == \"5\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogCtx.WithFields(logrus.Fields{\n\t\t\t\t\"total\": total,\n\t\t\t\t\"invalid\": invalid,\n\t\t\t}).Info(\"Waiting for cilium endpoints to be ready\")\n\n\t\t\tif invalid != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalid = true\n\t\t}\n\t\twg.Add(len(ciliumPods))\n\t\tfor _, pod := range ciliumPods {\n\t\t\tgo endpointsReady(pod)\n\t\t}\n\n\t\twg.Wait()\n\t\tclose(queue)\n\n\t\tfor status := range queue {\n\t\t\tif status == false {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\terr = WithContext(ctx, body, 1*time.Second)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tcallback := func() string {\n\t\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\t\tdefer cancel()\n\n\t\tvar errorMessage string\n\t\tfor _, pod := range ciliumPods {\n\t\t\tvar endpoints []models.Endpoint\n\t\t\tcmdRes := kub.CiliumEndpointsList(ctx, 
pod)\n\t\t\tif !cmdRes.WasSuccessful() {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to get endpoint list: %s\",\n\t\t\t\t\tpod, cmdRes.err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := cmdRes.Unmarshal(&endpoints)\n\t\t\tif err != nil {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to parse endpoint list: %s\",\n\t\t\t\t\tpod, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ep := range endpoints {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\tEndpoint: %d \\tIdentity: %d\\t State: %s\\n\",\n\t\t\t\t\tpod, ep.ID, ep.Status.Identity.ID, ep.Status.State)\n\t\t\t}\n\t\t}\n\t\treturn errorMessage\n\t}\n\treturn NewSSHMetaError(err.Error(), callback)\n}\n\n// WaitForCEPIdentity waits for a particular CEP to have an identity present.\nfunc (kub *Kubectl) WaitForCEPIdentity(ns, podName string) error {\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tep, err := kub.GetCiliumEndpoint(ns, podName)\n\t\tif err != nil || ep == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ep.Identity == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn ep.Identity.ID != 0, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\treturn WithContext(ctx, body, 1*time.Second)\n}\n\n// CiliumExecContext runs cmd in the specified Cilium pod with the given context.\nfunc (kub *Kubectl) CiliumExecContext(ctx context.Context, pod string, cmd string) *CmdRes {\n\tlimitTimes := 5\n\texecute := func() *CmdRes {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, CiliumNamespace, pod, cmd)\n\t\treturn kub.ExecContext(ctx, command)\n\t}\n\tvar res *CmdRes\n\t// Sometimes Kubectl returns 126 exit code, It use to happen in Nightly\n\t// tests when a lot of exec are in place (Cgroups issue). 
The upstream\n\t// changes did not fix the isse, and we need to make this workaround to\n\t// avoid Kubectl issue.\n\t// https://github.com/openshift/origin/issues/16246\n\tfor i := 0; i < limitTimes; i++ {\n\t\tres = execute()\n\t\tif res.GetExitCode() != 126 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\treturn res\n}\n\n// CiliumExecMustSucceed runs cmd in the specified Cilium pod.\n// it causes a test failure if the command was not successful.\nfunc (kub *Kubectl) CiliumExecMustSucceed(ctx context.Context, pod, cmd string, optionalDescription ...interface{}) *CmdRes {\n\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\tif !res.WasSuccessful() {\n\t\tres.SendToLog(false)\n\t}\n\tgomega.ExpectWithOffset(1, res).Should(\n\t\tCMDSuccess(), optionalDescription...)\n\treturn res\n}\n\n// CiliumExecUntilMatch executes the specified command repeatedly for the\n// specified Cilium pod until the given substring is present in stdout.\n// If the timeout is reached it will return an error.\nfunc (kub *Kubectl) CiliumExecUntilMatch(pod, cmd, substr string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\t\treturn strings.Contains(res.Stdout(), substr)\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"%s is not in the output after timeout\", substr),\n\t\t&TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// WaitForCiliumInitContainerToFinish waits for all Cilium init containers to\n// finish\nfunc (kub *Kubectl) WaitForCiliumInitContainerToFinish() error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(CiliumNamespace, \"-l k8s-app=cilium\").Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\t\tfor _, pod := range podList.Items {\n\t\t\tfor _, v := range pod.Status.InitContainerStatuses {\n\t\t\t\tif v.State.Terminated != nil && (v.State.Terminated.Reason != \"Completed\" || v.State.Terminated.ExitCode != 0) {\n\t\t\t\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\t\t\t\"podName\": pod.Name,\n\t\t\t\t\t\t\"currentState\": v.State.String(),\n\t\t\t\t\t}).Infof(\"Cilium Init container not completed\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\treturn WithTimeout(body, \"Cilium Init Container was not able to initialize or had a successful run\", &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumNodesWait waits until all nodes in the Kubernetes cluster are annotated\n// with Cilium annotations. Its runtime is bounded by a maximum of `HelperTimeout`.\n// When a node is annotated with said annotations, it indicates\n// that the tunnels in the nodes are set up and that cross-node traffic can be\n// tested. 
Returns an error if the timeout is exceeded for waiting for the nodes\n// to be annotated.\nfunc (kub *Kubectl) CiliumNodesWait() (bool, error) {\n\tbody := func() bool {\n\t\tfilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.metadata.annotations.io\\.cilium\\.network\\.ipv4-pod-cidr}{\"\\n\"}{end}`\n\t\tdata := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get nodes -o jsonpath='%s'\", KubectlCmd, filter))\n\t\tif !data.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\t\tresult := data.KVOutput()\n\t\tignoreNode := GetNodeWithoutCilium()\n\t\tfor k, v := range result {\n\t\t\tif k == ignoreNode {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == \"\" {\n\t\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' does not have Cilium metadata\", k)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' IPv4 address: '%v'\", k, v)\n\t\t}\n\t\treturn true\n\t}\n\terr := WithTimeout(body, \"Kubernetes node does not have cilium metadata\", &TimeoutConfig{Timeout: HelperTimeout})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// LoadedPolicyInFirstAgent returns the policy as loaded in the first cilium\n// agent that is found in the cluster\nfunc (kub *Kubectl) LoadedPolicyInFirstAgent() (string, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve cilium pods: %s\", err)\n\t}\n\tfor _, pod := range pods {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get\")\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot execute cilium policy get: %s\", res.Stdout())\n\t\t} else {\n\t\t\treturn res.CombineOutput().String(), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no running cilium pods\")\n}\n\n// WaitPolicyDeleted waits for policy policyName to be deleted from the\n// cilium-agent running in pod. Returns an error if policyName was unable to\n// be deleted after some amount of time.\nfunc (kub *Kubectl) WaitPolicyDeleted(pod string, policyName string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyName))\n\n\t\t// `cilium policy get <policy name>` fails if the policy is not loaded,\n\t\t// which is the condition we want.\n\t\treturn !res.WasSuccessful()\n\t}\n\n\treturn WithTimeout(body, fmt.Sprintf(\"Policy %s was not deleted in time\", policyName), &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumIsPolicyLoaded returns true if the policy is loaded in the given\n// cilium Pod. 
it returns false in case that the policy is not in place\nfunc (kub *Kubectl) CiliumIsPolicyLoaded(pod string, policyCmd string) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyCmd))\n\treturn res.WasSuccessful()\n}\n\n// CiliumPolicyRevision returns the policy revision in the specified Cilium pod.\n// Returns an error if the policy revision cannot be retrieved.\nfunc (kub *Kubectl) CiliumPolicyRevision(pod string) (int, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get -o json\")\n\tif !res.WasSuccessful() {\n\t\treturn -1, fmt.Errorf(\"cannot get the revision %s\", res.Stdout())\n\t}\n\n\trevision, err := res.Filter(\"{.revision}\")\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"cannot get revision from json: %s\", err)\n\t}\n\n\trevi, err := strconv.Atoi(strings.Trim(revision.String(), \"\\n\"))\n\tif err != nil {\n\t\tkub.Logger().Errorf(\"revision on pod '%s' is not valid '%s'\", pod, res.CombineOutput())\n\t\treturn -1, err\n\t}\n\treturn revi, nil\n}\n\n// ResourceLifeCycleAction represents an action performed upon objects in\n// Kubernetes.\ntype ResourceLifeCycleAction string\n\nfunc (kub *Kubectl) getPodRevisions() (map[string]int, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods\")\n\t\treturn nil, fmt.Errorf(\"Cannot get cilium pods: %s\", err)\n\t}\n\n\trevisions := make(map[string]int)\n\tfor _, pod := range pods {\n\t\trevision, err := kub.CiliumPolicyRevision(pod)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pod policy revision\")\n\t\t\treturn nil, fmt.Errorf(\"Cannot retrieve cilium pod %s policy revision: %s\", pod, err)\n\t\t}\n\t\trevisions[pod] = revision\n\t}\n\treturn revisions, nil\n}\n\nfunc (kub *Kubectl) waitNextPolicyRevisions(podRevisions map[string]int, mustHavePolicy bool, timeout time.Duration) error {\n\tnpFilter := fmt.Sprintf(\n\t\t`{range .items[*]}{\"%s=\"}{.metadata.name}{\" %s=\"}{.metadata.namespace}{\"\\n\"}{end}`,\n\t\tKubectlPolicyNameLabel, KubectlPolicyNameSpaceLabel)\n\n\tknpBody := func() bool {\n\t\tknp := kub.ExecShort(fmt.Sprintf(\"%s get --all-namespaces netpol -o jsonpath='%s'\",\n\t\t\tKubectlCmd, npFilter))\n\t\tresult := knp.ByLines()\n\t\tif len(result) == 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, item := range result {\n\t\t\tfor ciliumPod, revision := range podRevisions {\n\t\t\t\tif mustHavePolicy {\n\t\t\t\t\tif !kub.CiliumIsPolicyLoaded(ciliumPod, item) {\n\t\t\t\t\t\tkub.Logger().Infof(\"Policy '%s' is not ready on Cilium pod '%s'\", item, ciliumPod)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tdesiredRevision := revision + 1\n\t\t\t\tres := kub.CiliumExecContext(ctx, ciliumPod, fmt.Sprintf(\"cilium policy wait %d --max-wait-time %d\", desiredRevision, int(ShortCommandTimeout.Seconds())))\n\t\t\t\tif res.GetExitCode() != 0 {\n\t\t\t\t\tkub.Logger().Infof(\"Failed to wait for policy revision %d on pod %s\", desiredRevision, ciliumPod)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tknpBody,\n\t\t\"Timed out while waiting for CNP to be applied on all 
PODs\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\nfunc getPolicyEnforcingJqFilter(numNodes int) string {\n\t// Test filter: https://jqplay.org/s/EgNzc06Cgn\n\treturn fmt.Sprintf(\n\t\t`[.items[]|{name:.metadata.name, enforcing: (.status|if has(\"nodes\") then .nodes |to_entries|map_values(.value.enforcing) + [(.|length >= %d)]|all else true end)|tostring, status: has(\"status\")|tostring}]`,\n\t\tnumNodes)\n}\n\n// CiliumPolicyAction performs the specified action in Kubernetes for the policy\n// stored in path filepath and waits up until timeout seconds for the policy\n// to be applied in all Cilium endpoints. Returns an error if the policy is not\n// imported before the timeout is\n// exceeded.\nfunc (kub *Kubectl) CiliumPolicyAction(namespace, filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath, namespace); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tcmds := map[string]string{\n\t\t\t\t\"CNP\": fmt.Sprintf(\"%s get cnp --all-namespaces -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t\t\"CCNP\": fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t}\n\n\t\t\tfor ctx, cmd := range cmds {\n\t\t\t\tvar data []map[string]string\n\n\t\t\t\tres := kub.ExecShort(cmd)\n\t\t\t\tif !res.WasSuccessful() {\n\t\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Errorf(\"cannot get %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\terr := res.Unmarshal(&data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot unmarshal json for %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tfor _, item := range data {\n\t\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\t\tkub.Logger().Errorf(\"%s policy '%s' is not enforcing yet\", ctx, item[\"name\"])\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\t}\n\n\t\terr = WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting for policies to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumClusterwidePolicyAction applies a clusterwide policy action as described in action argument. 
It\n// then wait till timeout Duration for the policy to be applied to all the cilium endpoints.\nfunc (kub *Kubectl) CiliumClusterwidePolicyAction(filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tvar data []map[string]string\n\t\t\tcmd := fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\",\n\t\t\t\tKubectlCmd, jqFilter)\n\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Error(\"cannot get ccnp status\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\terr := res.Unmarshal(&data)\n\t\t\tif err != nil {\n\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot unmarshal json\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tfor _, item := range data {\n\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\tkub.Logger().Errorf(\"Clusterwide policy '%s' is not enforcing yet\", item[\"name\"])\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\terr := WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting CCNP to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumReport report the cilium pod to the log and appends the logs for the\n// given commands.\nfunc (kub *Kubectl) CiliumReport(commands ...string) {\n\tif config.CiliumTestConfig.SkipLogGathering {\n\t\tginkgoext.GinkgoPrint(\"Skipped gathering logs (-cilium.skipLogs=true)\\n\")\n\t\treturn\n\t}\n\n\t// Log gathering for Cilium should take at most 10 minutes. 
This ensures that\n\t// the CiliumReport stage doesn't cause the entire CI to hang.\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)\n\tdefer cancel()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.GatherLogs(ctx)\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.DumpCiliumCommandOutput(ctx, CiliumNamespace)\n\t}()\n\n\tkub.CiliumCheckReport(ctx)\n\n\tpods, err := kub.GetCiliumPodsContext(ctx, CiliumNamespace)\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods on ReportDump\")\n\t}\n\tres := kub.ExecContextShort(ctx, fmt.Sprintf(\"%s get pods -o wide --all-namespaces\", KubectlCmd))\n\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\n\tresults := make([]*CmdRes, 0, len(pods)*len(commands))\n\tginkgoext.GinkgoPrint(\"Fetching command output from pods %s\", pods)\n\tfor _, pod := range pods {\n\t\tfor _, cmd := range commands {\n\t\t\tres = kub.ExecPodCmdBackground(ctx, CiliumNamespace, pod, cmd, ExecOptions{SkipLog: true})\n\t\t\tresults = append(results, res)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tfor _, res := range results {\n\t\tres.WaitUntilFinish()\n\t\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\t}\n}\n\n// CiliumCheckReport prints a few checks on the Junit output to provide more\n// context to users. The list of checks that prints are the following:\n// - Number of Kubernetes and Cilium policies installed.\n// - Policy enforcement status by endpoint.\n// - Controller, health, kvstore status.\nfunc (kub *Kubectl) CiliumCheckReport(ctx context.Context) {\n\tpods, _ := kub.GetCiliumPods()\n\tfmt.Fprintf(CheckLogs, \"Cilium pods: %v\\n\", pods)\n\n\tvar policiesFilter = `{range .items[*]}{.metadata.namespace}{\"::\"}{.metadata.name}{\" \"}{end}`\n\tnetpols := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get netpol -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"Netpols loaded: %v\\n\", netpols.GetStdOut())\n\n\tcnp := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cnp -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"CiliumNetworkPolicies loaded: %v\\n\", cnp.GetStdOut())\n\n\tcepFilter := `{range .items[*]}{.metadata.name}{\"=\"}{.status.policy.ingress.enforcing}{\":\"}{.status.policy.egress.enforcing}{\"\\n\"}{end}`\n\tcepStatus := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cep -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, cepFilter))\n\n\tfmt.Fprintf(CheckLogs, \"Endpoint Policy Enforcement:\\n\")\n\n\ttable := tabwriter.NewWriter(CheckLogs, 5, 0, 3, ' ', 0)\n\tfmt.Fprintf(table, \"Pod\\tIngress\\tEgress\\n\")\n\tfor pod, policy := range cepStatus.KVOutput() {\n\t\tdata := strings.SplitN(policy, \":\", 2)\n\t\tif len(data) != 2 {\n\t\t\tdata[0] = \"invalid value\"\n\t\t\tdata[1] = \"invalid value\"\n\t\t}\n\t\tfmt.Fprintf(table, \"%s\\t%s\\t%s\\n\", pod, data[0], data[1])\n\t}\n\ttable.Flush()\n\n\tvar controllersFilter = `{range .controllers[*]}{.name}{\"=\"}{.status.consecutive-failure-count}::{.status.last-failure-msg}{\"\\n\"}{end}`\n\tvar failedControllers string\n\tfor _, pod := range pods {\n\t\tvar prefix = \"\"\n\t\tstatus := kub.CiliumExecContext(ctx, pod, \"cilium status --all-controllers -o json\")\n\t\tresult, err := status.Filter(controllersFilter)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"Cannot filter controller status output\")\n\t\t\tcontinue\n\t\t}\n\t\tvar total = 0\n\t\tvar failed = 0\n\t\tfor name, data := range 
result.KVOutput() {\n\t\t\ttotal++\n\t\t\tstatus := strings.SplitN(data, \"::\", 2)\n\t\t\tif len(status) != 2 {\n\t\t\t\t// Just make sure that the the len of the output is 2 to not\n\t\t\t\t// fail on index error in the following lines.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif status[0] != \"\" {\n\t\t\t\tfailed++\n\t\t\t\tprefix = \"⚠️ \"\n\t\t\t\tfailedControllers += fmt.Sprintf(\"controller %s failure '%s'\\n\", name, status[1])\n\t\t\t}\n\t\t}\n\t\tstatusFilter := `Status: {.cilium.state} Health: {.cluster.ciliumHealth.state}` +\n\t\t\t` Nodes \"{.cluster.nodes[*].name}\" ContinerRuntime: {.container-runtime.state}` +\n\t\t\t` Kubernetes: {.kubernetes.state} KVstore: {.kvstore.state}`\n\t\tdata, _ := status.Filter(statusFilter)\n\t\tfmt.Fprintf(CheckLogs, \"%sCilium agent '%s': %s Controllers: Total %d Failed %d\\n\",\n\t\t\tprefix, pod, data, total, failed)\n\t\tif failedControllers != \"\" {\n\t\t\tfmt.Fprintf(CheckLogs, \"Failed controllers:\\n %s\", failedControllers)\n\t\t}\n\t}\n}\n\n// ValidateNoErrorsInLogs checks that cilium logs since the given duration (By\n// default `CurrentGinkgoTestDescription().Duration`) do not contain any of the\n// known-bad messages (e.g., `deadlocks` or `segmentation faults`). In case of\n// any of these messages, it'll mark the test as failed.\nfunc (kub *Kubectl) ValidateNoErrorsInLogs(duration time.Duration) {\n\tblacklist := GetBadLogMessages()\n\tkub.ValidateListOfErrorsInLogs(duration, blacklist)\n}\n\n// ValidateListOfErrorsInLogs is similar to ValidateNoErrorsInLogs, but\n// takes a blacklist of bad log messages instead of using the default list.\nfunc (kub *Kubectl) ValidateListOfErrorsInLogs(duration time.Duration, blacklist map[string][]string) {\n\tif kub == nil {\n\t\t// if `kub` is nil, this is run after the test failed while setting up `kub` and we are unable to gather logs\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tapps := map[string]string{\n\t\t\"k8s-app=cilium\": CiliumTestLog,\n\t\t\"k8s-app=hubble-relay\": HubbleRelayTestLog,\n\t\t\"io.cilium/app=operator\": CiliumOperatorTestLog,\n\t}\n\n\twg := sync.WaitGroup{}\n\twg.Add(len(apps))\n\tfor app, file := range apps {\n\t\tgo func(app, file string) {\n\t\t\tvar logs string\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s logs --tail=-1 --timestamps=true -l %s --since=%vs\",\n\t\t\t\tKubectlCmd, CiliumNamespace, app, duration.Seconds())\n\t\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s --previous\", cmd), ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tres = kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// Keep the cilium logs for the given test in a separate file.\n\t\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot create report directory\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = ioutil.WriteFile(\n\t\t\t\t\tfmt.Sprintf(\"%s/%s\", testPath, file),\n\t\t\t\t\t[]byte(logs), LogPerm)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot create %s\", CiliumTestLog)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfailIfContainsBadLogMsg(logs, app, blacklist)\n\n\t\t\tfmt.Fprint(CheckLogs, logutils.LogErrorsSummary(logs))\n\t\t}(app, file)\n\t}\n\n\twg.Wait()\n}\n\n// GatherCiliumCoreDumps copies core dumps if are present in the /tmp folder\n// 
into the test report folder for further analysis.\nfunc (kub *Kubectl) GatherCiliumCoreDumps(ctx context.Context, ciliumPod string) {\n\tlog := kub.Logger().WithField(\"pod\", ciliumPod)\n\n\tcores := kub.CiliumExecContext(ctx, ciliumPod, \"ls /tmp/ | grep core\")\n\tif !cores.WasSuccessful() {\n\t\tlog.Debug(\"There is no core dumps in the pod\")\n\t\treturn\n\t}\n\n\ttestPath, err := CreateReportDirectory()\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\treturn\n\t}\n\tresultPath := filepath.Join(kub.BasePath(), testPath)\n\n\tfor _, core := range cores.ByLines() {\n\t\tdst := filepath.Join(resultPath, core)\n\t\tsrc := filepath.Join(\"/tmp/\", core)\n\t\tcmd := fmt.Sprintf(\"%s -n %s cp %s:%s %s\",\n\t\t\tKubectlCmd, CiliumNamespace,\n\t\t\tciliumPod, src, dst)\n\t\tres := kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.WithField(\"output\", res.CombineOutput()).Error(\"Cannot get core from pod\")\n\t\t}\n\t}\n}\n\n// ExecInFirstPod runs given command in one pod that matches given selector and namespace\n// An error is returned if no pods can be found\nfunc (kub *Kubectl) ExecInFirstPod(ctx context.Context, namespace, selector, cmd string, options ...ExecOptions) *CmdRes {\n\tnames, err := kub.GetPodNamesContext(ctx, namespace, selector)\n\tif err != nil {\n\t\treturn &CmdRes{err: err}\n\t}\n\tif len(names) == 0 {\n\t\treturn &CmdRes{err: fmt.Errorf(\"Cannot find pods matching %s to execute %s\", selector, cmd)}\n\t}\n\n\tname := names[0]\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, name, cmd)\n\treturn kub.ExecContext(ctx, command)\n}" | |
} | |
}, | |
{ | |
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query", | |
"path": "test/helpers/kubectl.go", | |
"start": { | |
"line": 49, | |
"col": 2 | |
}, | |
"end": { | |
"line": 3310, | |
"col": 2 | |
}, | |
"extra": { | |
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n", | |
"metavars": { | |
"$CTX": { | |
"start": { | |
"line": 3306, | |
"col": 35, | |
"offset": 117981 | |
}, | |
"end": { | |
"line": 3306, | |
"col": 38, | |
"offset": 117984 | |
}, | |
"abstract_content": "ctx", | |
"unique_id": { | |
"type": "id", | |
"value": "ctx", | |
"kind": "Param", | |
"sid": 439 | |
} | |
}, | |
"$OBJ": { | |
"start": { | |
"line": 3306, | |
"col": 19, | |
"offset": 117965 | |
}, | |
"end": { | |
"line": 3306, | |
"col": 22, | |
"offset": 117968 | |
}, | |
"abstract_content": "kub", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b" | |
} | |
}, | |
"$FXN": { | |
"start": { | |
"line": 3305, | |
"col": 14, | |
"offset": 117875 | |
}, | |
"end": { | |
"line": 3305, | |
"col": 25, | |
"offset": 117886 | |
}, | |
"abstract_content": "fmt.Sprintf", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d" | |
} | |
}, | |
"$OTHER": { | |
"start": { | |
"line": 3305, | |
"col": 3, | |
"offset": 117864 | |
}, | |
"end": { | |
"line": 3305, | |
"col": 10, | |
"offset": 117871 | |
}, | |
"abstract_content": "command", | |
"unique_id": { | |
"type": "AST", | |
"md5sum": "22de25c79fec71b1caca4adfb91b6622" | |
} | |
}, | |
"$QUERY": { | |
"start": { | |
"line": 49, | |
"col": 2, | |
"offset": 1261 | |
}, | |
"end": { | |
"line": 49, | |
"col": 12, | |
"offset": 1271 | |
}, | |
"abstract_content": "KubectlCmd", | |
"unique_id": { | |
"type": "id", | |
"value": "KubectlCmd", | |
"kind": "Global", | |
"sid": 16 | |
} | |
} | |
}, | |
"metadata": { | |
"owasp": "A1: Injection", | |
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", | |
"source-rule-url": "https://github.com/securego/gosec" | |
}, | |
"severity": "WARNING", | |
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}\n\n// ApplyDefault applies give filepath with other options set to default\nfunc (kub *Kubectl) ApplyDefault(filePath string) *CmdRes {\n\treturn kub.Apply(ApplyOptions{FilePath: filePath})\n}\n\n// Create creates the Kubernetes kanifest located at path filepath.\nfunc (kub *Kubectl) Create(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"creating %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s create -f %s\", KubectlCmd, filePath))\n}\n\n// CreateResource is a wrapper around `kubernetes create <resource>\n// <resourceName>.\nfunc (kub *Kubectl) CreateResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating resource %s with name %s\", resource, resourceName))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create %s %s\", resource, resourceName))\n}\n\n// DeleteResource is a wrapper 
around `kubectl delete <resource>\n// <resourceName>`.\nfunc (kub *Kubectl) DeleteResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"deleting resource %s with name %s\", resource, resourceName))\n\treturn kub.Exec(fmt.Sprintf(\"kubectl delete %s %s\", resource, resourceName))\n}\n\n// DeleteInNamespace deletes the Kubernetes manifest at path filepath in a\n// particular namespace\nfunc (kub *Kubectl) DeleteInNamespace(namespace, filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s in namespace %s\", filePath, namespace)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s -n %s delete -f %s\", KubectlCmd, namespace, filePath))\n}\n\n// Delete deletes the Kubernetes manifest at path filepath.\nfunc (kub *Kubectl) Delete(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// DeleteAndWait deletes the Kubernetes manifest at path filePath and waits\n// for the associated resources to be gone.\n// If the ignoreNotFound parameter is true, we don't error if the resource to be\n// deleted is not found in the cluster.\nfunc (kub *Kubectl) DeleteAndWait(filePath string, ignoreNotFound bool) *CmdRes {\n\tkub.Logger().Debugf(\"waiting for resources in %q to be deleted\", filePath)\n\tvar ignoreOpt string\n\tif ignoreNotFound {\n\t\tignoreOpt = \"--ignore-not-found\"\n\t}\n\treturn kub.ExecMiddle(\n\t\tfmt.Sprintf(\"%s delete -f %s --wait %s\", KubectlCmd, filePath, ignoreOpt))\n}\n\n// DeleteLong deletes the Kubernetes manifest at path filepath with longer timeout.\nfunc (kub *Kubectl) DeleteLong(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// PodsHaveCiliumIdentity validates that all pods matching the podSelector have\n// a CiliumEndpoint resource mirroring it and an identity is assigned to it. If\n// any pods do not match these criteria, an error is returned.\nfunc (kub *Kubectl) PodsHaveCiliumIdentity(namespace, podSelector string) error {\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o json\", KubectlCmd, namespace, podSelector))\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve pods for selector %s: %s\", podSelector, res.OutputPrettyPrint())\n\t}\n\n\tpodList := &v1.PodList{}\n\terr := res.Unmarshal(podList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal pods for selector %s: %s\", podSelector, err)\n\t}\n\n\tfor _, pod := range podList.Items {\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ep == nil {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumEndpoint\", namespace, pod.Name)\n\t\t}\n\n\t\tif ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumIdentity\", namespace, pod.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeploymentIsReady validates that a deployment has at least one replica and\n// that all replicas are:\n// - up-to-date\n// - ready\n//\n// If the above condition is not met, an error is returned. 
If all replicas are\n// ready, then the number of replicas is returned.\nfunc (kub *Kubectl) DeploymentIsReady(namespace, deployment string) (int, error) {\n\tfullName := namespace + \"/\" + deployment\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get deployment %s -o json\", KubectlCmd, namespace, deployment))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve deployment %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.Deployment{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal deployment %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.Replicas == 0 {\n\t\treturn 0, fmt.Errorf(\"replicas count is zero\")\n\t}\n\n\tif d.Status.AvailableReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are available\", d.Status.AvailableReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.ReadyReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are ready\", d.Status.ReadyReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.UpdatedReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are up-to-date\", d.Status.UpdatedReplicas, d.Status.Replicas)\n\t}\n\n\treturn int(d.Status.Replicas), nil\n}\n\nfunc (kub *Kubectl) GetService(namespace, service string) (*v1.Service, error) {\n\tfullName := namespace + \"/\" + service\n\tres := kub.Get(namespace, \"service \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to retrieve service %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tvar serviceObj v1.Service\n\terr := res.Unmarshal(&serviceObj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal service %s: %s\", fullName, err)\n\t}\n\n\treturn &serviceObj, nil\n}\n\nfunc absoluteServiceName(namespace, service string) string {\n\tfullServiceName := service + \".\" + namespace\n\n\tif !strings.HasSuffix(fullServiceName, ServiceSuffix) {\n\t\tfullServiceName = fullServiceName + \".\" + ServiceSuffix\n\t}\n\n\treturn fullServiceName\n}\n\nfunc (kub *Kubectl) KubernetesDNSCanResolve(namespace, service string) error {\n\tserviceToResolve := absoluteServiceName(namespace, service)\n\n\tkubeDnsService, err := kub.GetService(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(kubeDnsService.Spec.Ports) == 0 {\n\t\treturn fmt.Errorf(\"kube-dns service has no ports defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\tdefer cancel()\n\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tcmd := fmt.Sprintf(\"dig +short %s @%s | grep -v -e '^;'\", serviceToResolve, kubeDnsService.Spec.ClusterIP)\n\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\tif res.err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve service name %s with DND server %s by running '%s' Cilium pod: %s\",\n\t\t\tserviceToResolve, kubeDnsService.Spec.ClusterIP, cmd, res.OutputPrettyPrint())\n\t}\n\tif net.ParseIP(res.SingleOut()) == nil {\n\t\treturn fmt.Errorf(\"dig did not return an IP: %s\", res.SingleOut())\n\t}\n\n\tdestinationService, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the destination service is headless, there is no ClusterIP, the\n\t// IP returned by the dig is the IP of one of the pods.\n\tif destinationService.Spec.ClusterIP == v1.ClusterIPNone {\n\t\tcmd := fmt.Sprintf(\"dig +tcp %s @%s\", serviceToResolve, 
kubeDnsService.Spec.ClusterIP)\n\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to resolve service name %s by running '%s': %s\",\n\t\t\t\tserviceToResolve, cmd, res.OutputPrettyPrint())\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(res.SingleOut(), destinationService.Spec.ClusterIP) {\n\t\treturn fmt.Errorf(\"IP returned '%s' does not match the ClusterIP '%s' of the destination service\",\n\t\t\tres.SingleOut(), destinationService.Spec.ClusterIP)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) validateServicePlumbingInCiliumPod(fullName, ciliumPod string, serviceObj *v1.Service, endpointsObj v1.Endpoints) error {\n\tjq := \"jq -r '[ .[].status.realized | select(.\\\"frontend-address\\\".ip==\\\"\" + serviceObj.Spec.ClusterIP + \"\\\") | . ] '\"\n\tcmd := \"cilium service list -o json | \" + jq\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn fmt.Errorf(\"ClusterIP %s not found in service list of cilium pod %s\",\n\t\t\tserviceObj.Spec.ClusterIP, ciliumPod)\n\t}\n\n\tvar realizedServices []models.ServiceSpec\n\terr := res.Unmarshal(&realizedServices)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal service spec '%s': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tcmd = \"cilium bpf lb list -o json\"\n\tres = kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar lbMap map[string][]string\n\terr = res.Unmarshal(&lbMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal cilium bpf lb list output: %s\", err)\n\t}\n\n\tfor _, port := range serviceObj.Spec.Ports {\n\t\tvar foundPort *v1.ServicePort\n\t\tfor _, realizedService := range realizedServices {\n\t\t\tif compareServicePortToFrontEnd(&port, realizedService.FrontendAddress) {\n\t\t\t\tfoundPort = &port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundPort == nil {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t\tlKey := serviceAddressKey(serviceObj.Spec.ClusterIP, fmt.Sprintf(\"%d\", port.Port), string(port.Protocol), \"\")\n\t\tif _, ok := lbMap[lKey]; !ok {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium bpf lb list of pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t}\n\n\tfor _, subset := range endpointsObj.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tfor _, port := range subset.Ports {\n\t\t\t\tfoundBackend, foundBackendLB := false, false\n\t\t\t\tfor _, realizedService := range realizedServices {\n\t\t\t\t\tfrontEnd := realizedService.FrontendAddress\n\t\t\t\t\tlbKey := serviceAddressKey(frontEnd.IP, fmt.Sprintf(\"%d\", frontEnd.Port), string(frontEnd.Protocol), \"\")\n\t\t\t\t\tlb := lbMap[lbKey]\n\t\t\t\t\tfor _, backAddr := range realizedService.BackendAddresses {\n\t\t\t\t\t\tif addr.IP == *backAddr.IP && uint16(port.Port) == backAddr.Port &&\n\t\t\t\t\t\t\tcompareProto(string(port.Protocol), backAddr.Protocol) {\n\t\t\t\t\t\t\tfoundBackend = true\n\t\t\t\t\t\t\tfor _, backend := range lb {\n\t\t\t\t\t\t\t\tif strings.Contains(backend, 
net.JoinHostPort(*backAddr.IP, fmt.Sprintf(\"%d\", port.Port))) {\n\t\t\t\t\t\t\t\t\tfoundBackendLB = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundBackend {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\n\t\t\t\tif !foundBackendLB {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in datapath of cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateServicePlumbing ensures that a service in a namespace is successfully\n// plumbed by all Cilium pods in the cluster:\n// - The service and endpoints are found in `cilium service list`\n// - The service and endpoints are found in `cilium bpf lb list`\nfunc (kub *Kubectl) ValidateServicePlumbing(namespace, service string) error {\n\tfullName := namespace + \"/\" + service\n\n\tserviceObj, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceObj == nil {\n\t\treturn fmt.Errorf(\"%s service not found\", fullName)\n\t}\n\n\tres := kub.Get(namespace, \"endpoints \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve endpoints %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tif serviceObj.Spec.ClusterIP == v1.ClusterIPNone {\n\t\treturn nil\n\t}\n\n\tvar endpointsObj v1.Endpoints\n\terr = res.Unmarshal(&endpointsObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal endpoints %s: %s\", fullName, err)\n\t}\n\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg, _ := errgroup.WithContext(context.TODO())\n\tfor _, ciliumPod := range ciliumPods {\n\t\tciliumPod := ciliumPod\n\t\tg.Go(func() error {\n\t\t\tvar err error\n\t\t\t// The plumbing of Kubernetes services typically lags\n\t\t\t// behind a little bit if Cilium was just restarted.\n\t\t\t// Give this a tight timeout to avoid always failing.\n\t\t\ttimeoutErr := RepeatUntilTrue(func() bool {\n\t\t\t\terr = kub.validateServicePlumbingInCiliumPod(fullName, ciliumPod, serviceObj, endpointsObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tginkgoext.By(\"Checking service %s plumbing in cilium pod %s: %s\", fullName, ciliumPod, err)\n\t\t\t\t}\n\t\t\t\treturn err == nil\n\t\t\t}, &TimeoutConfig{Timeout: 5 * time.Second, Ticker: 1 * time.Second})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if timeoutErr != nil {\n\t\t\t\treturn timeoutErr\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ValidateKubernetesDNS validates that the Kubernetes DNS server has been\n// deployed correctly and can resolve DNS names. The following validations are\n// done:\n// - The Kubernetes DNS deployment has at least one replica\n// - All replicas are up-to-date and ready\n// - All pods matching the deployment are represented by a CiliumEndpoint with an identity\n// - The kube-system/kube-dns service is correctly plumbed in all Cilium agents\n// - The service \"default/kubernetes\" can be resolved via the KubernetesDNS\n// and the IP returned matches the ClusterIP in the service\nfunc (kub *Kubectl) ValidateKubernetesDNS() error {\n\t// The deployment is always validated first and not in parallel. 
There\n\t// is no point in validating correct plumbing if the DNS is not even up\n\t// and running.\n\tginkgoext.By(\"Checking if deployment is ready\")\n\t_, err := kub.DeploymentIsReady(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\t_, err = kub.DeploymentIsReady(KubeSystemNamespace, \"coredns\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\terrQueue = make(chan error, 3)\n\t)\n\twg.Add(3)\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if pods have identity\")\n\t\tif err := kub.PodsHaveCiliumIdentity(KubeSystemNamespace, kubeDNSLabel); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if DNS can resolve\")\n\t\tif err := kub.KubernetesDNSCanResolve(\"default\", \"kubernetes\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if kube-dns service is plumbed correctly\")\n\t\tif err := kub.ValidateServicePlumbing(KubeSystemNamespace, \"kube-dns\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errQueue:\n\t\treturn err\n\tdefault:\n\t}\n\n\treturn nil\n}\n\n// RestartUnmanagedPodsInNamespace restarts all pods in a namespace which are:\n// * not host networking\n// * not managed by Cilium already\nfunc (kub *Kubectl) RestartUnmanagedPodsInNamespace(namespace string, excludePodPrefix ...string) {\n\tpodList := &v1.PodList{}\n\tcmd := KubectlCmd + \" -n \" + namespace + \" get pods -o json\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to retrieve all pods to restart unmanaged pods with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\tif err := res.Unmarshal(podList); err != nil {\n\t\tginkgoext.Failf(\"Unable to unmarshal podlist: %s\", err)\n\t}\n\niteratePods:\n\tfor _, pod := range podList.Items {\n\t\tif pod.Spec.HostNetwork || pod.DeletionTimestamp != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, prefix := range excludePodPrefix {\n\t\t\tif strings.HasPrefix(pod.Name, prefix) {\n\t\t\t\tcontinue iteratePods\n\t\t\t}\n\t\t}\n\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil || ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\tginkgoext.By(\"Restarting unmanaged pod %s/%s\", namespace, pod.Name)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete pod \" + pod.Name\n\t\t\tres = kub.Exec(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.Failf(\"Unable to restart unmanaged pod with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// RedeployKubernetesDnsIfNecessary validates if the Kubernetes DNS is\n// functional and re-deploys it if it is not and then waits for it to deploy\n// successfully and become operational. 
See ValidateKubernetesDNS() for the\n// list of conditions that must be met for Kubernetes DNS to be considered\n// operational.\nfunc (kub *Kubectl) RedeployKubernetesDnsIfNecessary() {\n\tginkgoext.By(\"Validating if Kubernetes DNS is deployed\")\n\terr := kub.ValidateKubernetesDNS()\n\tif err == nil {\n\t\tginkgoext.By(\"Kubernetes DNS is up and operational\")\n\t\treturn\n\t} else {\n\t\tginkgoext.By(\"Kubernetes DNS is not ready: %s\", err)\n\t}\n\n\tginkgoext.By(\"Restarting Kubernetes DNS (-l %s)\", kubeDNSLabel)\n\tres := kub.DeleteResource(\"pod\", \"-n \"+KubeSystemNamespace+\" -l \"+kubeDNSLabel)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to delete DNS pods: %s\", res.OutputPrettyPrint())\n\t}\n\n\tginkgoext.By(\"Waiting for Kubernetes DNS to become operational\")\n\terr = RepeatUntilTrueDefaultTimeout(func() bool {\n\t\terr := kub.ValidateKubernetesDNS()\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Kubernetes DNS is not ready yet: %s\", err)\n\t\t}\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s -l %s\", KubectlCmd, KubeSystemNamespace, kubeDNSLabel))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\n\t\tginkgoext.Fail(\"Kubernetes DNS did not become ready in time\")\n\t}\n}\n\n// WaitKubeDNS waits until the kubeDNS pods are ready. In case of exceeding the\n// default timeout it returns an error.\nfunc (kub *Kubectl) WaitKubeDNS() error {\n\treturn kub.WaitforPods(KubeSystemNamespace, fmt.Sprintf(\"-l %s\", kubeDNSLabel), DNSHelperTimeout)\n}\n\n// WaitForKubeDNSEntry waits until the given DNS entry exists in the kube-dns\n// service. If the container is not ready after timeout it returns an error. The\n// name's format query should be `${name}.${namespace}`. If `svc.cluster.local`\n// is not present, it appends to the given name and it checks the service's FQDN.\nfunc (kub *Kubectl) WaitForKubeDNSEntry(serviceName, serviceNamespace string) error {\n\tlogger := kub.Logger().WithFields(logrus.Fields{\"serviceName\": serviceName, \"serviceNamespace\": serviceNamespace})\n\n\tserviceNameWithNamespace := fmt.Sprintf(\"%s.%s\", serviceName, serviceNamespace)\n\tif !strings.HasSuffix(serviceNameWithNamespace, ServiceSuffix) {\n\t\tserviceNameWithNamespace = fmt.Sprintf(\"%s.%s\", serviceNameWithNamespace, ServiceSuffix)\n\t}\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tdigCMD := \"dig +short %s @%s | grep -v -e '^;'\"\n\n\t// If it fails we want to know if it's because of connection cannot be\n\t// established or DNS does not exist.\n\tdigCMDFallback := \"dig +tcp %s @%s\"\n\n\tdnsClusterIP, _, err := kub.GetServiceHostPort(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"cannot get kube-dns service IP\")\n\t\treturn err\n\t}\n\n\tbody := func() bool {\n\t\tserviceIP, _, err := kub.GetServiceHostPort(serviceNamespace, serviceName)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"cannot get service IP for service %s\", serviceNameWithNamespace)\n\t\t\treturn false\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\t\tdefer cancel()\n\t\t// ClusterIPNone denotes that this service is headless; there is no\n\t\t// service IP for this service, and thus the IP returned by `dig` is\n\t\t// an IP of the pod itself, not ClusterIPNone, which is what Kubernetes\n\t\t// shows as the IP for the service for headless services.\n\t\tif serviceIP == v1.ClusterIPNone {\n\t\t\tres := kub.ExecInFirstPod(ctx, 
LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\t\tif res.err != nil {\n\t\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\n\t\t\treturn res.WasSuccessful()\n\t\t}\n\t\tlog.Debugf(\"service is not headless; checking whether IP retrieved from DNS matches the IP for the service stored in Kubernetes\")\n\n\t\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMD, serviceNameWithNamespace, dnsClusterIP))\n\t\tif res.err != nil {\n\t\t\tlogger.Debugf(\"failed to run dig in log-gatherer pod\")\n\t\t\treturn false\n\t\t}\n\t\tserviceIPFromDNS := res.SingleOut()\n\t\tif !govalidator.IsIP(serviceIPFromDNS) {\n\t\t\tlogger.Debugf(\"output of dig (%s) did not return an IP\", serviceIPFromDNS)\n\t\t\treturn false\n\t\t}\n\n\t\t// Due to lag between new IPs for the same service being synced between\n\t\t// kube-apiserver and DNS, check if the IP for the service that is\n\t\t// stored in K8s matches the IP of the service cached in DNS. These\n\t\t// can be different, because some tests use the same service names.\n\t\t// Wait accordingly for the IPs to match, and for the service\n\t\t// name to resolve via DNS.\n\t\tif !strings.Contains(serviceIPFromDNS, serviceIP) {\n\t\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) does not match the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), fmt.Sprintf(digCMDFallback, serviceNameWithNamespace, dnsClusterIP))\n\t\t\treturn false\n\t\t}\n\t\tlogger.Debugf(\"service IP retrieved from DNS (%s) matches the IP for the service stored in Kubernetes (%s)\", serviceIPFromDNS, serviceIP)\n\t\treturn true\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"DNS '%s' is not ready after timeout\", serviceNameWithNamespace),\n\t\t&TimeoutConfig{Timeout: DNSHelperTimeout})\n}\n\n// WaitCleanAllTerminatingPods waits until all pods that are in `Terminating`\n// state are deleted correctly in the platform. In case of exceeding the\n// given timeout it returns an error.\nfunc (kub *Kubectl) WaitCleanAllTerminatingPods(timeout time.Duration) error {\n\treturn kub.WaitCleanAllTerminatingPodsInNs(\"\", timeout)\n}\n\n// WaitCleanAllTerminatingPodsInNs waits until all pods that are in `Terminating`\n// state are deleted correctly in the platform. 
In case of exceeding the\n// given timeout it returns an error.\nfunc (kub *Kubectl) WaitCleanAllTerminatingPodsInNs(ns string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\twhere := ns\n\t\tif where == \"\" {\n\t\t\twhere = \"--all-namespaces\"\n\t\t} else {\n\t\t\twhere = \"-n \" + where\n\t\t}\n\t\tres := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get pods %s -o jsonpath='{.items[*].metadata.deletionTimestamp}'\",\n\t\t\tKubectlCmd, where))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\n\t\tif res.Stdout() == \"\" {\n\t\t\t// Output is empty so no terminating containers\n\t\t\treturn true\n\t\t}\n\n\t\tpodsTerminating := len(strings.Split(res.Stdout(), \" \"))\n\t\tkub.Logger().WithField(\"Terminating pods\", podsTerminating).Info(\"List of pods terminating\")\n\t\tif podsTerminating > 0 {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tbody,\n\t\t\"Pods are still not deleted after a timeout\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\n// DeployPatchStdIn deploys the original Kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatchStdIn(original, patch string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest cannot be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local --dry-run -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch %s --local --dry-run`,\n\t\tKubectlCmd, original, patch))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch %s --local -o yaml`,\n\t\t\tKubectlCmd, original, patch),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patch)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// DeployPatch deploys the original Kubernetes descriptor with the given patch.\nfunc (kub *Kubectl) DeployPatch(original, patchFileName string) error {\n\t// debugYaml only dumps the full created yaml file to the test output if\n\t// the cilium manifest cannot be created correctly.\n\tdebugYaml := func(original, patch string) {\n\t\t_ = kub.ExecShort(fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patch))\n\t}\n\n\t// validation 1st\n\tres := kub.ExecShort(fmt.Sprintf(\n\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local --dry-run`,\n\t\tKubectlCmd, original, patchFileName))\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium patch validation failed\")\n\t}\n\n\tres = kub.Apply(ApplyOptions{\n\t\tFilePath: \"-\",\n\t\tForce: true,\n\t\tPiped: fmt.Sprintf(\n\t\t\t`%s patch --filename='%s' --patch \"$(cat '%s')\" --local -o yaml`,\n\t\t\tKubectlCmd, original, patchFileName),\n\t})\n\tif !res.WasSuccessful() {\n\t\tdebugYaml(original, patchFileName)\n\t\treturn res.GetErr(\"Cilium manifest patch installation failed\")\n\t}\n\treturn nil\n}\n\n// Patch patches the given object with the given patch (string).\nfunc (kub *Kubectl) Patch(namespace, objType, objName, patch string) *CmdRes {\n\tginkgoext.By(\"Patching %s %s in namespace %s\", 
objType, objName, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s patch %s %s --patch %q\",\n\t\tKubectlCmd, namespace, objType, objName, patch))\n}\n\nfunc addIfNotOverwritten(options map[string]string, field, value string) map[string]string {\n\tif _, ok := options[field]; !ok {\n\t\toptions[field] = value\n\t}\n\treturn options\n}\n\nfunc (kub *Kubectl) overwriteHelmOptions(options map[string]string) error {\n\tif integration := GetCurrentIntegration(); integration != \"\" {\n\t\toverrides := helmOverrides[integration]\n\t\tfor key, value := range overrides {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\n\t}\n\tfor key, value := range defaultHelmOptions {\n\t\toptions = addIfNotOverwritten(options, key, value)\n\t}\n\n\t// Do not schedule cilium-agent on the NO_CILIUM_ON_NODE node\n\tif node := GetNodeWithoutCilium(); node != \"\" {\n\t\topts := map[string]string{\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"cilium.io/ci-node\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"NotIn\",\n\t\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": node,\n\t\t}\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif !RunsWithKubeProxy() {\n\t\tnodeIP, err := kub.GetNodeIPByLabel(K8s1, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot retrieve Node IP for k8s1: %s\", err)\n\t\t}\n\n\t\topts := map[string]string{\n\t\t\t\"kubeProxyReplacement\": \"strict\",\n\t\t\t\"k8sServiceHost\": nodeIP,\n\t\t\t\"k8sServicePort\": \"6443\",\n\t\t}\n\n\t\tif RunsOnNetNextOr419Kernel() {\n\t\t\topts[\"bpf.masquerade\"] = \"true\"\n\t\t}\n\n\t\tfor key, value := range opts {\n\t\t\toptions = addIfNotOverwritten(options, key, value)\n\t\t}\n\t}\n\n\tif RunsWithHostFirewall() {\n\t\taddIfNotOverwritten(options, \"hostFirewall\", \"true\")\n\t}\n\n\tif !RunsWithKubeProxy() || options[\"hostFirewall\"] == \"true\" {\n\t\t// Set devices\n\t\tprivateIface, err := kub.GetPrivateIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultIface, err := kub.GetDefaultIface()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdevices := fmt.Sprintf(`'{%s,%s}'`, privateIface, defaultIface)\n\t\taddIfNotOverwritten(options, \"devices\", devices)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) generateCiliumYaml(options map[string]string, filename string) error {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO GH-8753: Use helm rendering library instead of shelling out to\n\t// helm template\n\thelmTemplate := kub.GetFilePath(HelmTemplate)\n\tres := kub.HelmTemplate(helmTemplate, CiliumNamespace, filename, options)\n\tif !res.WasSuccessful() {\n\t\t// If the helm template generation is not successful remove the empty\n\t\t// manifest file.\n\t\t_ = os.Remove(filename)\n\t\treturn res.GetErr(\"Unable to generate YAML\")\n\t}\n\n\treturn nil\n}\n\n// GetPrivateIface returns an interface name of a netdev which has InternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPrivateIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have InternalIP\", 
K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\n// GetPublicIface returns an interface name of a netdev which has ExternalIP\n// addr.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetPublicIface() (string, error) {\n\tipAddr, err := kub.GetNodeIPByLabel(K8s1, true)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if ipAddr == \"\" {\n\t\treturn \"\", fmt.Errorf(\"%s does not have ExternalIP\", K8s1)\n\t}\n\n\treturn kub.getIfaceByIPAddr(K8s1, ipAddr)\n}\n\nfunc (kub *Kubectl) waitToDelete(name, label string) error {\n\tvar (\n\t\tpods []string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\n\tstatus := 1\n\tfor status > 0 {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fmt.Errorf(\"timed out waiting to delete %s: pods still remaining: %s\", name, pods)\n\t\tdefault:\n\t\t}\n\n\t\tpods, err = kub.GetPodNamesContext(ctx, CiliumNamespace, label)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstatus = len(pods)\n\t\tkub.Logger().Infof(\"%s pods terminating '%d' err='%v' pods='%v'\", name, status, err, pods)\n\t\tif status == 0 {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\n// GetDefaultIface returns an interface name which is used by a default route.\n// Assumes that all nodes have identical interfaces.\nfunc (kub *Kubectl) GetDefaultIface() (string, error) {\n\tcmd := `ip -o r | grep default | grep -o 'dev [a-zA-Z0-9]*' | cut -d' ' -f2 | head -n1`\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), K8s1, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve default iface: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\nfunc (kub *Kubectl) DeleteCiliumDS() error {\n\t// Do not assert on success in AfterEach intentionally to avoid\n\t// incomplete teardown.\n\tginkgoext.By(\"DeleteCiliumDS(namespace=%q)\", CiliumNamespace)\n\t_ = kub.DeleteResource(\"ds\", fmt.Sprintf(\"-n %s cilium\", CiliumNamespace))\n\treturn kub.waitToDelete(\"Cilium\", CiliumAgentLabel)\n}\n\nfunc (kub *Kubectl) DeleteHubbleRelay(ns string) error {\n\tginkgoext.By(\"DeleteHubbleRelay(namespace=%q)\", ns)\n\t_ = kub.DeleteResource(\"deployment\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\t_ = kub.DeleteResource(\"service\", fmt.Sprintf(\"-n %s hubble-relay\", ns))\n\treturn kub.waitToDelete(\"HubbleRelay\", HubbleRelayLabel)\n}\n\n// CiliumInstall installs Cilium with the provided Helm options.\nfunc (kub *Kubectl) CiliumInstall(filename string, options map[string]string) error {\n\t// If the file does not exist, create it so that the command `kubectl delete -f <filename>`\n\t// does not fail because there is no file.\n\t_ = kub.ExecContextShort(context.TODO(), fmt.Sprintf(\"[[ ! -f %s ]] && echo '---' >> %s\", filename, filename))\n\n\t// First try to remove any existing cilium install. 
This is done by removing resources\n\t// from the file we generate cilium install manifest to.\n\tres := kub.DeleteAndWait(filename, true)\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to delete existing cilium YAML\")\n\t}\n\n\tif err := kub.generateCiliumYaml(options, filename); err != nil {\n\t\treturn err\n\t}\n\n\tres = kub.Apply(ApplyOptions{FilePath: filename, Force: true, Namespace: CiliumNamespace})\n\tif !res.WasSuccessful() {\n\t\treturn res.GetErr(\"Unable to apply YAML\")\n\t}\n\n\treturn nil\n}\n\n// convertOptionsToLegacyOptions maps current helm values to old helm Values\n// TODO: When Cilium 1.10 branch is created, remove this function\nfunc (kub *Kubectl) convertOptionsToLegacyOptions(options map[string]string) map[string]string {\n\n\tresult := make(map[string]string)\n\n\tlegacyMappings := map[string]string{\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator\",\n\t\t\"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\": \"global.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]\",\n\t\t\"bpf.preallocateMaps\": \"global.bpf.preallocateMaps\",\n\t\t\"bpf.masquerade\": \"config.bpfMasquerade\",\n\t\t\"cleanState\": \"global.cleanState\",\n\t\t\"cni.binPath\": \"global.cni.binPath\",\n\t\t\"cni.chainingMode\": \"global.cni.chainingMode\",\n\t\t\"cni.confPath\": \"global.cni.confPath\",\n\t\t\"cni.customConf\": \"global.cni.customConf\",\n\t\t\"daemon.runPath\": \"global.daemon.runPath\",\n\t\t\"debug.enabled\": \"global.debug.enabled\",\n\t\t\"devices\": \"global.devices\", // Override \"eth0 eth0\\neth0\"\n\t\t\"enableCnpStatusUpdates\": \"config.enableCnpStatusUpdates\",\n\t\t\"etcd.leaseTTL\": \"global.etcd.leaseTTL\",\n\t\t\"externalIPs.enabled\": \"global.externalIPs.enabled\",\n\t\t\"flannel.enabled\": \"global.flannel.enabled\",\n\t\t\"gke.enabled\": \"global.gke.enabled\",\n\t\t\"hostFirewall\": \"global.hostFirewall\",\n\t\t\"hostPort.enabled\": \"global.hostPort.enabled\",\n\t\t\"hostServices.enabled\": \"global.hostServices.enabled\",\n\t\t\"hubble.enabled\": \"global.hubble.enabled\",\n\t\t\"hubble.listenAddress\": \"global.hubble.listenAddress\",\n\t\t\"hubble.relay.image.repository\": \"hubble-relay.image.repository\",\n\t\t\"hubble.relay.image.tag\": \"hubble-relay.image.tag\",\n\t\t\"image.tag\": \"global.tag\",\n\t\t\"ipam.mode\": \"config.ipam\",\n\t\t\"ipv4.enabled\": \"global.ipv4.enabled\",\n\t\t\"ipv6.enabled\": \"global.ipv6.enabled\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"global.k8s.requireIPv4PodCIDR\",\n\t\t\"k8sServiceHost\": \"global.k8sServiceHost\",\n\t\t\"k8sServicePort\": \"global.k8sServicePort\",\n\t\t\"kubeProxyReplacement\": \"global.kubeProxyReplacement\",\n\t\t\"logSystemLoad\": \"global.logSystemLoad\",\n\t\t\"masquerade\": \"global.masquerade\",\n\t\t\"nativeRoutingCIDR\": \"global.nativeRoutingCIDR\",\n\t\t\"nodeinit.enabled\": \"global.nodeinit.enabled\",\n\t\t\"nodeinit.reconfigureKubelet\": 
\"global.nodeinit.reconfigureKubelet\",\n\t\t\"nodeinit.removeCbrBridge\": \"global.nodeinit.removeCbrBridge\",\n\t\t\"nodeinit.restartPods\": \"globalnodeinit.restartPods\",\n\t\t\"nodePort.enabled\": \"global.nodePort.enabled\",\n\t\t\"nodePort.mode\": \"global.nodePort.mode\",\n\t\t\"operator.enabled\": \"operator.enabled\",\n\t\t\"pprof.enabled\": \"global.pprof.enabled\",\n\t\t\"sessionAffinity\": \"config.sessionAffinity\",\n\t\t\"sleepAfterInit\": \"agent.sleepAfterInit\",\n\t\t\"tunnel\": \"global.tunnel\",\n\t}\n\n\tfor newKey, v := range options {\n\t\tif oldKey, ok := legacyMappings[newKey]; ok {\n\t\t\tresult[oldKey] = v\n\t\t} else if !ok {\n\t\t\tif newKey == \"image.repository\" {\n\t\t\t\tresult[\"agent.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if newKey == \"operator.image.repository\" {\n\t\t\t\tif options[\"eni\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-aws:\" + options[\"image.tag\"]\n\t\t\t\t} else if options[\"azure.enabled\"] == \"true\" {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-azure:\" + options[\"image.tag\"]\n\t\t\t\t} else {\n\t\t\t\t\tresult[\"operator.image\"] = v + \"-generic:\" + options[\"image.tag\"]\n\t\t\t\t}\n\t\t\t} else if newKey == \"preflight.image.repository\" {\n\t\t\t\tresult[\"preflight.image\"] = v + \":\" + options[\"image.tag\"]\n\t\t\t} else if strings.HasSuffix(newKey, \".tag\") {\n\t\t\t\t// Already handled in the if statement above\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Warningf(\"Skipping option %s\", newKey)\n\t\t\t}\n\t\t}\n\t}\n\tresult[\"ci.kubeCacheMutationDetector\"] = \"true\"\n\treturn result\n}\n\n// RunHelm runs the helm command with the given options.\nfunc (kub *Kubectl) RunHelm(action, repo, helmName, version, namespace string, options map[string]string) (*CmdRes, error) {\n\terr := kub.overwriteHelmOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toptionsString := \"\"\n\n\t//TODO: In 1.10 dev cycle, remove this\n\tif version == \"1.8-dev\" {\n\t\toptions = kub.convertOptionsToLegacyOptions(options)\n\t}\n\n\tfor k, v := range options {\n\t\toptionsString += fmt.Sprintf(\" --set %s=%s \", k, v)\n\t}\n\n\treturn kub.ExecMiddle(fmt.Sprintf(\"helm %s %s %s \"+\n\t\t\"--version=%s \"+\n\t\t\"--namespace=%s \"+\n\t\t\"%s\", action, helmName, repo, version, namespace, optionsString)), nil\n}\n\n// GetCiliumPods returns a list of all Cilium pods in the specified namespace,\n// and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPods() ([]string, error) {\n\treturn kub.GetPodNames(CiliumNamespace, \"k8s-app=cilium\")\n}\n\n// GetCiliumPodsContext returns a list of all Cilium pods in the specified\n// namespace, and an error if the Cilium pods were not able to be retrieved.\nfunc (kub *Kubectl) GetCiliumPodsContext(ctx context.Context, namespace string) ([]string, error) {\n\treturn kub.GetPodNamesContext(ctx, namespace, \"k8s-app=cilium\")\n}\n\n// CiliumEndpointsList returns the result of `cilium endpoint list` from the\n// specified pod.\nfunc (kub *Kubectl) CiliumEndpointsList(ctx context.Context, pod string) *CmdRes {\n\treturn kub.CiliumExecContext(ctx, pod, \"cilium endpoint list -o json\")\n}\n\n// CiliumEndpointsStatus returns a mapping of a pod name to it is corresponding\n// endpoint's status\nfunc (kub *Kubectl) CiliumEndpointsStatus(pod string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.state}{\"\\n\"}{end}`\n\tctx, cancel := 
context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint list -o jsonpath='%s'\", filter)).KVOutput()\n}\n\n// CiliumEndpointIPv6 returns the IPv6 address of each endpoint which matches\n// the given endpoint selector.\nfunc (kub *Kubectl) CiliumEndpointIPv6(pod string, endpoint string) map[string]string {\n\tfilter := `{range [*]}{@.status.external-identifiers.pod-name}{\"=\"}{@.status.networking.addressing[*].ipv6}{\"\\n\"}{end}`\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\n\t\t\"cilium endpoint get %s -o jsonpath='%s'\", endpoint, filter)).KVOutput()\n}\n\n// CiliumEndpointWaitReady waits until all endpoints managed by all Cilium pod\n// are ready. Returns an error if the Cilium pods cannot be retrieved via\n// Kubernetes, or endpoints are not ready after a specified timeout\nfunc (kub *Kubectl) CiliumEndpointWaitReady() error {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot get Cilium pods\")\n\t\treturn err\n\t}\n\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tvar wg sync.WaitGroup\n\t\tqueue := make(chan bool, len(ciliumPods))\n\t\tendpointsReady := func(pod string) {\n\t\t\tvalid := false\n\t\t\tdefer func() {\n\t\t\t\tqueue <- valid\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tlogCtx := kub.Logger().WithField(\"pod\", pod)\n\t\t\tstatus, err := kub.CiliumEndpointsList(ctx, pod).Filter(`{range [*]}{.status.state}{\"=\"}{.status.identity.id}{\"\\n\"}{end}`)\n\t\t\tif err != nil {\n\t\t\t\tlogCtx.WithError(err).Errorf(\"cannot get endpoints states on Cilium pod\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttotal := 0\n\t\t\tinvalid := 0\n\t\t\tfor _, line := range strings.Split(status.String(), \"\\n\") {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// each line is like status=identityID.\n\t\t\t\t// IdentityID is needed because the reserved:init identity\n\t\t\t\t// means that the pod is not ready to accept traffic.\n\t\t\t\ttotal++\n\t\t\t\tvals := strings.Split(line, \"=\")\n\t\t\t\tif len(vals) != 2 {\n\t\t\t\t\tlogCtx.Errorf(\"Endpoint list does not have a correct output '%s'\", line)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif vals[0] != \"ready\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t\t// Consider an endpoint with reserved identity 5 (reserved:init) as not ready.\n\t\t\t\tif vals[1] == \"5\" {\n\t\t\t\t\tinvalid++\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogCtx.WithFields(logrus.Fields{\n\t\t\t\t\"total\": total,\n\t\t\t\t\"invalid\": invalid,\n\t\t\t}).Info(\"Waiting for cilium endpoints to be ready\")\n\n\t\t\tif invalid != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalid = true\n\t\t}\n\t\twg.Add(len(ciliumPods))\n\t\tfor _, pod := range ciliumPods {\n\t\t\tgo endpointsReady(pod)\n\t\t}\n\n\t\twg.Wait()\n\t\tclose(queue)\n\n\t\tfor status := range queue {\n\t\t\tif status == false {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\terr = WithContext(ctx, body, 1*time.Second)\n\tif err == nil {\n\t\treturn err\n\t}\n\n\tcallback := func() string {\n\t\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\t\tdefer cancel()\n\n\t\tvar errorMessage string\n\t\tfor _, pod := range ciliumPods {\n\t\t\tvar endpoints []models.Endpoint\n\t\t\tcmdRes := kub.CiliumEndpointsList(ctx, 
pod)\n\t\t\tif !cmdRes.WasSuccessful() {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to get endpoint list: %s\",\n\t\t\t\t\tpod, cmdRes.err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := cmdRes.Unmarshal(&endpoints)\n\t\t\tif err != nil {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\terror: unable to parse endpoint list: %s\",\n\t\t\t\t\tpod, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ep := range endpoints {\n\t\t\t\terrorMessage += fmt.Sprintf(\n\t\t\t\t\t\"\\tCilium Pod: %s \\tEndpoint: %d \\tIdentity: %d\\t State: %s\\n\",\n\t\t\t\t\tpod, ep.ID, ep.Status.Identity.ID, ep.Status.State)\n\t\t\t}\n\t\t}\n\t\treturn errorMessage\n\t}\n\treturn NewSSHMetaError(err.Error(), callback)\n}\n\n// WaitForCEPIdentity waits for a particular CEP to have an identity present.\nfunc (kub *Kubectl) WaitForCEPIdentity(ns, podName string) error {\n\tbody := func(ctx context.Context) (bool, error) {\n\t\tep, err := kub.GetCiliumEndpoint(ns, podName)\n\t\tif err != nil || ep == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif ep.Identity == nil {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn ep.Identity.ID != 0, nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), HelperTimeout)\n\tdefer cancel()\n\treturn WithContext(ctx, body, 1*time.Second)\n}\n\n// CiliumExecContext runs cmd in the specified Cilium pod with the given context.\nfunc (kub *Kubectl) CiliumExecContext(ctx context.Context, pod string, cmd string) *CmdRes {\n\tlimitTimes := 5\n\texecute := func() *CmdRes {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, CiliumNamespace, pod, cmd)\n\t\treturn kub.ExecContext(ctx, command)\n\t}\n\tvar res *CmdRes\n\t// Sometimes Kubectl returns 126 exit code, It use to happen in Nightly\n\t// tests when a lot of exec are in place (Cgroups issue). 
The upstream\n\t// changes did not fix the isse, and we need to make this workaround to\n\t// avoid Kubectl issue.\n\t// https://github.com/openshift/origin/issues/16246\n\tfor i := 0; i < limitTimes; i++ {\n\t\tres = execute()\n\t\tif res.GetExitCode() != 126 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\treturn res\n}\n\n// CiliumExecMustSucceed runs cmd in the specified Cilium pod.\n// it causes a test failure if the command was not successful.\nfunc (kub *Kubectl) CiliumExecMustSucceed(ctx context.Context, pod, cmd string, optionalDescription ...interface{}) *CmdRes {\n\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\tif !res.WasSuccessful() {\n\t\tres.SendToLog(false)\n\t}\n\tgomega.ExpectWithOffset(1, res).Should(\n\t\tCMDSuccess(), optionalDescription...)\n\treturn res\n}\n\n// CiliumExecUntilMatch executes the specified command repeatedly for the\n// specified Cilium pod until the given substring is present in stdout.\n// If the timeout is reached it will return an error.\nfunc (kub *Kubectl) CiliumExecUntilMatch(pod, cmd, substr string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, cmd)\n\t\treturn strings.Contains(res.Stdout(), substr)\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"%s is not in the output after timeout\", substr),\n\t\t&TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// WaitForCiliumInitContainerToFinish waits for all Cilium init containers to\n// finish\nfunc (kub *Kubectl) WaitForCiliumInitContainerToFinish() error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(CiliumNamespace, \"-l k8s-app=cilium\").Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\t\tfor _, pod := range podList.Items {\n\t\t\tfor _, v := range pod.Status.InitContainerStatuses {\n\t\t\t\tif v.State.Terminated != nil && (v.State.Terminated.Reason != \"Completed\" || v.State.Terminated.ExitCode != 0) {\n\t\t\t\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\t\t\t\"podName\": pod.Name,\n\t\t\t\t\t\t\"currentState\": v.State.String(),\n\t\t\t\t\t}).Infof(\"Cilium Init container not completed\")\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\treturn WithTimeout(body, \"Cilium Init Container was not able to initialize or had a successful run\", &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumNodesWait waits until all nodes in the Kubernetes cluster are annotated\n// with Cilium annotations. Its runtime is bounded by a maximum of `HelperTimeout`.\n// When a node is annotated with said annotations, it indicates\n// that the tunnels in the nodes are set up and that cross-node traffic can be\n// tested. 
Returns an error if the timeout is exceeded for waiting for the nodes\n// to be annotated.\nfunc (kub *Kubectl) CiliumNodesWait() (bool, error) {\n\tbody := func() bool {\n\t\tfilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.metadata.annotations.io\\.cilium\\.network\\.ipv4-pod-cidr}{\"\\n\"}{end}`\n\t\tdata := kub.ExecShort(fmt.Sprintf(\n\t\t\t\"%s get nodes -o jsonpath='%s'\", KubectlCmd, filter))\n\t\tif !data.WasSuccessful() {\n\t\t\treturn false\n\t\t}\n\t\tresult := data.KVOutput()\n\t\tignoreNode := GetNodeWithoutCilium()\n\t\tfor k, v := range result {\n\t\t\tif k == ignoreNode {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == \"\" {\n\t\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' does not have Cilium metadata\", k)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tkub.Logger().Infof(\"Kubernetes node '%v' IPv4 address: '%v'\", k, v)\n\t\t}\n\t\treturn true\n\t}\n\terr := WithTimeout(body, \"Kubernetes node does not have cilium metadata\", &TimeoutConfig{Timeout: HelperTimeout})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// LoadedPolicyInFirstAgent returns the policy as loaded in the first cilium\n// agent that is found in the cluster\nfunc (kub *Kubectl) LoadedPolicyInFirstAgent() (string, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve cilium pods: %s\", err)\n\t}\n\tfor _, pod := range pods {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get\")\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot execute cilium policy get: %s\", res.Stdout())\n\t\t} else {\n\t\t\treturn res.CombineOutput().String(), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no running cilium pods\")\n}\n\n// WaitPolicyDeleted waits for policy policyName to be deleted from the\n// cilium-agent running in pod. Returns an error if policyName was unable to\n// be deleted after some amount of time.\nfunc (kub *Kubectl) WaitPolicyDeleted(pod string, policyName string) error {\n\tbody := func() bool {\n\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\tdefer cancel()\n\t\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyName))\n\n\t\t// `cilium policy get <policy name>` fails if the policy is not loaded,\n\t\t// which is the condition we want.\n\t\treturn !res.WasSuccessful()\n\t}\n\n\treturn WithTimeout(body, fmt.Sprintf(\"Policy %s was not deleted in time\", policyName), &TimeoutConfig{Timeout: HelperTimeout})\n}\n\n// CiliumIsPolicyLoaded returns true if the policy is loaded in the given\n// cilium Pod. 
it returns false in case that the policy is not in place\nfunc (kub *Kubectl) CiliumIsPolicyLoaded(pod string, policyCmd string) bool {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, fmt.Sprintf(\"cilium policy get %s\", policyCmd))\n\treturn res.WasSuccessful()\n}\n\n// CiliumPolicyRevision returns the policy revision in the specified Cilium pod.\n// Returns an error if the policy revision cannot be retrieved.\nfunc (kub *Kubectl) CiliumPolicyRevision(pod string) (int, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\tres := kub.CiliumExecContext(ctx, pod, \"cilium policy get -o json\")\n\tif !res.WasSuccessful() {\n\t\treturn -1, fmt.Errorf(\"cannot get the revision %s\", res.Stdout())\n\t}\n\n\trevision, err := res.Filter(\"{.revision}\")\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"cannot get revision from json: %s\", err)\n\t}\n\n\trevi, err := strconv.Atoi(strings.Trim(revision.String(), \"\\n\"))\n\tif err != nil {\n\t\tkub.Logger().Errorf(\"revision on pod '%s' is not valid '%s'\", pod, res.CombineOutput())\n\t\treturn -1, err\n\t}\n\treturn revi, nil\n}\n\n// ResourceLifeCycleAction represents an action performed upon objects in\n// Kubernetes.\ntype ResourceLifeCycleAction string\n\nfunc (kub *Kubectl) getPodRevisions() (map[string]int, error) {\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods\")\n\t\treturn nil, fmt.Errorf(\"Cannot get cilium pods: %s\", err)\n\t}\n\n\trevisions := make(map[string]int)\n\tfor _, pod := range pods {\n\t\trevision, err := kub.CiliumPolicyRevision(pod)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pod policy revision\")\n\t\t\treturn nil, fmt.Errorf(\"Cannot retrieve cilium pod %s policy revision: %s\", pod, err)\n\t\t}\n\t\trevisions[pod] = revision\n\t}\n\treturn revisions, nil\n}\n\nfunc (kub *Kubectl) waitNextPolicyRevisions(podRevisions map[string]int, mustHavePolicy bool, timeout time.Duration) error {\n\tnpFilter := fmt.Sprintf(\n\t\t`{range .items[*]}{\"%s=\"}{.metadata.name}{\" %s=\"}{.metadata.namespace}{\"\\n\"}{end}`,\n\t\tKubectlPolicyNameLabel, KubectlPolicyNameSpaceLabel)\n\n\tknpBody := func() bool {\n\t\tknp := kub.ExecShort(fmt.Sprintf(\"%s get --all-namespaces netpol -o jsonpath='%s'\",\n\t\t\tKubectlCmd, npFilter))\n\t\tresult := knp.ByLines()\n\t\tif len(result) == 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, item := range result {\n\t\t\tfor ciliumPod, revision := range podRevisions {\n\t\t\t\tif mustHavePolicy {\n\t\t\t\t\tif !kub.CiliumIsPolicyLoaded(ciliumPod, item) {\n\t\t\t\t\t\tkub.Logger().Infof(\"Policy '%s' is not ready on Cilium pod '%s'\", item, ciliumPod)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tdesiredRevision := revision + 1\n\t\t\t\tres := kub.CiliumExecContext(ctx, ciliumPod, fmt.Sprintf(\"cilium policy wait %d --max-wait-time %d\", desiredRevision, int(ShortCommandTimeout.Seconds())))\n\t\t\t\tif res.GetExitCode() != 0 {\n\t\t\t\t\tkub.Logger().Infof(\"Failed to wait for policy revision %d on pod %s\", desiredRevision, ciliumPod)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\terr := WithTimeout(\n\t\tknpBody,\n\t\t\"Timed out while waiting for CNP to be applied on all 
PODs\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\treturn err\n}\n\nfunc getPolicyEnforcingJqFilter(numNodes int) string {\n\t// Test filter: https://jqplay.org/s/EgNzc06Cgn\n\treturn fmt.Sprintf(\n\t\t`[.items[]|{name:.metadata.name, enforcing: (.status|if has(\"nodes\") then .nodes |to_entries|map_values(.value.enforcing) + [(.|length >= %d)]|all else true end)|tostring, status: has(\"status\")|tostring}]`,\n\t\tnumNodes)\n}\n\n// CiliumPolicyAction performs the specified action in Kubernetes for the policy\n// stored in path filepath and waits up until timeout seconds for the policy\n// to be applied in all Cilium endpoints. Returns an error if the policy is not\n// imported before the timeout is\n// exceeded.\nfunc (kub *Kubectl) CiliumPolicyAction(namespace, filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath, namespace); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tcmds := map[string]string{\n\t\t\t\t\"CNP\": fmt.Sprintf(\"%s get cnp --all-namespaces -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t\t\"CCNP\": fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\", KubectlCmd, jqFilter),\n\t\t\t}\n\n\t\t\tfor ctx, cmd := range cmds {\n\t\t\t\tvar data []map[string]string\n\n\t\t\t\tres := kub.ExecShort(cmd)\n\t\t\t\tif !res.WasSuccessful() {\n\t\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Errorf(\"cannot get %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\terr := res.Unmarshal(&data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot unmarshal json for %s status\", ctx)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tfor _, item := range data {\n\t\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\t\tkub.Logger().Errorf(\"%s policy '%s' is not enforcing yet\", ctx, item[\"name\"])\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\t}\n\n\t\terr = WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting for policies to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumClusterwidePolicyAction applies a clusterwide policy action as described in action argument. 
It\n// then wait till timeout Duration for the policy to be applied to all the cilium endpoints.\nfunc (kub *Kubectl) CiliumClusterwidePolicyAction(filepath string, action ResourceLifeCycleAction, timeout time.Duration) (string, error) {\n\tpodRevisions, err := kub.getPodRevisions()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnumNodes := len(podRevisions)\n\n\tkub.Logger().Infof(\"Performing %s action on resource '%s'\", action, filepath)\n\n\tif status := kub.Action(action, filepath); !status.WasSuccessful() {\n\t\treturn \"\", status.GetErr(fmt.Sprintf(\"Cannot perform '%s' on resource '%s'\", action, filepath))\n\t}\n\n\t// If policy is uninstalled we can't require a policy being enforced.\n\tif action != KubectlDelete {\n\t\tjqFilter := getPolicyEnforcingJqFilter(numNodes)\n\t\tbody := func() bool {\n\t\t\tvar data []map[string]string\n\t\t\tcmd := fmt.Sprintf(\"%s get ccnp -o json | jq '%s'\",\n\t\t\t\tKubectlCmd, jqFilter)\n\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tkub.Logger().WithError(res.GetErr(\"\")).Error(\"cannot get ccnp status\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\terr := res.Unmarshal(&data)\n\t\t\tif err != nil {\n\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot unmarshal json\")\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tfor _, item := range data {\n\t\t\t\tif item[\"enforcing\"] != \"true\" || item[\"status\"] != \"true\" {\n\t\t\t\t\tkub.Logger().Errorf(\"Clusterwide policy '%s' is not enforcing yet\", item[\"name\"])\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\terr := WithTimeout(\n\t\t\tbody,\n\t\t\t\"Timed out while waiting CCNP to be enforced\",\n\t\t\t&TimeoutConfig{Timeout: timeout})\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn \"\", kub.waitNextPolicyRevisions(podRevisions, action != KubectlDelete, timeout)\n}\n\n// CiliumReport report the cilium pod to the log and appends the logs for the\n// given commands.\nfunc (kub *Kubectl) CiliumReport(commands ...string) {\n\tif config.CiliumTestConfig.SkipLogGathering {\n\t\tginkgoext.GinkgoPrint(\"Skipped gathering logs (-cilium.skipLogs=true)\\n\")\n\t\treturn\n\t}\n\n\t// Log gathering for Cilium should take at most 10 minutes. 
This ensures that\n\t// the CiliumReport stage doesn't cause the entire CI to hang.\n\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)\n\tdefer cancel()\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.GatherLogs(ctx)\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tkub.DumpCiliumCommandOutput(ctx, CiliumNamespace)\n\t}()\n\n\tkub.CiliumCheckReport(ctx)\n\n\tpods, err := kub.GetCiliumPodsContext(ctx, CiliumNamespace)\n\tif err != nil {\n\t\tkub.Logger().WithError(err).Error(\"cannot retrieve cilium pods on ReportDump\")\n\t}\n\tres := kub.ExecContextShort(ctx, fmt.Sprintf(\"%s get pods -o wide --all-namespaces\", KubectlCmd))\n\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\n\tresults := make([]*CmdRes, 0, len(pods)*len(commands))\n\tginkgoext.GinkgoPrint(\"Fetching command output from pods %s\", pods)\n\tfor _, pod := range pods {\n\t\tfor _, cmd := range commands {\n\t\t\tres = kub.ExecPodCmdBackground(ctx, CiliumNamespace, pod, cmd, ExecOptions{SkipLog: true})\n\t\t\tresults = append(results, res)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tfor _, res := range results {\n\t\tres.WaitUntilFinish()\n\t\tginkgoext.GinkgoPrint(res.GetDebugMessage())\n\t}\n}\n\n// CiliumCheckReport prints a few checks on the Junit output to provide more\n// context to users. The list of checks that prints are the following:\n// - Number of Kubernetes and Cilium policies installed.\n// - Policy enforcement status by endpoint.\n// - Controller, health, kvstore status.\nfunc (kub *Kubectl) CiliumCheckReport(ctx context.Context) {\n\tpods, _ := kub.GetCiliumPods()\n\tfmt.Fprintf(CheckLogs, \"Cilium pods: %v\\n\", pods)\n\n\tvar policiesFilter = `{range .items[*]}{.metadata.namespace}{\"::\"}{.metadata.name}{\" \"}{end}`\n\tnetpols := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get netpol -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"Netpols loaded: %v\\n\", netpols.GetStdOut())\n\n\tcnp := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cnp -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, policiesFilter))\n\tfmt.Fprintf(CheckLogs, \"CiliumNetworkPolicies loaded: %v\\n\", cnp.GetStdOut())\n\n\tcepFilter := `{range .items[*]}{.metadata.name}{\"=\"}{.status.policy.ingress.enforcing}{\":\"}{.status.policy.egress.enforcing}{\"\\n\"}{end}`\n\tcepStatus := kub.ExecContextShort(ctx, fmt.Sprintf(\n\t\t\"%s get cep -o jsonpath='%s' --all-namespaces\",\n\t\tKubectlCmd, cepFilter))\n\n\tfmt.Fprintf(CheckLogs, \"Endpoint Policy Enforcement:\\n\")\n\n\ttable := tabwriter.NewWriter(CheckLogs, 5, 0, 3, ' ', 0)\n\tfmt.Fprintf(table, \"Pod\\tIngress\\tEgress\\n\")\n\tfor pod, policy := range cepStatus.KVOutput() {\n\t\tdata := strings.SplitN(policy, \":\", 2)\n\t\tif len(data) != 2 {\n\t\t\tdata[0] = \"invalid value\"\n\t\t\tdata[1] = \"invalid value\"\n\t\t}\n\t\tfmt.Fprintf(table, \"%s\\t%s\\t%s\\n\", pod, data[0], data[1])\n\t}\n\ttable.Flush()\n\n\tvar controllersFilter = `{range .controllers[*]}{.name}{\"=\"}{.status.consecutive-failure-count}::{.status.last-failure-msg}{\"\\n\"}{end}`\n\tvar failedControllers string\n\tfor _, pod := range pods {\n\t\tvar prefix = \"\"\n\t\tstatus := kub.CiliumExecContext(ctx, pod, \"cilium status --all-controllers -o json\")\n\t\tresult, err := status.Filter(controllersFilter)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err).Error(\"Cannot filter controller status output\")\n\t\t\tcontinue\n\t\t}\n\t\tvar total = 0\n\t\tvar failed = 0\n\t\tfor name, data := range 
result.KVOutput() {\n\t\t\ttotal++\n\t\t\tstatus := strings.SplitN(data, \"::\", 2)\n\t\t\tif len(status) != 2 {\n\t\t\t\t// Just make sure that the the len of the output is 2 to not\n\t\t\t\t// fail on index error in the following lines.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif status[0] != \"\" {\n\t\t\t\tfailed++\n\t\t\t\tprefix = \"⚠️ \"\n\t\t\t\tfailedControllers += fmt.Sprintf(\"controller %s failure '%s'\\n\", name, status[1])\n\t\t\t}\n\t\t}\n\t\tstatusFilter := `Status: {.cilium.state} Health: {.cluster.ciliumHealth.state}` +\n\t\t\t` Nodes \"{.cluster.nodes[*].name}\" ContinerRuntime: {.container-runtime.state}` +\n\t\t\t` Kubernetes: {.kubernetes.state} KVstore: {.kvstore.state}`\n\t\tdata, _ := status.Filter(statusFilter)\n\t\tfmt.Fprintf(CheckLogs, \"%sCilium agent '%s': %s Controllers: Total %d Failed %d\\n\",\n\t\t\tprefix, pod, data, total, failed)\n\t\tif failedControllers != \"\" {\n\t\t\tfmt.Fprintf(CheckLogs, \"Failed controllers:\\n %s\", failedControllers)\n\t\t}\n\t}\n}\n\n// ValidateNoErrorsInLogs checks that cilium logs since the given duration (By\n// default `CurrentGinkgoTestDescription().Duration`) do not contain any of the\n// known-bad messages (e.g., `deadlocks` or `segmentation faults`). In case of\n// any of these messages, it'll mark the test as failed.\nfunc (kub *Kubectl) ValidateNoErrorsInLogs(duration time.Duration) {\n\tblacklist := GetBadLogMessages()\n\tkub.ValidateListOfErrorsInLogs(duration, blacklist)\n}\n\n// ValidateListOfErrorsInLogs is similar to ValidateNoErrorsInLogs, but\n// takes a blacklist of bad log messages instead of using the default list.\nfunc (kub *Kubectl) ValidateListOfErrorsInLogs(duration time.Duration, blacklist map[string][]string) {\n\tif kub == nil {\n\t\t// if `kub` is nil, this is run after the test failed while setting up `kub` and we are unable to gather logs\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tapps := map[string]string{\n\t\t\"k8s-app=cilium\": CiliumTestLog,\n\t\t\"k8s-app=hubble-relay\": HubbleRelayTestLog,\n\t\t\"io.cilium/app=operator\": CiliumOperatorTestLog,\n\t}\n\n\twg := sync.WaitGroup{}\n\twg.Add(len(apps))\n\tfor app, file := range apps {\n\t\tgo func(app, file string) {\n\t\t\tvar logs string\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s logs --tail=-1 --timestamps=true -l %s --since=%vs\",\n\t\t\t\tKubectlCmd, CiliumNamespace, app, duration.Seconds())\n\t\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s --previous\", cmd), ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tres = kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\t\tif res.WasSuccessful() {\n\t\t\t\tlogs += res.Stdout()\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// Keep the cilium logs for the given test in a separate file.\n\t\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Error(\"Cannot create report directory\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = ioutil.WriteFile(\n\t\t\t\t\tfmt.Sprintf(\"%s/%s\", testPath, file),\n\t\t\t\t\t[]byte(logs), LogPerm)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tkub.Logger().WithError(err).Errorf(\"Cannot create %s\", CiliumTestLog)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfailIfContainsBadLogMsg(logs, app, blacklist)\n\n\t\t\tfmt.Fprint(CheckLogs, logutils.LogErrorsSummary(logs))\n\t\t}(app, file)\n\t}\n\n\twg.Wait()\n}\n\n// GatherCiliumCoreDumps copies core dumps if are present in the /tmp folder\n// 
into the test report folder for further analysis.\nfunc (kub *Kubectl) GatherCiliumCoreDumps(ctx context.Context, ciliumPod string) {\n\tlog := kub.Logger().WithField(\"pod\", ciliumPod)\n\n\tcores := kub.CiliumExecContext(ctx, ciliumPod, \"ls /tmp/ | grep core\")\n\tif !cores.WasSuccessful() {\n\t\tlog.Debug(\"There is no core dumps in the pod\")\n\t\treturn\n\t}\n\n\ttestPath, err := CreateReportDirectory()\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\treturn\n\t}\n\tresultPath := filepath.Join(kub.BasePath(), testPath)\n\n\tfor _, core := range cores.ByLines() {\n\t\tdst := filepath.Join(resultPath, core)\n\t\tsrc := filepath.Join(\"/tmp/\", core)\n\t\tcmd := fmt.Sprintf(\"%s -n %s cp %s:%s %s\",\n\t\t\tKubectlCmd, CiliumNamespace,\n\t\t\tciliumPod, src, dst)\n\t\tres := kub.ExecContext(ctx, cmd, ExecOptions{SkipLog: true})\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.WithField(\"output\", res.CombineOutput()).Error(\"Cannot get core from pod\")\n\t\t}\n\t}\n}\n\n// ExecInFirstPod runs given command in one pod that matches given selector and namespace\n// An error is returned if no pods can be found\nfunc (kub *Kubectl) ExecInFirstPod(ctx context.Context, namespace, selector, cmd string, options ...ExecOptions) *CmdRes {\n\tnames, err := kub.GetPodNamesContext(ctx, namespace, selector)\n\tif err != nil {\n\t\treturn &CmdRes{err: err}\n\t}\n\tif len(names) == 0 {\n\t\treturn &CmdRes{err: fmt.Errorf(\"Cannot find pods matching %s to execute %s\", selector, cmd)}\n\t}\n\n\tname := names[0]\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, name, cmd)\n\treturn kub.ExecContext(ctx, command)\n}\n\n// ExecInPods runs given command on all pods in given namespace that match selector and returns map pod-name->CmdRes\nfunc (kub *Kubectl) ExecInPods(ctx context.Context, namespace, selector, cmd string, options ...ExecOptions) (results map[string]*CmdRes, err error) {\n\tnames, err := kub.GetPodNamesContext(ctx, namespace, selector)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults = make(map[string]*CmdRes)\n\tfor _, name := range names {\n\t\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, name, cmd)\n\t\tresults[name] = kub.ExecContext(ctx, command)\n\t}\n\n\treturn results, nil\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 1947,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": {
"$OBJ": {
"start": {
"line": 1941,
"col": 10,
"offset": 68955
},
"end": {
"line": 1941,
"col": 13,
"offset": 68958
},
"abstract_content": "kub",
"unique_id": {
"type": "AST",
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
}
},
"$FXN": {
"start": {
"line": 1457,
"col": 9,
"offset": 52579
},
"end": {
"line": 1457,
"col": 20,
"offset": 52590
},
"abstract_content": "fmt.Sprintf",
"unique_id": {
"type": "AST",
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
}
},
"$OTHER": {
"start": {
"line": 1457,
"col": 2,
"offset": 52572
},
"end": {
"line": 1457,
"col": 5,
"offset": 52575
},
"abstract_content": "cmd",
"unique_id": {
"type": "AST",
"md5sum": "9c5f8e71f4c15ad3de1edd0dc264f25a"
}
},
"$QUERY": {
"start": {
"line": 49,
"col": 2,
"offset": 1261
},
"end": {
"line": 49,
"col": 12,
"offset": 1271
},
"abstract_content": "KubectlCmd",
"unique_id": {
"type": "id",
"value": "KubectlCmd",
"kind": "Global",
"sid": 16
}
}
},
"metadata": {
"owasp": "A1: Injection",
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
"source-rule-url": "https://github.com/securego/gosec"
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}\n\n// ExecPodCmd executes command cmd in the specified pod residing in the specified\n// namespace. 
It returns a pointer to CmdRes with all the output\nfunc (kub *Kubectl) ExecPodCmd(namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodContainerCmd executes command cmd in the specified container residing\n// in the specified namespace and pod. It returns a pointer to CmdRes with all\n// the output\nfunc (kub *Kubectl) ExecPodContainerCmd(namespace, pod, container, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -c %s -- %s\", KubectlCmd, namespace, pod, container, cmd)\n\treturn kub.Exec(command, options...)\n}\n\n// ExecPodCmdContext synchronously executes command cmd in the specified pod residing in the\n// specified namespace. It returns a pointer to CmdRes with all the output.\nfunc (kub *Kubectl) ExecPodCmdContext(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecContext(ctx, command, options...)\n}\n\n// ExecPodCmdBackground executes command cmd in background in the specified pod residing\n// in the specified namespace. It returns a pointer to CmdRes with all the\n// output\n//\n// To receive the output of this function, the caller must invoke either\n// kub.WaitUntilFinish() or kub.WaitUntilMatch() then subsequently fetch the\n// output out of the result.\nfunc (kub *Kubectl) ExecPodCmdBackground(ctx context.Context, namespace string, pod string, cmd string, options ...ExecOptions) *CmdRes {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, cmd)\n\treturn kub.ExecInBackground(ctx, command, options...)\n}\n\n// Get retrieves the provided Kubernetes objects from the specified namespace.\nfunc (kub *Kubectl) Get(namespace string, command string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s -n %s get %s -o json\", KubectlCmd, namespace, command))\n}\n\n// GetFromAllNS retrieves provided Kubernetes objects from all namespaces\nfunc (kub *Kubectl) GetFromAllNS(kind string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%s get %s --all-namespaces -o json\", KubectlCmd, kind))\n}\n\n// GetCNP retrieves the output of `kubectl get cnp` in the given namespace for\n// the given CNP and return a CNP struct. 
If the CNP does not exists or cannot\n// unmarshal the Json output will return nil.\nfunc (kub *Kubectl) GetCNP(namespace string, cnp string) *cnpv2.CiliumNetworkPolicy {\n\tlog := kub.Logger().WithFields(logrus.Fields{\n\t\t\"fn\": \"GetCNP\",\n\t\t\"cnp\": cnp,\n\t\t\"ns\": namespace,\n\t})\n\tres := kub.Get(namespace, fmt.Sprintf(\"cnp %s\", cnp))\n\tif !res.WasSuccessful() {\n\t\tlog.WithField(\"error\", res.CombineOutput()).Info(\"cannot get CNP\")\n\t\treturn nil\n\t}\n\tvar result cnpv2.CiliumNetworkPolicy\n\terr := res.Unmarshal(&result)\n\tif err != nil {\n\t\tlog.WithError(err).Errorf(\"cannot unmarshal CNP output\")\n\t\treturn nil\n\t}\n\treturn &result\n}\n\nfunc (kub *Kubectl) WaitForCRDCount(filter string, count int, timeout time.Duration) error {\n\t// Set regexp flag m for multi-line matching, then add the\n\t// matches for beginning and end of a line, so that we count\n\t// at most one match per line (like \"grep <filter> | wc -l\")\n\tregex := regexp.MustCompile(\"(?m:^.*(?:\" + filter + \").*$)\")\n\tbody := func() bool {\n\t\tres := kub.ExecShort(fmt.Sprintf(\"%s get crds\", KubectlCmd))\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Error(res.GetErr(\"kubectl get crds failed\"))\n\t\t\treturn false\n\t\t}\n\t\treturn len(regex.FindAllString(res.Stdout(), -1)) == count\n\t}\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for %d CRDs matching filter \\\"%s\\\" to be ready\", count, filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// GetPods gets all of the pods in the given namespace that match the provided\n// filter.\nfunc (kub *Kubectl) GetPods(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetPodsNodes returns a map with pod name as a key and node name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsNodes(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.spec.nodeName}{\"\\n\"}{end}`\n\tres := kub.Exec(fmt.Sprintf(\"%s -n %s get pods %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodOnNodeLabeledWithOffset retrieves name and ip of a pod matching filter and residing on a node with label cilium.io/ci-node=<label>\nfunc (kub *Kubectl) GetPodOnNodeLabeledWithOffset(label string, podFilter string, callOffset int) (string, string) {\n\tcallOffset++\n\n\tnodeName, err := kub.GetNodeNameByLabel(label)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil())\n\tgomega.ExpectWithOffset(callOffset, nodeName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve node name with label cilium.io/ci-node=%s\", label)\n\n\tvar podName string\n\n\tpodsNodes, err := kub.GetPodsNodes(DefaultNamespace, fmt.Sprintf(\"-l %s\", podFilter))\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods nodes with filter %q\", podFilter)\n\tgomega.Expect(podsNodes).ShouldNot(gomega.BeEmpty(), \"No pod found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tfor pod, node := range podsNodes {\n\t\tif node == nodeName {\n\t\t\tpodName = pod\n\t\t\tbreak\n\t\t}\n\t}\n\tgomega.ExpectWithOffset(callOffset, podName).ShouldNot(gomega.BeEmpty(), \"Cannot retrieve pod on node %s with filter %q\", nodeName, podFilter)\n\tpodsIPs, err := kub.GetPodsIPs(DefaultNamespace, podFilter)\n\tgomega.ExpectWithOffset(callOffset, err).Should(gomega.BeNil(), \"Cannot retrieve pods IPs with filter %q\", podFilter)\n\tgomega.Expect(podsIPs).ShouldNot(gomega.BeEmpty(), \"No pod IP found in namespace %s with filter %q\", DefaultNamespace, podFilter)\n\tpodIP := podsIPs[podName]\n\treturn podName, podIP\n}\n\n// GetSvcIP returns the cluster IP for the given service. If the service\n// does not contain a cluster IP, the function keeps retrying until it has or\n// the context timesout.\nfunc (kub *Kubectl) GetSvcIP(ctx context.Context, namespace, name string) (string, error) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tjsonFilter := `{.spec.clusterIP}`\n\t\tres := kub.ExecContext(ctx, fmt.Sprintf(\"%s -n %s get svc %s -o jsonpath='%s'\",\n\t\t\tKubectlCmd, namespace, name, jsonFilter))\n\t\tif !res.WasSuccessful() {\n\t\t\treturn \"\", fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t\t}\n\t\tclusterIP := res.CombineOutput().String()\n\t\tif clusterIP != \"\" {\n\t\t\treturn clusterIP, nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}\n\n// GetPodsIPs returns a map with pod name as a key and pod IP name as value. It\n// only gets pods in the given namespace that match the provided filter. 
It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsIPs(namespace string, filter string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.podIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, filter, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetPodsHostIPs returns a map with pod name as a key and host IP name as value. It\n// only gets pods in the given namespace that match the provided filter. It\n// returns an error if pods cannot be retrieved correctly\nfunc (kub *Kubectl) GetPodsHostIPs(namespace string, label string) (map[string]string, error) {\n\tjsonFilter := `{range .items[*]}{@.metadata.name}{\"=\"}{@.status.hostIP}{\"\\n\"}{end}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o jsonpath='%s'\",\n\t\tKubectlCmd, namespace, label, jsonFilter))\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve pods: %s\", res.CombineOutput())\n\t}\n\treturn res.KVOutput(), nil\n}\n\n// GetEndpoints gets all of the endpoints in the given namespace that match the\n// provided filter.\nfunc (kub *Kubectl) GetEndpoints(namespace string, filter string) *CmdRes {\n\treturn kub.ExecShort(fmt.Sprintf(\"%s -n %s get endpoints %s -o json\", KubectlCmd, namespace, filter))\n}\n\n// GetAllPods returns a slice of all pods present in Kubernetes cluster, along\n// with an error if the pods could not be retrieved via `kubectl`, or if the\n// pod objects are unable to be marshaled from JSON.\nfunc (kub *Kubectl) GetAllPods(ctx context.Context, options ...ExecOptions) ([]v1.Pod, error) {\n\tvar ops ExecOptions\n\tif len(options) > 0 {\n\t\tops = options[0]\n\t}\n\n\tgetPodsCtx, cancel := context.WithTimeout(ctx, MidCommandTimeout)\n\tdefer cancel()\n\n\tvar podsList v1.List\n\tres := kub.ExecContext(getPodsCtx,\n\t\tfmt.Sprintf(\"%s get pods --all-namespaces -o json\", KubectlCmd),\n\t\tExecOptions{SkipLog: ops.SkipLog})\n\n\tif !res.WasSuccessful() {\n\t\treturn nil, res.GetError()\n\t}\n\n\terr := res.Unmarshal(&podsList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpods := make([]v1.Pod, len(podsList.Items))\n\tfor _, item := range podsList.Items {\n\t\tvar pod v1.Pod\n\t\terr = json.Unmarshal(item.Raw, &pod)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpods = append(pods, pod)\n\t}\n\n\treturn pods, nil\n}\n\n// GetPodNames returns the names of all of the pods that are labeled with label\n// in the specified namespace, along with an error if the pod names cannot be\n// retrieved.\nfunc (kub *Kubectl) GetPodNames(namespace string, label string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetPodNamesContext(ctx, namespace, label)\n}\n\n// GetPodNamesContext returns the names of all of the pods that are labeled with\n// label in the specified namespace, along with an error if the pod names cannot\n// be retrieved.\nfunc (kub *Kubectl) GetPodNamesContext(ctx context.Context, namespace string, label string) ([]string, error) {\n\tstdout := new(bytes.Buffer)\n\tfilter := \"-o jsonpath='{.items[*].metadata.name}'\"\n\n\tcmd := fmt.Sprintf(\"%s -n %s get pods -l %s %s\", KubectlCmd, namespace, label, filter)\n\n\t// Taking more than 30 seconds to get pods means that something is wrong\n\t// 
connecting to the node.\n\tpodNamesCtx, cancel := context.WithTimeout(ctx, ShortCommandTimeout)\n\tdefer cancel()\n\terr := kub.ExecuteContext(podNamesCtx, cmd, stdout, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"could not find pods in namespace '%v' with label '%v': %s\", namespace, label, err)\n\t}\n\n\tout := strings.Trim(stdout.String(), \"\\n\")\n\tif len(out) == 0 {\n\t\t//Small hack. String split always return an array with an empty string\n\t\treturn []string{}, nil\n\t}\n\treturn strings.Split(out, \" \"), nil\n}\n\n// GetNodeNameByLabel returns the names of the node with a matching cilium.io/ci-node label\nfunc (kub *Kubectl) GetNodeNameByLabel(label string) (string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), ShortCommandTimeout)\n\tdefer cancel()\n\treturn kub.GetNodeNameByLabelContext(ctx, label)\n}\n\n// GetNodeNameByLabelContext returns the names of all nodes with a matching label\nfunc (kub *Kubectl) GetNodeNameByLabelContext(ctx context.Context, label string) (string, error) {\n\tfilter := `{.items[*].metadata.name}`\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read name: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read name with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\n// GetNodeIPByLabel returns the IP of the node with cilium.io/ci-node=label.\n// An error is returned if a node cannot be found.\nfunc (kub *Kubectl) GetNodeIPByLabel(label string, external bool) (string, error) {\n\tipType := \"InternalIP\"\n\tif external {\n\t\tipType = \"ExternalIP\"\n\t}\n\tfilter := `{@.items[*].status.addresses[?(@.type == \"` + ipType + `\")].address}`\n\tres := kub.ExecShort(fmt.Sprintf(\"%s get nodes -l cilium.io/ci-node=%s -o jsonpath='%s'\",\n\t\tKubectlCmd, label, filter))\n\tif !res.WasSuccessful() {\n\t\treturn \"\", fmt.Errorf(\"cannot retrieve node to read IP: %s\", res.CombineOutput())\n\t}\n\n\tout := strings.Trim(res.Stdout(), \"\\n\")\n\tif len(out) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no matching node to read IP with label '%v'\", label)\n\t}\n\n\treturn out, nil\n}\n\nfunc (kub *Kubectl) getIfaceByIPAddr(label string, ipAddr string) (string, error) {\n\tcmd := fmt.Sprintf(\n\t\t`ip -j a s | jq -r '.[] | select(.addr_info[] | .local == \"%s\") | .ifname'`,\n\t\tipAddr)\n\tiface, err := kub.ExecInHostNetNSByLabel(context.TODO(), label, cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to retrieve iface by IP addr: %s\", err)\n\t}\n\n\treturn strings.Trim(iface, \"\\n\"), nil\n}\n\n// GetServiceHostPort returns the host and the first port for the given service name.\n// It will return an error if service cannot be retrieved.\nfunc (kub *Kubectl) GetServiceHostPort(namespace string, service string) (string, int, error) {\n\tvar data v1.Service\n\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tif len(data.Spec.Ports) == 0 {\n\t\treturn \"\", 0, fmt.Errorf(\"Service '%s' does not have ports defined\", service)\n\t}\n\treturn data.Spec.ClusterIP, int(data.Spec.Ports[0].Port), nil\n}\n\n// GetLoadBalancerIP waits until a loadbalancer IP addr has been assigned for\n// the given service, and then returns the IP addr.\nfunc (kub *Kubectl) 
GetLoadBalancerIP(namespace string, service string, timeout time.Duration) (string, error) {\n\tvar data v1.Service\n\n\tbody := func() bool {\n\t\terr := kub.Get(namespace, fmt.Sprintf(\"service %s\", service)).Unmarshal(&data)\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(data.Status.LoadBalancer.Ingress) != 0 {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"service\": service,\n\t\t}).Info(\"GetLoadBalancerIP: loadbalancer IP was not assigned\")\n\n\t\treturn false\n\t}\n\n\terr := WithTimeout(body, \"could not get service LoadBalancer IP addr\",\n\t\t&TimeoutConfig{Timeout: timeout})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn data.Status.LoadBalancer.Ingress[0].IP, nil\n}\n\n// Logs returns a CmdRes with containing the resulting metadata from the\n// execution of `kubectl logs <pod> -n <namespace>`.\nfunc (kub *Kubectl) Logs(namespace string, pod string) *CmdRes {\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s -n %s logs %s\", KubectlCmd, namespace, pod))\n}\n\n// MonitorStart runs cilium monitor in the background and returns the command\n// result, CmdRes, along with a cancel function. The cancel function is used to\n// stop the monitor.\nfunc (kub *Kubectl) MonitorStart(pod string) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv\", KubectlCmd, CiliumNamespace, pod)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// MonitorEndpointStart runs cilium monitor only on a specified endpoint. This\n// function is the same as MonitorStart.\nfunc (kub *Kubectl) MonitorEndpointStart(pod string, epID int64) (res *CmdRes, cancel func()) {\n\tcmd := fmt.Sprintf(\"%s exec -n %s %s -- cilium monitor -vv --related-to %d\",\n\t\tKubectlCmd, CiliumNamespace, pod, epID)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn kub.ExecInBackground(ctx, cmd, ExecOptions{SkipLog: true}), cancel\n}\n\n// BackgroundReport dumps the result of the given commands on cilium pods each\n// five seconds.\nfunc (kub *Kubectl) BackgroundReport(commands ...string) (context.CancelFunc, error) {\n\tbackgroundCtx, cancel := context.WithCancel(context.Background())\n\tpods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn cancel, fmt.Errorf(\"Cannot retrieve cilium pods: %s\", err)\n\t}\n\tretrieveInfo := func() {\n\t\tfor _, pod := range pods {\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tkub.CiliumExecContext(context.TODO(), pod, cmd)\n\t\t\t}\n\t\t}\n\t}\n\tgo func(ctx context.Context) {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tretrieveInfo()\n\t\t\t}\n\t\t}\n\t}(backgroundCtx)\n\treturn cancel, nil\n}\n\n// PprofReport runs pprof on cilium nodes each 5 minutes and saves the data\n// into the test folder saved with pprof suffix.\nfunc (kub *Kubectl) PprofReport() {\n\tPProfCadence := 5 * time.Minute\n\tticker := time.NewTicker(PProfCadence)\n\tlog := kub.Logger().WithField(\"subsys\", \"pprofReport\")\n\n\tretrievePProf := func(pod, testPath string) {\n\t\tres := kub.ExecPodCmd(CiliumNamespace, pod, \"gops pprof-cpu 1\")\n\t\tif !res.WasSuccessful() {\n\t\t\tlog.Errorf(\"cannot execute pprof: %s\", res.OutputPrettyPrint())\n\t\t\treturn\n\t\t}\n\t\tfiles := kub.ExecPodCmd(CiliumNamespace, pod, `ls 
-1 /tmp/`)\n\t\tfor _, file := range files.ByLines() {\n\t\t\tif !strings.Contains(file, \"profile\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdest := filepath.Join(\n\t\t\t\tkub.BasePath(), testPath,\n\t\t\t\tfmt.Sprintf(\"%s-profile-%s.pprof\", pod, file))\n\t\t\t_ = kub.Exec(fmt.Sprintf(\"%[1]s cp %[2]s/%[3]s:/tmp/%[4]s %[5]s\",\n\t\t\t\tKubectlCmd, CiliumNamespace, pod, file, dest),\n\t\t\t\tExecOptions{SkipLog: true})\n\n\t\t\t_ = kub.ExecPodCmd(CiliumNamespace, pod, fmt.Sprintf(\n\t\t\t\t\"rm %s\", filepath.Join(\"/tmp/\", file)))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\n\t\t\ttestPath, err := CreateReportDirectory()\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"cannot create test result path '%s'\", testPath)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tpods, err := kub.GetCiliumPods()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot get cilium pods\")\n\t\t\t}\n\n\t\t\tfor _, pod := range pods {\n\t\t\t\tretrievePProf(pod, testPath)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n// NamespaceCreate creates a new Kubernetes namespace with the given name\nfunc (kub *Kubectl) NamespaceCreate(name string) *CmdRes {\n\tginkgoext.By(\"Creating namespace %s\", name)\n\tkub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\treturn kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n}\n\n// NamespaceDelete deletes a given Kubernetes namespace\nfunc (kub *Kubectl) NamespaceDelete(name string) *CmdRes {\n\tginkgoext.By(\"Deleting namespace %s\", name)\n\tif err := kub.DeleteAllInNamespace(name); err != nil {\n\t\tkub.Logger().Infof(\"Error while deleting all objects from %s ns: %s\", name, err)\n\t}\n\tres := kub.ExecShort(fmt.Sprintf(\"%s delete namespace %s\", KubectlCmd, name))\n\tif !res.WasSuccessful() {\n\t\tkub.Logger().Infof(\"Error while deleting ns %s: %s\", name, res.GetError())\n\t}\n\treturn kub.ExecShort(fmt.Sprintf(\n\t\t\"%[1]s get namespace %[2]s -o json | tr -d \\\"\\\\n\\\" | sed \\\"s/\\\\\\\"finalizers\\\\\\\": \\\\[[^]]\\\\+\\\\]/\\\\\\\"finalizers\\\\\\\": []/\\\" | %[1]s replace --raw /api/v1/namespaces/%[2]s/finalize -f -\", KubectlCmd, name))\n\n}\n\n// EnsureNamespaceExists creates a namespace, ignoring the AlreadyExists error.\nfunc (kub *Kubectl) EnsureNamespaceExists(name string) error {\n\tginkgoext.By(\"Ensuring the namespace %s exists\", name)\n\tres := kub.ExecShort(fmt.Sprintf(\"%s create namespace %s\", KubectlCmd, name))\n\tif !res.success && !strings.Contains(res.Stderr(), \"AlreadyExists\") {\n\t\treturn res.err\n\t}\n\treturn nil\n}\n\n// DeleteAllInNamespace deletes all k8s objects in a namespace\nfunc (kub *Kubectl) DeleteAllInNamespace(name string) error {\n\t// we are getting all namespaced resources from k8s apiserver, and delete all objects of these types in a provided namespace\n\tcmd := fmt.Sprintf(\"%s delete $(%s api-resources --namespaced=true --verbs=delete -o name | tr '\\n' ',' | sed -e 's/,$//') -n %s --all\", KubectlCmd, KubectlCmd, name)\n\tif res := kub.ExecShort(cmd); !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to run '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\treturn nil\n}\n\n// NamespaceLabel sets a label in a Kubernetes namespace\nfunc (kub *Kubectl) NamespaceLabel(namespace string, label string) *CmdRes {\n\tginkgoext.By(\"Setting label %s in namespace %s\", label, namespace)\n\treturn kub.ExecShort(fmt.Sprintf(\"%s label --overwrite namespace %s %s\", KubectlCmd, namespace, label))\n}\n\n// WaitforPods waits up until timeout seconds have elapsed for all pods in 
the\n// specified namespace that match the provided JSONPath filter to have their\n// containterStatuses equal to \"ready\". Returns true if all pods achieve\n// the aforementioned desired state within timeout seconds. Returns false and\n// an error if the command failed or the timeout was exceeded.\nfunc (kub *Kubectl) WaitforPods(namespace string, filter string, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, 0, timeout)\n\tginkgoext.By(\"WaitforPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// checkPodStatusFunc returns true if the pod is in the desired state, or false\n// otherwise.\ntype checkPodStatusFunc func(v1.Pod) bool\n\n// checkRunning checks that the pods are running, but not necessarily ready.\nfunc checkRunning(pod v1.Pod) bool {\n\tif pod.Status.Phase != v1.PodRunning || pod.ObjectMeta.DeletionTimestamp != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// checkReady determines whether the pods are running and ready.\nfunc checkReady(pod v1.Pod) bool {\n\tif !checkRunning(pod) {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// WaitforNPodsRunning waits up until timeout duration has elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"running\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPodsRunning(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPodsRunning(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkRunning, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\n// WaitforNPods waits up until timeout seconds have elapsed for at least\n// minRequired pods in the specified namespace that match the provided JSONPath\n// filter to have their containterStatuses equal to \"ready\".\n// Returns no error if minRequired pods achieve the aforementioned desired\n// state within timeout seconds. 
Returns an error if the command failed or the\n// timeout was exceeded.\n// When minRequired is 0, the function will derive required pod count from number\n// of pods in the cluster for every iteration.\nfunc (kub *Kubectl) WaitforNPods(namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q)\", namespace, filter)\n\terr := kub.waitForNPods(checkReady, namespace, filter, minRequired, timeout)\n\tginkgoext.By(\"WaitforNPods(namespace=%q, filter=%q) => %v\", namespace, filter, err)\n\tif err != nil {\n\t\tdesc := kub.ExecShort(fmt.Sprintf(\"%s describe pods -n %s %s\", KubectlCmd, namespace, filter))\n\t\tginkgoext.By(desc.GetDebugMessage())\n\t}\n\treturn err\n}\n\nfunc (kub *Kubectl) waitForNPods(checkStatus checkPodStatusFunc, namespace string, filter string, minRequired int, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tpodList := &v1.PodList{}\n\t\terr := kub.GetPods(namespace, filter).Unmarshal(podList)\n\t\tif err != nil {\n\t\t\tkub.Logger().Infof(\"Error while getting PodList: %s\", err)\n\t\t\treturn false\n\t\t}\n\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tvar required int\n\n\t\tif minRequired == 0 {\n\t\t\trequired = len(podList.Items)\n\t\t} else {\n\t\t\trequired = minRequired\n\t\t}\n\n\t\tif len(podList.Items) < required {\n\t\t\treturn false\n\t\t}\n\n\t\t// For each pod, count it as running when all conditions are true:\n\t\t// - It is scheduled via Phase == v1.PodRunning\n\t\t// - It is not scheduled for deletion when DeletionTimestamp is set\n\t\t// - All containers in the pod have passed the liveness check via\n\t\t// containerStatuses.Ready\n\t\tcurrScheduled := 0\n\t\tfor _, pod := range podList.Items {\n\t\t\tif checkStatus(pod) {\n\t\t\t\tcurrScheduled++\n\t\t\t}\n\t\t}\n\n\t\treturn currScheduled >= required\n\t}\n\n\treturn WithTimeout(\n\t\tbody,\n\t\tfmt.Sprintf(\"timed out waiting for pods with filter %s to be ready\", filter),\n\t\t&TimeoutConfig{Timeout: timeout})\n}\n\n// WaitForServiceEndpoints waits up until timeout seconds have elapsed for all\n// endpoints in the specified namespace that match the provided JSONPath\n// filter. Returns true if all pods achieve the aforementioned desired state\n// within timeout seconds. 
Returns false and an error if the command failed or\n// the timeout was exceeded.\nfunc (kub *Kubectl) WaitForServiceEndpoints(namespace string, filter string, service string, timeout time.Duration) error {\n\tbody := func() bool {\n\t\tvar jsonPath = fmt.Sprintf(\"{.items[?(@.metadata.name == '%s')].subsets[0].ports[0].port}\", service)\n\t\tdata, err := kub.GetEndpoints(namespace, filter).Filter(jsonPath)\n\n\t\tif err != nil {\n\t\t\tkub.Logger().WithError(err)\n\t\t\treturn false\n\t\t}\n\n\t\tif data.String() != \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\tkub.Logger().WithFields(logrus.Fields{\n\t\t\t\"namespace\": namespace,\n\t\t\t\"filter\": filter,\n\t\t\t\"data\": data,\n\t\t\t\"service\": service,\n\t\t}).Info(\"WaitForServiceEndpoints: service endpoint not ready\")\n\t\treturn false\n\t}\n\n\treturn WithTimeout(body, \"could not get service endpoints\", &TimeoutConfig{Timeout: timeout})\n}\n\n// Action performs the specified ResourceLifeCycleAction on the Kubernetes\n// manifest located at path filepath in the given namespace\nfunc (kub *Kubectl) Action(action ResourceLifeCycleAction, filePath string, namespace ...string) *CmdRes {\n\tif len(namespace) == 0 {\n\t\tkub.Logger().Debugf(\"performing '%v' on '%v'\", action, filePath)\n\t\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s\", KubectlCmd, action, filePath))\n\t}\n\n\tkub.Logger().Debugf(\"performing '%v' on '%v' in namespace '%v'\", action, filePath, namespace[0])\n\treturn kub.ExecShort(fmt.Sprintf(\"%s %s -f %s -n %s\", KubectlCmd, action, filePath, namespace[0]))\n}\n\n// ApplyOptions stores options for kubectl apply command\ntype ApplyOptions struct {\n\tFilePath string\n\tNamespace string\n\tForce bool\n\tDryRun bool\n\tOutput string\n\tPiped string\n}\n\n// Apply applies the Kubernetes manifest located at path filepath.\nfunc (kub *Kubectl) Apply(options ApplyOptions) *CmdRes {\n\tvar force string\n\tif options.Force {\n\t\tforce = \"--force=true\"\n\t} else {\n\t\tforce = \"--force=false\"\n\t}\n\n\tcmd := fmt.Sprintf(\"%s apply %s -f %s\", KubectlCmd, force, options.FilePath)\n\n\tif options.DryRun {\n\t\tcmd = cmd + \" --dry-run\"\n\t}\n\n\tif len(options.Output) > 0 {\n\t\tcmd = cmd + \" -o \" + options.Output\n\t}\n\n\tif len(options.Namespace) == 0 {\n\t\tkub.Logger().Debugf(\"applying %s\", options.FilePath)\n\t} else {\n\t\tkub.Logger().Debugf(\"applying %s in namespace %s\", options.FilePath, options.Namespace)\n\t\tcmd = cmd + \" -n \" + options.Namespace\n\t}\n\n\tif len(options.Piped) > 0 {\n\t\tcmd = options.Piped + \" | \" + cmd\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout*2)\n\tdefer cancel()\n\treturn kub.ExecContext(ctx, cmd)\n}\n\n// ApplyDefault applies give filepath with other options set to default\nfunc (kub *Kubectl) ApplyDefault(filePath string) *CmdRes {\n\treturn kub.Apply(ApplyOptions{FilePath: filePath})\n}\n\n// Create creates the Kubernetes kanifest located at path filepath.\nfunc (kub *Kubectl) Create(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"creating %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s create -f %s\", KubectlCmd, filePath))\n}\n\n// CreateResource is a wrapper around `kubernetes create <resource>\n// <resourceName>.\nfunc (kub *Kubectl) CreateResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating resource %s with name %s\", resource, resourceName))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create %s %s\", resource, resourceName))\n}\n\n// DeleteResource is a wrapper 
around `kubernetes delete <resource>\n// resourceName>.\nfunc (kub *Kubectl) DeleteResource(resource, resourceName string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"deleting resource %s with name %s\", resource, resourceName))\n\treturn kub.Exec(fmt.Sprintf(\"kubectl delete %s %s\", resource, resourceName))\n}\n\n// DeleteInNamespace deletes the Kubernetes manifest at path filepath in a\n// particular namespace\nfunc (kub *Kubectl) DeleteInNamespace(namespace, filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s in namespace %s\", filePath, namespace)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s -n %s delete -f %s\", KubectlCmd, namespace, filePath))\n}\n\n// Delete deletes the Kubernetes manifest at path filepath.\nfunc (kub *Kubectl) Delete(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.ExecShort(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// DeleteAndWait deletes the Kubernetes manifest at path filePath and wait\n// for the associated resources to be gone.\n// If ignoreNotFound parameter is true we don't error if the resource to be\n// deleted is not found in the cluster.\nfunc (kub *Kubectl) DeleteAndWait(filePath string, ignoreNotFound bool) *CmdRes {\n\tkub.Logger().Debugf(\"waiting for resources in %q to be deleted\", filePath)\n\tvar ignoreOpt string\n\tif ignoreNotFound {\n\t\tignoreOpt = \"--ignore-not-found\"\n\t}\n\treturn kub.ExecMiddle(\n\t\tfmt.Sprintf(\"%s delete -f %s --wait %s\", KubectlCmd, filePath, ignoreOpt))\n}\n\n// DeleteLong deletes the Kubernetes manifest at path filepath with longer timeout.\nfunc (kub *Kubectl) DeleteLong(filePath string) *CmdRes {\n\tkub.Logger().Debugf(\"deleting %s\", filePath)\n\treturn kub.Exec(\n\t\tfmt.Sprintf(\"%s delete -f %s\", KubectlCmd, filePath))\n}\n\n// PodsHaveCiliumIdentity validates that all pods matching th podSelector have\n// a CiliumEndpoint resource mirroring it and an identity is assigned to it. If\n// any pods do not match this criteria, an error is returned.\nfunc (kub *Kubectl) PodsHaveCiliumIdentity(namespace, podSelector string) error {\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get pods -l %s -o json\", KubectlCmd, namespace, podSelector))\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve pods for selector %s: %s\", podSelector, res.OutputPrettyPrint())\n\t}\n\n\tpodList := &v1.PodList{}\n\terr := res.Unmarshal(podList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal pods for selector %s: %s\", podSelector, err)\n\t}\n\n\tfor _, pod := range podList.Items {\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ep == nil {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumEndpoint\", namespace, pod.Name)\n\t\t}\n\n\t\tif ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\treturn fmt.Errorf(\"pod %s/%s has no CiliumIdentity\", namespace, pod.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeploymentIsReady validate that a deployment has at least one replica and\n// that all replicas are:\n// - up-to-date\n// - ready\n//\n// If the above condition is not met, an error is returned. 
If all replicas are\n// ready, then the number of replicas is returned.\nfunc (kub *Kubectl) DeploymentIsReady(namespace, deployment string) (int, error) {\n\tfullName := namespace + \"/\" + deployment\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get deployment %s -o json\", KubectlCmd, namespace, deployment))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve deployment %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.Deployment{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal deployment %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.Replicas == 0 {\n\t\treturn 0, fmt.Errorf(\"replicas count is zero\")\n\t}\n\n\tif d.Status.AvailableReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are available\", d.Status.AvailableReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.ReadyReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are ready\", d.Status.ReadyReplicas, d.Status.Replicas)\n\t}\n\n\tif d.Status.UpdatedReplicas != d.Status.Replicas {\n\t\treturn 0, fmt.Errorf(\"only %d of %d replicas are up-to-date\", d.Status.UpdatedReplicas, d.Status.Replicas)\n\t}\n\n\treturn int(d.Status.Replicas), nil\n}\n\nfunc (kub *Kubectl) GetService(namespace, service string) (*v1.Service, error) {\n\tfullName := namespace + \"/\" + service\n\tres := kub.Get(namespace, \"service \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to retrieve service %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tvar serviceObj v1.Service\n\terr := res.Unmarshal(&serviceObj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal service %s: %s\", fullName, err)\n\t}\n\n\treturn &serviceObj, nil\n}\n\nfunc absoluteServiceName(namespace, service string) string {\n\tfullServiceName := service + \".\" + namespace\n\n\tif !strings.HasSuffix(fullServiceName, ServiceSuffix) {\n\t\tfullServiceName = fullServiceName + \".\" + ServiceSuffix\n\t}\n\n\treturn fullServiceName\n}\n\nfunc (kub *Kubectl) KubernetesDNSCanResolve(namespace, service string) error {\n\tserviceToResolve := absoluteServiceName(namespace, service)\n\n\tkubeDnsService, err := kub.GetService(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(kubeDnsService.Spec.Ports) == 0 {\n\t\treturn fmt.Errorf(\"kube-dns service has no ports defined\")\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), MidCommandTimeout)\n\tdefer cancel()\n\n\t// https://bugs.launchpad.net/ubuntu/+source/bind9/+bug/854705\n\tcmd := fmt.Sprintf(\"dig +short %s @%s | grep -v -e '^;'\", serviceToResolve, kubeDnsService.Spec.ClusterIP)\n\tres := kub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\tif res.err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve service name %s with DND server %s by running '%s' Cilium pod: %s\",\n\t\t\tserviceToResolve, kubeDnsService.Spec.ClusterIP, cmd, res.OutputPrettyPrint())\n\t}\n\tif net.ParseIP(res.SingleOut()) == nil {\n\t\treturn fmt.Errorf(\"dig did not return an IP: %s\", res.SingleOut())\n\t}\n\n\tdestinationService, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the destination service is headless, there is no ClusterIP, the\n\t// IP returned by the dig is the IP of one of the pods.\n\tif destinationService.Spec.ClusterIP == v1.ClusterIPNone {\n\t\tcmd := fmt.Sprintf(\"dig +tcp %s @%s\", serviceToResolve, 
kubeDnsService.Spec.ClusterIP)\n\t\tkub.ExecInFirstPod(ctx, LogGathererNamespace, logGathererSelector(false), cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to resolve service name %s by running '%s': %s\",\n\t\t\t\tserviceToResolve, cmd, res.OutputPrettyPrint())\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif !strings.Contains(res.SingleOut(), destinationService.Spec.ClusterIP) {\n\t\treturn fmt.Errorf(\"IP returned '%s' does not match the ClusterIP '%s' of the destination service\",\n\t\t\tres.SingleOut(), destinationService.Spec.ClusterIP)\n\t}\n\n\treturn nil\n}\n\nfunc (kub *Kubectl) validateServicePlumbingInCiliumPod(fullName, ciliumPod string, serviceObj *v1.Service, endpointsObj v1.Endpoints) error {\n\tjq := \"jq -r '[ .[].status.realized | select(.\\\"frontend-address\\\".ip==\\\"\" + serviceObj.Spec.ClusterIP + \"\\\") | . ] '\"\n\tcmd := \"cilium service list -o json | \" + jq\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn fmt.Errorf(\"ClusterIP %s not found in service list of cilium pod %s\",\n\t\t\tserviceObj.Spec.ClusterIP, ciliumPod)\n\t}\n\n\tvar realizedServices []models.ServiceSpec\n\terr := res.Unmarshal(&realizedServices)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal service spec '%s': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tcmd = \"cilium bpf lb list -o json\"\n\tres = kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to validate cilium service by running '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar lbMap map[string][]string\n\terr = res.Unmarshal(&lbMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal cilium bpf lb list output: %s\", err)\n\t}\n\n\tfor _, port := range serviceObj.Spec.Ports {\n\t\tvar foundPort *v1.ServicePort\n\t\tfor _, realizedService := range realizedServices {\n\t\t\tif compareServicePortToFrontEnd(&port, realizedService.FrontendAddress) {\n\t\t\t\tfoundPort = &port\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundPort == nil {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t\tlKey := serviceAddressKey(serviceObj.Spec.ClusterIP, fmt.Sprintf(\"%d\", port.Port), string(port.Protocol), \"\")\n\t\tif _, ok := lbMap[lKey]; !ok {\n\t\t\treturn fmt.Errorf(\"port %d of service %s (%s) not found in cilium bpf lb list of pod %s\",\n\t\t\t\tport.Port, fullName, serviceObj.Spec.ClusterIP, ciliumPod)\n\t\t}\n\t}\n\n\tfor _, subset := range endpointsObj.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\tfor _, port := range subset.Ports {\n\t\t\t\tfoundBackend, foundBackendLB := false, false\n\t\t\t\tfor _, realizedService := range realizedServices {\n\t\t\t\t\tfrontEnd := realizedService.FrontendAddress\n\t\t\t\t\tlbKey := serviceAddressKey(frontEnd.IP, fmt.Sprintf(\"%d\", frontEnd.Port), string(frontEnd.Protocol), \"\")\n\t\t\t\t\tlb := lbMap[lbKey]\n\t\t\t\t\tfor _, backAddr := range realizedService.BackendAddresses {\n\t\t\t\t\t\tif addr.IP == *backAddr.IP && uint16(port.Port) == backAddr.Port &&\n\t\t\t\t\t\t\tcompareProto(string(port.Protocol), backAddr.Protocol) {\n\t\t\t\t\t\t\tfoundBackend = true\n\t\t\t\t\t\t\tfor _, backend := range lb {\n\t\t\t\t\t\t\t\tif strings.Contains(backend, 
net.JoinHostPort(*backAddr.IP, fmt.Sprintf(\"%d\", port.Port))) {\n\t\t\t\t\t\t\t\t\tfoundBackendLB = true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !foundBackend {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\n\t\t\t\tif !foundBackendLB {\n\t\t\t\t\treturn fmt.Errorf(\"unable to find service backend %s in datapath of cilium pod %s\",\n\t\t\t\t\t\tnet.JoinHostPort(addr.IP, fmt.Sprintf(\"%d\", port.Port)), ciliumPod)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ValidateServicePlumbing ensures that a service in a namespace successfully\n// plumbed by all Cilium pods in the cluster:\n// - The service and endpoints are found in `cilium service list`\n// - The service and endpoints are found in `cilium bpf lb list`\nfunc (kub *Kubectl) ValidateServicePlumbing(namespace, service string) error {\n\tfullName := namespace + \"/\" + service\n\n\tserviceObj, err := kub.GetService(namespace, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceObj == nil {\n\t\treturn fmt.Errorf(\"%s service not found\", fullName)\n\t}\n\n\tres := kub.Get(namespace, \"endpoints \"+service)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve endpoints %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\tif serviceObj.Spec.ClusterIP == v1.ClusterIPNone {\n\t\treturn nil\n\t}\n\n\tvar endpointsObj v1.Endpoints\n\terr = res.Unmarshal(&endpointsObj)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal endpoints %s: %s\", fullName, err)\n\t}\n\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg, _ := errgroup.WithContext(context.TODO())\n\tfor _, ciliumPod := range ciliumPods {\n\t\tciliumPod := ciliumPod\n\t\tg.Go(func() error {\n\t\t\tvar err error\n\t\t\t// The plumbing of Kubernetes services typically lags\n\t\t\t// behind a little bit if Cilium was just restarted.\n\t\t\t// Give this a thight timeout to avoid always failing.\n\t\t\ttimeoutErr := RepeatUntilTrue(func() bool {\n\t\t\t\terr = kub.validateServicePlumbingInCiliumPod(fullName, ciliumPod, serviceObj, endpointsObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tginkgoext.By(\"Checking service %s plumbing in cilium pod %s: %s\", fullName, ciliumPod, err)\n\t\t\t\t}\n\t\t\t\treturn err == nil\n\t\t\t}, &TimeoutConfig{Timeout: 5 * time.Second, Ticker: 1 * time.Second})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if timeoutErr != nil {\n\t\t\t\treturn timeoutErr\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ValidateKubernetesDNS validates that the Kubernetes DNS server has been\n// deployed correctly and can resolve DNS names. The following validations are\n// done:\n// - The Kuberentes DNS deployment has at least one replica\n// - All replicas are up-to-date and ready\n// - All pods matching the deployment are represented by a CiliumEndpoint with an identity\n// - The kube-system/kube-dns service is correctly pumbed in all Cilium agents\n// - The service \"default/kubernetes\" can be resolved via the KubernetesDNS\n// and the IP returned matches the ClusterIP in the service\nfunc (kub *Kubectl) ValidateKubernetesDNS() error {\n\t// The deployment is always validated first and not in parallel. 
There\n\t// is no point in validating correct plumbing if the DNS is not even up\n\t// and running.\n\tginkgoext.By(\"Checking if deployment is ready\")\n\t_, err := kub.DeploymentIsReady(KubeSystemNamespace, \"kube-dns\")\n\tif err != nil {\n\t\t_, err = kub.DeploymentIsReady(KubeSystemNamespace, \"coredns\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\terrQueue = make(chan error, 3)\n\t)\n\twg.Add(3)\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if pods have identity\")\n\t\tif err := kub.PodsHaveCiliumIdentity(KubeSystemNamespace, kubeDNSLabel); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if DNS can resolve\")\n\t\tif err := kub.KubernetesDNSCanResolve(\"default\", \"kubernetes\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tginkgoext.By(\"Checking if kube-dns service is plumbed correctly\")\n\t\tif err := kub.ValidateServicePlumbing(KubeSystemNamespace, \"kube-dns\"); err != nil {\n\t\t\terrQueue <- err\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errQueue:\n\t\treturn err\n\tdefault:\n\t}\n\n\treturn nil\n}\n\n// RestartUnmanagedPodsInNamespace restarts all pods in a namespace which are:\n// * not host networking\n// * not managed by Cilium already\nfunc (kub *Kubectl) RestartUnmanagedPodsInNamespace(namespace string, excludePodPrefix ...string) {\n\tpodList := &v1.PodList{}\n\tcmd := KubectlCmd + \" -n \" + namespace + \" get pods -o json\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Failf(\"Unable to retrieve all pods to restart unmanaged pods with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\tif err := res.Unmarshal(podList); err != nil {\n\t\tginkgoext.Failf(\"Unable to unmarshal podlist: %s\", err)\n\t}\n\niteratePods:\n\tfor _, pod := range podList.Items {\n\t\tif pod.Spec.HostNetwork || pod.DeletionTimestamp != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, prefix := range excludePodPrefix {\n\t\t\tif strings.HasPrefix(pod.Name, prefix) {\n\t\t\t\tcontinue iteratePods\n\t\t\t}\n\t\t}\n\n\t\tep, err := kub.GetCiliumEndpoint(namespace, pod.Name)\n\t\tif err != nil || ep.Identity == nil || ep.Identity.ID == 0 {\n\t\t\tginkgoext.By(\"Restarting unmanaged pod %s/%s\", namespace, pod.Name)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete pod \" + pod.Name\n\t\t\tres = kub.Exec(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.Failf(\"Unable to restart unmanaged pod with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 734,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": {
"$OBJ": {
"start": {
"line": 723,
"col": 9,
"offset": 25011
},
"end": {
"line": 723,
"col": 12,
"offset": 25014
},
"abstract_content": "kub",
"unique_id": {
"type": "AST",
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
}
},
"$FXN": {
"start": {
"line": 722,
"col": 13,
"offset": 24932
},
"end": {
"line": 722,
"col": 24,
"offset": 24943
},
"abstract_content": "fmt.Sprintf",
"unique_id": {
"type": "AST",
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
}
},
"$OTHER": {
"start": {
"line": 722,
"col": 2,
"offset": 24921
},
"end": {
"line": 722,
"col": 9,
"offset": 24928
},
"abstract_content": "command",
"unique_id": {
"type": "AST",
"md5sum": "22de25c79fec71b1caca4adfb91b6622"
}
},
"$QUERY": {
"start": {
"line": 49,
"col": 2,
"offset": 1261
},
"end": {
"line": 49,
"col": 12,
"offset": 1271
},
"abstract_content": "KubectlCmd",
"unique_id": {
"type": "id",
"value": "KubectlCmd",
"kind": "Global",
"sid": 16
}
}
},
"metadata": {
"owasp": "A1: Injection",
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
"source-rule-url": "https://github.com/securego/gosec"
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// nodes and all pods are ready. If this condition is not met, an error is\n// returned. 
If all pods are ready, then the number of pods is returned.\nfunc (kub *Kubectl) DaemonSetIsReady(namespace, daemonset string) (int, error) {\n\tfullName := namespace + \"/\" + daemonset\n\n\tres := kub.ExecShort(fmt.Sprintf(\"%s -n %s get daemonset %s -o json\", KubectlCmd, namespace, daemonset))\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to retrieve daemonset %s: %s\", fullName, res.OutputPrettyPrint())\n\t}\n\n\td := &appsv1.DaemonSet{}\n\terr := res.Unmarshal(d)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to unmarshal DaemonSet %s: %s\", fullName, err)\n\t}\n\n\tif d.Status.DesiredNumberScheduled == 0 {\n\t\treturn 0, fmt.Errorf(\"desired number of pods is zero\")\n\t}\n\n\tif d.Status.CurrentNumberScheduled != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are scheduled\", d.Status.CurrentNumberScheduled, d.Status.DesiredNumberScheduled)\n\t}\n\n\tif d.Status.NumberAvailable != d.Status.DesiredNumberScheduled {\n\t\treturn 0, fmt.Errorf(\"only %d of %d desired pods are ready\", d.Status.NumberAvailable, d.Status.DesiredNumberScheduled)\n\t}\n\n\treturn int(d.Status.DesiredNumberScheduled), nil\n}\n\n// WaitForCiliumReadiness waits for the Cilium DaemonSet to become ready.\n// Readiness is achieved when all Cilium pods which are desired to run on a\n// node are in ready state.\nfunc (kub *Kubectl) WaitForCiliumReadiness() error {\n\tginkgoext.By(\"Waiting for Cilium to become ready\")\n\treturn RepeatUntilTrue(func() bool {\n\t\tnumPods, err := kub.DaemonSetIsReady(CiliumNamespace, \"cilium\")\n\t\tif err != nil {\n\t\t\tginkgoext.By(\"Cilium DaemonSet not ready yet: %s\", err)\n\t\t} else {\n\t\t\tginkgoext.By(\"Number of ready Cilium pods: %d\", numPods)\n\t\t}\n\t\treturn err == nil\n\t}, &TimeoutConfig{Timeout: 4 * time.Minute})\n}\n\n// DeleteResourceInAnyNamespace deletes all objects with the provided name of\n// the specified resource type in all namespaces.\nfunc (kub *Kubectl) DeleteResourcesInAnyNamespace(resource string, names []string) error {\n\tcmd := KubectlCmd + \" get \" + resource + \" --all-namespaces -o json | jq -r '[ .items[].metadata | (.namespace + \\\"/\\\" + .name) ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve %s in all namespaces '%s': %s\", resource, cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar allNames []string\n\tif err := res.Unmarshal(&allNames); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", res.OutputPrettyPrint(), err)\n\t}\n\n\tnamesMap := map[string]struct{}{}\n\tfor _, name := range names {\n\t\tnamesMap[name] = struct{}{}\n\t}\n\n\tfor _, combinedName := range allNames {\n\t\tparts := strings.SplitN(combinedName, \"/\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"The %s idenfifier '%s' is not in the form <namespace>/<name>\", resource, combinedName)\n\t\t}\n\t\tnamespace, name := parts[0], parts[1]\n\t\tif _, ok := namesMap[name]; ok {\n\t\t\tginkgoext.By(\"Deleting %s %s in namespace %s\", resource, name, namespace)\n\t\t\tcmd = KubectlCmd + \" -n \" + namespace + \" delete \" + resource + \" \" + name\n\t\t\tres = kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\treturn fmt.Errorf(\"unable to delete %s %s in namespaces %s with command '%s': %s\",\n\t\t\t\t\tresource, name, namespace, cmd, res.OutputPrettyPrint())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ParallelResourceDelete deletes all instances of a resource in a namespace\n// based on the list of 
names provided. Waits until all delete API calls\n// return.\nfunc (kub *Kubectl) ParallelResourceDelete(namespace, resource string, names []string) {\n\tginkgoext.By(\"Deleting %s [%s] in namespace %s\", resource, strings.Join(names, \",\"), namespace)\n\tvar wg sync.WaitGroup\n\tfor _, name := range names {\n\t\twg.Add(1)\n\t\tgo func(name string) {\n\t\t\tcmd := fmt.Sprintf(\"%s -n %s delete %s %s\",\n\t\t\t\tKubectlCmd, namespace, resource, name)\n\t\t\tres := kub.ExecShort(cmd)\n\t\t\tif !res.WasSuccessful() {\n\t\t\t\tginkgoext.By(\"Unable to delete %s %s with '%s': %s\",\n\t\t\t\t\tresource, name, cmd, res.OutputPrettyPrint())\n\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(name)\n\t}\n\tginkgoext.By(\"Waiting for %d deletes to return (%s)\",\n\t\tlen(names), strings.Join(names, \",\"))\n\twg.Wait()\n}\n\n// DeleteAllResourceInNamespace deletes all instances of a resource in a namespace\nfunc (kub *Kubectl) DeleteAllResourceInNamespace(namespace, resource string) {\n\tcmd := fmt.Sprintf(\"%s -n %s get %s -o json | jq -r '[ .items[].metadata.name ]'\",\n\t\tKubectlCmd, namespace, resource)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\tginkgoext.By(\"Unable to retrieve list of resource '%s' with '%s': %s\",\n\t\t\tresource, cmd, res.stdout.Bytes())\n\t\treturn\n\t}\n\n\tif len(res.stdout.Bytes()) > 0 {\n\t\tvar nameList []string\n\t\tif err := res.Unmarshal(&nameList); err != nil {\n\t\t\tginkgoext.By(\"Unable to unmarshal string slice '%#v': %s\",\n\t\t\t\tres.OutputPrettyPrint(), err)\n\t\t\treturn\n\t\t}\n\n\t\tif len(nameList) > 0 {\n\t\t\tkub.ParallelResourceDelete(namespace, resource, nameList)\n\t\t}\n\t}\n}\n\n// CleanNamespace removes all artifacts from a namespace\nfunc (kub *Kubectl) CleanNamespace(namespace string) {\n\tvar wg sync.WaitGroup\n\n\tfor _, resource := range resourcesToClean {\n\t\twg.Add(1)\n\t\tgo func(resource string) {\n\t\t\tkub.DeleteAllResourceInNamespace(namespace, resource)\n\t\t\twg.Done()\n\n\t\t}(resource)\n\t}\n\twg.Wait()\n}\n\n// DeleteAllInNamespace deletes all namespaces except the ones provided in the\n// exception list\nfunc (kub *Kubectl) DeleteAllNamespacesExcept(except []string) error {\n\tcmd := KubectlCmd + \" get namespace -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all namespaces with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar namespaceList []string\n\tif err := res.Unmarshal(&namespaceList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", namespaceList, err)\n\t}\n\n\texceptMap := map[string]struct{}{}\n\tfor _, e := range except {\n\t\texceptMap[e] = struct{}{}\n\t}\n\n\tfor _, namespace := range namespaceList {\n\t\tif _, ok := exceptMap[namespace]; !ok {\n\t\t\tkub.NamespaceDelete(namespace)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// PrepareCluster will prepare the cluster to run tests. 
It will:\n// - Delete all existing namespaces\n// - Label all nodes so the tests can use them\nfunc (kub *Kubectl) PrepareCluster() {\n\tginkgoext.By(\"Preparing cluster\")\n\terr := kub.DeleteAllNamespacesExcept([]string{\n\t\tKubeSystemNamespace,\n\t\tCiliumNamespace,\n\t\t\"default\",\n\t\t\"kube-node-lease\",\n\t\t\"kube-public\",\n\t\t\"container-registry\",\n\t\t\"cilium-ci-lock\",\n\t\t\"prom\",\n\t})\n\tif err != nil {\n\t\tginkgoext.Failf(\"Unable to delete non-essential namespaces: %s\", err)\n\t}\n\n\tginkgoext.By(\"Labelling nodes\")\n\tif err = kub.labelNodes(); err != nil {\n\t\tginkgoext.Failf(\"unable label nodes: %s\", err)\n\t}\n}\n\n// labelNodes labels all Kubernetes nodes for use by the CI tests\nfunc (kub *Kubectl) labelNodes() error {\n\tcmd := KubectlCmd + \" get nodes -o json | jq -r '[ .items[].metadata.name ]'\"\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"unable to retrieve all nodes with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t}\n\n\tvar nodesList []string\n\tif err := res.Unmarshal(&nodesList); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal string slice '%#v': %s\", nodesList, err)\n\t}\n\n\tindex := 1\n\tfor _, nodeName := range nodesList {\n\t\tcmd := fmt.Sprintf(\"%s label --overwrite node %s cilium.io/ci-node=k8s%d\", KubectlCmd, nodeName, index)\n\t\tres := kub.ExecShort(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to label node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t\tindex++\n\t}\n\n\tnode := GetNodeWithoutCilium()\n\tif node != \"\" {\n\t\t// Prevent scheduling any pods on the node, as it will be used as an external client\n\t\t// to send requests to k8s{1,2}\n\t\tcmd := fmt.Sprintf(\"%s taint --overwrite nodes %s key=value:NoSchedule\", KubectlCmd, node)\n\t\tres := kub.ExecMiddle(cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn fmt.Errorf(\"unable to taint node with '%s': %s\", cmd, res.OutputPrettyPrint())\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// GetCiliumEndpoint returns the CiliumEndpoint for the specified pod.\nfunc (kub *Kubectl) GetCiliumEndpoint(namespace string, pod string) (*cnpv2.EndpointStatus, error) {\n\tfullName := namespace + \"/\" + pod\n\tcmd := fmt.Sprintf(\"%s -n %s get cep %s -o json | jq '.status'\", KubectlCmd, namespace, pod)\n\tres := kub.ExecShort(cmd)\n\tif !res.WasSuccessful() {\n\t\treturn nil, fmt.Errorf(\"unable to run command '%s' to retrieve CiliumEndpoint %s: %s\",\n\t\t\tcmd, fullName, res.OutputPrettyPrint())\n\t}\n\n\tif len(res.stdout.Bytes()) == 0 {\n\t\treturn nil, fmt.Errorf(\"CiliumEndpoint does not exist\")\n\t}\n\n\tvar data *cnpv2.EndpointStatus\n\terr := res.Unmarshal(&data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to unmarshal CiliumEndpoint %s: %s\", fullName, err)\n\t}\n\n\treturn data, nil\n}\n\n// GetCiliumHostEndpointID returns the ID of the host endpoint on a given node.\nfunc (kub *Kubectl) GetCiliumHostEndpointID(ciliumPod string) (int64, error) {\n\tcmd := fmt.Sprintf(\"cilium endpoint list -o jsonpath='{[?(@.status.identity.id==%d)].id}'\",\n\t\tReservedIdentityHost)\n\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0, fmt.Errorf(\"unable to run command '%s' to retrieve ID of host endpoint from %s: %s\",\n\t\t\tcmd, ciliumPod, res.OutputPrettyPrint())\n\t}\n\n\thostEpID, err := strconv.ParseInt(strings.TrimSpace(res.Stdout()), 10, 64)\n\tif err != nil || hostEpID == 0 {\n\t\treturn 0, fmt.Errorf(\"incorrect host endpoint ID %s: 
%s\",\n\t\t\tstrings.TrimSpace(res.Stdout()), err)\n\t}\n\treturn hostEpID, nil\n}\n\n// GetNumCiliumNodes returns the number of Kubernetes nodes running cilium\nfunc (kub *Kubectl) GetNumCiliumNodes() int {\n\tgetNodesCmd := fmt.Sprintf(\"%s get nodes -o jsonpath='{.items.*.metadata.name}'\", KubectlCmd)\n\tres := kub.ExecShort(getNodesCmd)\n\tif !res.WasSuccessful() {\n\t\treturn 0\n\t}\n\tsub := 0\n\tif ExistNodeWithoutCilium() {\n\t\tsub = 1\n\t}\n\n\treturn len(strings.Split(res.SingleOut(), \" \")) - sub\n}\n\n// CountMissedTailCalls returns the number of the sum of all drops due to\n// missed tail calls that happened on all Cilium-managed nodes.\nfunc (kub *Kubectl) CountMissedTailCalls() (int, error) {\n\tciliumPods, err := kub.GetCiliumPods()\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\ttotalMissedTailCalls := 0\n\tfor _, ciliumPod := range ciliumPods {\n\t\tcmd := \"cilium metrics list -o json | jq '.[] | select( .name == \\\"cilium_drop_count_total\\\" and .labels.reason == \\\"Missed tail call\\\" ).value'\"\n\t\tres := kub.CiliumExecContext(context.Background(), ciliumPod, cmd)\n\t\tif !res.WasSuccessful() {\n\t\t\treturn -1, fmt.Errorf(\"Failed to run %s in pod %s: %s\", cmd, ciliumPod, res.CombineOutput())\n\t\t}\n\t\tif res.Stdout() == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tfor _, cnt := range res.ByLines() {\n\t\t\tnbMissedTailCalls, err := strconv.Atoi(cnt)\n\t\t\tif err != nil {\n\t\t\t\treturn -1, err\n\t\t\t}\n\t\t\ttotalMissedTailCalls += nbMissedTailCalls\n\t\t}\n\t}\n\n\treturn totalMissedTailCalls, nil\n}\n\n// CreateSecret is a wrapper around `kubernetes create secret\n// <resourceName>.\nfunc (kub *Kubectl) CreateSecret(secretType, name, namespace, args string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"creating secret %s in namespace %s\", name, namespace))\n\tkub.ExecShort(fmt.Sprintf(\"kubectl delete secret %s %s -n %s\", secretType, name, namespace))\n\treturn kub.ExecShort(fmt.Sprintf(\"kubectl create secret %s %s -n %s %s\", secretType, name, namespace, args))\n}\n\n// CopyFileToPod copies a file to a pod's file-system.\nfunc (kub *Kubectl) CopyFileToPod(namespace string, pod string, fromFile, toFile string) *CmdRes {\n\tkub.Logger().Debug(fmt.Sprintf(\"copyiong file %s to pod %s/%s:%s\", fromFile, namespace, pod, toFile))\n\treturn kub.Exec(fmt.Sprintf(\"%s cp %s %s/%s:%s\", KubectlCmd, fromFile, namespace, pod, toFile))\n}\n\n// ExecKafkaPodCmd executes shell command with arguments arg in the specified pod residing in the specified\n// namespace. It returns the stdout of the command that was executed.\n// The kafka producer and consumer scripts do not return error if command\n// leads to TopicAuthorizationException or any other error. Hence the\n// function needs to also take into account the stderr messages returned.\nfunc (kub *Kubectl) ExecKafkaPodCmd(namespace string, pod string, arg string) error {\n\tcommand := fmt.Sprintf(\"%s exec -n %s %s -- %s\", KubectlCmd, namespace, pod, arg)\n\tres := kub.Exec(command)\n\tif !res.WasSuccessful() {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed %s\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\n\tif strings.Contains(res.Stderr(), \"ERROR\") {\n\t\treturn fmt.Errorf(\"ExecKafkaPodCmd: command '%s' failed '%s'\",\n\t\t\tres.GetCmd(), res.OutputPrettyPrint())\n\t}\n\treturn nil\n}" | |
}
},
{
"check_id": "go.lang.security.audit.database.string-formatted-query.string-formatted-query",
"path": "test/helpers/kubectl.go",
"start": {
"line": 49,
"col": 2
},
"end": {
"line": 741,
"col": 2
},
"extra": {
"message": "String-formatted SQL query detected. This could lead to SQL injection if\nthe string is not sanitized properly. Audit this call to ensure the\nSQL is not manipulatable by external data.\n",
"metavars": {
"$OBJ": {
"start": {
"line": 740,
"col": 9,
"offset": 25681
},
"end": {
"line": 740,
"col": 12,
"offset": 25684
},
"abstract_content": "kub",
"unique_id": {
"type": "AST",
"md5sum": "4f488c7065cfbb1c6b2300ef4033052b"
}
},
"$FXN": {
"start": {
"line": 739,
"col": 13,
"offset": 25602
},
"end": {
"line": 739,
"col": 24,
"offset": 25613
},
"abstract_content": "fmt.Sprintf",
"unique_id": {
"type": "AST",
"md5sum": "ad1fa69d9897544ca352e048b2a3cf1d"
}
},
"$OTHER": {
"start": {
"line": 739,
"col": 2,
"offset": 25591
},
"end": {
"line": 739,
"col": 9,
"offset": 25598
},
"abstract_content": "command",
"unique_id": {
"type": "AST",
"md5sum": "22de25c79fec71b1caca4adfb91b6622"
}
},
"$QUERY": {
"start": {
"line": 49,
"col": 2,
"offset": 1261
},
"end": {
"line": 49,
"col": 12,
"offset": 1271
},
"abstract_content": "KubectlCmd",
"unique_id": {
"type": "id",
"value": "KubectlCmd",
"kind": "Global",
"sid": 16
}
}
},
"metadata": {
"owasp": "A1: Injection",
"cwe": "CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
"source-rule-url": "https://github.com/securego/gosec"
},
"severity": "WARNING",
"lines": "\tKubectlCmd = \"kubectl\"\n\tmanifestsPath = \"k8sT/manifests/\"\n\tkubeDNSLabel = \"k8s-app=kube-dns\"\n\n\t// DNSHelperTimeout is a predefined timeout value for K8s DNS commands. It\n\t// must be larger than 5 minutes because kubedns has a hardcoded resync\n\t// period of 5 minutes. We have experienced test failures because kubedns\n\t// needed this time to recover from a connection problem to kube-apiserver.\n\t// The kubedns resyncPeriod is defined at\n\t// https://github.com/kubernetes/dns/blob/80fdd88276adba36a87c4f424b66fdf37cd7c9a8/pkg/dns/dns.go#L53\n\tDNSHelperTimeout = 7 * time.Minute\n\n\t// CIIntegrationFlannel contains the constant to be used when flannel is\n\t// used in the CI.\n\tCIIntegrationFlannel = \"flannel\"\n\n\t// CIIntegrationEKS contains the constants to be used when running tests on EKS.\n\tCIIntegrationEKS = \"eks\"\n\n\t// CIIntegrationGKE contains the constants to be used when running tests on GKE.\n\tCIIntegrationGKE = \"gke\"\n\n\t// CIIntegrationKind contains the constant to be used when running tests on kind.\n\tCIIntegrationKind = \"kind\"\n\n\t// CIIntegrationMicrok8s contains the constant to be used when running tests on microk8s.\n\tCIIntegrationMicrok8s = \"microk8s\"\n\n\t// CIIntegrationMicrok8s is the value to set CNI_INTEGRATION when running with minikube.\n\tCIIntegrationMinikube = \"minikube\"\n\n\tLogGathererSelector = \"k8s-app=cilium-test-logs\"\n\tCiliumSelector = \"k8s-app=cilium\"\n)\n\nvar (\n\t// defaultHelmOptions are passed to helm in ciliumInstallHelm, unless\n\t// overridden by options passed in at invocation. In those cases, the test\n\t// has a specific need to override the option.\n\t// These defaults are made to match some environment variables in init(),\n\t// below. These overrides represent a desire to set the default for all\n\t// tests, instead of test-specific variations.\n\tdefaultHelmOptions = map[string]string{\n\t\t\"image.repository\": \"k8s1:5000/cilium/cilium-dev\",\n\t\t\"image.tag\": \"latest\",\n\t\t\"preflight.image.repository\": \"k8s1:5000/cilium/cilium-dev\", // Set again in init to match agent.image!\n\t\t\"preflight.image.tag\": \"latest\",\n\t\t\"operator.image.repository\": \"k8s1:5000/cilium/operator\",\n\t\t\"operator.image.tag\": \"latest\",\n\t\t\"hubble.relay.image.repository\": \"k8s1:5000/cilium/hubble-relay\",\n\t\t\"hubble.relay.image.tag\": \"latest\",\n\t\t\"debug.enabled\": \"true\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"true\",\n\t\t\"pprof.enabled\": \"true\",\n\t\t\"logSystemLoad\": \"true\",\n\t\t\"bpf.preallocateMaps\": \"true\",\n\t\t\"etcd.leaseTTL\": \"30s\",\n\t\t\"ipv4.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"true\",\n\t\t// \"extraEnv[0].name\": \"KUBE_CACHE_MUTATION_DETECTOR\",\n\t\t// \"extraEnv[0].value\": \"true\",\n\t\t\"bpf.masquerade\": \"true\",\n\t\t// Disable by default, so that 4.9 CI build does not panic due to\n\t\t// missing LRU support. 
On 4.19 and net-next we enable it with\n\t\t// kubeProxyReplacement=strict.\n\t\t\"sessionAffinity\": \"false\",\n\n\t\t// Enable embedded Hubble, both on unix socket and TCP port 4244.\n\t\t\"hubble.enabled\": \"true\",\n\t\t\"hubble.listenAddress\": \":4244\",\n\n\t\t// We need CNP node status to know when a policy is being enforced\n\t\t\"enableCnpStatusUpdates\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t}\n\n\tflannelHelmOverrides = map[string]string{\n\t\t\"flannel.enabled\": \"true\",\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t}\n\n\teksHelmOverrides = map[string]string{\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t\t\"cni.chainingMode\": \"aws-cni\",\n\t\t\"masquerade\": \"false\",\n\t\t\"tunnel\": \"disabled\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t}\n\n\tgkeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"nodeinit.reconfigureKubelet\": \"true\",\n\t\t\"nodeinit.removeCbrBridge\": \"true\",\n\t\t\"nodeinit.restartPods\": \"true\",\n\t\t\"cni.binPath\": \"/home/kubernetes/bin\",\n\t\t\"nodePort.mode\": \"snat\",\n\t\t\"gke.enabled\": \"true\",\n\t\t\"nativeRoutingCIDR\": \"10.0.0.0/8\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t\t\"devices\": \"\", // Override \"eth0 eth0\\neth0\"\n\t}\n\n\tmicrok8sHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"cni.confPath\": \"/var/snap/microk8s/current/args/cni-network\",\n\t\t\"cni.binPath\": \"/var/snap/microk8s/current/opt/cni/bin\",\n\t\t\"cni.customConf\": \"true\",\n\t\t\"daemon.runPath\": \"/var/snap/microk8s/current/var/run/cilium\",\n\t}\n\tminikubeHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"bpf.preallocateMaps\": \"false\",\n\t\t\"k8s.requireIPv4PodCIDR\": \"false\",\n\t}\n\tkindHelmOverrides = map[string]string{\n\t\t\"ipv6.enabled\": \"false\",\n\t\t\"hostFirewall\": \"false\",\n\t\t\"nodeinit.enabled\": \"true\",\n\t\t\"kubeProxyReplacement\": \"partial\",\n\t\t\"externalIPs.enabled\": \"true\",\n\t\t\"ipam.mode\": \"kubernetes\",\n\t}\n\n\t// helmOverrides allows overriding of cilium-agent options for\n\t// specific CI environment integrations.\n\t// The key must be a string consisting of lower case characters.\n\thelmOverrides = map[string]map[string]string{\n\t\tCIIntegrationFlannel: flannelHelmOverrides,\n\t\tCIIntegrationEKS: eksHelmOverrides,\n\t\tCIIntegrationGKE: gkeHelmOverrides,\n\t\tCIIntegrationKind: kindHelmOverrides,\n\t\tCIIntegrationMicrok8s: microk8sHelmOverrides,\n\t\tCIIntegrationMinikube: minikubeHelmOverrides,\n\t}\n\n\t// resourcesToClean is the list of resources which should be cleaned\n\t// from default namespace before tests are being run. It's not possible\n\t// to delete all resources as services like \"kubernetes\" must be\n\t// preserved. 
This helps reduce contamination between tests if tests\n\t// are leaking resources into the default namespace for some reason.\n\tresourcesToClean = []string{\n\t\t\"deployment\",\n\t\t\"daemonset\",\n\t\t\"rs\",\n\t\t\"rc\",\n\t\t\"statefulset\",\n\t\t\"pods\",\n\t\t\"netpol\",\n\t\t\"cnp\",\n\t\t\"cep\",\n\t}\n)\n\n// HelmOverride returns the value of a Helm override option for the currently\n// enabled CNI_INTEGRATION\nfunc HelmOverride(option string) string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif overrides, exists := helmOverrides[integration]; exists {\n\t\treturn overrides[option]\n\t}\n\treturn \"\"\n}\n\n// NativeRoutingEnabled returns true when native routing is enabled for a\n// particular CNI_INTEGRATION\nfunc NativeRoutingEnabled() bool {\n\ttunnelDisabled := HelmOverride(\"tunnel\") == \"disabled\"\n\tgkeEnabled := HelmOverride(\"gke.enabled\") == \"true\"\n\treturn tunnelDisabled || gkeEnabled\n}\n\nfunc Init() {\n\tif config.CiliumTestConfig.CiliumImage != \"\" {\n\t\tos.Setenv(\"CILIUM_IMAGE\", config.CiliumTestConfig.CiliumImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumTag != \"\" {\n\t\tos.Setenv(\"CILIUM_TAG\", config.CiliumTestConfig.CiliumTag)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorImage != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_IMAGE\", config.CiliumTestConfig.CiliumOperatorImage)\n\t}\n\n\tif config.CiliumTestConfig.CiliumOperatorTag != \"\" {\n\t\tos.Setenv(\"CILIUM_OPERATOR_TAG\", config.CiliumTestConfig.CiliumOperatorTag)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayImage != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_IMAGE\", config.CiliumTestConfig.HubbleRelayImage)\n\t}\n\n\tif config.CiliumTestConfig.HubbleRelayTag != \"\" {\n\t\tos.Setenv(\"HUBBLE_RELAY_TAG\", config.CiliumTestConfig.HubbleRelayTag)\n\t}\n\n\tif config.CiliumTestConfig.ProvisionK8s == false {\n\t\tos.Setenv(\"SKIP_K8S_PROVISION\", \"true\")\n\t}\n\n\t// Copy over envronment variables that are passed in.\n\tfor envVar, helmVar := range map[string]string{\n\t\t\"CILIUM_TAG\": \"image.tag\",\n\t\t\"CILIUM_IMAGE\": \"image.repository\",\n\t\t\"CILIUM_OPERATOR_TAG\": \"operator.image.tag\",\n\t\t\"CILIUM_OPERATOR_IMAGE\": \"operator.image.repository\",\n\t\t\"HUBBLE_RELAY_IMAGE\": \"hubble.relay.image.repository\",\n\t\t\"HUBBLE_RELAY_TAG\": \"hubble.relay.image.tag\",\n\t} {\n\t\tif v := os.Getenv(envVar); v != \"\" {\n\t\t\tdefaultHelmOptions[helmVar] = v\n\t\t}\n\t}\n\n\t// preflight must match the cilium agent image (that's the point)\n\tdefaultHelmOptions[\"preflight.image.repository\"] = defaultHelmOptions[\"image.repository\"]\n\tdefaultHelmOptions[\"preflight.image.tag\"] = defaultHelmOptions[\"image.tag\"]\n}\n\n// GetCurrentK8SEnv returns the value of K8S_VERSION from the OS environment.\nfunc GetCurrentK8SEnv() string { return os.Getenv(\"K8S_VERSION\") }\n\n// GetCurrentIntegration returns CI integration set up to run against Cilium.\nfunc GetCurrentIntegration() string {\n\tintegration := strings.ToLower(os.Getenv(\"CNI_INTEGRATION\"))\n\tif _, exists := helmOverrides[integration]; exists {\n\t\treturn integration\n\t}\n\treturn \"\"\n}\n\n// IsIntegration returns true when integration matches the configuration of\n// this test run\nfunc IsIntegration(integration string) bool {\n\treturn GetCurrentIntegration() == integration\n}\n\n// GetCiliumNamespace returns the namespace into which cilium should be\n// installed for this integration.\nfunc GetCiliumNamespace(integration string) string {\n\tswitch integration {\n\tcase 
CIIntegrationGKE:\n\t\treturn CiliumNamespaceGKE\n\tdefault:\n\t\treturn CiliumNamespaceDefault\n\t}\n}\n\n// Kubectl is a wrapper around an SSHMeta. It is used to run Kubernetes-specific\n// commands on the node which is accessible via the SSH metadata stored in its\n// SSHMeta.\ntype Kubectl struct {\n\tExecutor\n\t*serviceCache\n}\n\n// CreateKubectl initializes a Kubectl helper with the provided vmName and log\n// It marks the test as Fail if cannot get the ssh meta information or cannot\n// execute a `ls` on the virtual machine.\nfunc CreateKubectl(vmName string, log *logrus.Entry) (k *Kubectl) {\n\tif config.CiliumTestConfig.Kubeconfig == \"\" {\n\t\tnode := GetVagrantSSHMeta(vmName)\n\t\tif node == nil {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\t// This `ls` command is a sanity check, sometimes the meta ssh info is not\n\t\t// nil but new commands cannot be executed using SSH, tests failed and it\n\t\t// was hard to debug.\n\t\tres := node.ExecShort(\"ls /tmp/\")\n\t\tif !res.WasSuccessful() {\n\t\t\tginkgoext.Fail(fmt.Sprintf(\n\t\t\t\t\"Cannot execute ls command on vmName '%s'\", vmName), 1)\n\t\t\treturn nil\n\t\t}\n\t\tnode.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: node,\n\t\t}\n\t\tk.setBasePath()\n\t} else {\n\t\t// Prepare environment variables\n\t\t// NOTE: order matters and we want the KUBECONFIG from config to win\n\t\tvar environ []string\n\t\tif config.CiliumTestConfig.PassCLIEnvironment {\n\t\t\tenviron = append(environ, os.Environ()...)\n\t\t}\n\t\tenviron = append(environ, \"KUBECONFIG=\"+config.CiliumTestConfig.Kubeconfig)\n\n\t\t// Create the executor\n\t\texec := CreateLocalExecutor(environ)\n\t\texec.logger = log\n\n\t\tk = &Kubectl{\n\t\t\tExecutor: exec,\n\t\t}\n\t\tk.setBasePath()\n\t}\n\n\t// Make sure the namespace Cilium uses exists.\n\tif err := k.EnsureNamespaceExists(CiliumNamespace); err != nil {\n\t\tginkgoext.Failf(\"failed to ensure the namespace %s exists: %s\", CiliumNamespace, err)\n\t}\n\n\tres := k.Apply(ApplyOptions{FilePath: filepath.Join(k.BasePath(), manifestsPath, \"log-gatherer.yaml\"), Namespace: LogGathererNamespace})\n\tif !res.WasSuccessful() {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Cannot connect to k8s cluster, output:\\n%s\", res.CombineOutput().String()), 1)\n\t\treturn nil\n\t}\n\tif err := k.WaitforPods(LogGathererNamespace, \"-l \"+logGathererSelector(true), HelperTimeout); err != nil {\n\t\tginkgoext.Fail(fmt.Sprintf(\"Failed waiting for log-gatherer pods: %s\", err), 1)\n\t\treturn nil\n\t}\n\n\t// Clean any leftover resources in the default namespace\n\tk.CleanNamespace(DefaultNamespace)\n\n\treturn k\n}\n\n// DaemonSetIsReady validate that a DaemonSet is scheduled on all required\n// |