--- # Source: cilium/templates/cilium-agent-serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: name: "cilium" namespace: kube-system --- # Source: cilium/templates/cilium-operator-serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: name: "cilium-operator" namespace: kube-system --- # Source: cilium/templates/hubble-relay-serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: name: "hubble-relay" namespace: kube-system --- # Source: cilium/templates/hubble-ui-serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: name: "hubble-ui" namespace: kube-system --- # Source: cilium/templates/hubble-relay-client-tls-secret.yaml apiVersion: v1 kind: Secret metadata: name: hubble-relay-client-certs namespace: kube-system type: kubernetes.io/tls data: ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lRWXMwdmttbDUzOVdHMldMMklHb0c1ekFOQmdrcWhraUc5dzBCQVFzRkFEQWUKTVJ3d0dnWURWUVFERXhOb2RXSmliR1V0WTJFdVkybHNhWFZ0TG1sdk1CNFhEVEl4TURZd016QXpOVEF3TVZvWApEVEkwTURZd01qQXpOVEF3TVZvd0hqRWNNQm9HQTFVRUF4TVRhSFZpWW14bExXTmhMbU5wYkdsMWJTNXBiekNDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT2t1NnJGQTdjVFMzMnhaNkZvYnc5S2wKM3cyTzhZSXNYL0VMdUN5alR0bHF0VjQxQmI5ZzdpbVU4cUMrSkw4LzU5ZEhOdXI4WG1XUU85K3lzcHN2YllreApUZzNGUS9Hb3lHNGsvZ3pieUJFTFZiV0s1Y1Mrc1pIem9hZUZRZ1VHU0NLVTNHR21uZ3ZSRHEyREtSZHg3MnRICi9zY21QVm5od0F1cjJGd3hXRmVFeTVyUEs0dG9RcFkxakZVbjc1Yk9iSEU0dGxnUjE2MFhOZDlONXJPSVBtQ3oKaXNYQXJOQjJDZzFvUit2NjZyZ3p0N0ttYkdoV0RNNW1CRk9LMFg2bFArSnZmTjNhTzk0NHdtSlgyTytheXQyZgoyMHVpbkVCN05kMXZZMTc0Ukkxa0J3N1lBdUFpajAzS3R0elpBZWJVaXdBODVvMmxSTDRqVXBGOC9zcHYvc3NDCkF3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3IKQmdFRkJRY0RBakFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlF0S3RGZnAyQjRZbnhOaTFSSQpxVlBhSXFoSU16QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUEzMEVuaVNFbjFEUURpVjZzRjdLSUc2Ry9jSjQ2CjN0SWZzb3FVZStoMEI4NWNnY2pSaExHdGFYSUlmenQwQy8zcXA3Wklnd0c1aUVsTjAvdit3ZCtGSWYyY1BVcmQKaW5lQ1VUVFlyZXVUdmxPLzJONThZ
RGhLemdIT1l3NXZZUHRtZFYxVDNlYWR5N2Q3ZHUwdnUxdXRWWG5LMFdDVQpGcnpkNEF1QmlkOXNXR29na2s0alZlN2lXTFExYkE2dFlZR0RsMHZPN0sxeUhjaGVDcDBIckNEeHdiQU0xd2ZnCm1hMDZCSEZtTjhlYWZpQUlJemdESEZvYTdxR2NQVFYrdHZ4bk5LNUgzUE9RS1dSYW9FTy84dExnd1U1b2EwUDQKVW1ZZmliQ3czaVNDWjR3dG0vT0VGKzZNeDB4T0krdDlmWlJvNWxPR012SHZFc2ZsRnhBQW1SOGMrQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVekNDQWp1Z0F3SUJBZ0lSQU4wSGN1SEc1WTRrUTJ3cU9zc3hnOTh3RFFZSktvWklodmNOQVFFTEJRQXcKSGpFY01Cb0dBMVVFQXhNVGFIVmlZbXhsTFdOaExtTnBiR2wxYlM1cGJ6QWVGdzB5TVRBMk1ETXdNelV3TURGYQpGdzB5TkRBMk1ESXdNelV3TURGYU1DTXhJVEFmQmdOVkJBTU1HQ291YUhWaVlteGxMWEpsYkdGNUxtTnBiR2wxCmJTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPWjZ2dHRhcWNNdVBMVnYKd0RxRHNOYStVeTgwR1ZkTG00d2tRL3R6Uzd2OFBzR21GSTUxNzRMQ1pRVW1oRExuYU5zR3JlOE1SWHRaWkJtNApyTHlxUmdQQ2MzZ1hLK045YXpzaUVGQzlKRndvK040WktsaTdjcDFieEdML0x4OUZPM1BFY2VyZ0NRTW1CNUJaCkNCNkFodFpabERwVHdJUnpCZHQrSmtaTnBCMTdkRzhnU3JoVmpSUmg5TTBzMWptOXZMU0VjeXdRdUJKSjBpTTEKNWVXVjRyTWsxOEgxUGx6NTJPMkZzcVl4WUE1Qk84M2pZUXQ5aW00OUJ1TEZVQzNERkVpSitzK01DM1B5RjhNNQpKRTZydlFNaHFuTWE5eTg2Q1dHYUh4VFl5N1RMUXpLaWNybDV0QWRnclpvQnU2aXRFVW9ZbURheGJpOC80OVkyCm5YQW01Q01DQXdFQUFhT0JoakNCZ3pBT0JnTlZIUThCQWY4RUJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUIKQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0h3WURWUjBqQkJnd0ZvQVVMU3JSWDZkZwplR0o4VFl0VVNLbFQyaUtvU0RNd0l3WURWUjBSQkJ3d0dvSVlLaTVvZFdKaWJHVXRjbVZzWVhrdVkybHNhWFZ0CkxtbHZNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUFVWDQwZVZxR1NVc1gvZGFMaDZmSFZyR2JvYjRaNjBralYKWHRUeXpZYUFYN0JlOHlRK2p0THdjcFJ0S1hvdXJ3emxkYmphc0VjU1lNMkVLQzE0QUNzZkY2TjZvUnFNK3A5SwpVVnRiZmEvT1p0UU5QSmw4WmhQMHgvMWFkSDc4eFdsbmQxemJFUDBtbEI3VWU4MTRZK0FVa3pJSmxSK1NkVk5lCktTQVMwaE9YTUZwRlRWZUxtUU1pdytZYkNaSkRkM1lRWERXZUsyV2lWQWpPelM3ZHA5M0g4YWp4aytrWlRNNUcKYStJSXQ4Qm9yZ2ltWHNHY3NJV3FCZnJYbkxZakZlVnFZU2tNd2lkOWpSVURLTVZZbDlGZHF3cnFaZWgzT1V4RwpiQ2MvNzZPeUVlNXNqYUdRRkR3RW9BT0ExdGRsY0ZYMlJMaUV4SjVDb1p5aVF3SC9BR0UyCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBNW5xKzIxcXB3eTQ4dFcvQU9vT3cxcjVUTHpRWlYwdWJqQ1JEKzNOTHUvdyt3YVlVCmpuWHZnc0psQlNhRU11ZG8yd2F0N3d4RmUxbGtHYmlzdktwR0E4SnplQmNyNDMxck95SVFVTDBrWENqNDNoa3EKV0x0eW5WdkVZdjh2SDBVN2M4Ung2dUFKQXlZSGtGa0lIb0NHMWxtVU9sUEFoSE1GMjM0bVJrMmtIWHQwYnlCSwp1RldORkdIMHpTeldPYjI4dElSekxCQzRFa25TSXpYbDVaWGlzeVRYd2ZVK1hQblk3WVd5cGpGZ0RrRTd6ZU5oCkMzMktiajBHNHNWUUxjTVVTSW42ejR3TGMvSVh3emtrVHF1OUF5R3FjeHIzTHpvSllab2ZGTmpMdE10RE1xSnkKdVhtMEIyQ3RtZ0c3cUswUlNoaVlOckZ1THovajFqYWRjQ2JrSXdJREFRQUJBb0lCQVFDQytObWdXVU1ETVRFNgpnWWloTjlpMEQyL0I4VDg5UmkvY1VYZDRXcXA4NXZqYW5vQXIvSVRncjRzc2hCaTN2Snp2YWpwSkQ1WWtaRVhuCi8vUFpPb0dTL1ZzVXNVWTFmajREV25ZZkMzeXdDbkJ4OWFjRXlYQjJUNEdvS0E4bEcvaU83dUUxY1o1dGRaenkKd0VWRzRBNi9uQ1FKaVRKcmtkbVFRMzJJa2ZKaTlZSjRia2xFQzRtOE9CRjVTM1Boc1FGdUYyM0hacGJSbHJNegpDL3lVQTRVb2RucXovNTlxZ0FQT2xFSW5vOEhoUW9RVmhyaFBMclU4UDIxVktRRHdMTnJQUzg3T0lFUUVLYy9YCm9QTGRRRzExVlR1TDNUS3Zhd2w2Wm1teXJyREJaWHpGTjNZLzlwbmk5SHJPOFRTSGYyR3RUdk1FMXl1Q1FUZHcKMHFGaWZKT1JBb0dCQVA0cVdZRjFWbGlrWXRLOHU5ZlcrUXlRNStFV3QzLzM3NnRrZGxuRzExcFBKNkxrVVExTQpVYXJKUmZsYUIrUktNckdDZXU4UDJzUkZNOXlSNnl3TEFDZ29LMkZNalk4ZjBwMzFmR29aa2RacEw1V0pUTlRiCnp0UENtQVR2SjMwUnVPblNsNi9ocTRSQlZBVkVkM0F4SFJScnNLNklPR201b3dsdlBqYU85eElQQW9HQkFPZ2sKb1BDencvcjFpQkIvcnVSNkZDU3Z1WGc3U2swWVJBb09FTWxQNUhTSjZJa09CSitqaENHTzVCM0xwYWh3aVJSMQordElLVTB0aWd1UnEyTnltK2dEbUJPVFBPNUVTUG4yMlFCWFpoSFMrSTUrR3FmSWZGUllaM0VEaG43Mi9RTlJtCmhBVjdyTDBzalNrMkN5dmlIQVFZYm50cE1oYnB3QWxCUHR2SXhWQ3RBb0dCQUowaUI0WCt5ZXd4YlJ0R0xZZXEKVXZ5TzFaeVdFOHBjdVdLbjBIZCtkQU5VTG5nSldZYTBLVVZCL0MzRm1GMXQ5ZHRDaE1ncGNDR3BlditoZ3RnTwowUUVDbkpjaDRkNmJXUFhwSUFya1ZnSk5zMnBxb1FKMEJuRnF1OXRKRTR2Q0dpVU0ybFNWV3hsLzNmY0h0aU81CmFYWUhnZ3JWRXJOaVhrWmw3UHVEOVlISEFvR0JBTjZnK2lPOUVzOWpsM2pUS3VQODI4S1BMOXpKWjZsbUF5SnQKZU5YSG83M0w0RkZJTzFEcDNUYmR0WkdKa3pabHJtRU1BRjVjL3FMR04rZGRQTnJuMjh6dXkzU3Y5VjFjVk1mMAo5Nks4dDFYS1g1bUc2UWJGbmxCU3ZyTU9ia2JuMndHWHl6Y0M4NWQ4MldmVFo0VlBWRFR0UlU5ZmhHVUxDVnNXCjBuU3ZKK1FGQW9HQWNIK0p1ZGhPYklwVzkrYnVkV3pCOVFVanMy
V0pWYTBrdW9EelNHZVNjbFdZcC9KWHNOUWkKc2xCcVRBT1hubnJzVlhTTFlyUU9vajV3TzVYdDZjME9ucjdKVW9tVzloeDVBbzAwMDR6TkxsaTJ5V1JQWjRJTwpiLzh1WFJpcU5rd3BJTGR6aWcreEVBcnRoc3JCdklUWUtRZUR6UTdzOWhnWWd1QzZnd1N4SUpRPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= --- # Source: cilium/templates/hubble-server-secret.yaml apiVersion: v1 kind: Secret metadata: name: hubble-server-certs namespace: kube-system type: kubernetes.io/tls data: ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lRWXMwdmttbDUzOVdHMldMMklHb0c1ekFOQmdrcWhraUc5dzBCQVFzRkFEQWUKTVJ3d0dnWURWUVFERXhOb2RXSmliR1V0WTJFdVkybHNhWFZ0TG1sdk1CNFhEVEl4TURZd016QXpOVEF3TVZvWApEVEkwTURZd01qQXpOVEF3TVZvd0hqRWNNQm9HQTFVRUF4TVRhSFZpWW14bExXTmhMbU5wYkdsMWJTNXBiekNDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT2t1NnJGQTdjVFMzMnhaNkZvYnc5S2wKM3cyTzhZSXNYL0VMdUN5alR0bHF0VjQxQmI5ZzdpbVU4cUMrSkw4LzU5ZEhOdXI4WG1XUU85K3lzcHN2YllreApUZzNGUS9Hb3lHNGsvZ3pieUJFTFZiV0s1Y1Mrc1pIem9hZUZRZ1VHU0NLVTNHR21uZ3ZSRHEyREtSZHg3MnRICi9zY21QVm5od0F1cjJGd3hXRmVFeTVyUEs0dG9RcFkxakZVbjc1Yk9iSEU0dGxnUjE2MFhOZDlONXJPSVBtQ3oKaXNYQXJOQjJDZzFvUit2NjZyZ3p0N0ttYkdoV0RNNW1CRk9LMFg2bFArSnZmTjNhTzk0NHdtSlgyTytheXQyZgoyMHVpbkVCN05kMXZZMTc0Ukkxa0J3N1lBdUFpajAzS3R0elpBZWJVaXdBODVvMmxSTDRqVXBGOC9zcHYvc3NDCkF3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3IKQmdFRkJRY0RBakFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlF0S3RGZnAyQjRZbnhOaTFSSQpxVlBhSXFoSU16QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUEzMEVuaVNFbjFEUURpVjZzRjdLSUc2Ry9jSjQ2CjN0SWZzb3FVZStoMEI4NWNnY2pSaExHdGFYSUlmenQwQy8zcXA3Wklnd0c1aUVsTjAvdit3ZCtGSWYyY1BVcmQKaW5lQ1VUVFlyZXVUdmxPLzJONThZRGhLemdIT1l3NXZZUHRtZFYxVDNlYWR5N2Q3ZHUwdnUxdXRWWG5LMFdDVQpGcnpkNEF1QmlkOXNXR29na2s0alZlN2lXTFExYkE2dFlZR0RsMHZPN0sxeUhjaGVDcDBIckNEeHdiQU0xd2ZnCm1hMDZCSEZtTjhlYWZpQUlJemdESEZvYTdxR2NQVFYrdHZ4bk5LNUgzUE9RS1dSYW9FTy84dExnd1U1b2EwUDQKVW1ZZmliQ3czaVNDWjR3dG0vT0VGKzZNeDB4T0krdDlmWlJvNWxPR012SHZFc2ZsRnhBQW1SOGMrQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZVENDQWttZ0F3SUJBZ0lSQUx5SG1BNWRQSXZWS2tzc0cwbGhYbFV3RFFZSktvWklodmNOQVFFTEJRQXcKSGpFY01Cb0dBMVVFQXhNVGFIVmlZbXhsTFdOaExtTnBiR2wxYlM1cGJ6QWVGdzB5TVRBMk1ETXdNelV3TURGYQpGdzB5TkRBMk1ESXdNelV3TURGYU1Db3hLREFtQmdOVkJBTU1IeW91WkdWbVlYVnNkQzVvZFdKaWJHVXRaM0p3Cll5NWphV3hwZFcwdWFXOHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDOXNXTFoKOGZWNC9RRnZCcEJxNmh6NW5NcXVyOS9ZVkdPUkFVWmFmMVhnK2xQQ1JWdDZ4NWl2YzJJemYzeEo1U0M3azRMYQp3WFRLMHV0bldlYUNOdjBLUGVvajBxd082cE01MXk0VW81MUw4SEFYbk9qQ0ZNQkcvVEdtU0VoZUwvem9mamp4CkMyL0N3WDVyaERCTnBCZkZtYnl6V3MvZGFXL1BGUU1oZ2NEUTNsZW9uN2Z0dllMYW5sVDRBTEF3VXdLSHB0RC8KdEh1VlgwdGVidGxlc3BrNnM3V3lIMHRtR21VWGRRWmRQVGhsQTR1YVowdEUrbzNyNE9KV2xiVEZZWDNNYVo4agozNEdMM3orNmJDNU9yemdpQjF5bmpHVWlIaksyRnk2NzhsTTZwUkMwZ2ZwbXZUV3lMTllFc1o5eEZsbFZoWG5BCit5MDBmM2oyRlpQQU5YQmRBZ01CQUFHamdZMHdnWW93RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVcKTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQQpGQzBxMFYrbllIaGlmRTJMVkVpcFU5b2lxRWd6TUNvR0ExVWRFUVFqTUNHQ0h5b3VaR1ZtWVhWc2RDNW9kV0ppCmJHVXRaM0p3WXk1amFXeHBkVzB1YVc4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJcDZYMGh4cDFLQVJYdGIKNVdQcmdWeHNJMjZQQmczaGNJT3JRbWp3L3loVGcxS2gvZ2lkbzFnT1VBVkV1K2pZcWpVbVZHT3pyL3FjemJlVQprWUY4MW9OYVhEUjZnLzZ3T0tGbUdmeVBQNWtweUhwZjkyZXVPWDNQR0JEbGhMei9LZ1pNK0szUDdmQktCUWVzCjErWFRFbmtXSVpLSzFHVzhSMkxvRDN5QmRTdHAzejNyRmpQQm1mNktCVUJ2eldmWmtIYnBDNEZsc2VGRUNhblUKaUxXa1VuVFQ2NkV1R3NtSHZiRHp6UDhyczdRZFlEOEtGT2FnNExlZXFGcmNmTHFBOGg1cmlJZzhUNWczNVVXbgpHa2dqOWVXQmtOYW9pU1ZQaXo2b0NVWWRuV05MTXhJcnVjVk1yVFlOdmpwNk5nQlpwOFZOcXVPUDh6QkQzWTBICi8vdXp4L289Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdmJGaTJmSDFlUDBCYndhUWF1b2MrWnpLcnEvZjJGUmprUUZHV245VjRQcFR3a1ZiCmVzZVlyM05pTTM5OFNlVWd1NU9DMnNGMHl0THJaMW5tZ2piOUNqM3FJOUtzRHVxVE9kY3VGS09kUy9Cd0Y1em8Kd2hUQVJ2MHhwa2hJWGkvODZINDQ4UXR2d3NGK2E0UXdUYVFYeFptOHMxclAzV2x2enhVRElZSEEwTjVYcUorMwo3YjJDMnA1VStBQ3dNRk1DaDZiUS83UjdsVjlMWG03WlhyS1pPck8xc2g5TFpocGxGM1VHWFQwNFpRT0xtbWRMClJQcU42K0RpVnBXMHhXRjl6R21mSTkrQmk5OC91bXd1VHE4NElnZGNwNHhsSWg0eXRoY3V1L0pUT3FVUXRJSDYKWnIwMXNpeldCTEdmY1JaWlZZVjV3UHN0Tkg5NDloV1R3RFZ3WFFJREFRQUJBb0lCQVFDVTgyWWdweHJPcnpSaAovdkNGMDJGL0szd0hvWGlPd08wUjNzTmtxbG8vOTdDdmVwQ3puMmtvVTkwSGZSS1dYOEhOS0s1Mm9iUFkwN2dmCkJudTNRZ0I5KzFJUVlRdlVCcVFtcGUyOExTR0ZrTDRqL0lUVWFVRDJQVGl3VGsvdERaZDA1REJJTG1mS3NPOVkKODRxT1hNeUIwQTlaY25TSWNwTm1NeVNyT2R5eTVKbTF1UDJQUlJvUVJaZUZLMmx3VktUQ1pydlA5RnZRSE1UVgpBZjRUbk9iWHl4cVBIclU1bzZWdTVTMDN6YlZlUFBHcHB1SUVrd3U3enNkNXNMQlowRXdNampmTHBUYlZORWlLCkRvUis5T3RwSVcvODRwQ2MyU0FhWldoUzdmaW5UK0V6NzJmeVAvejFNaDloREJGK29oeGRkZlFkY3EzM28wL1MKRjJnQW5QUkJBb0dCQU9oaFZLdXhjaUxFRFhrR0sxdHlpdzFjVkk3QUs1aFNsWW5lR3k4V0xINXk1bm50Y1BNSAo0bEY4Z3c0elk1aklmZU1HN091NGJyTnQrZ21ybkU4NDBpdTZCdk02Q2N0RjZNMzBnbDdEZjNUVktBQ3NGNGd3CmtqeWdLUmkwQllMaVd4VDdiNWZQSzk5dE9wOTFGZXJNLzJUb2Jpcm5CUGZIckNKcHBkWFl2VzF4QW9HQkFORDUKVHFTZlZoUkF1NDEwdWZabUJ5dUZuUk9Kc05ycnllZkcrNFlvejRTaDVTdHd2aHBtOWN0S2lzRkYvSzNONktZOQpjYUVrY1JHSUxvalZIbGtkYkZNZUFvVWFHVVp6Zzg5aEZRQzE3OE1tbW5SNGhyTWNGMVlHSkpnM005dllSMlRYCnpwMitCRjFWRitzRksvUGRlK3VNYkxyTWszWENzNDJGZGY2Vm1LdXRBb0dBYlpieE1tTmx1MFdRUVFJSzBuNnIKV3lxdTZmOXd6ODRxbmJFMFNybC9WRWJsc0p5c3hzQnlHQkhMVUpnbG9UajBQV3ZYc2JieDVnYTRnM1QrSzFKVgpzNjl3ZGFacVZnWHJLNjNqQ2NQU3lRdDVhUSsvclVlU3Y2cWNsdnpYR0ltZGdNQ3ZPUDNINS8rQVJSRVh1L3JmCmV2d3VjR2N3clpYcDlQU3o4NnphY05FQ2dZRUFzWFhsaVBzZWVGaDRiU25nK2hPdXB4RHlVVE11a3RxRlBXVEgKR0NqMEFTZ1NtdzlGQU5yS2hIYXEybko2bjRZcXVYR1lXMFc5Rnl4VWlES2pjUDRCalp3QWU5T3IveDM5bzFCSQpLN0JWQ1o1d3I5UGlYNXZOWFdvZURYMWpkcUIyRUhjTHFQMUNhNWZsbGRsd2Q4bHdnSmxoaEVoU09VQzhoY1ArCmJkSFFNb2tDZ1lCWlBSTWhaUmc3SzlYVlV6dWFZZ3NrdmlYN05D
QmhLMTYzb3NVbndaMlBWNCtPYllORUF2YzgKaGVZVmx1VlhGcE9QUUtoRWwwVnVvREZCTjBSNVhsMzc2aW4xZ0cvRmowQng1UnpzQkppQ0JuSDU1K3hOSEtNVgpDdktDQXNEWXEreTN5UGRrdkVyb0phbzdDeWxRSWcrRzV2WHphS09VZ1JkY2t0U2k5cHowTXc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= --- # Source: cilium/templates/cilium-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: cilium-config namespace: kube-system data: # Identity allocation mode selects how identities are shared between cilium # nodes by setting how they are stored. The options are "crd" or "kvstore". # - "crd" stores identities in kubernetes as CRDs (custom resource definition). # These can be queried with: # kubectl get ciliumid # - "kvstore" stores identities in a kvstore, etcd or consul, that is # configured below. Cilium versions before 1.6 supported only the kvstore # backend. Upgrades from these older cilium versions should continue using # the kvstore by commenting out the identity-allocation-mode below, or # setting it to "kvstore". identity-allocation-mode: crd cilium-endpoint-gc-interval: "5m0s" # If you want to run cilium in debug mode change this value to true debug: "false" # The agent can be put into the following three policy enforcement modes # default, always and never. # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes enable-policy: "default" # If you want metrics enabled in all of your Cilium agents, set the port for # which the Cilium agents will have their metrics exposed. # This option deprecates the "prometheus-serve-addr" in the # "cilium-metrics-config" ConfigMap # NOTE that this will open the port on ALL nodes where Cilium pods are # scheduled. prometheus-serve-addr: ":9090" # Port to expose Envoy metrics (e.g. "9095"). Envoy metrics listener will be disabled if this # field is not set. proxy-prometheus-port: "9095" # If you want metrics enabled in cilium-operator, set the port for # which the Cilium Operator will have their metrics exposed. 
# NOTE that this will open the port on the nodes where Cilium operator pod # is scheduled. operator-prometheus-serve-addr: ":6942" enable-metrics: "true" # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 # address. enable-ipv4: "true" # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 # address. enable-ipv6: "false" # Users who wish to specify their own custom CNI configuration file must set # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. custom-cni-conf: "false" enable-bpf-clock-probe: "true" # If you want cilium monitor to aggregate tracing for packets, set this level # to "low", "medium", or "maximum". The higher the level, the fewer packets # that will be seen in monitor output. monitor-aggregation: medium # The monitor aggregation interval governs the typical time between monitor # notification events for each allowed connection. # # Only effective when monitor aggregation is set to "medium" or higher. monitor-aggregation-interval: 5s # The monitor aggregation flags determine which TCP flags, upon the # first observation, cause monitor notifications to be generated. # # Only effective when monitor aggregation is set to "medium" or higher. monitor-aggregation-flags: all # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. bpf-map-dynamic-size-ratio: "0.0025" # bpf-policy-map-max specifies the maximum number of entries in endpoint # policy map (per endpoint) bpf-policy-map-max: "16384" # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, # backend and affinity maps. bpf-lb-map-max: "65536" # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass # optimization for nodeport reverse NAT handling. # Pre-allocation of map entries allows per-packet latency to be reduced, at # the expense of up-front memory allocation for the entries in the maps. 
The # default value below will minimize memory usage in the default installation; # users who are sensitive to latency may consider setting this to "true". # # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore # this option and behave as though it is set to "true". # # If this value is modified, then during the next Cilium startup the restore # of existing endpoints and tracking of ongoing connections may be disrupted. # As a result, reply packets may be dropped and the load-balancing decisions # for established connections may change. # # If this option is set to "false" during an upgrade from 1.3 or earlier to # 1.4 or later, then it may cause one-time disruptions during the upgrade. preallocate-bpf-maps: "false" # Regular expression matching compatible Istio sidecar istio-proxy # container image names sidecar-istio-proxy-image: "cilium/istio_proxy" # Name of the cluster. Only relevant when building a mesh of clusters. cluster-name: default # Unique ID of the cluster. Must be unique across all connected clusters and # in the range of 1 to 255. Only relevant when building a mesh of clusters. 
cluster-id: "" # Encapsulation mode for communication between nodes # Possible values: # - disabled # - vxlan (default) # - geneve tunnel: vxlan # Enables L7 proxy for L7 policy enforcement and visibility enable-l7-proxy: "true" # wait-bpf-mount makes init container wait until bpf filesystem is mounted wait-bpf-mount: "false" enable-ipv4-masquerade: "true" enable-ipv6-masquerade: "true" enable-bpf-masquerade: "true" enable-xt-socket-fallback: "true" install-iptables-rules: "true" install-no-conntrack-iptables-rules: "false" auto-direct-node-routes: "false" enable-bandwidth-manager: "true" enable-local-redirect-policy: "false" kube-proxy-replacement: "strict" kube-proxy-replacement-healthz-bind-address: "" enable-health-check-nodeport: "true" node-port-bind-protection: "true" enable-auto-protect-node-port-range: "true" enable-session-affinity: "true" enable-endpoint-health-checking: "true" enable-health-checking: "true" enable-well-known-identities: "false" enable-remote-node-identity: "true" operator-api-serve-addr: "127.0.0.1:9234" # Enable Hubble gRPC service. enable-hubble: "true" # UNIX domain socket for Hubble server to listen to. hubble-socket-path: "/var/run/cilium/hubble.sock" # An additional address for Hubble server to listen to (e.g. ":4244"). hubble-listen-address: ":4244" hubble-disable-tls: "false" hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt ipam: "cluster-pool" cluster-pool-ipv4-cidr: "10.0.0.0/8" cluster-pool-ipv4-mask-size: "24" disable-cnp-status-updates: "true" --- # Source: cilium/templates/hubble-ca-configmap.yaml # NOTE: the hubble-ca-cert ConfigMap is deprecated and will be removed in v1.11 # The Hubble CA certificate can be found in both the hubble-server-certs and # hubble-relay-client-certs Secrets under the ca.crt key. 
apiVersion: v1 kind: ConfigMap metadata: name: hubble-ca-cert namespace: kube-system data: ca.crt: |- -----BEGIN CERTIFICATE----- MIIDJzCCAg+gAwIBAgIQYs0vkml539WG2WL2IGoG5zANBgkqhkiG9w0BAQsFADAe MRwwGgYDVQQDExNodWJibGUtY2EuY2lsaXVtLmlvMB4XDTIxMDYwMzAzNTAwMVoX DTI0MDYwMjAzNTAwMVowHjEcMBoGA1UEAxMTaHViYmxlLWNhLmNpbGl1bS5pbzCC ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOku6rFA7cTS32xZ6Fobw9Kl 3w2O8YIsX/ELuCyjTtlqtV41Bb9g7imU8qC+JL8/59dHNur8XmWQO9+yspsvbYkx Tg3FQ/GoyG4k/gzbyBELVbWK5cS+sZHzoaeFQgUGSCKU3GGmngvRDq2DKRdx72tH /scmPVnhwAur2FwxWFeEy5rPK4toQpY1jFUn75bObHE4tlgR160XNd9N5rOIPmCz isXArNB2Cg1oR+v66rgzt7KmbGhWDM5mBFOK0X6lP+JvfN3aO944wmJX2O+ayt2f 20uinEB7Nd1vY174RI1kBw7YAuAij03KttzZAebUiwA85o2lRL4jUpF8/spv/ssC AwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr BgEFBQcDAjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtKtFfp2B4YnxNi1RI qVPaIqhIMzANBgkqhkiG9w0BAQsFAAOCAQEA30EniSEn1DQDiV6sF7KIG6G/cJ46 3tIfsoqUe+h0B85cgcjRhLGtaXIIfzt0C/3qp7ZIgwG5iElN0/v+wd+FIf2cPUrd ineCUTTYreuTvlO/2N58YDhKzgHOYw5vYPtmdV1T3eady7d7du0vu1utVXnK0WCU Frzd4AuBid9sWGogkk4jVe7iWLQ1bA6tYYGDl0vO7K1yHcheCp0HrCDxwbAM1wfg ma06BHFmN8eafiAIIzgDHFoa7qGcPTV+tvxnNK5H3POQKWRaoEO/8tLgwU5oa0P4 UmYfibCw3iSCZ4wtm/OEF+6Mx0xOI+t9fZRo5lOGMvHvEsflFxAAmR8c+A== -----END CERTIFICATE----- --- # Source: cilium/templates/hubble-relay-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: hubble-relay-config namespace: kube-system data: config.yaml: | peer-service: unix:///var/run/cilium/hubble.sock listen-address: :4245 dial-timeout: retry-timeout: sort-buffer-len-max: sort-buffer-drain-timeout: tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt disable-server-tls: true --- # Source: cilium/templates/hubble-ui-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: hubble-ui-envoy namespace: kube-system data: envoy.yaml: | static_resources: listeners: - name: 
listener_hubble_ui address: socket_address: address: 0.0.0.0 port_value: 8081 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: auto stat_prefix: ingress_http route_config: name: local_route virtual_hosts: - name: local_service domains: ["*"] routes: - match: prefix: "/api/" route: cluster: backend prefix_rewrite: "/" timeout: 0s max_stream_duration: grpc_timeout_header_max: 0s - match: prefix: "/" route: cluster: frontend cors: allow_origin_string_match: - prefix: "*" allow_methods: GET, PUT, DELETE, POST, OPTIONS allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout max_age: "1728000" expose_headers: grpc-status,grpc-message http_filters: - name: envoy.filters.http.grpc_web - name: envoy.filters.http.cors - name: envoy.filters.http.router clusters: - name: frontend connect_timeout: 0.25s type: strict_dns lb_policy: round_robin load_assignment: cluster_name: frontend endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8080 - name: backend connect_timeout: 0.25s type: logical_dns lb_policy: round_robin http2_protocol_options: {} load_assignment: cluster_name: backend endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8090 --- # Source: cilium/templates/cilium-agent-clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium rules: - apiGroups: - networking.k8s.io resources: - networkpolicies verbs: - get - list - watch - apiGroups: - discovery.k8s.io resources: - endpointslices verbs: - get - list - watch - apiGroups: - "" resources: - namespaces - services - nodes - endpoints verbs: - get - list - watch - apiGroups: - "" resources: - pods - 
pods/finalizers verbs: - get - list - watch - update - delete - apiGroups: - "" resources: - nodes verbs: - get - list - watch - update - apiGroups: - "" resources: - nodes - nodes/status verbs: - patch - apiGroups: - apiextensions.k8s.io resources: - customresourcedefinitions verbs: # Deprecated for removal in v1.10 - create - list - watch - update # This is used when validating policies in preflight. This will need to stay # until we figure out how to avoid "get" inside the preflight, and then # should be removed ideally. - get - apiGroups: - cilium.io resources: - ciliumnetworkpolicies - ciliumnetworkpolicies/status - ciliumnetworkpolicies/finalizers - ciliumclusterwidenetworkpolicies - ciliumclusterwidenetworkpolicies/status - ciliumclusterwidenetworkpolicies/finalizers - ciliumendpoints - ciliumendpoints/status - ciliumendpoints/finalizers - ciliumnodes - ciliumnodes/status - ciliumnodes/finalizers - ciliumidentities - ciliumidentities/finalizers - ciliumlocalredirectpolicies - ciliumlocalredirectpolicies/status - ciliumlocalredirectpolicies/finalizers - ciliumegressnatpolicies verbs: - '*' --- # Source: cilium/templates/cilium-operator-clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cilium-operator rules: - apiGroups: - "" resources: # to automatically delete [core|kube]dns pods so that they start being # managed by Cilium - pods verbs: - get - list - watch - delete - apiGroups: - discovery.k8s.io resources: - endpointslices verbs: - get - list - watch - apiGroups: - "" resources: - services verbs: - get - list - watch - apiGroups: - "" resources: # to perform LB IP allocation for BGP - services/status verbs: - update - apiGroups: - "" resources: # to perform the translation of a CNP that contains `ToGroup` to its endpoints - services - endpoints # to check apiserver connectivity - namespaces verbs: - get - list - watch - apiGroups: - cilium.io resources: - ciliumnetworkpolicies - ciliumnetworkpolicies/status - 
ciliumnetworkpolicies/finalizers - ciliumclusterwidenetworkpolicies - ciliumclusterwidenetworkpolicies/status - ciliumclusterwidenetworkpolicies/finalizers - ciliumendpoints - ciliumendpoints/status - ciliumendpoints/finalizers - ciliumnodes - ciliumnodes/status - ciliumnodes/finalizers - ciliumidentities - ciliumidentities/status - ciliumidentities/finalizers - ciliumlocalredirectpolicies - ciliumlocalredirectpolicies/status - ciliumlocalredirectpolicies/finalizers verbs: - '*' - apiGroups: - apiextensions.k8s.io resources: - customresourcedefinitions verbs: - create - get - list - update - watch # For cilium-operator running in HA mode. # # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election # between multiple running instances. # The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less # common and fewer objects in the cluster watch "all Leases". - apiGroups: - coordination.k8s.io resources: - leases verbs: - create - get - update --- # Source: cilium/templates/hubble-relay-clusterrole.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: hubble-relay rules: - apiGroups: - "" resources: - componentstatuses - endpoints - namespaces - nodes - pods - services verbs: - get - list - watch --- # Source: cilium/templates/hubble-ui-clusterrole.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: hubble-ui rules: - apiGroups: - networking.k8s.io resources: - networkpolicies verbs: - get - list - watch - apiGroups: - "" resources: - componentstatuses - endpoints - namespaces - nodes - pods - services verbs: - get - list - watch - apiGroups: - apiextensions.k8s.io resources: - customresourcedefinitions verbs: - get - list - watch - apiGroups: - cilium.io resources: - "*" verbs: - get - list - watch --- # Source: cilium/templates/cilium-agent-clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium 
roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium subjects: - kind: ServiceAccount name: "cilium" namespace: kube-system --- # Source: cilium/templates/cilium-operator-clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: cilium-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cilium-operator subjects: - kind: ServiceAccount name: "cilium-operator" namespace: kube-system --- # Source: cilium/templates/hubble-relay-clusterrolebinding.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: hubble-relay roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: hubble-relay subjects: - kind: ServiceAccount namespace: kube-system name: "hubble-relay" --- # Source: cilium/templates/hubble-ui-clusterrolebinding.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: hubble-ui roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: hubble-ui subjects: - kind: ServiceAccount namespace: kube-system name: "hubble-ui" --- # Source: cilium/templates/cilium-agent-service.yaml kind: Service apiVersion: v1 metadata: name: cilium-agent namespace: kube-system annotations: prometheus.io/scrape: 'true' prometheus.io/port: "9095" labels: k8s-app: cilium spec: clusterIP: None type: ClusterIP ports: - name: envoy-metrics port: 9095 protocol: TCP targetPort: envoy-metrics selector: k8s-app: cilium --- # Source: cilium/templates/hubble-relay-service.yaml kind: Service apiVersion: v1 metadata: name: hubble-relay namespace: kube-system labels: k8s-app: hubble-relay spec: type: ClusterIP selector: k8s-app: hubble-relay ports: - protocol: TCP port: 80 targetPort: 4245 --- # Source: cilium/templates/hubble-ui-service.yaml kind: Service apiVersion: v1 metadata: name: hubble-ui labels: k8s-app: hubble-ui namespace: kube-system spec: selector: k8s-app: hubble-ui ports: - name: http port: 80 targetPort: 8081 
# (tail of the preceding Service definition, whose start is above this chunk)
  type: ClusterIP
---
# Source: cilium/templates/cilium-agent-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: cilium
  name: cilium
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: cilium
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/port: "9090"
        prometheus.io/scrape: "true"
        # This annotation plus the CriticalAddonsOnly toleration makes
        # cilium to be a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
        # NOTE(review): this annotation is deprecated and ignored since
        # Kubernetes 1.16; priorityClassName below already provides the
        # guarantee. Kept to match the rendered chart output — confirm
        # before removing.
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: cilium
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
            # NOTE(review): beta.kubernetes.io/os is deprecated; kept for
            # compatibility with older kubelets — confirm before removing.
            - matchExpressions:
              - key: beta.kubernetes.io/os
                operator: In
                values:
                - linux
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values:
                - cilium
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - --config-dir=/tmp/cilium/config-map
        command:
        - cilium-agent
        startupProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 24
          periodSeconds: 2
          successThreshold: 1
        livenessProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 10
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 3
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        - name: CILIUM_CNI_CHAINING_MODE
          valueFrom:
            configMapKeyRef:
              key: cni-chaining-mode
              name: cilium-config
              optional: true
        - name: CILIUM_CUSTOM_CNI_CONF
          valueFrom:
            configMapKeyRef:
              key: custom-cni-conf
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "control-plane.minikube.internal"
        - name: KUBERNETES_SERVICE_PORT
          value: "8443"
        image: "quay.io/cilium/cilium:v1.10.0@sha256:587627d909ffe0418c0bd907516496844867a21812946af82096d367760e4c1e"
        imagePullPolicy: IfNotPresent
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
              - "--enable-debug=false"
              - "--cni-exclusive=true"
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        name: cilium-agent
        ports:
        - containerPort: 9090
          hostPort: 9090
          name: prometheus
          protocol: TCP
        - containerPort: 9095
          hostPort: 9095
          name: envoy-metrics
          protocol: TCP
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - SYS_MODULE
          privileged: true
        volumeMounts:
        - mountPath: /sys/fs/bpf
          name: bpf-maps
        - mountPath: /var/run/cilium
          name: cilium-run
        - mountPath: /host/opt/cni/bin
          name: cni-path
        - mountPath: /host/etc/cni/net.d
          name: etc-cni-netd
        - mountPath: /var/lib/cilium/clustermesh
          name: clustermesh-secrets
          readOnly: true
        - mountPath: /tmp/cilium/config-map
          name: cilium-config-path
          readOnly: true
        # Needed to be able to load kernel modules
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /var/lib/cilium/tls/hubble
          name: hubble-tls
          readOnly: true
      hostNetwork: true
      initContainers:
      - command:
        - /init-container.sh
        env:
        - name: CILIUM_ALL_STATE
          valueFrom:
            configMapKeyRef:
              key: clean-cilium-state
              name: cilium-config
              optional: true
        - name: CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              key: clean-cilium-bpf-state
              name: cilium-config
              optional: true
        - name: CILIUM_WAIT_BPF_MOUNT
          valueFrom:
            configMapKeyRef:
              key: wait-bpf-mount
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "control-plane.minikube.internal"
        - name: KUBERNETES_SERVICE_PORT
          value: "8443"
        image: "quay.io/cilium/cilium:v1.10.0@sha256:587627d909ffe0418c0bd907516496844867a21812946af82096d367760e4c1e"
        imagePullPolicy: IfNotPresent
        name: clean-cilium-state
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
          privileged: true
        volumeMounts:
        - mountPath: /sys/fs/bpf
          name: bpf-maps
          mountPropagation: HostToContainer
        - mountPath: /var/run/cilium
          name: cilium-run
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      restartPolicy: Always
      priorityClassName: system-node-critical
      # NOTE(review): serviceAccount is deprecated in favor of
      # serviceAccountName; both kept to match the rendered chart output.
      serviceAccount: "cilium"
      serviceAccountName: "cilium"
      terminationGracePeriodSeconds: 1
      tolerations:
      - operator: Exists
      volumes:
      # To keep state between restarts / upgrades
      - hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
        name: cilium-run
      # To keep state between restarts / upgrades for bpf maps
      - hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
        name: bpf-maps
      # To install cilium cni plugin in the host
      - hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
        name: cni-path
      # To install cilium cni configuration in the host
      - hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
        name: etc-cni-netd
      # To be able to load kernel modules
      - hostPath:
          path: /lib/modules
        name: lib-modules
      # To access iptables concurrently with other processes (e.g. kube-proxy)
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          defaultMode: 420
          optional: true
          secretName: cilium-clustermesh
      # To read the configuration from the config map
      - configMap:
          name: cilium-config
        name: cilium-config-path
      - name: hubble-tls
        projected:
          sources:
          - secret:
              name: hubble-server-certs
              items:
              - key: ca.crt
                path: client-ca.crt
              - key: tls.crt
                path: server.crt
              - key: tls.key
                path: server.key
              optional: true
---
# Source: cilium/templates/cilium-operator-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    io.cilium/app: operator
    name: cilium-operator
  name: cilium-operator
  namespace: kube-system
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: 2
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/port: "6942"
        prometheus.io/scrape: "true"
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: io.cilium/app
                operator: In
                values:
                - operator
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        command:
        - cilium-operator-generic
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "control-plane.minikube.internal"
        - name: KUBERNETES_SERVICE_PORT
          value: "8443"
        image: "quay.io/cilium/operator-generic:v1.10.0@sha256:65143311a62a95dbe23c69ff2f624e0fdf030eb225e6375d889da66a955dd828"
        imagePullPolicy: IfNotPresent
        name: cilium-operator
        ports:
        - containerPort: 6942
          hostPort: 6942
          name: prometheus
          protocol: TCP
        livenessProbe:
          httpGet:
            host: '127.0.0.1'
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        volumeMounts:
        - mountPath: /tmp/cilium/config-map
          name: cilium-config-path
          readOnly: true
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccount: "cilium-operator"
      serviceAccountName: "cilium-operator"
      tolerations:
      - operator: Exists
      volumes:
      # To read the configuration from the config map
      - configMap:
          name: cilium-config
        name: cilium-config-path
---
# Source: cilium/templates/hubble-relay-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hubble-relay
  labels:
    k8s-app: hubble-relay
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: hubble-relay
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      # annotations is intentionally empty in the rendered chart output
      annotations:
      labels:
        k8s-app: hubble-relay
    spec:
      # Relay must be co-scheduled with a cilium agent pod to reach the
      # hubble socket on the node.
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "k8s-app"
                operator: In
                values:
                - cilium
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: hubble-relay
        image: "quay.io/cilium/hubble-relay:v1.10.0@sha256:e92e6778c71aa9e181618d61e9403761ad061c3960a9203aa2cf8e6cde95c9d7"
        imagePullPolicy: IfNotPresent
        command:
        - hubble-relay
        args:
        - serve
        ports:
        - name: grpc
          containerPort: 4245
        readinessProbe:
          tcpSocket:
            port: grpc
        livenessProbe:
          tcpSocket:
            port: grpc
        volumeMounts:
        - mountPath: /var/run/cilium
          name: hubble-sock-dir
          readOnly: true
        - mountPath: /etc/hubble-relay
          name: config
          readOnly: true
        - mountPath: /var/lib/hubble-relay/tls
          name: tls
          readOnly: true
      restartPolicy: Always
      serviceAccount: "hubble-relay"
      serviceAccountName: "hubble-relay"
      terminationGracePeriodSeconds: 0
      volumes:
      - configMap:
          name: hubble-relay-config
          items:
          - key: config.yaml
            path: config.yaml
        name: config
      - hostPath:
          path: /var/run/cilium
          type: Directory
        name: hubble-sock-dir
      - projected:
          sources:
          - secret:
              name: hubble-relay-client-certs
              items:
              - key: ca.crt
                path: hubble-server-ca.crt
              - key: tls.crt
                path: client.crt
              - key: tls.key
                path: client.key
        name: tls
---
# Source: cilium/templates/hubble-ui-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  namespace: kube-system
  labels:
    k8s-app: hubble-ui
  name: hubble-ui
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: hubble-ui
  template:
    metadata:
      # annotations is intentionally empty in the rendered chart output
      annotations:
      labels:
        k8s-app: hubble-ui
    spec:
      securityContext:
        runAsUser: 1001
      serviceAccount: "hubble-ui"
      serviceAccountName: "hubble-ui"
      containers:
      - name: frontend
        image: "quay.io/cilium/hubble-ui:v0.7.9@sha256:e0e461c680ccd083ac24fe4f9e19e675422485f04d8720635ec41f2ba9e5562c"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          name: http
        resources: {}
      - name: backend
        image: "quay.io/cilium/hubble-ui-backend:v0.7.9@sha256:632c938ef6ff30e3a080c59b734afb1fb7493689275443faa1435f7141aabe76"
        imagePullPolicy: IfNotPresent
        env:
        - name: EVENTS_SERVER_PORT
          value: "8090"
        - name: FLOWS_API_ADDR
          value: "hubble-relay:80"
        ports:
        - containerPort: 8090
          name: grpc
        resources: {}
      - name: proxy
        image: "docker.io/envoyproxy/envoy:v1.18.2@sha256:e8b37c1d75787dd1e712ff389b0d37337dc8a174a63bed9c34ba73359dc67da7"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8081
          name: http
        resources: {}
        command: ["envoy"]
        args: ["-c", "/etc/envoy.yaml", "-l", "info"]
        volumeMounts:
        - name: hubble-ui-envoy-yaml
          mountPath: /etc/envoy.yaml
          subPath: envoy.yaml
      volumes:
      - name: hubble-ui-envoy-yaml
        configMap:
          name: hubble-ui-envoy