-------------------------------------------------------------------------------
NGINX Ingress controller
Release: 0.30.0
Build: git-7e65b90c4
Repository: https://github.com/kubernetes/ingress-nginx
nginx version: nginx/1.17.8
-------------------------------------------------------------------------------
W0301 00:31:38.725688 8 flags.go:260] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)
W0301 00:31:38.725769 8 client_config.go:543] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
I0301 00:31:38.725947 8 main.go:193] Creating API client for https://10.96.0.1:443
I0301 00:31:38.726859 8 main.go:213] Trying to discover Kubernetes version
I0301 00:31:38.727392 8 main.go:222] Unexpected error discovering Kubernetes version (attempt 0): Get https://10.96.0.1:443/version?timeout=32s: dial tcp 10.96.0.1:443: connect: connection refused
W0301 00:31:39.738934 8 main.go:234] Initial connection to the Kubernetes API server was retried 1 times.
I0301 00:31:39.738955 8 main.go:237] Running in Kubernetes cluster version v1.17 (v1.17.2) - git (clean) commit 59603c6e503c87169aea6106f57b9f242f64df89 - platform linux/amd64
I0301 00:31:39.939974 8 main.go:102] SSL fake certificate created /etc/ingress-controller/ssl/default-fake-certificate.pem
I0301 00:31:39.967273 8 nginx.go:263] Starting NGINX Ingress controller
I0301 00:31:39.973645 8 event.go:281] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"nginx-configuration", UID:"5e9a5dee-00f5-4534-9f3b-2037fb81fd6e", APIVersion:"v1", ResourceVersion:"1356", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/nginx-configuration
I0301 00:31:39.976902 8 event.go:281] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"tcp-services", UID:"6066858d-6470-4160-b6d4-6cdde1fc2b43", APIVersion:"v1", ResourceVersion:"1359", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/tcp-services
I0301 00:31:39.976932 8 event.go:281] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"udp-services", UID:"3ed3aec7-b515-4e69-b0b8-9c783224dafb", APIVersion:"v1", ResourceVersion:"1360", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services
I0301 00:31:41.070795 8 event.go:281] Event(v1.ObjectReference{Kind:"Ingress", Namespace:"emojivoto", Name:"emojivoto", UID:"bc3e7984-286b-43e6-9174-a50e220445d2", APIVersion:"networking.k8s.io/v1beta1", ResourceVersion:"32681", FieldPath:""}): type: 'Normal' reason: 'CREATE' Ingress emojivoto/emojivoto
I0301 00:31:41.168059 8 nginx.go:307] Starting NGINX process
I0301 00:31:41.168115 8 leaderelection.go:242] attempting to acquire leader lease ingress-nginx/ingress-controller-leader-nginx...
I0301 00:31:41.168546 8 controller.go:137] Configuration changes detected, backend reload required.
I0301 00:31:41.169372 8 util.go:71] rlimit.max=1048576
I0301 00:31:41.170137 8 template.go:789] empty byte size, hence it will not be set
I0301 00:31:41.170301 8 status.go:86] new leader elected: nginx-ingress-controller-77c8bcccb5-dr4zt
I0301 00:31:41.204433 8 nginx.go:709] NGINX configuration diff:
--- /etc/nginx/nginx.conf 2020-02-24 12:47:02.000000000 +0000
+++ /tmp/new-nginx-cfg780858931 2020-03-01 00:31:41.199736430 +0000
@@ -1,6 +1,559 @@
-# A very simple nginx configuration file that forces nginx to start.
+
+# Configuration checksum: 7246555032219247554
+
+# setup custom paths that do not require root access
 pid /tmp/nginx.pid;
-events {}
-http {}
-daemon off;
\ No newline at end of file
+daemon off;
+
+worker_processes 16;
+
+worker_rlimit_nofile 64512;
+
+worker_shutdown_timeout 240s ;
+
+events {
+ multi_accept on;
+ worker_connections 16384;
+ use epoll;
+}
+
+http {
+ lua_package_path "/etc/nginx/lua/?.lua;;";
+
+ lua_shared_dict balancer_ewma 10M;
+ lua_shared_dict balancer_ewma_last_touched_at 10M;
+ lua_shared_dict balancer_ewma_locks 1M;
+ lua_shared_dict certificate_data 20M;
+ lua_shared_dict certificate_servers 5M;
+ lua_shared_dict configuration_data 20M;
+
+ init_by_lua_block {
+ collectgarbage("collect")
+
+ -- init modules
+ local ok, res
+
+ ok, res = pcall(require, "lua_ingress")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ lua_ingress = res
+ lua_ingress.set_config({
+ use_forwarded_headers = false,
+ use_proxy_protocol = false,
+ is_ssl_passthrough_enabled = false,
+ http_redirect_code = 308,
+ listen_ports = { ssl_proxy = "442", https = "443" },
+
+ hsts = true,
+ hsts_max_age = 15724800,
+ hsts_include_subdomains = true,
+ hsts_preload = false,
+ })
+ end
+
+ ok, res = pcall(require, "configuration")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ configuration = res
+ end
+
+ ok, res = pcall(require, "balancer")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ balancer = res
+ end
+
+ ok, res = pcall(require, "monitor")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ monitor = res
+ end
+
+ ok, res = pcall(require, "certificate")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ certificate = res
+ end
+
+ ok, res = pcall(require, "plugins")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ plugins = res
+ end
+ -- load all plugins that'll be used here
+ plugins.init({})
+ }
+
+ init_worker_by_lua_block {
+ lua_ingress.init_worker()
+ balancer.init_worker()
+
+ monitor.init_worker()
+
+ plugins.run()
+ }
+
+ geoip_country /etc/nginx/geoip/GeoIP.dat;
+ geoip_city /etc/nginx/geoip/GeoLiteCity.dat;
+ geoip_org /etc/nginx/geoip/GeoIPASNum.dat;
+ geoip_proxy_recursive on;
+
+ aio threads;
+ aio_write on;
+
+ tcp_nopush on;
+ tcp_nodelay on;
+
+ log_subrequest on;
+
+ reset_timedout_connection on;
+
+ keepalive_timeout 75s;
+ keepalive_requests 100;
+
+ client_body_temp_path /tmp/client-body;
+ fastcgi_temp_path /tmp/fastcgi-temp;
+ proxy_temp_path /tmp/proxy-temp;
+ ajp_temp_path /tmp/ajp-temp;
+
+ client_header_buffer_size 1k;
+ client_header_timeout 60s;
+ large_client_header_buffers 4 8k;
+ client_body_buffer_size 8k;
+ client_body_timeout 60s;
+
+ http2_max_field_size 4k;
+ http2_max_header_size 16k;
+ http2_max_requests 1000;
+ http2_max_concurrent_streams 128;
+
+ types_hash_max_size 2048;
+ server_names_hash_max_size 1024;
+ server_names_hash_bucket_size 32;
+ map_hash_bucket_size 64;
+
+ proxy_headers_hash_max_size 512;
+ proxy_headers_hash_bucket_size 64;
+
+ variables_hash_bucket_size 256;
+ variables_hash_max_size 2048;
+
+ underscores_in_headers off;
+ ignore_invalid_headers on;
+
+ limit_req_status 503;
+ limit_conn_status 503;
+
+ include /etc/nginx/mime.types;
+ default_type text/html;
+
+ gzip on;
+ gzip_comp_level 5;
+ gzip_http_version 1.1;
+ gzip_min_length 256;
+ gzip_types application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component;
+ gzip_proxied any;
+ gzip_vary on;
+
+ # Custom headers for response
+
+ server_tokens on;
+
+ # disable warnings
+ uninitialized_variable_warn off;
+
+ # Additional available variables:
+ # $namespace
+ # $ingress_name
+ # $service_name
+ # $service_port
+ log_format upstreaminfo '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id';
+
+ map $request_uri $loggable {
+
+ default 1;
+ }
+
+ access_log /var/log/nginx/access.log upstreaminfo if=$loggable;
+
+ error_log /var/log/nginx/error.log notice;
+
+ resolver 10.96.0.10 valid=30s;
+
+ # See https://www.nginx.com/blog/websocket-nginx
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+
+ # See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive
+ '' '';
+
+ }
+
+ # Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server.
+ # If no such header is provided, it can provide a random value.
+ map $http_x_request_id $req_id {
+ default $http_x_request_id;
+
+ "" $request_id;
+
+ }
+
+ # Create a variable that contains the literal $ character.
+ # This works because the geo module will not resolve variables.
+ geo $literal_dollar {
+ default "$";
+ }
+
+ server_name_in_redirect off;
+ port_in_redirect off;
+
+ ssl_protocols TLSv1.2;
+
+ ssl_early_data off;
+
+ # turn on session caching to drastically improve performance
+
+ ssl_session_cache builtin:1000 shared:SSL:10m;
+ ssl_session_timeout 10m;
+
+ # allow configuring ssl session tickets
+ ssl_session_tickets on;
+
+ # slightly reduce the time-to-first-byte
+ ssl_buffer_size 4k;
+
+ # allow configuring custom ssl ciphers
+ ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
+ ssl_prefer_server_ciphers on;
+
+ ssl_ecdh_curve auto;
+
+ # PEM sha: 01a333aed7a352e208e2a7b89300627d38f4586c
+ ssl_certificate /etc/ingress-controller/ssl/default-fake-certificate.pem;
+ ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;
+
+ proxy_ssl_session_reuse on;
+
+ upstream upstream_balancer {
+ ### Attention!!!
+ #
+ # We no longer create "upstream" section for every backend.
+ # Backends are handled dynamically using Lua. If you would like to debug
+ # and see what backends ingress-nginx has in its memory you can
+ # install our kubectl plugin https://kubernetes.github.io/ingress-nginx/kubectl-plugin.
+ # Once you have the plugin you can use "kubectl ingress-nginx backends" command to
+ # inspect current backends.
+ #
+ ###
+
+ server 0.0.0.1; # placeholder
+
+ balancer_by_lua_block {
+ balancer.balance()
+ }
+
+ keepalive 32;
+
+ keepalive_timeout 60s;
+ keepalive_requests 100;
+
+ }
+
+ # Cache for internal auth checks
+ proxy_cache_path /tmp/nginx-cache-auth levels=1:2 keys_zone=auth_cache:10m max_size=128m inactive=30m use_temp_path=off;
+
+ # Global filters
+
+ ## start server _
+ server {
+ server_name _ ;
+
+ listen 80 default_server reuseport backlog=4096 ;
+ listen [::]:80 default_server reuseport backlog=4096 ;
+ listen 443 default_server reuseport backlog=4096 ssl http2 ;
+ listen [::]:443 default_server reuseport backlog=4096 ssl http2 ;
+
+ set $proxy_upstream_name "-";
+
+ ssl_certificate_by_lua_block {
+ certificate.call()
+ }
+
+ location / {
+
+ set $namespace "emojivoto";
+ set $ingress_name "emojivoto";
+ set $service_name "";
+ set $service_port "";
+ set $location_path "/";
+
+ rewrite_by_lua_block {
+ lua_ingress.rewrite({
+ force_ssl_redirect = false,
+ ssl_redirect = true,
+ force_no_ssl_redirect = false,
+ use_port_in_redirects = false,
+ })
+ balancer.rewrite()
+ plugins.run()
+ }
+
+ # be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
+ # will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
+ # other authentication method such as basic auth or external auth useless - all requests will be allowed.
+ #access_by_lua_block {
+ #}
+
+ header_filter_by_lua_block {
+ lua_ingress.header()
+ plugins.run()
+ }
+
+ body_filter_by_lua_block {
+ }
+
+ log_by_lua_block {
+ balancer.log()
+
+ monitor.call()
+
+ plugins.run()
+ }
+
+ port_in_redirect off;
+
+ set $balancer_ewma_score -1;
+ set $proxy_upstream_name "emojivoto-web-svc-80";
+ set $proxy_host $proxy_upstream_name;
+ set $pass_access_scheme $scheme;
+
+ set $pass_server_port $server_port;
+
+ set $best_http_host $http_host;
+ set $pass_port $pass_server_port;
+
+ set $proxy_alternative_upstream_name "";
+
+ client_max_body_size 1m;
+
+ proxy_set_header Host $best_http_host;
+
+ # Pass the extracted client certificate to the backend
+
+ # Allow websocket connections
+ proxy_set_header Upgrade $http_upgrade;
+
+ proxy_set_header Connection $connection_upgrade;
+
+ proxy_set_header X-Request-ID $req_id;
+ proxy_set_header X-Real-IP $remote_addr;
+
+ proxy_set_header X-Forwarded-For $remote_addr;
+
+ proxy_set_header X-Forwarded-Host $best_http_host;
+ proxy_set_header X-Forwarded-Port $pass_port;
+ proxy_set_header X-Forwarded-Proto $pass_access_scheme;
+
+ proxy_set_header X-Scheme $pass_access_scheme;
+
+ # Pass the original X-Forwarded-For
+ proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
+
+ # mitigate HTTPoxy Vulnerability
+ # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
+ proxy_set_header Proxy "";
+
+ # Custom headers to proxied server
+
+ proxy_connect_timeout 5s;
+ proxy_send_timeout 60s;
+ proxy_read_timeout 60s;
+
+ proxy_buffering off;
+ proxy_buffer_size 4k;
+ proxy_buffers 4 4k;
+
+ proxy_max_temp_file_size 1024m;
+
+ proxy_request_buffering on;
+ proxy_http_version 1.1;
+
+ proxy_cookie_domain off;
+ proxy_cookie_path off;
+
+ # In case of errors try the next upstream server before returning an error
+ proxy_next_upstream error timeout;
+ proxy_next_upstream_timeout 0;
+ proxy_next_upstream_tries 3;
+
+ proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;
+
+ proxy_pass http://upstream_balancer;
+
+ proxy_redirect off;
+
+ }
+
+ # health checks in cloud providers require the use of port 80
+ location /healthz {
+
+ access_log off;
+ return 200;
+ }
+
+ # this is required to avoid error if nginx is being monitored
+ # with an external software (like sysdig)
+ location /nginx_status {
+
+ allow 127.0.0.1;
+
+ allow ::1;
+
+ deny all;
+
+ access_log off;
+ stub_status on;
+ }
+
+ }
+ ## end server _
+
+ # backend for when default-backend-service is not configured or it does not have endpoints
+ server {
+ listen 8181 default_server reuseport backlog=4096;
+ listen [::]:8181 default_server reuseport backlog=4096;
+ set $proxy_upstream_name "internal";
+
+ access_log off;
+
+ location / {
+ return 404;
+ }
+ }
+
+ # default server, used for NGINX healthcheck and access to nginx stats
+ server {
+ listen 127.0.0.1:10246;
+ set $proxy_upstream_name "internal";
+
+ keepalive_timeout 0;
+ gzip off;
+
+ access_log off;
+
+ location /healthz {
+ return 200;
+ }
+
+ location /is-dynamic-lb-initialized {
+ content_by_lua_block {
+ local configuration = require("configuration")
+ local backend_data = configuration.get_backends_data()
+ if not backend_data then
+ ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
+ return
+ end
+
+ ngx.say("OK")
+ ngx.exit(ngx.HTTP_OK)
+ }
+ }
+
+ location /nginx_status {
+ stub_status on;
+ }
+
+ location /configuration {
+ client_max_body_size 21m;
+ client_body_buffer_size 21m;
+ proxy_buffering off;
+
+ content_by_lua_block {
+ configuration.call()
+ }
+ }
+
+ location / {
+ content_by_lua_block {
+ ngx.exit(ngx.HTTP_NOT_FOUND)
+ }
+ }
+ }
+}
+
+stream {
+ lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;;";
+
+ lua_shared_dict tcp_udp_configuration_data 5M;
+
+ init_by_lua_block {
+ collectgarbage("collect")
+
+ -- init modules
+ local ok, res
+
+ ok, res = pcall(require, "configuration")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ configuration = res
+ end
+
+ ok, res = pcall(require, "tcp_udp_configuration")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ tcp_udp_configuration = res
+ end
+
+ ok, res = pcall(require, "tcp_udp_balancer")
+ if not ok then
+ error("require failed: " .. tostring(res))
+ else
+ tcp_udp_balancer = res
+ end
+ }
+
+ init_worker_by_lua_block {
+ tcp_udp_balancer.init_worker()
+ }
+
+ lua_add_variable $proxy_upstream_name;
+
+ log_format log_stream '[$remote_addr] [$time_local] $protocol $status $bytes_sent $bytes_received $session_time';
+
+ access_log /var/log/nginx/access.log log_stream ;
+
+ error_log /var/log/nginx/error.log;
+
+ upstream upstream_balancer {
+ server 0.0.0.1:1234; # placeholder
+
+ balancer_by_lua_block {
+ tcp_udp_balancer.balance()
+ }
+ }
+
+ server {
+ listen 127.0.0.1:10247;
+
+ access_log off;
+
+ content_by_lua_block {
+ tcp_udp_configuration.call()
+ }
+ }
+
+ # TCP services
+
+ # UDP services
+
+}
+
I0301 00:31:41.226444 8 controller.go:153] Backend successfully reloaded.
I0301 00:31:41.226474 8 controller.go:162] Initial sync, sleeping for 1 second.
I0301 00:31:41.923296 8 healthz.go:191] [+]ping ok
[-]nginx-ingress-controller failed: reason withheld
healthz check failed
I0301 00:31:42.228307 8 controller.go:176] Dynamic reconfiguration succeeded.
I0301 00:31:44.945334 8 socket.go:353] removing ingresses [] from metrics
I0301 00:32:12.606716 8 leaderelection.go:252] successfully acquired lease ingress-nginx/ingress-controller-leader-nginx
I0301 00:32:12.606779 8 status.go:86] new leader elected: nginx-ingress-controller-84c6f976d-74zml
I0301 00:32:12.606792 8 status.go:65] I am the new leader
I0301 00:32:12.606842 8 main.go:163] Updating ssl expiration metrics.
I0301 00:32:12.611109 8 status.go:274] updating Ingress emojivoto/emojivoto status from [] to [{10.105.55.10 }]
I0301 00:32:12.613280 8 event.go:281] Event(v1.ObjectReference{Kind:"Ingress", Namespace:"emojivoto", Name:"emojivoto", UID:"bc3e7984-286b-43e6-9174-a50e220445d2", APIVersion:"networking.k8s.io/v1beta1", ResourceVersion:"32951", FieldPath:""}): type: 'Normal' reason: 'UPDATE' Ingress emojivoto/emojivoto
I0301 00:32:12.613420 8 main.go:163] Updating ssl expiration metrics.