fluentd config
apiVersion: v1
data:
  containers.input.conf: |-
    # This configuration file for Fluentd is used
    # to watch changes to Docker log files that live in the
    # directory /var/lib/docker/containers/ and are symbolically
    # linked to from the /var/log/containers directory using names that capture the
    # pod name and container name. These logs are then submitted to
    # Google Cloud Logging, which assumes the installation of the cloud-logging plug-in.
    #
    # Example
    # =======
    # A line in the Docker log file might look like this JSON:
    #
    # {"log":"2014/09/25 21:15:03 Got request with path wombat\\n",
    # "stream":"stderr",
    # "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The record reformer is used to rewrite the tag to focus on the pod name
    # and the Kubernetes container name. For example, a Docker container's logs
    # might be in the directory:
    # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    # and in the file:
    # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
    # in the /var/log/containers directory, which includes the pod name and the Kubernetes
    # container name:
    # synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # ->
    # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd, and we end up collecting the file:
    # /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # This results in the tag:
    # var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # The record reformer is used to discard the var.log.containers prefix and
    # the Docker container ID suffix, and "kubernetes." is prepended, giving the tag:
    # kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr
    # The tag is then parsed by the google_cloud plugin and translated into
    # metadata that is visible in the log viewer.

    # Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    <source>
      type tail
      format json
      time_key time
      path /var/log/containers/*.log
      pos_file /var/log/gcp-containers.log.pos
      time_format %Y-%m-%dT%H:%M:%S.%N%Z
      tag reform.*
      read_from_head true
    </source>

    <filter reform.**>
      type parser
      format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
      reserve_data true
      suppress_parse_error_log true
      key_name log
    </filter>
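
    # Worked example for the tag rewrite below, using the file from the header
    # comment: the input tag is
    #   reform.var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # so tag_suffix[4] is the filename part; splitting it on '-' and dropping
    # the last element (the Docker ID plus ".log") yields the output tag
    #   raw.kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr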
    <match reform.**>
      type record_reformer
      enable_ruby true
      tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
    </match>

    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.kubernetes.**>
      @type detect_exceptions

      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>
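
    # Illustrative note: a multi-line stack trace (e.g. a Java or Python
    # traceback) that Docker records as one JSON line per text line leaves this
    # stage re-assembled into a single kubernetes.** event, flushed after at
    # most 5 seconds (multiline_flush_interval above).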
  monitoring.conf: |-
    # Prometheus monitoring
    <source>
      @type prometheus
      port 31337
    </source>

    <source>
      @type prometheus_monitor
    </source>
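
    # Assumption (not stated above): the prometheus input plugin serves the
    # collected metrics over HTTP at its default /metrics path, so they can be
    # scraped from port 31337 on the node.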

    # This source is used to acquire the approximate process start timestamp,
    # whose purpose is explained before the corresponding output plugin.
    <source>
      @type exec
      command /bin/sh -c 'date +%s'
      tag process_start
      time_format %Y-%m-%d %H:%M:%S
      keys process_start_timestamp
    </source>

    # This filter is used to convert the process start timestamp to an integer
    # value for correct ingestion in the prometheus output plugin.
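    # For example (value illustrative): `date +%s` prints epoch seconds as a
    # string, so the source above emits {"process_start_timestamp":"1504558352"};
    # the record stanza below rewrites it to the integer 1504558352.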
    <filter process_start>
      @type record_transformer
      enable_ruby true
      auto_typecast true
      <record>
        process_start_timestamp ${record["process_start_timestamp"].to_i}
      </record>
    </filter>
  output.conf: |-
    # This match is placed before the all-matching output to provide the metric
    # exporter with a process start timestamp for correct exporting of
    # cumulative metrics to Stackdriver.
    <match process_start>
      @type prometheus

      <metric>
        type gauge
        name process_start_time_seconds
        desc Timestamp of the process start in seconds
        key process_start_timestamp
      </metric>
    </match>

    # We use 2 output stanzas - one to handle the container logs and one to handle
    # the node daemon logs, the latter of which explicitly sends its logs to the
    # compute.googleapis.com service rather than container.googleapis.com to keep
    # them separate since most users don't care about the node logs.
    <match kubernetes.**>
      @type google_cloud

      # Collect metrics in the Prometheus registry about plugin activity.
      enable_monitoring true
      monitoring_type prometheus
      # Set the buffer type to file to improve reliability and reduce memory consumption.
      buffer_type file
      buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
      # Set the queue_full action to block because we want to pause gracefully
      # in case of off-the-limits load instead of throwing an exception.
      buffer_queue_full_action block
      # Set the chunk limit conservatively to avoid exceeding the GCL limit
      # of 10MiB per write request.
      buffer_chunk_limit 2M
      # Cap the combined memory usage of this buffer and the one below to
      # 2MiB/chunk * (6 + 2) chunks = 16 MiB
      buffer_queue_limit 6
      # Never wait more than 5 seconds before flushing logs in the non-error case.
      flush_interval 5s
      # Never wait longer than 30 seconds between retries.
      max_retry_wait 30
      # Disable the limit on the number of retries (retry forever).
      disable_retry_limit
      # Use multiple threads for processing.
      num_threads 2
    </match>

    # Keep a smaller buffer here since these logs are less important than the user's
    # container logs.
    <match **>
      @type google_cloud

      enable_monitoring true
      monitoring_type prometheus
      detect_subservice false
      buffer_type file
      buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
      buffer_queue_full_action block
      buffer_chunk_limit 2M
      buffer_queue_limit 2
      flush_interval 5s
      max_retry_wait 30
      disable_retry_limit
      num_threads 2
    </match>
  system.input.conf: |-
    # Example:
    # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
    <source>
      type tail
      format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
      time_format %Y-%m-%d %H:%M:%S
      path /var/log/salt/minion
      pos_file /var/log/gcp-salt.pos
      tag salt
    </source>

    # Example:
    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
    <source>
      type tail
      format syslog
      path /var/log/startupscript.log
      pos_file /var/log/gcp-startupscript.log.pos
      tag startupscript
    </source>

    # Examples:
    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
    <source>
      type tail
      format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
      path /var/log/docker.log
      pos_file /var/log/gcp-docker.log.pos
      tag docker
    </source>

    # Example:
    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
    <source>
      type tail
      # Not parsing this, because it doesn't have anything particularly useful to
      # parse out of it (like severities).
      format none
      path /var/log/etcd.log
      pos_file /var/log/gcp-etcd.log.pos
      tag etcd
    </source>

    # Multi-line parsing is required for all the kube logs because very large log
    # statements, such as those that include entire object bodies, get split into
    # multiple lines by glog.

    # Example:
    # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
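    # The format1 pattern below captures this line as (illustrative):
    #   severity="I", time="0204 07:32:30.020537", pid="3368",
    #   source="server.go:1048", message="POST /stats/container/: ..."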
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kubelet.log
      pos_file /var/log/gcp-kubelet.log.pos
      tag kubelet
    </source>

    # Example:
    # I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-proxy.log
      pos_file /var/log/gcp-kube-proxy.log.pos
      tag kube-proxy
    </source>

    # Example:
    # I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-apiserver.log
      pos_file /var/log/gcp-kube-apiserver.log.pos
      tag kube-apiserver
    </source>

    # Example:
    # 2017-02-09T00:15:57.992775796Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" ip="104.132.1.72" method="GET" user="kubecfg" as="<self>" asgroups="<lookup>" namespace="default" uri="/api/v1/namespaces/default/pods"
    # 2017-02-09T00:15:57.993528822Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" response="200"
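    # The first example line above parses to (illustrative): id="90c73c7c-97d6-4b65-9461-f94606ff825f",
    # ip="104.132.1.72", method="GET", user="kubecfg", and so on; the second
    # line carries only id and response="200".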
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\S+\s+AUDIT:/
      # Fields must be explicitly captured by name to be parsed into the record.
      # Fields may not always be present, and order may change, so this just looks
      # for a list of key="\"quoted\" value" pairs separated by spaces.
      # Unknown fields are ignored.
      # Note: We can't separate query/response lines as format1/format2 because
      # they don't always come one after the other for a given query.
      # TODO: Maybe add a JSON output mode to audit log so we can get rid of this?
      format1 /^(?<time>\S+) AUDIT:(?: (?:id="(?<id>(?:[^"\\]|\\.)*)"|ip="(?<ip>(?:[^"\\]|\\.)*)"|method="(?<method>(?:[^"\\]|\\.)*)"|user="(?<user>(?:[^"\\]|\\.)*)"|groups="(?<groups>(?:[^"\\]|\\.)*)"|as="(?<as>(?:[^"\\]|\\.)*)"|asgroups="(?<asgroups>(?:[^"\\]|\\.)*)"|namespace="(?<namespace>(?:[^"\\]|\\.)*)"|uri="(?<uri>(?:[^"\\]|\\.)*)"|response="(?<response>(?:[^"\\]|\\.)*)"|\w+="(?:[^"\\]|\\.)*"))*/
      time_format %FT%T.%L%Z
      path /var/log/kube-apiserver-audit.log
      pos_file /var/log/gcp-kube-apiserver-audit.log.pos
      tag kube-apiserver-audit
    </source>

    # Example:
    # I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-controller-manager.log
      pos_file /var/log/gcp-kube-controller-manager.log.pos
      tag kube-controller-manager
    </source>

    # Example:
    # W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-scheduler.log
      pos_file /var/log/gcp-kube-scheduler.log.pos
      tag kube-scheduler
    </source>

    # Example:
    # I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/rescheduler.log
      pos_file /var/log/gcp-rescheduler.log.pos
      tag rescheduler
    </source>

    # Example:
    # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/glbc.log
      pos_file /var/log/gcp-glbc.log.pos
      tag glbc
    </source>

    # Example:
    # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/cluster-autoscaler.log
      pos_file /var/log/gcp-cluster-autoscaler.log.pos
      tag cluster-autoscaler
    </source>

    # Logs from systemd-journal for interesting services.
    <source>
      type systemd
      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
      pos_file /var/log/gcp-journald-docker.pos
      read_from_head true
      tag docker
    </source>

    <source>
      type systemd
      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      pos_file /var/log/gcp-journald-kubelet.pos
      read_from_head true
      tag kubelet
    </source>
kind: ConfigMap
metadata:
  creationTimestamp: 2017-08-10T21:20:01Z
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: fluentd-gcp-config-v1.1
  namespace: kube-system
  resourceVersion: "27256679"
  selfLink: /api/v1/namespaces/kube-system/configmaps/fluentd-gcp-config-v1.1
  uid: ab4d84c7-7e11-11e7-9ba0-42010af00105