Skip to content

Instantly share code, notes, and snippets.

@ottomata
Created March 8, 2017 15:20
Show Gist options
  • Select an option

  • Save ottomata/bddd45dfc5f697d114cb109fa4fd399c to your computer and use it in GitHub Desktop.

Select an option

Save ottomata/bddd45dfc5f697d114cb109fa4fd399c to your computer and use it in GitHub Desktop.
# Where to output varnish log lines:
# kafka - (default) send to kafka broker
# stdout - just print to stdout (behave like varnishncsa)
# null - (test) collect all tags specified by format but don't output anything
output = stdout
# Log formatter
format.type = json
format = %{fake_tag0@hostname?fake-hostname-change-me-if-you-want}x %{@sequence!num?0}n %{end:%FT%T@dt}t %{Varnish:time_firstbyte@time_firstbyte!num?0.0}x %{X-Client-IP@ip}o %{X-Cache-Status@cache_status}o %{@http_status}s %{@response_size!num?0}b %{@http_method}m %{Host@uri_host}i %{@uri_path}U %{@uri_query}q %{Content-Type@content_type}o %{Referer@referer}i %{X-Forwarded-For@x_forwarded_for}i %{User-Agent@user_agent}i %{Accept-Language@accept_language}i %{X-Analytics@x_analytics}o %{Range@range}i %{X-Cache@x_cache}o
# Optional secondary formatting.
# 'output = kafka': The rendered 'format.key' will be provided as the
# Kafka message Key
# 'output = string': Print string to stdout.
# Supports the same formatting and type as 'format' and 'format.type'.
# format.key.type = string
# format.key = %l
# Start for sequence number (%n)
# Either a number, or the string "time" which will set it to the current
# unix time in seconds multiplied by 1,000,000.
# Defaults to 0.
sequence.number = 0
# daemonize varnishkafka (boolean)
daemonize = false
# Statistics output interval
# Defaults to 60 seconds, use 0 to disable.
log.statistics.interval = 0
# varnishkafka log level (1 = emergencies .. 7 = debug)
log.level = 2
# specify log output (multiples allowed)
log.stderr = true
#######################################################################
# #
# Standard varnish VSL command line arguments #
# #
# Syntax: #
# varnish.arg.<c> = <value>, where <c> is a command line option. #
# #
# See varnishncsa(1) and varnishlog(1) for valid options. #
# #
#######################################################################
# varnish instance name --- YOU MIGHT NEED TO SET THIS
#varnish.arg.n = frontend
kafka.topic = not-using-kafka
#######################################################################
# #
# varnishkafka configuration file #
# Varnish 4 specific #
# #
#######################################################################
# #
# Syntax: #
# <property-name> = <value> #
# #
# Boolean property values: #
# >0, "true", "yes", "on" - interpreted as true #
# everything else - interpreted as false #
# #
#######################################################################
#
#
#
#######################################################################
# #
# Varnish log formatting #
# #
# format.type - format output type, one of: #
# string - ASCII string output #
# json - JSON output #
# #
# #
# format - format string #
# %X #
# where 'X' is one of the standard varnishncsa(1) formatters. #
# Example: %u #
# #
# #
# %{VAR}X #
# Name-Value tokens where X is 'x', 'i' or 'o' and 'VAR' is the #
# Name to extract the value for. #
# Example: %{User-Agent}i #
# #
# #
# %{?DEFAULT@FIELD!OPTION!OPTION..}X #
# where 'X' is any formatter, #
# #
# 'DEFAULT' is the default string to use if no tag was matched, #
# the default default string is "-". #
# #
# 'FIELD' is the field name to use with the JSON formatter. #
# i.e., "%{@host}l" will be JSON encoded as: {"host":"1.2.3.4"} #
# #
# 'OPTION' is one or more of the formatting options: #
# escape - escape non-printable characters to \<octalcode> #
# and \t\n\r\v\f " to their canonical #
# backslashed notations (\t\n\r\v\f\"\ ). #
# num - for typed formatters, such as JSON, try to encode #
# the value as a number. #
# #
# #
# This syntax can be combined with %{VAR}X. #
# Example: %{User-Agent?Mozilla!escape}i #
# %{?nouser}u #
# %{!escape}q #
# %{@host}l #
# #
# #
# #
# Additional formatter specials: #
# %{<strftime-format>}t - format timestamp according to supplied #
# strftime(3) compatible format string. #
# %{Varnish:xid}x - transaction id of client request. #
# Same value as X-Varnish header #
# #
# #
# #
# Non %-prefixed strings are copied verbatim to the #
# output log string. #
# Example: "User: %u;" would render "User: snaps;" #
# #
# #
#######################################################################
# Where to output varnish log lines:
# kafka - (default) send to kafka broker
# stdout - just print to stdout (behave like varnishncsa)
# null - (test) collect all tags specified by format but don't output anything
output = kafka
# Log formatter
format.type = json
format = %{fake_tag0@hostname?cp1045.eqiad.wmnet}x %{@sequence!num?0}n %{end:%FT%T@dt}t %{Varnish:time_firstbyte@time_firstbyte!num?0.0}x %{X-Client-IP@ip}o %{X-Cache-Status@cache_status}o %{@http_status}s %{@response_size!num?0}b %{@http_method}m %{Host@uri_host}i %{@uri_path}U %{@uri_query}q %{Content-Type@content_type}o %{Referer@referer}i %{X-Forwarded-For@x_forwarded_for}i %{User-Agent@user_agent}i %{Accept-Language@accept_language}i %{X-Analytics@x_analytics}o %{Range@range}i %{X-Cache@x_cache}o
# Optional secondary formatting.
# 'output = kafka': The rendered 'format.key' will be provided as the
# Kafka message Key
# 'output = string': Print string to stdout.
# Supports the same formatting and type as 'format' and 'format.type'.
# format.key.type = string
# format.key = %l
# Start for sequence number (%n)
# Either a number, or the string "time" which will set it to the current
# unix time in seconds multiplied by 1,000,000.
# Defaults to 0.
sequence.number = 0
#
# TUNING
#
# The maximum accepted log tag (field) size.
# Larger tags will be truncated to this size.
# Defaults to 2048
tag.size.max = 2048
# Size of per logline scratch buffer.
# The scratch buffer is used as a temporary storage space while
# collecting tags for the log line. If the scratch size is too small the
# logline tag match will be incomplete. logline.scratch.size depicts the
# size of the "static" always-available memory allocated with each logline.
# It should be configured to fit all matched tag-values (prior to formatting)
# for a normal request. If the scratch buffer gets full vk will start
# allocating tmpbufs, a tmpbuf only lives for the current request and is then
# freed, so it is a little more costly than using the static scratch pad.
# Defaults to 4096 bytes.
logline.scratch.size = 4096
#
# varnishkafka log messages configuration
# Debugging, error reporting, etc, not to be confused with varnish logs.
#
# varnishkafka log level (1 = emergencies .. 7 = debug)
log.level = 6
# specify log output (multiples allowed)
log.stderr = false
log.syslog = true
# Maximum number of error logs produced per log.rate.period seconds
# This setting is applied per error type.
# log.rate.max defaults to 100
# log.rate.period defaults to 60
#log.rate.max = 100
#log.rate.period = 60
# Kafka: log message delivery failures (requires required.acks > 0)
log.kafka.msg.error = true
#
# JSON Statistics
#
# Statistics is collected from varnishkafka itself as well as librdkafka
# Each JSON object has a top level key of either 'varnishkafka' or
# 'kafka' to indicate which type of statistics the object contains.
# Each line is a valid JSON object.
#
# Statistics output interval
# Defaults to 60 seconds, use 0 to disable.
log.statistics.interval = 15
# Statistics output file
# Defaults to /tmp/varnishkafka.stats.json
log.statistics.file = /var/cache/varnishkafka/webrequest.stats.json
# daemonize varnishkafka (boolean)
daemonize = false
#######################################################################
# #
# Standard varnish VSL command line arguments #
# #
# Syntax: #
# varnish.arg.<c> = <value>, where <c> is a command line option. #
# #
# See varnishncsa(1) and varnishlog(1) for valid options. #
# #
#######################################################################
# -L 10000
varnish.arg.L = 10000
# -T 1500
varnish.arg.T = 1500
# -q ReqMethod ne "PURGE" and not Timestamp:Pipe and not ReqHeader:Upgrade ~ "[wW]ebsocket" and not HttpGarbage
varnish.arg.q = ReqMethod ne "PURGE" and not Timestamp:Pipe and not ReqHeader:Upgrade ~ "[wW]ebsocket" and not HttpGarbage
# varnish instance name
varnish.arg.n = frontend
#######################################################################
# #
# Kafka configuration #
# #
# Kafka configuration properties are prefixed with "kafka." #
# and topic properties are prefixed with "kafka.topic.". #
# #
# For the full range of Kafka handle and topic configuration #
# properties, see: #
# http://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md #
# #
# And the Apache Kafka configuration reference: #
# http://kafka.apache.org/08/configuration.html #
# #
#######################################################################
# Initial list of kafka brokers
kafka.metadata.broker.list = kafka1012.eqiad.wmnet,kafka1013.eqiad.wmnet,kafka1014.eqiad.wmnet,kafka1018.eqiad.wmnet,kafka1020.eqiad.wmnet,kafka1022.eqiad.wmnet
# Maximum number of messages allowed on the local producer queue
# Defaults to 1000000
kafka.queue.buffering.max.messages = 720000
# Maximum time, in milliseconds, for buffering data on the producer queue.
# Defaults to 1000 (1 second)
kafka.queue.buffering.max.ms = 1000
# Maximum number of messages batched in one MessageSet.
# Defaults to 1000
kafka.batch.num.messages = 9000
# Maximum number of retries per messageset.
kafka.message.send.max.retries = 3
# Use compression when sending to Kafka. Default is none.
# Valid values are 'none', 'gzip', and 'snappy'.
kafka.compression.codec = snappy
#
# Topic configuration
#
# Topic to produce messages to
kafka.topic = webrequest_misc
# Partition (-1: random, else one of the available partitions)
kafka.partition = -1
# Required number of acks
kafka.topic.request.required.acks = 1
# Local message timeout (milliseconds)
kafka.topic.message.timeout.ms = 300000
# The ack timeout of the producer request in milliseconds
kafka.topic.request.timeout.ms = 2000
# SO_SNDBUFF Socket send buffer size. System default is used if 0.
kafka.socket.send.buffer.bytes = 0
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment