Skip to content

Instantly share code, notes, and snippets.

@fieldju
Last active June 17, 2022 05:38
Show Gist options
  • Save fieldju/c4af82bd8663569476be2d5f575d19f8 to your computer and use it in GitHub Desktop.
Save fieldju/c4af82bd8663569476be2d5f575d19f8 to your computer and use it in GitHub Desktop.

ArmoryCloudJsonLayout.java <- Custom Logback layout that adds the context that we want to the log statements

logback.xml <- Logback config that uses the custom layout

☝️ These go into our Java Spring Boot applications, which are containerized and run in a Kubernetes cluster

newrelic-logging-helm-chart-values.yaml <- Our values file for the New Relic Fluentbit helm chart

☝️ We configured ours to strip a bunch of metadata and to forward only logs from containers that carry the `logme: "true"` annotation

package io.armory.cloud.logging;

import ch.qos.logback.classic.pattern.TargetLengthBasedClassNameAbbreviator;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.ThrowableProxyUtil;
import ch.qos.logback.core.LayoutBase;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Properties;

import static java.util.Optional.ofNullable;
/**
 * Logback layout that renders each log event as a single JSON line, enriched with
 * Armory Cloud context (application, environment, replica set, version, hostname)
 * so downstream log shippers can index events without additional parsing.
 *
 * <p>Context values that cannot be resolved are simply omitted from the output.
 */
public class ArmoryCloudJsonLayout extends LayoutBase<ILoggingEvent> {
    // Log message attribute keys.
    public static final String APPLICATION = "app";
    public static final String VERSION = "version";
    public static final String ENVIRONMENT = "environment";
    public static final String REPLICA_SET = "replicaset";
    public static final String MESSAGE = "message";
    public static final String THREAD = "thread";
    public static final String LOGGER = "logger";
    public static final String STACKTRACE = "stacktrace";
    public static final String TIMESTAMP = "timestamp";
    public static final String HOSTNAME = "hostname";
    public static final String LOG_LEVEL = "level";

    private final TargetLengthBasedClassNameAbbreviator classNameAbbreviator =
            new TargetLengthBasedClassNameAbbreviator(32);
    private final ObjectMapper om;
    private final String application = System.getenv("ARMORY_APPLICATION_NAME");
    private final String environment = System.getenv("ARMORY_ENVIRONMENT_NAME");
    private final String replicaSet = System.getenv("ARMORY_REPLICA_SET_NAME");
    private final String hostname;
    private final String version;

    public ArmoryCloudJsonLayout() {
        super();
        om = new ObjectMapper();
        hostname = resolveHostname();
        version = resolveVersion();
    }

    /**
     * Best-effort hostname lookup: prefer the JDK resolver and fall back to the
     * {@code hostname} binary.
     *
     * @return the host name, or {@code null} when neither mechanism works
     */
    private static String resolveHostname() {
        try {
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException e) {
            try {
                // ProcessBuilder instead of the deprecated Runtime.exec(String).
                var process = new ProcessBuilder("hostname").start();
                // try-with-resources: the original leaked the reader/stream.
                // Explicit UTF-8: the original used the platform default charset.
                try (var reader = new BufferedReader(
                        new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                    var line = reader.readLine();
                    // readLine() returns null on empty output; the original called
                    // .trim() unconditionally, and the resulting NullPointerException
                    // was NOT caught by catch(IOException) and escaped the constructor.
                    return line != null ? line.trim() : null;
                }
            } catch (IOException ex) {
                return null;
            }
        }
    }

    /**
     * Reads the application version from Spring Boot's generated
     * {@code META-INF/build-info.properties}.
     *
     * @return the {@code build.version} property, or {@code null} when unavailable
     */
    private String resolveVersion() {
        try (var is = getClass().getClassLoader()
                .getResourceAsStream("META-INF/build-info.properties")) {
            if (is == null) {
                // Resource absent (e.g. local dev build): previously this surfaced
                // as an NPE from props.load(null), swallowed by catch(Exception).
                return null;
            }
            var props = new Properties();
            props.load(is);
            return props.getProperty("build.version");
        } catch (Exception e) {
            return null;
        }
    }

    /**
     * Serializes the event to one JSON object per line; falls back to
     * {@code Map.toString()} if JSON serialization fails.
     */
    @Override
    public String doLayout(ILoggingEvent event) {
        // LinkedHashMap so that insert order is preserved for consistent logging.
        var entry = new LinkedHashMap<String, Object>();
        entry.put(TIMESTAMP, String.valueOf(event.getTimeStamp()));
        entry.put(LOG_LEVEL, event.getLevel().toString());
        entry.put(THREAD, event.getThreadName());
        entry.put(LOGGER, classNameAbbreviator.abbreviate(event.getLoggerName()));
        ofNullable(hostname).ifPresent(h -> entry.put(HOSTNAME, h));
        ofNullable(application).ifPresent(a -> entry.put(APPLICATION, a));
        ofNullable(environment).ifPresent(e -> entry.put(ENVIRONMENT, e));
        ofNullable(replicaSet).ifPresent(rs -> entry.put(REPLICA_SET, rs));
        ofNullable(version).ifPresent(v -> entry.put(VERSION, v));
        // MDC entries are flattened into the top level of the JSON document.
        entry.putAll(event.getMDCPropertyMap());
        entry.put(MESSAGE, event.getFormattedMessage());
        ofNullable(event.getThrowableProxy())
                .ifPresent(t -> entry.put(STACKTRACE, ThrowableProxyUtil.asString(t)));
        try {
            return "%s\n".formatted(om.writeValueAsString(entry));
        } catch (Exception e) {
            addError("Failed to serialize log entry. Defaulting to map.toString(). Message: %s".formatted(e.getMessage()), e);
            return entry.toString();
        }
    }
}
<!-- Logback configuration: writes every log event to stdout as a single JSON
     line via the custom ArmoryCloudJsonLayout, so the container log collector
     can forward structured records without extra parsing. -->
<configuration>
<!-- Console appender so logs land on stdout for Kubernetes log collection. -->
<appender name="json" class="ch.qos.logback.core.ConsoleAppender">
<layout class="io.armory.cloud.logging.ArmoryCloudJsonLayout" />
</appender>
<!-- INFO and above from all loggers, rendered by the JSON appender. -->
<root level="info">
<appender-ref ref="json" />
</root>
</configuration>
# Values for the New Relic "newrelic-logging" Fluent Bit helm chart.
# NOTE(review): the pasted original lost all YAML indentation (every key was at
# column 0), which is not valid chart input; nesting below is restored to the
# chart's canonical structure — confirm against the deployed values.
customSecretName: newrelic-secrets
customSecretLicenseKey: api-key
endpoint: https://log-api.newrelic.com/log/v1
fluentBit:
  logLevel: "info"
  k8sBufferSize: "32k"
  k8sLoggingExclude: "Off"
  additionalEnvVariables: []
  # New Relic default configuration for fluent-bit.conf (service, inputs, filters, outputs)
  # and parsers.conf (parsers). The configuration below is not configured for lowDataMode and will
  # send all attributes. If custom configuration is required, update these variables.
  config:
    service: |
      [SERVICE]
          Flush         1
          Log_Level     ${LOG_LEVEL}
          Daemon        off
          Parsers_File  parsers.conf
          HTTP_Server   On
          HTTP_Listen   0.0.0.0
          HTTP_Port     2020
    inputs: |
      [INPUT]
          Name              tail
          Tag               kube.*
          Path              ${PATH}
          Parser            ${LOG_PARSER}
          DB                ${FB_DB}
          Mem_Buf_Limit     7MB
          Skip_Long_Lines   On
          Refresh_Interval  10
    lowDataModeFilters: |
      [FILTER]
          Name                 kubernetes
          Match                kube.*
          Kube_URL             https://kubernetes.default.svc.cluster.local:443
          Buffer_Size          ${K8S_BUFFER_SIZE}
          K8S-Logging.Exclude  ${K8S_LOGGING_EXCLUDE}
          Labels               Off
      # Lift kubernetes.* keys to the record top level so grep can match them.
      [FILTER]
          Name          nest
          Match         *
          Operation     lift
          Nested_under  kubernetes
          Add_prefix    kubernetes_
      [FILTER]
          Name          nest
          Match         *
          Operation     lift
          Nested_under  kubernetes_annotations
          Add_prefix    kubernetes_annotations_
      # Only forward records from containers annotated with logme: "true".
      [FILTER]
          Name   grep
          Match  *
          Regex  kubernetes_annotations_logme ^true$
      # Re-nest the lifted keys back under their original structure.
      [FILTER]
          Name           nest
          Match          *
          Operation      nest
          Wildcard       kubernetes_annotations_*
          Nest_under     kubernetes_annotations
          Remove_prefix  kubernetes_annotations_
      [FILTER]
          Name           nest
          Match          *
          Operation      nest
          Wildcard       kubernetes_*
          Nest_under     kubernetes
          Remove_prefix  kubernetes_
      # Strip bulky metadata we do not want forwarded to New Relic.
      [FILTER]
          Name        record_modifier
          Match       *
          Remove_key  kubernetes
          Remove_key  index
          Remove_key  stream
    outputs: |
      [OUTPUT]
          Name         newrelic
          Match        *
          licenseKey   ${LICENSE_KEY}
          endpoint     ${ENDPOINT}
          lowDataMode  ${LOW_DATA_MODE}
    parsers: |
      [PARSER]
          Name         docker
          Format       json
          Time_Key     time
          Time_Format  %Y-%m-%dT%H:%M:%S.%L
          Time_Keep    On
image:
  repository: newrelic/newrelic-fluentbit-output
  tag: ""
  pullPolicy: IfNotPresent
  ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
  # pullSecrets:
  #   - name: regsecret
# By default, the Linux DaemonSet will always be deployed, while the Windows DaemonSet(s) won't.
enableLinux: true
enableWindows: false
# For every entry in this Windows OS list, we will create an independent DaemonSet which will get deployed
# on Windows nodes running each specific Windows version and build number. Note that
# Windows containers can only be executed on hosts running the exact same Windows version and build number,
# because Kubernetes only supports process isolation and not Hyper-V isolation (as of September 2021)
windowsOsList:
  # Right now, the windows-2019 worker offered by GitHub is based on ltsc2019/10.0.17763.2183,
  # so it can only compile containers running this specific version and compilation number of the OS.
  # We aim to support (but right now, we can only support LTSC2019 using GitHub actions):
  # https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#windows-os-version-support
  - version: ltsc2019
    imageTagSuffix: windows-ltsc-2019
    buildNumber: 10.0.17763
resources:
  limits:
    cpu: 500m
    memory: 128Mi
  requests:
    cpu: 250m
    memory: 64Mi
rbac:
  # Specifies whether RBAC resources should be created
  create: true
  pspEnabled: false
serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Specify any annotations to add to the ServiceAccount
  annotations: {}
# If you wish to provide additional labels to apply to the pod(s), specify
# them here
# podLabels:
# Pod scheduling priority
# Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: high-priority
# Node affinity rules
# Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
#
# IMPORTANT #
# ######### #
# When .Values.global.fargate == true, the chart will automatically add the required affinity rules to exclude
# the DaemonSet from Fargate nodes. There is no need to manually touch this property to achieve this.
# This automatic exclusion will, however, not take place if this value is overridden: Setting this to a
# non-empty value WHEN deploying in EKS Fargate (global.fargate == true) requires the user to manually
# include in their custom ruleset an exclusion for nodes with "eks.amazonaws.com/compute-type: fargate", as
# the New Relic DaemonSet MUST NOT be deployed on fargate nodes, as the operator takes care of injecting it
# as a sidecar instead.
# Please refer to the daemonset.yaml template for more details on how to achieve this.
nodeAffinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
# Note that the Linux DaemonSet already contains a node selector label based on their OS (kubernetes.io/os: linux).
nodeSelector: {}
# Note that the Windows DaemonSet already contains a node selector label based on their OS (kubernetes.io/os: windows).
# and build number (node.kubernetes.io/windows-build: {{ .buildNumber }}, to ensure that each version of the DaemonSet
# gets deployed only on those Windows nodes running the exact same Windows version and build number. Note that
# Windows containers can only be executed on hosts running the exact same Windows version and build number.
windowsNodeSelector: {}
# These are default tolerations to be able to run the New Relic Kubernetes
# integration.
tolerations:
  - operator: "Exists"
    effect: "NoSchedule"
  - operator: "Exists"
    effect: "NoExecute"
updateStrategy: RollingUpdate
# Sends data to staging, can be set as a global.
# global.nrStaging
nrStaging: false
daemonSet:
  # Annotations to add to the DaemonSet.
  annotations: {}
lowDataMode: true
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment