# sentry-pr-1544
prefix:
# Set this to true to support IPv6 networks
ipv6: false
user:
create: true
email: admin@sentry.local
password: aaaa
## set this value to an existingSecret name to create the admin user with the password in the secret
# existingSecret: sentry-admin-password
## set this value to an existingSecretKey which holds the password for the sentry admin user; the default key is `admin-password`
# existingSecretKey: admin-password
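## for example (illustrative, not created by the chart), such a secret could be created with:
##   kubectl create secret generic sentry-admin-password --from-literal=admin-password=<your-password>
## and then referenced here via `existingSecret: sentry-admin-password`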
# this is required on the first installation, as sentry has to be initialized first
# it is recommended to set this to false for subsequent helm chart upgrades,
# as you will have some downtime on each update if it runs as a hook
# when true, relay & snuba consumers are deployed as post hooks
asHook: true
images:
sentry:
# repository: getsentry/sentry
# tag: Chart.AppVersion
# pullPolicy: IfNotPresent
imagePullSecrets: []
snuba:
# repository: getsentry/snuba
# tag: Chart.AppVersion
# pullPolicy: IfNotPresent
imagePullSecrets: []
relay:
# repository: getsentry/relay
# tag: Chart.AppVersion
# pullPolicy: IfNotPresent
imagePullSecrets: []
symbolicator:
# repository: getsentry/symbolicator
# tag: Chart.AppVersion
# pullPolicy: IfNotPresent
imagePullSecrets: []
vroom:
# repository: getsentry/vroom
# tag: Chart.AppVersion
# pullPolicy: IfNotPresent
imagePullSecrets: []
serviceAccount:
# serviceAccount.annotations -- Additional Service Account annotations.
annotations: {}
# serviceAccount.enabled -- If `true`, a custom Service Account will be used.
enabled: false
# serviceAccount.name -- The base name of the ServiceAccount to use. Will be appended with e.g. `snuba-api` or `web` for the pods accordingly.
name: "sentry"
# serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account.
automountServiceAccountToken: true
vroom:
# annotations: {}
# args: []
replicas: 1
env: []
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
resources: {}
# requests:
# cpu: 100m
# memory: 700Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# priorityClassName: ""
service:
annotations: {}
# tolerations: []
# podLabels: {}
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
sidecars: []
# topologySpreadConstraints: []
volumes: []
volumeMounts: []
relay:
enabled: true
# annotations: {}
replicas: 1
# args: []
mode: managed
env: []
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
resources: {}
# requests:
# cpu: 100m
# memory: 700Mi
affinity: {}
nodeSelector: {}
# healthCheck:
# readinessRequestPath: ""
securityContext: {}
# if you are using the GKE Ingress controller, use 'securityPolicy' to add a Google Cloud Armor Ingress policy
securityPolicy: ""
# if you are using the GKE Ingress controller, use 'customResponseHeaders' to add custom response headers
customResponseHeaders: []
containerSecurityContext: {}
service:
annotations: {}
# tolerations: []
# podLabels: {}
# priorityClassName: ""
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
volumeMounts: []
init:
resources: {}
# additionalArgs: []
# credentialsSubcommand: ""
# env: []
# volumes: []
# volumeMounts: []
# cache:
# envelopeBufferSize: 1000
# logging:
# level: info
# format: json
processing:
kafkaConfig:
messageMaxBytes: 50000000
# messageTimeoutMs:
# requestTimeoutMs:
# deliveryTimeoutMs:
# apiVersionRequestTimeoutMs:
# additionalKafkaConfig:
# - name: security.protocol
# value: "SSL"
# Override custom Kafka topic names
# WARNING: If you update this and you are also using the Kafka subchart, you need to update the provisioned topic names under `kafka.provisioning.topics` in these values as well!
kafkaTopicOverrides:
prefix: "sentry.dev."
# to provide a GeoIP database, enable and reference the volume below
geodata:
accountID: ""
licenseKey: ""
editionIDs: ""
persistence:
## If defined, storageClassName: <storageClass>
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
# storageClass: "" # for example: csi-s3
size: 1Gi
volumeName: "" # for example: data-sentry-geoip
# mountPath of the volume containing the database
mountPath: "" # for example: /usr/share/GeoIP
# path to the geoip database inside the volume mount
path: "" # for example: /usr/share/GeoIP/GeoLite2-City.mmdb
sentry:
# to avoid generating a sentry-secret, use these 2 values to reference an existing secret
# existingSecret: "my-secret"
# existingSecretKey: "my-secret-key"
singleOrganization: true
web:
enabled: true
# if using the filesystem filestore backend with RWO access, set strategyType to Recreate
strategyType: RollingUpdate
replicas: 1
env: []
existingSecretEnv: []
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
resources: {}
# requests:
# cpu: 200m
# memory: 850Mi
affinity: {}
nodeSelector: {}
securityContext: {}
# if you are using the GKE Ingress controller, use 'securityPolicy' to add a Google Cloud Armor Ingress policy
securityPolicy: ""
# if you are using the GKE Ingress controller, use 'customResponseHeaders' to add custom response headers
customResponseHeaders: []
containerSecurityContext: {}
service:
annotations: {}
# tolerations: []
# podLabels: {}
# Mount and use custom CA
# customCA:
# secretName: custom-ca
# item: ca.crt
# logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL
# logFormat: "human" # human|machine
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
volumeMounts: []
# workers: 3
features:
orgSubdomains: false
vstsLimitedScopes: true
enableProfiling: false
enableSessionReplay: true
enableFeedback: false
enableSpan: false
# example customFeature to enable Metrics (beta) https://docs.sentry.io/product/metrics/
# customFeatures:
# - organizations:custom-metric
# - organizations:custom-metrics-experimental
# - organizations:derive-code-mappings
worker:
enabled: true
replicas: 1
# concurrency: 4
env: []
existingSecretEnv: []
resources: {}
# requests:
# cpu: 1000m
# memory: 1100Mi
affinity: {}
nodeSelector: {}
# tolerations: []
# podLabels: {}
# logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL
# logFormat: "machine" # human|machine
# excludeQueues: ""
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
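## a rough sketch of that approach (not rendered by this chart; it assumes a prometheus adapter
## exposing a RabbitMQ queue-length metric, and all names below are illustrative):
# apiVersion: autoscaling/v2
# kind: HorizontalPodAutoscaler
# metadata:
#   name: sentry-worker
# spec:
#   scaleTargetRef:
#     apiVersion: apps/v1
#     kind: Deployment
#     name: sentry-worker
#   minReplicas: 2
#   maxReplicas: 10
#   metrics:
#     - type: External
#       external:
#         metric:
#           name: rabbitmq_queue_messages_ready
#         target:
#           type: AverageValue
#           averageValue: "1000"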
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
livenessProbe:
enabled: true
periodSeconds: 60
timeoutSeconds: 10
failureThreshold: 3
sidecars: []
topologySpreadConstraints: []
volumes: []
volumeMounts: []
# allows dedicating some workers to specific queues
workerEvents:
## If the number of exceptions increases, it is recommended to enable workerEvents
enabled: false
queues: "events.save_event,post_process_errors"
## If the number of exceptions increases and workerEvents is enabled, it is recommended to also increase the number of its replicas
replicas: 1
# concurrency: 4
env: []
resources: {}
affinity: {}
nodeSelector: {}
# tolerations: []
# podLabels: {}
# logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL
# logFormat: "machine" # human|machine
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
livenessProbe:
enabled: false
periodSeconds: 60
timeoutSeconds: 10
failureThreshold: 3
sidecars: []
topologySpreadConstraints: []
volumes: []
volumeMounts: []
# allows dedicating some workers to specific queues
workerTransactions:
enabled: false
queues: "events.save_event_transaction,post_process_transactions"
replicas: 1
# concurrency: 4
env: []
resources: {}
affinity: {}
nodeSelector: {}
# tolerations: []
# podLabels: {}
# logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL
# logFormat: "machine" # human|machine
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
livenessProbe:
enabled: false
periodSeconds: 60
timeoutSeconds: 10
failureThreshold: 3
sidecars: []
topologySpreadConstraints: []
volumes: []
volumeMounts: []
ingestConsumerAttachments:
enabled: true
replicas: 1
# concurrency: 4
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 700Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# maxBatchSize: ""
# logLevel: info
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
ingestConsumerEvents:
enabled: true
replicas: 1
# concurrency: 4
env: []
resources: {}
# requests:
# cpu: 300m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# maxBatchSize: ""
# logLevel: "info"
# inputBlockSize: ""
# maxBatchTimeMs: ""
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
ingestConsumerTransactions:
enabled: true
replicas: 1
# concurrency: 4
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# maxBatchSize: ""
# logLevel: "info"
# inputBlockSize: ""
# maxBatchTimeMs: ""
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
ingestReplayRecordings:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 100m
# memory: 250Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
ingestProfiles:
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
ingestOccurrences:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 100m
# memory: 250Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
ingestMonitors:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 100m
# memory: 250Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
billingMetricsConsumer:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 100m
# memory: 250Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
genericMetricsConsumer:
enabled: true
replicas: 1
# concurrency: 4
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# maxPollIntervalMs: ""
# logLevel: "info"
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
metricsConsumer:
enabled: true
replicas: 1
# concurrency: 4
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# logLevel: "info"
# maxPollIntervalMs: ""
# it's better to use prometheus adapter and scale based on
# the size of the rabbitmq queue
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
cron:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
# volumeMounts: []
# logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL
# logFormat: "machine" # human|machine
subscriptionConsumerEvents:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
# volumeMounts: []
subscriptionConsumerSessions:
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
# volumeMounts: []
subscriptionConsumerTransactions:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
# volumeMounts: []
postProcessForwardErrors:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 150m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
# volumeMounts: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
postProcessForwardTransactions:
enabled: true
replicas: 1
# processes: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumeMounts: []
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
postProcessForwardIssuePlatform:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 300m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
# volumeMounts: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
subscriptionConsumerGenericMetrics:
enabled: true
replicas: 1
# concurrency: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
# volumeMounts: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
subscriptionConsumerMetrics:
enabled: true
replicas: 1
# concurrency: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
sidecars: []
topologySpreadConstraints: []
volumes: []
# volumeMounts: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
cleanup:
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
concurrency: 1
enabled: true
schedule: "0 0 * * *"
days: 90
# logLevel: INFO
logLevel: ''
# securityContext: {}
# containerSecurityContext: {}
sidecars: []
volumes: []
# volumeMounts: []
serviceAccount: {}
snuba:
api:
enabled: true
replicas: 1
# set command to ["snuba","api"] if securityContext.runAsUser > 0
# see: https://github.com/getsentry/snuba/issues/956
command: []
# - snuba
# - api
env: []
probeInitialDelaySeconds: 10
liveness:
timeoutSeconds: 2
readiness:
timeoutSeconds: 2
resources: {}
# requests:
# cpu: 100m
# memory: 150Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
service:
annotations: {}
# tolerations: []
# podLabels: {}
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
sidecars: []
topologySpreadConstraints: []
volumes: []
# volumeMounts: []
consumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# noStrictOffsetReset: false
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
outcomesConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
maxBatchSize: "3"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
# maxBatchTimeMs: ""
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
outcomesBillingConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
maxBatchSize: "3"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
replacer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
# maxBatchTimeMs: ""
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# volumes: []
# volumeMounts: []
metricsConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumes: []
# volumeMounts: []
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
subscriptionConsumerEvents:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumes: []
# volumeMounts: []
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
genericMetricsCountersConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumes: []
# volumeMounts: []
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
genericMetricsDistributionConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumes: []
# volumeMounts: []
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
genericMetricsSetsConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumes: []
# volumeMounts: []
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
subscriptionConsumerMetrics:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# volumes: []
# volumeMounts: []
subscriptionConsumerTransactions:
enabled: true
replicas: 1
env: []
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# volumes: []
# volumeMounts: []
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
subscriptionConsumerSessions:
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# commitBatchSize: 1
# autoOffsetReset: "earliest"
sidecars: []
volumes: []
# noStrictOffsetReset: false
# volumeMounts: []
replaysConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
sessionsConsumer:
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
# noStrictOffsetReset: false
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
# maxBatchTimeMs: ""
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
transactionsConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
profilingProfilesConsumer:
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
profilingFunctionsConsumer:
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
issueOccurrenceConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
spansConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
groupAttributesConsumer:
enabled: true
replicas: 1
env: []
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# autoOffsetReset: "earliest"
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 320
# maxBatchSize: ""
# processes: ""
# inputBlockSize: ""
# outputBlockSize: ""
maxBatchTimeMs: 750
# queuedMaxMessagesKbytes: ""
# queuedMinMessages: ""
# noStrictOffsetReset: false
# volumeMounts:
# - mountPath: /dev/shm
# name: dshm
# volumes:
# - name: dshm
# emptyDir:
# medium: Memory
dbInitJob:
env: []
migrateJob:
env: []
clickhouse:
maxConnections: 100
rustConsumer: false
hooks:
enabled: true
preUpgrade: false
removeOnSuccess: true
activeDeadlineSeconds: 600
shareProcessNamespace: false
dbCheck:
enabled: true
image:
# repository: subfuzion/netcat
# tag: latest
# pullPolicy: IfNotPresent
imagePullSecrets: []
env: []
# podLabels: {}
podAnnotations: {}
resources:
limits:
memory: 64Mi
requests:
cpu: 100m
memory: 64Mi
affinity: {}
nodeSelector: {}
securityContext: {}
containerSecurityContext: {}
# tolerations: []
# volumes: []
# volumeMounts: []
dbInit:
enabled: true
env: []
# podLabels: {}
podAnnotations: {}
resources:
limits:
memory: 2048Mi
requests:
cpu: 300m
memory: 2048Mi
sidecars: []
volumes: []
affinity: {}
nodeSelector: {}
# tolerations: []
# volumes: []
# volumeMounts: []
snubaInit:
enabled: true
# As snubaInit doesn't support configuring partitions and replication factor, you can disable snubaInit's kafka topic creation by setting `hooks.snubaInit.kafka.enabled` to `false`,
# and create the topics using `kafka.provisioning.topics` with the desired partitions and replication factor.
# Note that when you set `hooks.snubaInit.kafka.enabled` to `false`, the snuba component might fail to start if newly added topics are not created by `kafka.provisioning`.
kafka:
enabled: true
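## for example (sizing values are illustrative): disable topic creation here,
# enabled: false
## and provision the topics via the Kafka subchart instead, e.g. under `kafka.provisioning`:
# replicationFactor: 3
# topics:
#   - name: sentry.dev.ingest-events
#     partitions: 6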
# podLabels: {}
podAnnotations: {}
resources:
limits:
cpu: 2000m
memory: 1Gi
requests:
cpu: 700m
memory: 1Gi
affinity: {}
nodeSelector: {}
# tolerations: []
# volumes: []
# volumeMounts: []
snubaMigrate:
enabled: true
# podLabels: {}
# volumes: []
# volumeMounts: []
system:
## be sure to include the scheme on the url, for example: "https://sentry.example.com"
url: ""
adminEmail: ""
## This should only be used if you’re installing Sentry behind your company’s firewall.
public: false
## This will generate one for you (it must be given upon updates)
# secretKey: "xx"
mail:
# For example: smtp
backend: dummy
useTls: false
useSsl: false
username: ""
password: ""
# existingSecret: secret-name
## set existingSecretKey if key name inside existingSecret is different from 'mail-password'
# existingSecretKey: secret-key-name
port: 25
host: ""
from: ""
symbolicator:
enabled: false
api:
usedeployment: true # Set true to use Deployment, false for StatefulSet
persistence:
enabled: true # Set true for using PersistentVolumeClaim, false for emptyDir
accessModes: ["ReadWriteOnce"]
# storageClassName: standard
size: "10Gi"
replicas: 1
env: []
probeInitialDelaySeconds: 10
resources: {}
affinity: {}
nodeSelector: {}
securityContext: {}
topologySpreadConstraints: []
containerSecurityContext: {}
# tolerations: []
# podLabels: {}
# priorityClassName: "xxx"
config: |-
# See: https://getsentry.github.io/symbolicator/#configuration
cache_dir: "/data"
bind: "0.0.0.0:3021"
logging:
level: "warn"
metrics:
statsd: null
prefix: "symbolicator"
sentry_dsn: null
connect_to_reserved_ips: true
# caches:
# downloaded:
# max_unused_for: 1w
# retry_misses_after: 5m
# retry_malformed_after: 5m
# derived:
# max_unused_for: 1w
# retry_misses_after: 5m
# retry_malformed_after: 5m
# diagnostics:
# retention: 1w
# TODO autoscaling is not yet implemented
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
targetCPUUtilizationPercentage: 50
# volumes: []
# volumeMounts: []
# TODO The cleanup cronjob is not yet implemented
cleanup:
enabled: false
# podLabels: {}
# affinity: {}
# env: []
auth:
register: true
service:
name: sentry
type: ClusterIP
externalPort: 9000
annotations: {}
# externalIPs:
# - 192.168.0.1
# loadBalancerSourceRanges: []
# https://github.com/settings/apps (Create a GitHub App)
github: {}
# github:
# appId: "xxxx"
# appName: MyAppName
# clientId: "xxxxx"
# clientSecret: "xxxxx"
# privateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpA" !!!! Don't forget a trailing \n
# webhookSecret: "xxxxx"
#
# Note: if you use `existingSecret`, the `clientId`, `clientSecret`, `privateKey`, and `webhookSecret`
# params above are ignored, because the chart assumes they are stored in `existingSecret`. So you
# must define all of the required keys in that secret (client-id, client-secret, webhook-secret, private-key),
# setting them at least to empty strings if they are not needed.
#
# existingSecret: "xxxxx"
# existingSecretPrivateKeyKey: "" # by default "private-key"
# existingSecretWebhookSecretKey: "" # by default "webhook-secret"
# existingSecretClientIdKey: "" # by default "client-id"
# existingSecretClientSecretKey: "" # by default "client-secret"
#
# Reference -> https://docs.sentry.io/product/integrations/source-code-mgmt/github/
# https://developers.google.com/identity/sign-in/web/server-side-flow#step_1_create_a_client_id_and_client_secret
google: {}
# google:
# clientId: ""
# clientSecret: ""
# existingSecret: ""
# existingSecretClientIdKey: "" # by default "client-id"
# existingSecretClientSecretKey: "" # by default "client-secret"
slack: {}
# slack:
# clientId:
# clientSecret:
# signingSecret:
# existingSecret:
# Reference -> https://develop.sentry.dev/integrations/slack/
discord: {}
# discord:
# applicationId:
# publicKey:
# clientSecret:
# botToken:
# existingSecret:
# Reference -> https://develop.sentry.dev/integrations/discord/
openai: {}
# existingSecret: "xxxxx"
# existingSecretKey: "" # by default "api-token"
nginx:
enabled: true
containerPort: 8080
existingServerBlockConfigmap: '{{ template "sentry.fullname" . }}'
resources: {}
replicaCount: 1
service:
type: ClusterIP
ports:
http: 80
extraLocationSnippet: false
customReadinessProbe:
tcpSocket:
port: http
initialDelaySeconds: 5
timeoutSeconds: 3
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
# extraLocationSnippet: |
# location /admin {
# allow 1.2.3.4; # VPN network
# deny all;
# proxy_pass http://sentry;
# }
# Use this to enable an extra service account
# serviceAccount:
# create: false
# name: nginx
metrics:
serviceMonitor: {}
ingress:
enabled: true
# If you are using traefik ingress controller, switch this to 'traefik'
# if you are using AWS ALB Ingress controller, switch this to 'aws-alb'
# if you are using GKE Ingress controller, switch this to 'gke'
regexPathStyle: nginx
# ingressClassName: nginx
# If you are using the AWS ALB Ingress controller, switch this to true if you want to activate the http to https redirection.
alb:
httpRedirect: false
# annotations:
# If you are using the nginx ingress controller, please use at least these 2 annotations
# kubernetes.io/ingress.class: nginx
# nginx.ingress.kubernetes.io/use-regex: "true"
# https://github.com/getsentry/self-hosted/issues/1927
# nginx.ingress.kubernetes.io/proxy-buffers-number: "16"
# nginx.ingress.kubernetes.io/proxy-buffer-size: "32k"
#
# hostname:
# additionalHostNames: []
#
# tls:
# - secretName:
# hosts:
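## a filled-in example (hostname and TLS secret name are illustrative, matching system.url above):
# hostname: sentry.example.com
# tls:
#   - secretName: sentry-tls
#     hosts:
#       - sentry.example.com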
filestore:
# Set to one of filesystem, gcs or s3 as supported by Sentry.
backend: filesystem
filesystem:
path: /var/lib/sentry/files
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 10Gi
## Whether to mount the persistent volume to the Sentry worker and
## cron deployments. This setting needs to be enabled for some advanced
## Sentry features, such as private source maps. If you disable this
## setting, the Sentry workers will not have access to artifacts you upload
## through the web deployment.
## Please note that you may need to change your accessMode to ReadWriteMany
## if you plan on having the web, worker and cron deployments run on
## different nodes.
persistentWorkers: false
## If existingClaim is specified, no PVC will be created and this claim will
## be used
existingClaim: ""
gcs: {}
## Point this at a pre-configured secret containing a service account. The resulting
## secret will be mounted at /var/run/secrets/google
# secretName:
# credentialsFile: credentials.json
# bucketName:
## Currently unconfigured and changing this has no impact on the template configuration.
## Note that you can use a secret with default references "s3-access-key-id" and "s3-secret-access-key".
## Otherwise, you can use custom secret references, or use plain text values.
s3: {}
# existingSecret:
# accessKeyIdRef:
# secretAccessKeyRef:
# accessKey:
# secretKey:
# bucketName:
# endpointUrl:
# signature_version:
# region_name:
# default_acl:
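## a minimal example using plain-text values (illustrative; an existingSecret is preferable),
## used together with `backend: s3` above:
# s3:
#   accessKey: "my-access-key"
#   secretKey: "my-secret-key"
#   bucketName: "sentry-files"
#   endpointUrl: "https://s3.example.com"
#   region_name: "us-east-1"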
config:
# No YAML Extension Config Given
configYml: {}
sentryConfPy: |
# No Python Extension Config Given
snubaSettingsPy: |
# No Python Extension Config Given
relay: |
# No YAML relay config given
web:
httpKeepalive: 15
maxRequests: 100000
maxRequestsDelta: 500
maxWorkerLifetime: 86400
clickhouse:
enabled: true
clickhouse:
replicas: "1"
imageVersion: "21.8.13.6"
configmap:
remote_servers:
internal_replication: true
replica:
backup:
enabled: false
zookeeper_servers:
enabled: true
config:
- index: "clickhouse"
hostTemplate: "{{ .Release.Name }}-zookeeper-clickhouse"
port: "2181"
users:
enabled: false
user:
# the first user will be used if enabled
- name: default
config:
password: ""
networks:
- ::/0
profile: default
quota: default
persistentVolumeClaim:
enabled: true
dataPersistentVolume:
enabled: true
accessModes:
- "ReadWriteOnce"
storage: "30Gi"
## Use this to enable an extra service account
# serviceAccount:
# annotations: {}
# enabled: false
# name: "sentry-clickhouse"
# automountServiceAccountToken: true
## This value is only used when clickhouse.enabled is set to false
##
externalClickhouse:
## Hostname or ip address of external clickhouse
##
host: "clickhouse"
tcpPort: 9000
httpPort: 8123
username: default
password: ""
database: default
singleNode: true
# existingSecret: secret-name
## set existingSecretKey if key name inside existingSecret is different from 'postgres-password'
# existingSecretKey: secret-key-name
## Cluster name, can be found in config
## (https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers)
## or by executing `select * from system.clusters`
##
# clusterName: test_shard_localhost
# Settings for Zookeeper.
# See https://github.com/bitnami/charts/tree/master/bitnami/zookeeper
zookeeper:
enabled: true
nameOverride: zookeeper-clickhouse
replicaCount: 1
## If the number of exceptions increases, you need to increase persistence.size
# persistence:
# size: 8Gi
# Settings for Kafka.
# See https://github.com/bitnami/charts/tree/master/bitnami/kafka
kafka:
enabled: true
provisioning:
## Increasing the replicationFactor enhances data reliability during Kafka pod failures by replicating data across multiple brokers.
# replicationFactor: 1
enabled: true
# Topic list is based on files below.
# - https://github.com/getsentry/snuba/blob/master/snuba/utils/streams/topics.py
# - https://github.com/getsentry/self-hosted/blob/master/install/create-kafka-topics.sh#L6
## Default number of partitions for topics when unspecified
##
# numPartitions: 1
# Note that snuba component might fail if you set `hooks.snubaInit.kafka.enabled` to `false` and remove the topics from this default topic list.
topics:
- name: sentry.dev.events
## Number of partitions for this topic
# partitions: 1
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.event-replacements
- name: sentry.dev.snuba-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.cdc
- name: sentry.dev.transactions
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.snuba-transactions-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.snuba-metrics
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.outcomes
- name: sentry.dev.outcomes-billing
- name: sentry.dev.ingest-sessions
- name: sentry.dev.snuba-sessions-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.snuba-metrics-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.scheduled-subscriptions-events
- name: sentry.dev.scheduled-subscriptions-transactions
- name: sentry.dev.scheduled-subscriptions-sessions
- name: sentry.dev.scheduled-subscriptions-metrics
- name: sentry.dev.scheduled-subscriptions-generic-metrics-sets
- name: sentry.dev.scheduled-subscriptions-generic-metrics-distributions
- name: sentry.dev.scheduled-subscriptions-generic-metrics-counters
- name: sentry.dev.events-subscription-results
- name: sentry.dev.transactions-subscription-results
- name: sentry.dev.sessions-subscription-results
- name: sentry.dev.metrics-subscription-results
- name: sentry.dev.generic-metrics-subscription-results
- name: sentry.dev.snuba-queries
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.processed-profiles
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.profiles-call-tree
- name: sentry.dev.ingest-replay-events
config:
"message.timestamp.type": LogAppendTime
"max.message.bytes": "15000000"
- name: sentry.dev.snuba-generic-metrics
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.snuba-generic-metrics-sets-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.snuba-generic-metrics-distributions-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.snuba-generic-metrics-counters-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.generic-events
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.snuba-generic-events-commit-log
config:
"cleanup.policy": "compact,delete"
"min.compaction.lag.ms": "3600000"
- name: sentry.dev.group-attributes
config:
"message.timestamp.type": LogAppendTime
- name: sentry.dev.snuba-attribution
- name: sentry.dev.snuba-dead-letter-metrics
- name: sentry.dev.snuba-dead-letter-sessions
- name: sentry.dev.snuba-dead-letter-generic-metrics
- name: sentry.dev.snuba-dead-letter-replays
- name: sentry.dev.snuba-dead-letter-generic-events
- name: sentry.dev.snuba-dead-letter-querylog
- name: sentry.dev.snuba-dead-letter-group-attributes
- name: sentry.dev.ingest-attachments
- name: sentry.dev.ingest-transactions
- name: sentry.dev.ingest-events
## If the number of exceptions increases, it is recommended to increase the number of partitions for ingest-events
# partitions: 1
- name: sentry.dev.ingest-replay-recordings
- name: sentry.dev.ingest-metrics
- name: sentry.dev.ingest-performance-metrics
- name: sentry.dev.ingest-monitors
- name: sentry.dev.profiles
- name: sentry.dev.ingest-occurrences
- name: sentry.dev.snuba-spans
- name: sentry.dev.shared-resources-usage
- name: sentry.dev.snuba-metrics-summaries
listeners:
client:
protocol: "PLAINTEXT"
controller:
protocol: "PLAINTEXT"
interbroker:
protocol: "PLAINTEXT"
external:
protocol: "PLAINTEXT"
zookeeper:
enabled: false
kraft:
enabled: true
controller:
replicaCount: 3
## if the load on the kafka controller increases, resourcesPreset must be increased
# resourcesPreset: small # small, medium, large, xlarge, 2xlarge
## if the load on the kafka controller increases, persistence.size must be increased
# persistence:
# size: 8Gi
## Use this to enable an extra service account
# serviceAccount:
# create: false
# name: kafka
## Use this to enable an extra service account
# zookeeper:
# serviceAccount:
# create: false
# name: zookeeper
## This value is only used when kafka.enabled is set to false
##
externalKafka:
## Hostname or ip address of external kafka
##
# host: "kafka-confluent"
port: 9092
sourcemaps:
enabled: false
redis:
enabled: true
replica:
replicaCount: 1
auth:
enabled: false
sentinel: false
## Just omit the password field if your redis cluster doesn't use password
# password: redis
# existingSecret: secret-name
## set existingSecretPasswordKey if key name inside existingSecret is different from 'redis-password'
# existingSecretPasswordKey: secret-key-name
nameOverride: sentry-redis
master:
persistence:
enabled: true
## Use this to enable an extra service account
# serviceAccount:
# create: false
# name: sentry-redis
## This value is only used when redis.enabled is set to false
##
externalRedis:
## Hostname or ip address of external redis cluster
##
# host: "redis"
port: 6379
## Just omit the password field if your redis cluster doesn't use password
# password: redis
# existingSecret: secret-name
## set existingSecretKey if key name inside existingSecret is different from 'redis-password'
# existingSecretKey: secret-key-name
## Integer database number to use for redis
# db: 0
## Use ssl for the connection to Redis (True/False)
# ssl: false
postgresql:
enabled: true
nameOverride: sentry-postgresql
auth:
database: sentry
replication:
enabled: false
readReplicas: 2
synchronousCommit: "on"
numSynchronousReplicas: 1
applicationName: sentry
## Use this to enable an extra service account
# serviceAccount:
# enabled: false
## Default connection max age is 0 (unlimited connections)
## Set to a higher number to close connections after a period of time in seconds
connMaxAge: 0
## If you are increasing the number of replicas, you need to increase max_connections
# primary:
# extendedConfiguration: |
# max_connections=100
## If the number of exceptions increases, you need to increase persistence.size
# primary:
# persistence:
# size: 8Gi
## This value is only used when postgresql.enabled is set to false
## Set either externalPostgresql.password or externalPostgresql.existingSecret to configure password
externalPostgresql:
# host: postgres
port: 5432
username: postgres
# password: postgres
# existingSecret: secret-name
# set existingSecretKeys to the key names inside existingSecret; if a key is not specified, the value from the secret won't be used
# if externalPostgresql.existingSecret is used, externalPostgresql.existingSecretKeys.password must be specified.
existingSecretKeys: {}
# password: postgresql-password # Required if existingSecret is used. Key in existingSecret.
# username: username
# database: database
# port: port
# host: host
database: sentry
# sslMode: require
## Default connection max age is 0 (unlimited connections)
## Set to a higher number to close connections after a period of time in seconds
connMaxAge: 0
rabbitmq:
## If disabled, Redis will be used instead as the broker.
enabled: true
vhost: /
clustering:
forceBoot: true
rebalance: true
replicaCount: 1
auth:
erlangCookie: pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA
username: guest
password: guest
nameOverride: ""
pdb:
create: true
persistence:
enabled: true
resources: {}
memoryHighWatermark: {}
# enabled: true
# type: relative
# value: 0.4
extraSecrets:
load-definition:
load_definition.json: |
{
"users": [
{
"name": "{{ .Values.auth.username }}",
"password": "{{ .Values.auth.password }}",
"tags": "administrator"
}
],
"permissions": [{
"user": "{{ .Values.auth.username }}",
"vhost": "/",
"configure": ".*",
"write": ".*",
"read": ".*"
}],
"policies": [
{
"name": "ha-all",
"pattern": ".*",
"vhost": "/",
"definition": {
"ha-mode": "all",
"ha-sync-mode": "automatic",
"ha-sync-batch-size": 1
}
}
],
"vhosts": [
{
"name": "/"
}
]
}
loadDefinition:
enabled: true
existingSecret: load-definition
extraConfiguration: |
load_definitions = /app/load_definition.json
## Use this to enable an extra service account
# serviceAccount:
# create: false
# name: rabbitmq
memcached:
memoryLimit: "2048"
maxItemSize: "26214400"
args:
- "memcached"
- "-u memcached"
- "-p 11211"
- "-v"
- "-m $(MEMCACHED_MEMORY_LIMIT)"
- "-I $(MEMCACHED_MAX_ITEM_SIZE)"
extraEnvVarsCM: "sentry-memcached"
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
podAnnotations: {}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 2
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 2
failureThreshold: 3
successThreshold: 1
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
affinity: {}
securityContext: {}
containerSecurityContext: {}
# schedulerName:
# Optional extra labels for pod, e.g. redis-client: "true"
# podLabels: {}
service:
type: ClusterIP
labels: {}
image:
repository: prom/statsd-exporter
tag: v0.17.0
pullPolicy: IfNotPresent
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
additionalLabels: {}
namespace: ""
namespaceSelector: {}
# Default: scrape .Release.Namespace only
# To scrape all, use the following:
# namespaceSelector:
# any: true
scrapeInterval: 30s
# honorLabels: true
relabelings: []
metricRelabelings: []
revisionHistoryLimit: 10
# dnsPolicy: "ClusterFirst"
# dnsConfig:
# nameservers: []
# searches: []
# options: []
extraManifests: []