Skip to content

Instantly share code, notes, and snippets.

@dpeek
Created January 29, 2025 03:58
Show Gist options
  • Save dpeek/66e3ce7147c63628e1997b13eab7eeeb to your computer and use it in GitHub Desktop.
{
// NOTE(review): this file uses //-style comments (JSONC) and is therefore NOT valid strict JSON.
// Make sure the consumer accepts comments (e.g. Wrangler's wrangler.json/wrangler.jsonc support).
// ## Top-Level Keys
// Whether Wrangler should keep variables configured in the dashboard on deploy.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#source-of-truth
"keep_vars": false,
// Whether Wrangler should send usage data to Cloudflare for this project. Defaults to true.
// See https://github.com/cloudflare/workers-sdk/blob/main/packages/wrangler/telemetry.md
"send_metrics": true,
// When making changes to your Durable Object classes, you must perform a migration.
// See https://developers.cloudflare.com/durable-objects/reference/durable-objects-migrations/
"migrations": [
{
// Migration tags are treated like unique names and are used to determine which migrations have already been applied.
"tag": "v1",
// The classes that are new in this migration.
"new_classes": ["DurableObjectClass"],
// The classes (with SQLite storage instead of KV) that are new in this migration.
"new_sqlite_classes": ["SqliteDurableObjectClass"],
// The classes that are renamed in this migration.
"renamed_classes": [
{
"from": "OldDurableObjectClass",
"to": "UpdatedDurableObjectClass"
}
],
// The classes that are deleted in this migration.
"deleted_classes": ["UnusedDurableObjectClass"]
}
],
// You can configure various aspects of local development, such as the local protocol or port.
"dev": {
"ip": "192.168.1.1",
"port": 8080,
"local_protocol": "http",
"upstream_protocol": "https",
"host": "example.com"
},
// ## Inheritable Keys
"upload_source_maps": true,
// Required: The name of your Worker (unique in your account). Alphanumeric characters (a,b,c, etc.) and dashes (-) only. Do not use underscores (_).
"name": "<WORKER_NAME>",
// Required: The path to the entrypoint of your Worker that will be executed. For example: ./src/worker.ts.
"main": "./worker.ts",
// Required: A date in the form yyyy-mm-dd, which will be used to determine which version of the Workers runtime is used.
// See https://developers.cloudflare.com/workers/configuration/compatibility-dates/
"compatibility_date": "2024-09-23",
// A list of flags that enable features from upcoming features of the Workers runtime, usually used together with compatibility_date.
// See https://developers.cloudflare.com/workers/configuration/compatibility-flags/
"compatibility_flags": [
// New Node support
"nodejs_compat_v2",
// Just AsyncLocalStorage
"nodejs_als"
],
// This is the ID of the account associated with your zone. You might have more than one account, so make sure to use the ID of the account associated with the zone/route you provide, if you provide one. It can also be specified through the CLOUDFLARE_ACCOUNT_ID environment variable.
"account_id": "<ACCOUNT_ID>",
// Enables use of *.workers.dev subdomain to test and deploy your Worker. If you have a Worker that is only for scheduled events, you can set this to false. Defaults to true.
"workers_dev": true,
// An array of routes that your Worker should be deployed to.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#types-of-routes
"routes": [
{
"pattern": "api.example.org",
"custom_domain": true
},
{
"pattern": "example.org/*",
"zone_name": "example.org"
}
],
// Path to a custom tsconfig.
"tsconfig": "./tsconfig.json",
// Cron definitions to trigger a Worker's scheduled function.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#triggers
"triggers": {
"crons": ["* * * * *"]
},
// An ordered list of rules that define which modules to import, and what type to import them as. You will need to specify rules to use Text, Data and CompiledWasm modules, or when you wish to have a .js file be treated as an ESModule instead of CommonJS.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#bundling
"rules": [
{
// The type of module. Must be one of: ESModule, CommonJS, CompiledWasm, Text or Data.
"type": "Text",
// An array of glob rules (for example, ["**/*.md"]).
"globs": ["**/*.md"],
// When set to true on a rule, this allows you to have multiple rules for the same Type.
"fallthrough": true
}
],
// Configures a custom build step to be run by Wrangler when building your Worker.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#custom-builds
"build": {
// The command used to build your Worker. On Linux and macOS, the command is executed in the sh shell and the cmd shell for Windows. The && and || shell operators may be used.
"command": "npm run build",
// The directory in which the command is executed.
"cwd": "build_cwd",
// The directory to watch for changes while using wrangler dev. Defaults to the current working directory.
// NOTE(review): empty array here means "watch nothing extra" in this example — confirm intended value.
"watch_dir": []
},
// Skip internal build steps and directly deploy your Worker script. You must have a plain JavaScript Worker with no dependencies.
"no_bundle": false,
// If true then Wrangler will traverse the file tree below base_dir. Any files that match rules will be included in the deployed Worker. Defaults to true if no_bundle is true, otherwise false. Can only be used with Module format Workers (not Service Worker format).
"find_additional_modules": false,
// The directory in which module "rules" should be evaluated when including additional files (via find_additional_modules) into a Worker deployment. Defaults to the directory containing the main entry point of the Worker if not specified.
"base_dir": "./dist",
// Determines whether Wrangler will preserve the file names of additional modules bundled with the Worker. The default is to prepend filenames with a content hash. For example, 34de60b44167af5c5a709e62a4e20c4f18c9e3b6-favicon.ico.
"preserve_file_names": false,
// Minify the Worker script before uploading.
"minify": true,
// Enables Workers Trace Events Logpush for a Worker. Any scripts with this property will automatically get picked up by the Workers Logpush job configured for your account. Defaults to false.
// See https://developers.cloudflare.com/workers/observability/logs/logpush/
"logpush": false,
// Configures limits to be imposed on execution at runtime.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#limits
"limits": {
// The maximum CPU time allowed per invocation, in milliseconds.
"cpu_ms": 100
},
// Configures automatic observability settings for telemetry data emitted from your Worker.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#observability
"observability": {
// When set to true on a Worker, logs for the Worker are persisted. Defaults to true for all new Workers.
"enabled": true,
// A number between 0 and 1, where 0 indicates zero out of one hundred requests are logged, and 1 indicates every request is logged. If head_sampling_rate is unspecified, it is configured to a default value of 1 (100%).
"head_sampling_rate": 0.1,
"logs": {
// Enable invocation logs for the Worker. Defaults to true.
"invocation_logs": false
}
},
// Configures static assets that will be served.
// See https://developers.cloudflare.com/workers/static-assets/binding/
"assets": {
// The folder of static assets to be served. For many frameworks, this is the ./public/, ./dist/, or ./build/ folder.
"directory": "./public/",
// Configuring the optional binding gives you access to the collection of assets from within your Worker script.
"binding": "<BINDING_NAME>",
// Controls whether assets will be served first on a matching request. experimental_serve_directly = true (default) will serve any static asset matching a request, while experimental_serve_directly = false will unconditionally invoke your Worker script.
"experimental_serve_directly": false,
// Determines the redirects and rewrites of requests for HTML content.
// See https://developers.cloudflare.com/workers/static-assets/routing/#html_handling
"html_handling": "auto-trailing-slash",
// Determines the handling of requests that do not map to an asset.
// See https://developers.cloudflare.com/workers/static-assets/routing/#not_found_handling
"not_found_handling": "none"
},
// ## Non-Inheritable Keys
// A map of values to substitute when deploying your Worker.
"define": {},
// A map of environment variables to set when deploying your Worker.
// See https://developers.cloudflare.com/workers/configuration/environment-variables/
"vars": {
"API_HOST": "staging.example.com",
// JSON objects are also supported as values, not just strings.
"SERVICE_X_DATA": {
"URL": "service-x-api.dev.example",
"MY_ID": 123
}
},
// A list of Durable Objects that your Worker should be bound to.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#durable-objects
"durable_objects": {
"bindings": [
{
// The name of the binding used to refer to the Durable Object.
"name": "<BINDING_NAME>",
// The exported class name of the Durable Object. Must be exported from main worker script.
"class_name": "<CLASS_NAME>",
// the name of the Worker where the Durable Object is defined, if it is external to this Worker.
// This option can be used both in local and remote development. In local development, you must
// run the external Worker in a separate process (via wrangler dev).
// In remote development, the appropriate remote binding must be used.
"script_name": "<WORKER_NAME>",
// the environment of the script_name to bind to.
"environment": "<ENVIRONMENT_NAME>"
}
]
},
// A list of KV namespaces that your Worker should be bound to.
// See https://developers.cloudflare.com/workers/wrangler/configuration/#kv-namespaces
"kv_namespaces": [
{
// The binding name used to refer to the namespace in your Worker. The binding must be a valid JavaScript variable name. For example, "MY_KV" or "productionKV" would both be valid names for the binding.
"binding": "<MY_NAMESPACE>",
"id": "<KV_ID>",
"preview_id": "<KV_PREVIEW_ID>"
}
],
// Bindings for queue products and consumers
"queues": {
// To bind Queues to your producer Worker, assign an array of the below.
"producers": [
{
// The name of the queue, used on the Cloudflare dashboard.
"queue": "<QUEUE_NAME>",
// The binding name used to refer to the queue in your Worker. The binding must be a valid JavaScript variable name. For example, "MY_QUEUE" or "productionQueue" would both be valid names for the binding.
"binding": "<BINDING_NAME>",
// The number of seconds to delay messages sent to a queue for by default. This can be overridden on a per-message or per-batch basis.
"delivery_delay": 60
}
],
// To bind Queues to your consumer Worker, assign an array of the below.
"consumers": [
{
// The name of the queue, used on the Cloudflare dashboard.
"queue": "<QUEUE_NAME>",
// The maximum number of messages allowed in each batch.
"max_batch_size": 10,
// The maximum number of seconds to wait for messages to fill a batch before the batch is sent to the consumer Worker.
"max_batch_timeout": 30,
// The maximum number of retries for a message, if it fails or retryAll() is invoked.
"max_retries": 10,
// The name of another queue to send a message if it fails processing at least max_retries times.
"dead_letter_queue": "<DEAD_LETTER_QUEUE_NAME>",
// The maximum number of concurrent consumers allowed to run at once. Leaving this unset will mean that the number of invocations will scale to the currently supported maximum.
"max_concurrency": 5,
// The number of seconds to delay retried messages for by default, before they are re-delivered to the consumer. This can be overridden on a per-message or per-batch basis when retrying messages.
"retry_delay": 120
}
]
},
// Browser rendering binding
"browser": {
// The binding name used to refer to the Browser. The value (string) you set will be used to reference this database in your Worker.
// The binding must be a valid JavaScript variable name.
"binding": "<BINDING_NAME>"
},
// D1 Bindings
"d1_databases": [
{
// The binding name used to refer to the D1 database. The value (string) you set will be used to reference this database in your Worker.
// The binding must be a valid JavaScript variable name.
// For example, binding = "MY_DB" or binding = "productionDB" would both be valid names for the binding.
"binding": "<BINDING_NAME>",
// The name of the database. This is a human-readable name that allows you to distinguish between different databases, and is set when
// you first create the database.
"database_name": "<DATABASE_NAME>",
// The ID of the database. The database ID is available when you first use wrangler d1 create or when you call wrangler d1 list, and
// uniquely identifies your database.
"database_id": "<DATABASE_ID>",
// The preview ID of this D1 database. If provided, wrangler dev will use this ID. Otherwise, it will use database_id. This option is
// required when using wrangler dev --remote.
"preview_database_id": "<PREVIEW_DATABASE_ID>",
// The migration directory containing the migration files. By default, wrangler d1 migrations create creates a folder named migrations.
// You can use migrations_dir to specify a different folder containing the migration files (for example, if you have a mono-repo setup,
// and want to use a single D1 instance across your apps/packages).
"migrations_dir": "./migrations"
}
],
// You can send an email about your Worker's activity from your Worker to an email address verified on Email Routing. This is useful for
// when you want to know about certain types of events being triggered, for example.
"send_email": [
{
"name": "<NAME_FOR_BINDING1>"
},
{
"name": "<NAME_FOR_BINDING2>",
"destination_address": "<YOUR_EMAIL>@example.com"
},
{
"name": "<NAME_FOR_BINDING3>",
"allowed_destination_addresses": [
"<YOUR_EMAIL>@example.com",
"<YOUR_EMAIL2>@example.com"
]
}
],
// R2 bindings
"r2_buckets": [
{
"binding": "<BINDING_NAME>",
"bucket_name": "<BUCKET_NAME>"
}
],
// Service bindings
"services": [
{
// The binding name used to refer to the bound Worker.
"binding": "<BINDING_NAME>",
// The name of the Worker in your account.
"service": "<WORKER_NAME>",
// The name of the entrypoint to bind to. If you do not specify an entrypoint, the default export of the Worker will be used.
"entrypoint": "<ENTRYPOINT_NAME>"
}
],
// Analytics bindings
"analytics_engine_datasets": [
{
"binding": "<BINDING_NAME>",
"dataset": "<DATASET_NAME>"
}
],
// Workers AI binding
"ai": {
// The binding name used to refer to the AI service.
"binding": "AI"
},
// A map of environment names to environment configurations.
"env": {
"staging": {
"name": "my-worker-staging",
"route": {
"pattern": "staging.example.org/*",
"zone_name": "example.org"
},
"kv_namespaces": [
{
"binding": "<MY_NAMESPACE>",
"id": "<STAGING_KV_ID>"
}
]
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment