Start 2 nodes
$ docker-compose up -d
[+] Running 5/5
⠿ Network sandbox_default Created 0.1s
⠿ Volume "sandbox_couchdb_data2" Created 0.0s
⠿ Volume "sandbox_couchdb_data1" Created 0.0s
⠿ Container sandbox_node2_1 Started 1.9s
⠿ Container sandbox_node1_1 Started 2.0s
$ curl -q -K .curlrc http://localhost:15984 | jq '.'
{
"couchdb": "Welcome",
"version": "3.1.1",
"git_sha": "ce596c65d",
"uuid": "7a5a39814f151447abd8ef79269420b7",
"features": [
"access-ready",
"partitioned",
"pluggable-storage-engines",
"reshard",
"scheduler"
],
"vendor": {
"name": "The Apache Software Foundation"
}
}
$ curl -q -K .curlrc -X GET http://localhost:15984/_membership | jq '.'
{
"all_nodes": [
"[email protected]"
],
"cluster_nodes": [
"[email protected]"
]
}
$ curl -q -K .curlrc -X GET http://localhost:25984/_membership | jq '.'
{
"all_nodes": [
"[email protected]"
],
"cluster_nodes": [
"[email protected]"
]
}
Join nodes into a cluster
$ curl -q -K .curlrc -X PUT http://localhost:15984/_node/_local/_config/couchdb/uuid -d '"INIT-UUID"'
"7a5a39814f151447abd8ef79269420b7"
$ curl -q -K .curlrc -X PUT http://localhost:25984/_node/_local/_config/couchdb/uuid -d '"INIT-UUID"'
""
$ curl -q -K .curlrc -X POST http://localhost:15984/_cluster_setup -d '{"action": "enable_cluster", "bind_address": "0.0.0.0", "node_count": "2"}'
{"error":"bad_request","reason":"Cluster is already enabled"}
$ curl -q -K .curlrc -X POST http://localhost:15984/_cluster_setup -d '{"action": "add_node", "host": "node2.dev", "port": 5984, "username": "admin", "password": "god"}'
{"ok":true}
$ curl -q -K .curlrc -X POST http://localhost:15984/_cluster_setup -d '{"action": "finish_cluster"}'
{"ok":true}
$ curl -q -K .curlrc -X GET http://localhost:15984/_cluster_setup | jq '.'
{
"state": "cluster_finished"
}
$ curl -q -K .curlrc -X GET http://localhost:15984/_membership | jq '.'
{
"all_nodes": [
"[email protected]",
"[email protected]"
],
"cluster_nodes": [
"[email protected]",
"[email protected]"
]
}
$ curl -q -K .curlrc -X GET http://localhost:25984/_membership | jq '.'
{
"all_nodes": [
"[email protected]",
"[email protected]"
],
"cluster_nodes": [
"[email protected]",
"[email protected]"
]
}
Create a database with some random docs
$ curl -q -K .curlrc -X PUT http://localhost:15984/koi
{"ok":true}
$ while read -r row; do \
curl -q -K .curlrc -X POST http://localhost:15984/koi -d "$row"; \
done <docs.json
{"ok":true,"id":"eaef2c7ca340d5c953c276c72e000200","rev":"1-cfcea61d826396faee370508b828f9d5"}
{"ok":true,"id":"eaef2c7ca340d5c953c276c72e000e85","rev":"1-02843978cc8c3bdefbc8b28197379f38"}
{"ok":true,"id":"eaef2c7ca340d5c953c276c72e001359","rev":"1-fe554019820a099ad31eb187779bec49"}
...
{"ok":true,"id":"eaef2c7ca340d5c953c276c72e0191c1","rev":"1-3f9e5b6bb8ca2282ea0bd0be47611be9"}
$ curl -q -K .curlrc -X GET http://localhost:15984/koi | jq '.'
{
"db_name": "koi",
"purge_seq": "0-g1AAAABXeJzLYWBgYMpgTmEQTM4vTc5ISXLIy09JNdJLSS3LAUnlsQBJhgYg9R8IshIZ8KhNZEiqhyjKAgA7jRtk",
"update_seq": "50-g1AAAABXeJzLYWBgYMpgTmEQTM4vTc5ISXLIy09JNdJLSS3LAUnlsQBJhgYg9R8IshJl8KhNZEiqBysSywIAP_0blg",
"sizes": {
"file": 237986,
"external": 10015,
"active": 21382
},
"props": {},
"doc_del_count": 0,
"doc_count": 50,
"disk_format_version": 8,
"compact_running": false,
"cluster": {
"q": 2,
"n": 2,
"w": 2,
"r": 2
},
"instance_start_time": "0"
}
An important bit in the metadata above is n: 2
in the "cluster" block.
Start three nodes now and add the third node to the cluster
$ docker-compose --profile full up -d
[+] Running 5/5
⠿ Network sandbox_default Created 0.2s
⠿ Volume "sandbox_couchdb_data3" Created 0.0s
⠿ Container sandbox_node2_1 Started 2.4s
⠿ Container sandbox_node3_1 Started 2.4s
⠿ Container sandbox_node1_1 Started 2.5s
$ curl -q -K .curlrc -X GET http://localhost:35984/_membership | jq '.'
{
"all_nodes": [
"[email protected]"
],
"cluster_nodes": [
"[email protected]"
]
}
$ curl -q -K .curlrc -X PUT http://localhost:15984/_node/_local/_nodes/[email protected] -d {}
{"ok":true,"id":"[email protected]","rev":"1-967a00dff5e02add41819138abb3284d"}
$ curl -q -K .curlrc -X GET http://localhost:35984/_membership | jq '.'
{
"all_nodes": [
"[email protected]",
"[email protected]",
"[email protected]"
],
"cluster_nodes": [
"[email protected]",
"[email protected]",
"[email protected]"
]
}
At this point the third node does not yet know about our database.
It can still proxy requests to the database as long as the other nodes are available:
$ curl -q -K .curlrc -X GET http://localhost:35984/koi | jq '.'
{
"db_name": "koi",
"purge_seq": "0-g1AAAABXeJzLYWBgYMpgTmEQTM4vTc5ISXLIy09JNdRLSS3LAUnlsQBJhgYg9R8IshIZ8KhNZEiqhyjKAgA7Nxti",
"update_seq": "50-g1AAAABXeJzLYWBgYMpgTmEQTM4vTc5ISXLIy09JNdRLSS3LAUnlsQBJhgYg9R8IshJl8KhNZEiqBysSywIAP6cblA",
"sizes": {
"file": 237986,
"external": 10015,
"active": 21382
},
"props": {},
"doc_del_count": 0,
"doc_count": 50,
"disk_format_version": 8,
"compact_running": false,
"cluster": {
"q": 2,
"n": 2,
"w": 2,
"r": 2
},
"instance_start_time": "0"
}
But once they are down or in maintenance mode, the database can't be accessed anymore.
$ curl -q -K .curlrc -X PUT http://localhost:15984/_node/_local/_config/couchdb/maintenance_mode -d '"true"'
""
$ curl -q -K .curlrc -X PUT http://localhost:25984/_node/_local/_config/couchdb/maintenance_mode -d '"true"'
""
$ curl -q -K .curlrc -X GET http://localhost:35984/koi | jq '.'
{
"error": "internal_server_error",
"reason": "No DB shards could be opened.",
"ref": 2745110390
}
Now bring the two "original" nodes back from maintenance mode
$ curl -q -K .curlrc -X PUT http://localhost:15984/_node/_local/_config/couchdb/maintenance_mode -d '"false"'
"true"
$ curl -q -K .curlrc -X PUT http://localhost:25984/_node/_local/_config/couchdb/maintenance_mode -d '"false"'
"true"
Then read and update database metadata.
$ curl -q -K .curlrc -X GET http://localhost:15984/_node/_local/_dbs/koi | jq '.' | tee meta.json
{
"_id": "koi",
"_rev": "1-c1e21114c816b10cc1244b16099bc5dd",
"shard_suffix": [46, 49, 54, 50, 57, 53, 49, 52, 50, 52, 51],
"changelog": [
[
"add",
"00000000-7fffffff",
"[email protected]"
],
[
"add",
"00000000-7fffffff",
"[email protected]"
],
[
"add",
"80000000-ffffffff",
"[email protected]"
],
[
"add",
"80000000-ffffffff",
"[email protected]"
]
],
"by_node": {
"[email protected]": [
"00000000-7fffffff",
"80000000-ffffffff"
],
"[email protected]": [
"00000000-7fffffff",
"80000000-ffffffff"
]
},
"by_range": {
"00000000-7fffffff": [
"[email protected]",
"[email protected]"
],
"80000000-ffffffff": [
"[email protected]",
"[email protected]"
]
},
"props": {}
}
Update the meta.json
file
index 012f57a..0da42e6 100644
--- a/old_meta.json
+++ b/meta.json
@@ -34,6 +34,16 @@
"add",
"80000000-ffffffff",
"[email protected]"
+ ],
+ [
+ "add",
+ "00000000-7fffffff",
+ "[email protected]"
+ ],
+ [
+ "add",
+ "80000000-ffffffff",
+ "[email protected]"
]
],
"by_node": {
@@ -44,16 +54,22 @@
"[email protected]": [
"00000000-7fffffff",
"80000000-ffffffff"
+ ],
+ "[email protected]": [
+ "00000000-7fffffff",
+ "80000000-ffffffff"
]
},
"by_range": {
"00000000-7fffffff": [
"[email protected]",
- "[email protected]"
+ "[email protected]",
+ "[email protected]"
],
"80000000-ffffffff": [
"[email protected]",
- "[email protected]"
+ "[email protected]",
+ "[email protected]"
]
},
"props": {}
Update database's meta and sync shards
$ curl -q -K .curlrc -X PUT http://localhost:15984/_node/_local/_dbs/koi -d @meta.json
{"ok":true,"id":"koi","rev":"2-d81c894a4f93f19fe3b23cccf085a8ac"}
$ curl -q -K .curlrc -X POST http://localhost:15984/koi/_sync_shards
{"ok":true}
Now put the two "original" nodes back into maintenance mode and confirm that our database is now available on the new node.
$ curl -q -K .curlrc -X PUT http://localhost:15984/_node/_local/_config/couchdb/maintenance_mode -d '"true"'
"false"
$ curl -q -K .curlrc -X PUT http://localhost:25984/_node/_local/_config/couchdb/maintenance_mode -d '"true"'
"false"
$ curl -q -K .curlrc -X GET http://localhost:35984/koi | jq '.'
{
"db_name": "koi",
"purge_seq": "0-g1AAAABXeJzLYWBgYMpgTmEQTM4vTc5ISXLIy09JNdZLSS3LAUnlsQBJhgYg9R8IshIZ8KhNZEiqhyjKAgA74xtm",
"update_seq": "50-g1AAAABXeJzLYWBgYMpgTmEQTM4vTc5ISXLIy09JNdZLSS3LAUnlsQBJhgYg9R8IshJl8KhNZEiqBysSywIAQFMbmA",
"sizes": {
"file": 74160,
"external": 10015,
"active": 21453
},
"props": {},
"doc_del_count": 0,
"doc_count": 50,
"disk_format_version": 8,
"compact_running": false,
"cluster": {
"q": 2,
"n": 3,
"w": 2,
"r": 2
},
"instance_start_time": "0"
}
An important bit in the metadata above is that the "cluster" block now shows n: 3.