Casey Bodley (cbodley)

  • Red Hat
  • Ann Arbor, MI
diff --git a/src/neorados/cls/common.h b/src/neorados/cls/common.h
index 6b39ea4b295..b1dd4b320ab 100644
--- a/src/neorados/cls/common.h
+++ b/src/neorados/cls/common.h
@@ -95,11 +95,15 @@ auto exec(
method = std::move(method), in = std::move(in),
f = std::move(f)](auto& self) {
auto e = self.get_executor();
+ using executor_type = decltype(e);
+ using awaitable_void = asio::awaitable<void, executor_type>;
<error>
<unique>0x34cc</unique>
<tid>342</tid>
<kind>UninitCondition</kind>
<what>Conditional jump or move depends on uninitialised value(s)</what>
<stack>
<frame>
<ip>0x141B715</ip>
<obj>/home/cbodley/ceph/build/bin/radosgw</obj>
<fn>CSVParser::next_line()</fn>
<error>
<unique>0x36a4</unique>
<tid>1</tid>
<kind>Leak_DefinitelyLost</kind>
<xwhat>
<text>2,511 bytes in 81 blocks are definitely lost in loss record 33 of 35</text>
<leakedbytes>2511</leakedbytes>
<leakedblocks>81</leakedblocks>
</xwhat>
<stack>
@cbodley
cbodley / rgw_sync_meta.hpp
Created September 7, 2022 19:42
overview of c++20 coroutine interfaces for metadata sync
#pragma once
#include <vector>
#include <string>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/io_context.hpp>
#include "common/ceph_time.h"
#include "rgw_meta_sync_status.h"
struct meta_list_result; // TODO: move out of RGWFetchAllMetaCR
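The preview ends at the forward declaration. As a rough sketch of the kind of awaitable-based interface the description points at — a hedged guess, where meta_sync_source, list_metadata_keys, and the _sketch result type below are illustrative names, not the gist's actual declarations:

// hypothetical continuation, assuming Boost.Asio C++20 coroutines;
// names here are illustrative, not the gist's actual interface
struct meta_list_result_sketch {
  std::vector<std::string> keys;  // metadata keys in this page
  std::string marker;             // resume point for the next page
  bool truncated = false;         // whether more pages remain
};

class meta_sync_source {
 public:
  virtual ~meta_sync_source() = default;
  // await one page of metadata keys from the given mdlog shard
  virtual boost::asio::awaitable<meta_list_result_sketch>
      list_metadata_keys(int shard, std::string marker, uint32_t max) = 0;
};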
@cbodley
cbodley / setup_multi.sh
Created February 14, 2022 17:43
setup_multi.sh to deploy two vstart clusters and configure for multisite
#!/bin/bash -ex
RGW_ACCESS=DiPt4V7WWvy2njL1z6aC
RGW_SECRET=xSZUdYky0bTctAdCEEW8ikhfBVKsBV5LFYL82vvh
HOSTNAME=localhost
export CEPH_NUM_OSD=1
export CEPH_NUM_MON=1
export CEPH_NUM_MDS=0
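The preview stops in the vstart environment setup; the "configure for multisite" half of a script like this is typically a series of radosgw-admin calls. A minimal sketch under assumed names — myrealm, zg, zone1, and the endpoint port are placeholders of mine, not values from the gist:

# hedged sketch: create the realm/zonegroup/master zone on the first
# cluster, then commit the period so the second cluster can pull it
radosgw-admin realm create --rgw-realm=myrealm --default
radosgw-admin zonegroup create --rgw-zonegroup=zg --master --default \
    --endpoints=http://$HOSTNAME:8000
radosgw-admin zone create --rgw-zonegroup=zg --rgw-zone=zone1 --master \
    --default --endpoints=http://$HOSTNAME:8000 \
    --access-key=$RGW_ACCESS --secret=$RGW_SECRET
radosgw-admin user create --uid=sync-user --display-name="sync user" \
    --system --access-key=$RGW_ACCESS --secret=$RGW_SECRET
radosgw-admin period update --commit

The second cluster would then pull the realm (radosgw-admin realm pull --url=... with the same keys), create its own zone, and commit the period again.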
@cbodley
cbodley / vstart.diff
Created April 26, 2021 15:01
config for multisite dev
diff --git a/src/vstart.sh b/src/vstart.sh
index 7f0dfef731..07d458e438 100755
--- a/src/vstart.sh
+++ b/src/vstart.sh
@@ -684,6 +684,10 @@ EOF
log file = $CEPH_OUT_DIR/\$name.\$pid.log
admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
+ rgw data log num shards = 1
+ rgw md log max shards = 1
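Both added options pin the RGW data log and metadata log to a single shard, so in a dev multisite setup all sync activity lands in shard 0 and is easy to inspect with radosgw-admin datalog list / mdlog list. A hedged usage example with the patched script — the env-var counts are assumptions for a minimal cluster, not taken from the gist:

# one-mon, one-osd dev cluster with a single RGW, using the patched vstart.sh
cd build
MON=1 OSD=1 MDS=0 RGW=1 ../src/vstart.sh -n -d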
#showtooltip
/cancelform [noform:1]
/cast [mod:shift/ctrl/alt,form:1] Dire Bear Form
/cast [mod:ctrl] Major Healing Potion
/cast [mod:alt] Greater Stoneshield Potion
/cast Dire Bear Form
ret = target_shards_mgr.finish();
if (ret < 0) {
lderr(store->ctx()) << "ERROR: failed to reshard" << dendl;
return -EIO;
}
+ if (ret = f.check("link-bucket"); ret < 0) {
+ return ret;
+ }
ret = store->ctl()->bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield);
if (ret < 0) {
void encode(ceph::buffer::list& bl) const {
- ENCODE_START(1, 1, bl);
+ ENCODE_START(2, 1, bl);
encode((uint8_t)reshard_status, bl);
- encode(new_bucket_instance_id, bl);
- encode(num_shards, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
cbodley@cezanne ~/ceph/build $ s3cmd multipart s3://testbucket
s3://testbucket/
Initiated Path Id
2020-03-09T13:17:40.975Z s3://testbucket/asd 2~w6JyRaW8Wa_y6CUMSVDIOdi6fIpaHOc
cbodley@cezanne ~/ceph/build $ bin/rados ls -p default.rgw.buckets.data
9bfd25e9-5a9a-4d5b-a9b8-1fccd3fb87ef.4137.2__shadow_asd.2~w6JyRaW8Wa_y6CUMSVDIOdi6fIpaHOc.1_16
9bfd25e9-5a9a-4d5b-a9b8-1fccd3fb87ef.4137.2__shadow_asd.2~w6JyRaW8Wa_y6CUMSVDIOdi6fIpaHOc.1_9
9bfd25e9-5a9a-4d5b-a9b8-1fccd3fb87ef.4137.2__shadow_asd.2~w6JyRaW8Wa_y6CUMSVDIOdi6fIpaHOc.1_12
9bfd25e9-5a9a-4d5b-a9b8-1fccd3fb87ef.4137.2__shadow_asd.2~w6JyRaW8Wa_y6CUMSVDIOdi6fIpaHOc.1_26
9bfd25e9-5a9a-4d5b-a9b8-1fccd3fb87ef.4137.2__shadow_asd.2~w6JyRaW8Wa_y6CUMSVDIOdi6fIpaHOc.1_18