root@donatello:~# curl 10.30.3.252:5000
{"versions": {"values": [{"status": "stable", "updated": "2013-03-06T00:00:00Z", "media-types": [{"base": "application/json", "type": "application/vnd.openstack.identity-v3+json"}, {"base": "application/xml", "type": "application/vnd.openstack.identity-v3+xml"}], "id": "v3.0", "links": [{"href": "http://localhost:5000/v3/", "rel": "self"}]}, {"status": "stable", "updated": "2013-03-06T00:00:00Z", "media-types": [{"base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json"}, {"base": "application/xml", "type": "application/vnd.openstack.identity-v2.0+xml"}], "id": "v2.0", "links": [{"href": "http://localhost:5000/v2.0/", "rel": "self"}, {"href": "http://docs.openstack.org/api/openstack-identity-service/2.0/content/", "type": "text/html", "rel": "describedby"}, {"href": "http://docs.openstack.org/api/openstack-identity-service/2.0/identity-dev-guide-2.0.pdf", "type": "application/pdf", "rel": "describedby"}]}]}}
# gluster volume info
Volume Name: gvol01
Type: Replicate
Volume ID: 29d5fe6e-0b91-47c0-b84e-a1fc9b18d066
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: c01:/gluster/brick1/gdev01
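The volume is a 1 x 2 replicate, so two bricks exist even though only Brick1 appears in the truncated listing above. A hedged way to see both bricks, their ports, and any pending self-heals, assuming the volume name gvol01:

gluster volume status gvol01
gluster volume heal gvol01 info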
[2013-10-02 18:00:13.638819] I [server.c:762:server_rpc_notify] 0-gvol01-server: disconnecting connectionfrom leonardo-1891-2013/10/02-17:32:52:832624-gvol01-client-1-0
[2013-10-02 18:00:13.638908] I [server-helpers.c:726:server_connection_put] 0-gvol01-server: Shutting down connection leonardo-1891-2013/10/02-17:32:52:832624-gvol01-client-1-0
[2013-10-02 18:00:13.638941] I [server-helpers.c:614:server_connection_destroy] 0-gvol01-server: destroyed connection of leonardo-1891-2013/10/02-17:32:52:832624-gvol01-client-1-0
[2013-10-02 18:00:38.364409] I [server-rpc-fops.c:1739:server_stat_cbk] 0-gvol01-server: 351083: STAT (null) (30d8092d-1a50-46f3-a1ac-15ad3e85cb6c) ==> (No such file or directory)
[2013-10-02 18:00:38.368413] I [server-rpc-fops.c:1739:server_stat_cbk] 0-gvol01-server: 351087: STAT (null) (30d8092d-1a50-46f3-a1ac-15ad3e85cb6c) ==> (No such file or directory)
[2013-10-02 18:00:55.866322] I [server-rpc-fops.c:1739:server_stat_cbk] 0-gvol01-server: 351134: STAT (null) (30d8092d-1a50-46f3-a1ac-15ad3e8
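The STAT failures above reference a gfid with no resolved path ((null)). One common way to map a gfid back to a file on the brick is through the .glusterfs hardlink tree; a sketch, assuming the brick path c01:/gluster/brick1/gdev01 from the volume info above:

ls -l /gluster/brick1/gdev01/.glusterfs/30/d8/30d8092d-1a50-46f3-a1ac-15ad3e85cb6c
find /gluster/brick1/gdev01 -samefile /gluster/brick1/gdev01/.glusterfs/30/d8/30d8092d-1a50-46f3-a1ac-15ad3e85cb6c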
root@ceph1:~# cat /var/log/ceph/ceph.log
2013-08-21 00:28:58.109058 mon.1 172.20.22.91:6789/0 1 : [INF] mon.ceph2 calling new monitor election
2013-08-21 00:28:58.128803 mon.0 172.20.22.90:6789/0 1 : [INF] mon.ceph1 calling new monitor election
2013-08-21 00:28:58.148142 mon.0 172.20.22.90:6789/0 2 : [INF] mon.ceph1@0 won leader election with quorum 0,1
2013-08-21 00:28:58.158501 mon.0 172.20.22.90:6789/0 3 : [INF] pgmap v2: 192 pgs: 192 creating; 0 bytes data, 0 KB used, 0 KB / 0 KB avail
2013-08-21 00:28:58.158735 mon.0 172.20.22.90:6789/0 4 : [INF] mdsmap e1: 0/0/1 up
2013-08-21 00:28:58.158895 mon.0 172.20.22.90:6789/0 5 : [INF] osdmap e1: 0 osds: 0 up, 0 in
2013-08-21 00:43:21.886904 mon.1 172.20.22.91:6789/0 1 : [INF] mon.ceph2 calling new monitor election
2013-08-21 00:43:21.888309 mon.0 172.20.22.90:6789/0 1 : [INF] mon.ceph1 calling new monitor election
2013-08-21 00:43:21.917392 mon.0 172.20.22.90:6789/0 2 : [INF] mon.ceph1@0 won leader election with quorum 0,1
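At this point the two monitors have formed quorum (0,1), but the cluster is empty: 0 OSDs and all 192 PGs still creating. A hedged way to confirm quorum and overall state from either monitor node:

ceph quorum_status
ceph -s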
# ceph-deploy osd create ceph1:vdb1
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks ceph1:/dev/vdb1:
[ceph_deploy.osd][DEBUG ] Deploying osd to ceph1
[ceph_deploy.osd][DEBUG ] Host ceph1 is now ready for osd use.
[ceph_deploy.osd][DEBUG ] Preparing host ceph1 disk /dev/vdb1 journal None activate True
ceph-deploy osd create ceph{1,2}:/srv/osd
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks ceph1:/srv/osd: ceph2:/srv/osd:
[ceph_deploy.osd][DEBUG ] Deploying osd to ceph1
[ceph_deploy.osd][DEBUG ] Host ceph1 is now ready for osd use.
[ceph_deploy.osd][DEBUG ] Preparing host ceph1 disk /srv/osd journal None activate True
[ceph_deploy.osd][DEBUG ] Deploying osd to ceph2
[ceph_deploy.osd][DEBUG ] Host ceph2 is now ready for osd use.
[ceph_deploy.osd][DEBUG ] Preparing host ceph2 disk /srv/osd journal None activate True
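Here the OSDs are backed by plain directories rather than disks; with older ceph-deploy releases the directory is prepared but may still need an explicit activate before the daemons start. A hedged follow-up, assuming the paths above:

ceph-deploy osd activate ceph1:/srv/osd ceph2:/srv/osd
ceph osd tree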
root@oc:/opt/ceph# ceph-deploy mds create ceph2
[ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts ceph2:ceph2
ceph2:~$ cat /etc/ceph/ceph.conf
[global]
fsid = b5466a8d-b660-42f7-b39c-1157d14fd580
mon_initial_members = ceph1, ceph2
mon_host = 172.20.22.90,172.20.22.91
auth_supported = cephx
osd_journal_size = 1024
filestore_xattr_use_omap = true
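This is the ceph.conf that ceph-deploy generated: the cluster fsid, both initial monitors, cephx authentication, and a 1 GB journal. A hedged way to push the same file and the admin keyring to every node so the CLI works there, assuming the hostnames above:

ceph-deploy --overwrite-conf config push ceph1 ceph2
ceph-deploy admin ceph1 ceph2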
root@ceph1:~# cat /etc/ceph/ceph.conf
ceph2:~# netstat -altn
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State
tcp 0 0 0.0.0.0:6800 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN
tcp 0 0 172.20.22.91:6789 0.0.0.0:* LISTEN
tcp 0 9 172.20.22.91:53685 172.20.22.90:6789 ESTABLISHED
tcp 0 0 172.20.22.91:6789 172.20.22.90:55808 ESTABLISHED
tcp 0 176 172.20.22.91:22 172.20.20.210:59809 ESTABLISHED
tcp6 0 0 :::22 :::* LISTEN
ps aux | grep ceph
root 4482 0.0 1.7 141624 8772 ? Ssl 00:43 0:00 /usr/bin/ceph-mon --cluster=ceph -i ceph2 -f
root 5541 0.0 1.4 148132 7276 ? Ssl 00:48 0:00 /usr/bin/ceph-mds --cluster=ceph -i ceph2 -f
root 5831 0.0 0.1 9376 892 pts/0 S+ 01:07 0:00 grep --color=auto ceph
ceph health detail
HEALTH_ERR 192 pgs stuck inactive; 192 pgs stuck unclean; no osds
pg 0.3f is stuck inactive since forever, current state creating, last acting []
pg 1.3e is stuck inactive since forever, current state creating, last acting []
pg 2.3d is stuck inactive since forever, current state creating, last acting []
pg 0.3e is stuck inactive since forever, current state creating, last acting []
pg 1.3f is stuck inactive since forever, current state creating, last acting []
pg 2.3c is stuck inactive since forever, current state creating, last acting []
pg 0.3d is stuck inactive since forever, current state creating, last acting []
pg 1.3c is stuck inactive since forever, current state creating, last acting []
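The "no osds" line explains the rest of this output: with an empty OSD map, every placement group stays in creating and can never reach active+clean. A hedged way to check the OSD count and watch the PGs progress once OSDs join, assuming the default cluster:

ceph osd stat
ceph -w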