ceph-container: Deploy Rados Gateway failed

1. Delete containers and directories

sudo docker rm -f ceph-monitor ceph-osd ceph-mds ceph-gateway 
sudo rm -rf /etc/ceph
sudo rm -rf /var/lib/ceph
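
To confirm the cleanup took effect before redeploying, a quick sanity check (using the container names from above):

sudo docker ps -a | grep ceph      # should print nothing
ls /etc/ceph /var/lib/ceph         # should fail with "No such file or directory"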

2. Run the Ceph monitor within a container

sudo docker run -d \
                --net=host \
                -v /etc/ceph:/etc/ceph \
                -v /var/lib/ceph/:/var/lib/ceph/ \
                -e MON_IP=192.168.59.1 \
                -e CEPH_PUBLIC_NETWORK=192.168.59.0/24 \
                --name=ceph-monitor \
                ceph/daemon mon
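
Before moving on to the OSD, it is worth verifying that the monitor came up cleanly, for example:

sudo docker logs ceph-monitor                   # startup log should show no errors
sudo docker exec ceph-monitor ceph mon stat     # should report one mon at 192.168.59.1:6789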

3. Run the Ceph OSD within a container

sudo mkdir /var/lib/ceph/osd


sudo docker run -d \
                --net=host \
                --pid=host \
                --privileged=true \
                -v /etc/ceph:/etc/ceph \
                -v /var/lib/ceph/:/var/lib/ceph/ \
                -v /dev/:/dev/ \
                -e OSD_DEVICE=/dev/vdd \
                -e OSD_TYPE=directory \
                --name=ceph-osd \
                ceph/daemon osd
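
Note that with OSD_TYPE=directory the daemon stores its data under the bind-mounted /var/lib/ceph/osd, so the OSD_DEVICE variable is probably ignored here. To verify the OSD joined the cluster:

sudo docker exec ceph-monitor ceph osd tree     # the new osd.0 should show as "up"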

Check Ceph status

sudo docker exec -it ceph-monitor ceph -s
    cluster 6d05d9fa-2635-4f09-b544-5a5b12706905
     health HEALTH_WARN
            64 pgs stuck inactive
            64 pgs stuck unclean
     monmap e1: 1 mons at {macubuntu1=192.168.59.1:6789/0}
            election epoch 2, quorum 0 macubuntu1
     osdmap e5: 1 osds: 1 up, 1 in
            flags sortbitwise
      pgmap v6: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
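
The PGs are stuck because the default pool replica size (3) can never be satisfied by a single OSD, which is exactly what the maintainer points out at the end of this thread. Assuming only the default rbd pool exists at this point, a minimal fix would be:

sudo docker exec ceph-monitor ceph osd pool set rbd size 1
sudo docker exec ceph-monitor ceph -s           # PGs should now move to active+clean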

4. Run the Ceph MDS within a container

sudo docker run -d \
                --net=host \
                -v /var/lib/ceph/:/var/lib/ceph/ \
                -v /etc/ceph:/etc/ceph \
                -e CEPHFS_CREATE=1 \
                --name=ceph-mds \
                ceph/daemon mds
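
To check that the MDS registered and that CEPHFS_CREATE=1 actually created a filesystem, something like:

sudo docker exec ceph-monitor ceph mds stat
sudo docker exec ceph-monitor ceph fs ls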

5. Run the Ceph gateway within a container

sudo docker run -d \
                --net=host \
                -v /var/lib/ceph/:/var/lib/ceph/ \
                -v /etc/ceph:/etc/ceph \
                --name=ceph-gateway \
                ceph/daemon rgw
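
The entrypoint starts radosgw behind civetweb; assuming the ceph/daemon image's default port of 8080, a quick liveness check would be:

sudo docker logs ceph-gateway        # watch for initialization errors
curl -s http://localhost:8080/       # an anonymous request should return an XML bucket listing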

Creating a Ceph user failed, with no output:

sudo docker exec ceph-gateway radosgw-admin -n client.rgw.$(hostname) -k /var/lib/ceph/radosgw/$(hostname)/keyring user create --uid="kiwenlau" --display-name="kiwenlau"
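
A command that produces no output here is consistent with radosgw-admin hanging while its pools cannot be created (the stuck PGs above). To rule out a simple keyring mismatch, the paths assumed by the command can be checked; the directory name below is a guess based on the image's conventions:

sudo docker exec ceph-gateway ls /var/lib/ceph/radosgw/      # shows the actual instance directory
sudo docker exec ceph-gateway ceph auth list | grep rgw      # shows the rgw key the cluster knows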

The Ceph gateway container exited after 5 minutes:

docker logs ceph-gateway 
static: does not generate config
HEALTH_WARN 80 pgs degraded; 80 pgs stuck inactive; 80 pgs stuck unclean; 80 pgs undersized
2016-05-26 08:41:54.813320 7f54b1f567c0  0 set uid:gid to 64045:64045
2016-05-26 08:41:54.813405 7f54b1f567c0  0 ceph version 9.2.1 (752b6a3020c3de74e07d2a8b4c5e48dab5a6b6fd), process radosgw, pid 75
2016-05-26 08:41:54.818179 7f54b1f567c0  0 couldn't find old data placement pools config, setting up new ones for the zone
2016-05-26 08:46:54.813600 7f54a71a2700 -1 Initialization timeout, failed to initialize
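
The "setting up new ones for the zone" line followed by the timeout suggests radosgw spent those five minutes trying to create its zone pools, which cannot go active while the PGs are stuck. Confirming from the monitor:

sudo docker exec ceph-monitor ceph health detail                       # lists which PGs are stuck and why
sudo docker exec ceph-monitor ceph osd dump | grep 'replicated size'   # any pool with size > OSD count will stay inactive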

About this issue

  • State: closed
  • Created 8 years ago
  • Comments: 18 (8 by maintainers)

Most upvoted comments

@leseb More info:

[root@localhost conf.d]# docker images
REPOSITORY          TAG                               IMAGE ID            CREATED             SIZE
ceph                demo                              f51f8b5ab29e        3 hours ago         941.7 MB

docker ps -a
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
4cd53457df1a        ceph:demo           "/entrypoint.sh"    About an hour ago   Up 39 minutes                           ceph

Steps I have taken:

  1. Build a new Docker image for the Ceph demo.
  2. docker run -d --net=host -v /etc/ceph:/etc/ceph -e MON_IP=10.23.174.157 -e CEPH_PUBLIC_NETWORK=10.23.174.157 --name=ceph ceph:demo
  3. sudo yum install ceph-common ceph-radosgw
  4. sudo ceph auth del client.radosgw.gateway sudo ceph auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rwx' -o /etc/ceph/ceph.client.radosgw.keyring
  5. docker restart ceph
  6. sudo /etc/init.d/ceph-radosgw start --> this is where I got the init timeout (see the diagnostic sketch below).
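
One way to narrow down where the init hangs is to confirm that the keyring from step 4 authenticates at all, and then run radosgw in the foreground with verbose logging; a sketch using the names from the steps above:

sudo ceph -n client.radosgw.gateway --keyring /etc/ceph/ceph.client.radosgw.keyring -s
# should print cluster status, not a permission error

sudo radosgw -d -n client.radosgw.gateway --debug-rgw 20 --debug-ms 1
# the foreground log shows where startup stalls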

My ceph.conf:

[global]
fsid = 46f53925-4fe3-4f0b-8cef-93f694ed21cd
mon initial members = localhost
mon host = 10.23.174.157
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd crush chooseleaf type = 0
osd journal size = 100
osd pool default pg num = 8
osd pool default pgp num = 8
osd pool default size = 1
public network = 10.23.174.157/24
cluster network = 10.23.174.157/24

[client.radosgw.gateway]
host = localhost
keyring = /etc/ceph/ceph.client.radosgw.keyring
rgw socket path = ""
log file = /var/log/radosgw/client.radosgw.gateway.log
rgw frontends = fastcgi socket_port=9000 socket_host=0.0.0.0
rgw print continue = false
rgw init timeout = 1200

My /etc/httpd/conf.d/raw.conf:

# opening tag presumably stripped by the issue formatting; port 80 assumed
<VirtualHost *:80>
ServerName localhost
DocumentRoot /var/www/html

ErrorLog /var/log/httpd/rgw_error.log
CustomLog /var/log/httpd/rgw_access.log combined

# LogLevel debug

RewriteEngine On

RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

SetEnv proxy-nokeepalive 1

ProxyPass / unix:///var/run/ceph/ceph.radosgw.gateway.fastcgi.sock|fcgi://localhost:9000/

</VirtualHost>

@kiwenlau this is normal since you only have a single OSD. Please issue the following commands on the monitor:

  1. Get all the pools with: ceph osd dump | grep size
  2. Change pool replica size with: ceph osd pool set <pool> size 1

This should fix your issues.
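
Applied to the single-OSD setup from the top of this thread, the sequence would look something like this (the exact pool names depend on what the dump reports):

sudo docker exec ceph-monitor ceph osd dump | grep 'replicated size'
sudo docker exec ceph-monitor ceph osd pool set rbd size 1    # repeat for each pool listed above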