Tested for jewel-10.2.5

0. install build dependencies

   apt-get install libsnappy-dev libleveldb-dev libblkid-dev libkeyutils-dev libcrypto++-dev libfuse-dev libtcmalloc-minimal4 libatomic-ops-dev xfslibs-dev libboost-all-dev libldap2-dev libcurl4-openssl-dev libfcgi-dev

   (these packages are enough for a configure line like
    ./configure --prefix=$HOME/ceph-install --without-tcmalloc --with-radosgw)

1. prepare directories

   cd $HOME
   mkdir ceph-build
   mkdir ceph-install
   mkdir ceph-deploy

2. build & install

   $HOME/ceph is your ceph source tree:

   cd $HOME/ceph-build
   ../ceph/configure --prefix=$HOME/ceph-install
   make -j `getconf _NPROCESSORS_ONLN`
   make install

3. add the lines below to your $HOME/.bashrc

   CEPH=$HOME/ceph-install
   export PYTHONPATH=$CEPH/lib/python2.7/site-packages:$PYTHONPATH
   export LD_LIBRARY_PATH=$CEPH/lib:$LD_LIBRARY_PATH
   export PATH=$CEPH/bin:$CEPH/sbin:$PATH
   export CEPH_CONF=$HOME/ceph-deploy/ceph.conf

4. create $HOME/ceph-deploy/ceph.conf (replace /home/mlin with your own home directory)

   [global]
   fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
   auth_cluster_required = cephx
   auth_service_required = cephx
   auth_client_required = cephx
   osd_journal_size = 1024
   filestore_xattr_use_omap = true
   osd_pool_default_size = 1
   mon_initial_members = localhost
   mon_host = 127.0.0.1
   mon data = /home/mlin/ceph-deploy/mon/mymondata
   mon cluster log file = /home/mlin/ceph-deploy/mon/mon.log
   osd data = /home/mlin/ceph-deploy/osd/myosddata
   osd journal = /home/mlin/ceph-deploy/osd/myosdjournal
   keyring = /home/mlin/ceph-deploy/ceph.client.admin.keyring
   run dir = /home/mlin/ceph-deploy/run

   # for ext4
   osd max object name len = 256
   osd max object namespace len = 64

5. prepare keys

   cd $HOME/ceph-deploy
   ceph-authtool --create-keyring ./ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
   ceph-authtool --create-keyring ./ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
   ceph-authtool ./ceph.mon.keyring --import-keyring ./ceph.client.admin.keyring
   monmaptool --create --add localhost 127.0.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 ./monmap

6. set up & start the mon

   cd $HOME/ceph-deploy
   mkdir -p mon/mymondata
   ceph-mon --mkfs -i localhost --monmap ./monmap --keyring ./ceph.mon.keyring
   ceph-mon -d -i localhost

7. set up & start the osd

   cd $HOME/ceph-deploy
   mkdir -p osd/myosddata
   ceph osd create 487b9f85-0fee-48df-8976-e03218466ac6 0
   ceph-osd -i 0 --mkfs --mkkey --osd-uuid 487b9f85-0fee-48df-8976-e03218466ac6
   ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i ./ceph.client.admin.keyring
   ceph osd crush add-bucket localhost host
   ceph osd crush move localhost root=default
   ceph osd crush add osd.0 1.0 host=localhost
   ceph-osd -d -i 0

8. check the cluster status

   $ ceph -s
       cluster a7f64266-0894-4f1e-a635-d0aeaca0e993
        health HEALTH_OK
        monmap e1: 1 mons at {localhost=127.0.0.1:6789/0}
               election epoch 2, quorum 0 localhost
        osdmap e10: 1 osds: 1 up, 1 in
               flags sortbitwise
         pgmap v15: 64 pgs, 1 pools, 0 bytes data, 0 objects
               131 GB used, 122 GB / 267 GB avail
                     64 active+clean

9. create and map an rbd image

   rbd create bar -s 1024 --image-format=2 --image-feature layering
   rbd map bar --id admin
   rbd unmap bar
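
(optional) smoke-test the object store

   Not part of the original steps; a minimal sketch you can run any time after
   step 8. It assumes the default "rbd" pool exists (step 8 shows 1 pool on a
   fresh jewel cluster) and that CEPH_CONF points at the ceph.conf from step 4,
   whose keyring entry lets you act as client.admin:

   cd $HOME/ceph-deploy
   echo "hello ceph" > hello.txt
   rados -p rbd put hello-object hello.txt    # write one object into the rbd pool
   rados -p rbd ls                            # should list hello-object
   rados -p rbd get hello-object hello.out    # read it back
   diff hello.txt hello.out                   # no output means the round trip is intact
   rados -p rbd rm hello-object               # clean up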
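
(optional) exercise the mapped rbd device

   Also not in the original steps; a sketch of how the image from step 9 can be
   used between map and unmap. The device name /dev/rbd0 and the mount point are
   assumptions (rbd map prints the actual device); mapping usually needs root and
   the rbd kernel module, which is why the image was created with only the
   layering feature:

   sudo rbd map bar --id admin            # prints the device, e.g. /dev/rbd0
   rbd showmapped                         # confirm the image -> device mapping
   sudo mkfs.ext4 /dev/rbd0               # put a filesystem on the image
   sudo mkdir -p /mnt/bar
   sudo mount /dev/rbd0 /mnt/bar
   echo test | sudo tee /mnt/bar/test.txt # write a file through the mapped device
   sudo umount /mnt/bar
   sudo rbd unmap bar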
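
(optional) stop and restart the mini cluster

   A short sketch, not in the original steps: the daemons above were started in
   the foreground with -d, so each can be stopped with Ctrl-C in its terminal
   (or by killing the ceph-mon / ceph-osd processes). The data directories are
   reused as-is when you bring it back up:

   ceph-mon -d -i localhost    # start the monitor again
   ceph-osd -d -i 0            # start the osd again
   ceph -s                     # should return to HEALTH_OK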