build and set up ceph manually

Tested for jewel-10.2.5


Install the build dependencies first:

apt-get install libsnappy-dev libleveldb-dev libblkid-dev libkeyutils-dev libcrypto++-dev libfuse-dev libtcmalloc-minimal4 libatomic-ops-dev xfslibs-dev libboost-all-dev libldap2-dev libcurl4-openssl-dev libfcgi-dev

An example configure invocation, with radosgw enabled and tcmalloc disabled:

./configure --prefix=$HOME/ceph-install --without-tcmalloc --with-radosgw

1. prepare directories

cd $HOME
mkdir ceph-build
mkdir ceph-install
mkdir ceph-deploy

2. build & install

$HOME/ceph: this is your ceph source tree

cd $HOME/ceph-build
../ceph/configure --prefix=$HOME/ceph-install
make -j `getconf _NPROCESSORS_ONLN`
make install
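
A quick sanity check that make install put everything under the prefix (path assumes the layout above):

$HOME/ceph-install/bin/ceph --version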

3. add below to your $HOME/.bashrc

export CEPH=$HOME/ceph-install
export PYTHONPATH=$CEPH/lib/python2.7/site-packages:$PYTHONPATH
export PATH=$CEPH/bin:$CEPH/sbin:$PATH
export CEPH_CONF=$HOME/ceph-deploy/ceph.conf
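
After reloading the shell configuration, the freshly installed binaries should be the ones resolved on PATH:

source ~/.bashrc
which ceph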

4. create $HOME/ceph-deploy/ceph.conf

[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_journal_size = 1024
filestore_xattr_use_omap = true
osd_pool_default_size = 1
mon_initial_members = localhost
mon_host =
mon_data = /home/mlin/ceph-deploy/mon/mymondata
mon_cluster_log_file = /home/mlin/ceph-deploy/mon/mon.log
osd_data = /home/mlin/ceph-deploy/osd/myosddata
osd_journal = /home/mlin/ceph-deploy/osd/myosdjournal
run_dir = /home/mlin/ceph-deploy/run

# for ext4, which needs shorter object name limits
osd_max_object_name_len = 256
osd_max_object_namespace_len = 64
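
The fsid above is just an example value; if you want a fresh one, generate it and substitute it consistently here and in the monmaptool command in step 5:

uuidgen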

5. prepare keys

cd $HOME/ceph-deploy

ceph-authtool --create-keyring ./ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring ./ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
ceph-authtool ./ceph.mon.keyring --import-keyring ./ceph.client.admin.keyring
monmaptool --create --add localhost --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 ./monmap
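
To sanity-check what was just generated, the keyring and the monmap can be dumped back out:

ceph-authtool -l ./ceph.mon.keyring
monmaptool --print ./monmap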

6. set up & start mon

cd $HOME/ceph-deploy
mkdir -p mon/mymondata
ceph-mon --mkfs -i localhost --monmap ./monmap --keyring ./ceph.mon.keyring
ceph-mon -d -i localhost
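
The -d flag keeps ceph-mon in the foreground (logging to stderr), so from another terminal you can check that the monitor is answering:

ceph mon stat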

7. set up & start osd

cd $HOME/ceph-deploy
mkdir -p osd/myosddata

ceph osd create 487b9f85-0fee-48df-8976-e03218466ac6 0
ceph-osd -i 0 --mkfs --mkkey --osd-uuid 487b9f85-0fee-48df-8976-e03218466ac6

ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i ./osd/myosddata/keyring

ceph osd crush add-bucket localhost host
ceph osd crush move localhost root=default
ceph osd crush add osd.0 1.0 host=localhost

ceph-osd -d -i 0
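
Also from another terminal, confirm the OSD came up and sits where expected in the crush tree:

ceph osd tree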

8. check it

$ ceph -s
    cluster a7f64266-0894-4f1e-a635-d0aeaca0e993
     health HEALTH_OK
     monmap e1: 1 mons at {localhost=}
            election epoch 2, quorum 0 localhost
     osdmap e10: 1 osds: 1 up, 1 in
            flags sortbitwise
      pgmap v15: 64 pgs, 1 pools, 0 bytes data, 0 objects
            131 GB used, 122 GB / 267 GB avail
                  64 active+clean

9. create rbd

rbd create bar -s 1024 --image-format=2 --image-feature layering
rbd map bar --id admin
rbd unmap bar
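
To inspect the image, and (while it is still mapped) the kernel device backing it:

rbd info bar
rbd showmapped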


ceph configure

The snippet below assembles the --without-* and --disable-* options for configure from lists of the packages and features to skip:

packages="rados rbd cephfs radosgw selinux radosstriper mon osd mds cryptopp nss profiler debug fuse jemalloc tcmalloc-minimal tcmalloc"
packages="$packags libatomic-ops ocf kinetic librocksdb librocksdb-static libzfs lttng babeltrace"

packages_disabled="cephfs radosgw selinux radosstriper mds cryptopp profiler debug fuse jemalloc tcmalloc-minimal tcmalloc"
packages_disabled="$packages_disabled libatomic-ops ocf kinetic librocksdb librocksdb-static libzfs lttng babeltrace"

features="libtool-lock dependency-tracking client server coverage pgrefdebugging cephfs-java xio valgrind"
features_disabled="client coverage pgrefdebugging cephfs-java xio valgrind"


opts=""

for package in $packages_disabled ; do
	opts="$opts --without-$package"
done

for feature in $features_disabled ; do
	opts="$opts --disable-$feature"
done

../ceph/configure $opts
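
After configure succeeds, build and install with the same make invocation as in the build step above:

make -j `getconf _NPROCESSORS_ONLN`
make install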