[global]
fsid = 88caa60a-e6d1-4590-a2b5-bd4e703e46d9
mon host = 10.0.1.21,10.0.1.22,10.0.1.23
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 128
osd pool default pgp num = 128
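# Note: the pool defaults above only affect pools created after this file is in
# place; existing pools keep their own settings. A common rule-of-thumb check for
# the pg num default (illustration only, assumes 10 OSDs):
#   total PGs ~= (OSD count * 100) / replica size -> 10 * 100 / 2 = 500, rounded to 512
# spread across all pools, so 128 per pool fits a small number of pools.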
public network = 10.0.1.0/24
cluster network = 10.0.1.0/24
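# Both networks resolve to the same 10.0.1.0/24 subnet here, so OSD replication
# and heartbeat traffic shares the wire with client/monitor traffic.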
max open files = 131072
mon initial members = controller1, controller2, compute01
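# To see how this file resolves for a given daemon (example names; ceph-conf
# reads /etc/ceph/ceph.conf by default):
#   ceph-conf --name osd.0 --lookup osd_journal_size
# or query a running daemon on its own host through the admin socket:
#   ceph daemon mon.controller1 config show | grep pool_default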
[mon]
mon data = /var/lib/ceph/mon/ceph-$id
mon clock drift allowed = 1
mon osd min down reporters = 13
mon osd down out interval = 600
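# mon clock drift allowed is in seconds (the stock default is 0.05 s), so 1 s is a
# deliberately loose bound. mon osd min down reporters = 13 requires reports from
# 13 distinct OSDs before a peer is marked down, and mon osd down out interval = 600
# means a down OSD is marked out (triggering rebalancing) after 10 minutes.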
[osd]
osd data = /var/lib/ceph/osd/ceph-$id
osd journal size = 20000
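# osd journal size is expressed in MB, so 20000 allocates roughly a 20 GB journal.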
osd journal = /var/lib/ceph/osd/$cluster-$id/journal
osd mkfs type = xfs
osd mkfs options xfs = -f -i size=2048
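# -i size=2048 formats XFS with 2 KB inodes so FileStore xattrs can stay inline
# in the inode instead of spilling into extra blocks.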
filestore xattr use omap = true
filestore min sync interval = 10
filestore max sync interval = 15
filestore queue max ops = 25000
filestore queue max bytes = 1048576000
filestore queue committing max ops = 50000
filestore queue committing max bytes = 10485760000
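# The four filestore queue settings above are throttles: they cap the number of
# operations and bytes the FileStore will queue (and accept while committing)
# before it blocks new requests.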
filestore split multiple = 8
filestore merge threshold = 40
filestore fd cache size = 1024
journal max write bytes = 1073741824
journal max write entries = 10000
journal queue max ops = 50000
journal queue max bytes = 10485760000
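# journal max write bytes/entries cap the size of a single journal write;
# journal queue max ops/bytes throttle what may be queued for the journal.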
osd max write size = 512
osd client message size cap = 2147483648
osd deep scrub stride = 131072
osd op threads = 16
osd disk threads = 4
osd map cache size = 1024
osd map cache bl size = 128
osd mount options xfs = "rw,noexec,nodev,noatime,nodiratime,nobarrier"
osd recovery op priority = 2
osd recovery max active = 10
osd max backfills = 4
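# Recovery is deprioritized relative to client I/O (op priority 2 vs. the default
# client op priority of 63), while recovery max active and max backfills bound how
# many recovery/backfill operations each OSD runs concurrently.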
osd min pg log entries = 30000
osd max pg log entries = 100000
osd mon heartbeat interval = 40
ms dispatch throttle bytes = 1048576000
objecter inflight ops = 819200
osd op log threshold = 50
osd crush chooseleaf type = 0
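# chooseleaf type 0 makes CRUSH separate replicas only at the OSD level, so two
# copies of an object may land on the same host; with size = 2 this is normally
# only appropriate for a single-node or test deployment.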
[client]
rbd cache = true
rbd cache size = 335544320
rbd cache max dirty = 134217728
rbd cache max dirty age = 30
rbd cache writethrough until flush = false
rbd cache max dirty object = 2
rbd cache target dirty = 235544320
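# The rbd cache values above are in bytes: 335544320 = 320 MiB cache,
# 134217728 = 128 MiB dirty limit, with writeback starting once dirty data is
# older than 30 s. With writethrough until flush = false the cache runs in
# write-back mode from the start, which can lose acknowledged writes if a guest
# never issues flushes.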