This method is not officially supported (yet); it is a jerry-rigged implementation.

Alternatively, you can use the normal method to create your cluster.

Create /etc/openstack_deploy/conf.d/ceph.yml to define your Ceph cluster:

---
mons_hosts:
## these can be the same hosts as the infra hosts ##
  c11:
    ip: 172.29.240.11
  c12:
    ip: 172.29.240.12
  c13:
    ip: 172.29.240.13

osds_hosts:
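## the ceph-osd role pairs each entry in devices with the raw_journal_devices entry at the same index, ##
## so the four data disks on each host below all journal on the single /dev/vdb device ##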
  c20:
    ip: 172.29.240.20
    container_vars:
      raw_journal_devices:
        - /dev/vdb
        - /dev/vdb
        - /dev/vdb
        - /dev/vdb
      devices:
        - /dev/vdc
        - /dev/vdd
        - /dev/vde
        - /dev/vdf

  c21:
    ip: 172.29.240.21
    container_vars:
      raw_journal_devices:
        - /dev/vdb
        - /dev/vdb
        - /dev/vdb
        - /dev/vdb
      devices:
        - /dev/vdc
        - /dev/vdd
        - /dev/vde
        - /dev/vdf

  c22:
    ip: 172.29.240.22
    container_vars:
      raw_journal_devices:
        - /dev/vdb
        - /dev/vdb
        - /dev/vdb
        - /dev/vdb
      devices:
        - /dev/vdc
        - /dev/vdd
        - /dev/vde
        - /dev/vdf

Create /etc/openstack_deploy/env.d/ceph.yml to define the environment structure, the same way as for the other components:

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

component_skel:
  mons:
    belongs_to:
      - ceph_all
  osds:
    belongs_to:
      - ceph_all


container_skel:
  ceph_mon_container:
    belongs_to:
      - mons_containers
    contains:
      - mons
    properties:
      container_release: trusty
      service_name: ceph
  ceph_osd_container:
    belongs_to:
      - osds_containers
    contains:
      - osds
    properties:
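      # is_metal deploys the OSDs on the host itself rather than inside an LXC
      # container, so the OSD processes can reach the raw block devices directly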
      is_metal: true
      container_release: trusty
      service_name: ceph


physical_skel:
  osds_containers:
    belongs_to:
      - all_containers
  osds_hosts:
    belongs_to:
      - hosts
  mons_containers:
    belongs_to:
      - all_containers
  mons_hosts:
    belongs_to:
      - hosts

Create /etc/openstack_deploy/user_extra_variables.yml with the following:

# The interface within the mon containers for the Ceph mon service to listen on. This is usually eth1.
monitor_interface: eth1
# The network CIDR for the network over which clients will access Ceph mons and osds. This is usually
# br-storage network CIDR.
public_network: 172.29.244.0/22
# The network CIDR for osd to osd replication.
cluster_network: 172.29.244.0/22
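
These CIDRs should match your storage network; as a quick sanity check you can look at the br-storage bridge on one of the hosts (the bridge name here assumes the usual OpenStack-Ansible naming):

ip -4 addr show br-storage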

Create /opt/openstack-ansible/playbooks/ceph-mon.yml:

---
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Deploy mons
  hosts: mons
  user: root
  max_fail_percentage: 0
  roles:
    - ceph-mon
  tasks:
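    # The tasks below remove the default 'rbd' pool, but only if it is still empty.
    # Because secure_cluster (set in user_variables.yml) applies the nodelete flag
    # to existing pools, that flag is cleared first so the delete is allowed.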
    - name: Check if rbd pool exists and is empty
      shell: rados -p rbd df | egrep '^rbd( +0){9}$'
      run_once: true
      ignore_errors: true
      register: rbd_pool_exists

    - name: Unset nodelete flag on rbd pool
      command: ceph osd pool set rbd nodelete 0
      run_once: true
      when: rbd_pool_exists.rc == 0

    - name: Remove rbd pool if it exists and is empty
      command: ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
      run_once: true
      when: rbd_pool_exists.rc == 0

Create ceph-osd.yml under the playbooks directory:

---
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Gather mons facts for ceph.conf template
  hosts: mons
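  # facts-only play: gathering facts from the mons here makes their addresses
  # available to the ceph.conf template rendered by the ceph-osd role below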

- name: Deploy osds
  hosts: osds
  user: root
  pre_tasks:
    - name: Create log dir
      file:
        path: "{{ item.path }}"
        state: directory
      with_items:
        - { path: "/openstack/log/{{ inventory_hostname }}-ceph" }
      when: is_metal | bool
      tags:
        - ceph-logs
        - ceph-log-dirs
    - name: Create log aggregation links
      file:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        state: "{{ item.state }}"
        force: "yes"
      with_items:
        - { src: "/openstack/log/{{ inventory_hostname }}-ceph", dest: "/var/log/ceph", state: "link" }
      when: is_metal | bool
      tags:
        - ceph-logs
  roles:
    - ceph-osd
  vars:
    is_metal: "{{ properties.is_metal|default(false) }}"

Under the roles directory, check out the following repositories (an example set of clone commands follows the list):

https://github.com/ceph/ansible-ceph-mon   as roles/ceph-mon
https://github.com/ceph/ansible-ceph-osd as roles/ceph-osd
https://github.com/ceph/ansible-ceph-common as roles/ceph.ceph-common
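
For example, assuming an /opt/openstack-ansible checkout with its roles under playbooks/roles, the checkouts could look like this (adjust the path, and pin a branch or tag matching your Ceph release, as needed):

cd /opt/openstack-ansible/playbooks/roles
git clone https://github.com/ceph/ansible-ceph-mon ceph-mon
git clone https://github.com/ceph/ansible-ceph-osd ceph-osd
git clone https://github.com/ceph/ansible-ceph-common ceph.ceph-common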

Add the following to user_variables.yml:

# use uuidgen to generate a UUID (see the example after this block)
fsid_uuid: 688da01e-abb5-49c8-bd81-606f1f6980c1  
# Ceph options
# fsid is the unique identifier for your object store.
fsid: '{{ fsid_uuid }}'
# directory for backing up ceph keys.
fetch_directory: /etc/openstack_deploy/ceph_fetch
# Use stable version of ceph
ceph_stable: true
# Specify ceph release name
ceph_stable_release: hammer
# Enable OpenStack support inside the ceph-ansible playbooks
openstack_config: true
# Use raw journal devices
raw_multi_journal: true
# Set the journal size to: "Size of journal device / number of devices for which it is a journal"
# e.g. given a 400G journal disk shared by 5 OSDs, the journal size should be 80G each, i.e. 80000 (the value is in MB)
journal_size: 50000
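# In the layout above four OSDs share each /dev/vdb journal device, so each journal device needs roughly 4 x 50000 MB (~200 GB) of space.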
# Default number of replicas for a pool
pool_default_size: 3
# Default min number of replicas for ceph to consider the state to be not degraded.
pool_default_min_size: 2
# The % of disk used before an osd is considered full - Ceph will be marked critical and stop functioning if an OSD reaches this %
mon_osd_full_ratio: .90
# The % of disk used before an osd is considered nearfull - Ceph will still work but will return a HEALTH_WARN.
mon_osd_nearfull_ratio: .80
# Determines whether we use secure cluster flags.
secure_cluster: true
# List of secure flags to set on a pool (options are nodelete, nopgchange, nosizechange - preventing deletion, PG count changes, and size changes respectively).
secure_cluster_flags:
  - nodelete
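
To generate the fsid_uuid value, run uuidgen on the deployment host and paste the result into the file, for example:

uuidgen
688da01e-abb5-49c8-bd81-606f1f6980c1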

Playbooks to run:

openstack-ansible setup-hosts.yml
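
If you want to confirm that the Ceph groups made it into the generated inventory, you can grep the inventory file for the new containers (this assumes the default /etc/openstack_deploy/openstack_inventory.json location):

grep -o 'ceph_mon_container[^"]*' /etc/openstack_deploy/openstack_inventory.json | sort -u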

Along with the other containers, you will also see the ceph_mon_container in the lxc-ls -f output:

NAME                             STATE    IPV4                      IPV6  AUTOSTART
-------------------------------------------------------------------------------------------------
c11_ceph_mon_container-5124b17e  RUNNING  10.0.3.174, 172.29.239.1  -     YES (onboot, openstack)

openstack-ansible ceph-mon.yml
openstack-ansible ceph-osd.yml

Log in to the mon container:

root@c11:~# lxc-attach -n c11_ceph_mon_container-5124b17e
root@c11_ceph_mon_container-5124b17e:~# ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 2.39996 root default
-2 0.79999     host c20
 0 0.20000         osd.0       up  1.00000          1.00000
 3 0.20000         osd.3       up  1.00000          1.00000
 6 0.20000         osd.6       up  1.00000          1.00000
 9 0.20000         osd.9       up  1.00000          1.00000
-3 0.79999     host c22
 1 0.20000         osd.1       up  1.00000          1.00000
 4 0.20000         osd.4       up  1.00000          1.00000
 7 0.20000         osd.7       up  1.00000          1.00000
11 0.20000         osd.11      up  1.00000          1.00000
-4 0.79999     host c21
 2 0.20000         osd.2       up  1.00000          1.00000
 5 0.20000         osd.5       up  1.00000          1.00000
 8 0.20000         osd.8       up  1.00000          1.00000
10 0.20000         osd.10      up  1.00000          1.00000
root@c11_ceph_mon_container-5124b17e:~# ceph df
GLOBAL:
    SIZE      AVAIL     RAW USED     %RAW USED
    2398G     2398G         452M          0.02
POOLS:
    NAME        ID     USED     %USED     MAX AVAIL     OBJECTS
    images      1         0         0          799G           0
    volumes     2         0         0          799G           0
    vms         3         0         0          799G           0
    backups     4         0         0          799G           0
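
From the same container you can also check the overall cluster health, for example:

ceph -s
ceph health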

 
