
Merge remote-tracking branch 'gh/infernalis'
liewegas committed Sep 22, 2015
2 parents 424bcea + 4b71a54 commit fb5f058
Showing 6 changed files with 151 additions and 111 deletions.
114 changes: 67 additions & 47 deletions qa/workunits/ceph-disk/ceph-disk-test.py
@@ -13,11 +13,20 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
# When debugging these tests, here are a few useful commands:
# When debugging these tests (must be root), here are a few useful commands:
#
# export PATH=..:$PATH
# ln -sf /home/ubuntu/ceph/src/ceph-disk /usr/sbin/ceph-disk
# ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
# ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
# ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link; check /var/log/upstart for logs
# cp /home/ubuntu/ceph/src/upstart/ceph-disk.conf /etc/init/ceph-disk.conf
# python ceph-disk-test.py --verbose --destroy-osd 0
# py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py
# udevadm monitor --property & tail -f /var/log/messages # on CentOS 7
# udevadm monitor --property & tail -f /var/log/syslog /var/log/upstart/* # on Ubuntu 14.04
# udevadm test --action=add /block/vdb/vdb1 # verify the udev rule is run as expected
# udevadm control --reload # when changing the udev rules
#
import argparse
import json
@@ -28,6 +37,7 @@
import subprocess
import sys
import tempfile
import time
import uuid

LOG = logging.getLogger('CephDisk')
@@ -49,7 +59,7 @@ def unused_disks(self, pattern='[vs]d.'):
names = filter(lambda x: re.match(pattern, x), os.listdir("/sys/block"))
if not names:
return []
disks = json.loads(self.helper("ceph-disk list --format json " + " ".join(names)))
disks = json.loads(self.sh("ceph-disk list --format json " + " ".join(names)))
unused = []
for disk in disks:
if 'partitions' not in disk:
@@ -71,7 +81,7 @@ def unload_scsi_debug(self):
self.sh("rmmod scsi_debug || true")

def get_osd_partition(self, uuid):
disks = json.loads(self.helper("ceph-disk list --format json"))
disks = json.loads(self.sh("ceph-disk list --format json"))
for disk in disks:
if 'partitions' in disk:
for partition in disk['partitions']:
@@ -82,7 +92,7 @@ def get_osd_partition(self, uuid):
def get_journal_partition(self, uuid):
data_partition = self.get_osd_partition(uuid)
journal_dev = data_partition['journal_dev']
disks = json.loads(self.helper("ceph-disk list --format json"))
disks = json.loads(self.sh("ceph-disk list --format json"))
for disk in disks:
if 'partitions' in disk:
for partition in disk['partitions']:
@@ -95,6 +105,7 @@ def get_journal_partition(self, uuid):
def destroy_osd(self, uuid):
id = self.sh("ceph osd create " + uuid)
self.helper("control_osd stop " + id + " || true")
self.wait_for_osd_down(uuid)
try:
partition = self.get_journal_partition(uuid)
if partition:
@@ -121,16 +132,40 @@ def destroy_osd(self, uuid):
ceph osd crush rm osd.{id}
""".format(id=id))

def run_osd(self, uuid, data, journal=None):
prepare = ("ceph-disk prepare --osd-uuid " + uuid +
" " + data)
if journal:
prepare += " " + journal
self.sh(prepare)
self.sh("ceph osd create " + uuid)
partition = self.get_osd_partition(uuid)
assert partition['type'] == 'data'
assert partition['state'] == 'active'
@staticmethod
def osd_up_predicate(osds, uuid):
for osd in osds:
if osd['uuid'] == uuid and 'up' in osd['state']:
return True
return False

@staticmethod
def wait_for_osd_up(uuid):
CephDisk.wait_for_osd(uuid, CephDisk.osd_up_predicate, 'up')

@staticmethod
def osd_down_predicate(osds, uuid):
found = False
for osd in osds:
if osd['uuid'] == uuid:
found = True
if 'down' in osd['state'] or ['exists'] == osd['state']:
return True
return not found

@staticmethod
def wait_for_osd_down(uuid):
CephDisk.wait_for_osd(uuid, CephDisk.osd_down_predicate, 'down')

@staticmethod
def wait_for_osd(uuid, predicate, info):
LOG.info("wait_for_osd " + info + " " + uuid)
for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
if predicate(dump['osds'], uuid):
return True
time.sleep(delay)
raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)

@staticmethod
def augtool(command):
@@ -150,16 +185,19 @@ def setup_class(self):
logging.basicConfig(level=logging.DEBUG)
c = CephDisk()
c.helper("install augeas-tools augeas")
c.helper("install multipath-tools device-mapper-multipath")
if c.sh("lsb_release -si") == 'CentOS':
c.helper("install multipath-tools device-mapper-multipath")
c.augtool("set /files/etc/ceph/ceph.conf/global/osd_journal_size 100")

def test_destroy_osd(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.run_osd(osd_uuid, disk)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid + " " + disk)
c.wait_for_osd_up(osd_uuid)
partition = c.get_osd_partition(osd_uuid)
assert partition['type'] == 'data'
assert partition['state'] == 'active'
c.destroy_osd(osd_uuid)
c.sh("ceph-disk zap " + disk)

@@ -179,8 +217,6 @@ def test_activate_dmcrypt_luks(self):

def activate_dmcrypt(self, type):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
journal_uuid = str(uuid.uuid1())
@@ -190,8 +226,7 @@ def activate_dmcrypt(self, type):
" --journal-uuid " + journal_uuid +
" --dmcrypt " +
" " + disk)
data_partition = c.get_osd_partition(osd_uuid)
c.sh("ceph-disk activate --dmcrypt " + data_partition['path'])
c.wait_for_osd_up(osd_uuid)
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
assert data_partition['state'] == 'active'
@@ -202,15 +237,14 @@ def test_activate_no_journal(self):

def test_activate_no_journal(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.augtool("set /files/etc/ceph/ceph.conf/global/osd_objectstore memstore")
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + disk)
device = json.loads(c.helper("ceph-disk list --format json " + disk))[0]
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
assert len(device['partitions']) == 1
partition = device['partitions'][0]
assert partition['type'] == 'data'
@@ -223,14 +257,13 @@ def test_activate_no_journal(self):

def test_activate_with_journal(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk zap " + disk)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + disk)
device = json.loads(c.helper("ceph-disk list --format json " + disk))[0]
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
assert len(device['partitions']) == 2
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
@@ -243,8 +276,6 @@ def test_activate_with_journal(self):

def test_activate_separated_journal(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disks = c.unused_disks()
data_disk = disks[0]
journal_disk = disks[1]
@@ -255,12 +286,11 @@ def test_activate_separated_journal(self):

def activate_separated_journal(self, data_disk, journal_disk):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_disk)
device = json.loads(c.helper("ceph-disk list --format json " + data_disk))[0]
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
assert len(device['partitions']) == 1
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
@@ -278,8 +308,6 @@ def activate_separated_journal(self, data_disk, journal_disk):
#
def test_activate_two_separated_journal(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disks = c.unused_disks()
data_disk = disks[0]
other_data_disk = disks[1]
@@ -300,8 +328,6 @@ def test_activate_two_separated_journal(self):
#
def test_activate_reuse_journal(self):
c = CephDisk()
if c.sh("lsb_release -si") != 'Ubuntu':
pytest.skip("see issue http://tracker.ceph.com/issues/12787")
disks = c.unused_disks()
data_disk = disks[0]
journal_disk = disks[1]
@@ -320,7 +346,8 @@ def test_activate_reuse_journal(self):
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_path)
c.helper("pool_read_write 1") # 1 == pool size
device = json.loads(c.helper("ceph-disk list --format json " + data_disk))[0]
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
assert len(device['partitions']) == 1
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
@@ -355,18 +382,11 @@ def test_activate_multipath(self):
c.sh("ceph-disk zap " + multipath)
c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
" " + multipath)
device = json.loads(c.helper("ceph-disk list --format json " + multipath))[0]
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + multipath))[0]
assert len(device['partitions']) == 2
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['type'] == 'data'
#
# Activate it although it should auto activate
#
if True: # remove this block when http://tracker.ceph.com/issues/12786 is fixed
c.sh("ceph-disk activate " + data_partition['path'])
device = json.loads(c.helper("ceph-disk list --format json " + multipath))[0]
assert len(device['partitions']) == 2
data_partition = c.get_osd_partition(osd_uuid)
assert data_partition['state'] == 'active'
journal_partition = c.get_journal_partition(osd_uuid)
assert journal_partition
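Taken together, the changes to ceph-disk-test.py drop the explicit "ceph-disk activate" calls and the Ubuntu-only skips: each test now prepares the disk and relies on udev to auto-activate the OSD, polling "ceph osd dump" with a doubling backoff via wait_for_osd_up / wait_for_osd_down. A rough manual equivalent of that flow, handy when debugging by hand, is sketched below; the device name /dev/vdb and the retry bound are assumptions, and grepping for the uuid is a looser check than the 'up' predicate used by wait_for_osd.

#!/bin/bash
# Sketch only: assumes a running cluster, root privileges and an unused /dev/vdb.
osd_uuid=$(uuidgen)
ceph-disk zap /dev/vdb
ceph-disk prepare --osd-uuid "$osd_uuid" /dev/vdb    # udev should activate the new partition
for delay in 1 2 4 8 16 32 64 ; do                   # same doubling backoff as wait_for_osd
    if ceph osd dump -f json | grep -q "$osd_uuid" ; then
        echo "osd $osd_uuid registered"
        break
    fi
    sleep $delay
done
ceph-disk list --format json /dev/vdb                # the data partition should report state 'active'
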
24 changes: 20 additions & 4 deletions qa/workunits/ceph-disk/ceph-disk.sh
@@ -1,10 +1,26 @@
source $(dirname $0)/../ceph-helpers-root.sh true
#!/bin/bash
if [ -f $(dirname $0)/../ceph-helpers-root.sh ]; then
source $(dirname $0)/../ceph-helpers-root.sh
else
echo "$(dirname $0)/../ceph-helpers-root.sh does not exist."
exit 1
fi

install python-pytest || true
install pytest || true

PATH=$(dirname $0)/..:$PATH

if ! which py.test > /dev/null; then
echo "py.test not installed"
exit 1
fi

install python-pytest
install pytest
sudo env PATH=$(dirname $0)/..:$PATH py.test -v $(dirname $0)/ceph-disk-test.py
result=$?

# own whatever was created as a side effect of the py.test run
# so that it can successfully be removed later on by a non privileged
# process
sudo chown -R $(id -u) $(dirname $0)

exit $result
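
With the wrapper now failing fast when ceph-helpers-root.sh or py.test is missing, a typical local run might look like the sketch below; it assumes a teuthology-style checkout and passwordless sudo, and the single-test invocation mirrors the debugging comment at the top of ceph-disk-test.py.

cd qa/workunits/ceph-disk
bash ceph-disk.sh                                     # installs pytest if needed, runs the suite via sudo
# or, as root, run a single test directly:
sudo env PATH=..:$PATH py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py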