diff -ruN anaconda-13.21.195.upstream/storage/devicelibs/mdraid.py updates/storage/devicelibs/mdraid.py
--- anaconda-13.21.195.upstream/storage/devicelibs/mdraid.py	2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/devicelibs/mdraid.py	2013-10-20 16:13:54.000000000 -0700
@@ -100,7 +100,7 @@
                         RAID6: 4,
                         RAID5: 3,
                         RAID4: 3,
-                        RAID1: 2,
+                        RAID1: 1,
                         RAID0: 2}
 
     for raid, min_members in raid_min_members.items():
diff -ruN anaconda-13.21.195.upstream/storage/devices.py updates/storage/devices.py
--- anaconda-13.21.195.upstream/storage/devices.py	2012-12-10 15:40:04.000000000 -0800
+++ updates/storage/devices.py	2013-12-02 20:19:32.000000000 -0800
@@ -3139,6 +3144,11 @@
             disks = [disk.path for disk in self.devices]
             spares = len(self.devices) - self.memberDevices
+
+            # allow creation of degraded arrays
+            if len(disks) == 1:
+                disks.append("missing")
+
             mdraid.mdcreate(self.path,
                             self.level,
                             disks,
diff -ruN anaconda-13.21.195.upstream/storage/devicetree.py updates/storage/devicetree.py
--- anaconda-13.21.195.upstream/storage/devicetree.py	2012-12-18 17:17:19.000000000 -0800
+++ updates/storage/devicetree.py	2013-07-30 14:25:43.000000000 -0700
@@ -2051,7 +2051,11 @@
 
         # remove md array devices for which we did not find all members
         for array in self.getDevicesByType("mdarray"):
-            if array.memberDevices > len(array.parents):
+            # Exception: allow degraded RAID1 arrays to be detected
+            if (array.level == 1) and (array.memberDevices - 1) == len(array.parents):
+                log.warning("RAID1 array %s is degraded - %d of %d members found." % \
+                            (array.name, len(array.parents), array.memberDevices))
+            elif array.memberDevices > len(array.parents):
                 self._recursiveRemove(array)
 
     def _recursiveRemove(self, device):
diff -ruN anaconda-13.21.195.upstream/storage/partitioning.py updates/storage/partitioning.py
--- anaconda-13.21.195.upstream/storage/partitioning.py	2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/partitioning.py	2013-07-30 14:25:43.000000000 -0700
@@ -64,6 +64,41 @@
 
     # create a separate pv partition for each disk with free space
    devs = []
+
+    # if there are no partition requests with asVol==True, then there is no
+    # need to create any pv partitions
+    requested_lvm_partitions = False
+    for request in anaconda.id.storage.autoPartitionRequests:
+        if request.asVol:
+            requested_lvm_partitions = True
+            break
+
+    if not requested_lvm_partitions:
+        return (disks, devs)
+
+    # if using RAID1, create free space partitions as RAID1 members
+    # so they can later be combined into a single pv partition
+    requested_raid_partitions = False
+    for request in anaconda.id.storage.autoPartitionRequests:
+        if request.useRAID and request.mountpoint == '/':
+            requested_raid_partitions = True
+            break
+
+    if requested_raid_partitions:
+        raid_devs = []
+        for disk in disks:
+            dev = anaconda.id.storage.newPartition(fmt_type="mdmember", size=1, grow=True, disks=[disk])
+            anaconda.id.storage.createDevice(dev)
+            raid_devs.append(dev)
+
+        # use minor="1" so that /dev/md1 is the PV, and /dev/md0 is /boot
+        part = anaconda.id.storage.newMDArray(fmt_type="lvmpv", minor="1", level="raid1", parents=raid_devs, memberDevices=len(raid_devs))
+        anaconda.id.storage.createDevice(part)
+        devs.append(part)
+
+        return (disks, devs)
+
+    # original default case
     for disk in disks:
         if anaconda.id.storage.encryptedAutoPart:
             fmt_type = "luks"
@@ -94,6 +129,24 @@
         if request.asVol:
             continue
 
+        if request.useRAID:
+            log.info("partitioning: RAID1 requested for %s" % request.mountpoint)
+
+            raid_devs = []
+            for disk in disks:
+                dev = anaconda.id.storage.newPartition(fmt_type="mdmember",
+                          size=request.size, grow=request.grow, maxsize=request.maxSize,
+                          mountpoint=request.mountpoint, disks=[disk], weight=request.weight)
+                anaconda.id.storage.createDevice(dev)
+                raid_devs.append(dev)
+
+            dev = anaconda.id.storage.newMDArray(fmt_type=request.fstype,
+                      mountpoint=request.mountpoint, level="raid1",
+                      parents=raid_devs, memberDevices=len(raid_devs))
+            anaconda.id.storage.createDevice(dev)
+
+            continue
+
         if request.fstype is None:
             request.fstype = anaconda.id.storage.defaultFSType
         elif request.fstype == "prepboot":
@@ -140,6 +193,9 @@
     return
 
 def _scheduleLVs(anaconda, devs):
+    if not devs:
+        return
+
     if anaconda.id.storage.encryptedAutoPart:
         pvs = []
         for dev in devs:
diff -ruN anaconda-13.21.195.upstream/storage/partspec.py updates/storage/partspec.py
--- anaconda-13.21.195.upstream/storage/partspec.py	2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/partspec.py	2013-07-30 14:25:43.000000000 -0700
@@ -22,7 +22,7 @@
 class PartSpec(object):
     def __init__(self, mountpoint=None, fstype=None, size=None, maxSize=None,
                  grow=False, asVol=False, singlePV=False, weight=0,
-                 requiredSpace=0):
+                 requiredSpace=0, useRAID=False):
         """ Create a new storage specification.  These are used to specify
             the default partitioning layout as an object before we have the
             storage system up and running. The attributes are obvious
@@ -45,6 +45,8 @@
                              other LVs are created inside it.  If not enough
                              space exists, this PartSpec will never get turned
                              into an LV.
+            useRAID -- Should a RAID1 array be created for this volume?  If
+                       not, it will be allocated as a partition.
 
         """
 
         self.mountpoint = mountpoint
@@ -56,6 +58,7 @@
         self.singlePV = singlePV
         self.weight = weight
         self.requiredSpace = requiredSpace
+        self.useRAID = useRAID
 
         if self.singlePV and not self.asVol:
             self.asVol = True
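
For reference, a minimal sketch of how the new useRAID flag might be driven from an install class's default partitioning requests once the patch above is applied. Only the PartSpec keyword names (including useRAID) and the autopart behaviour they trigger come from the patch; the helper name, sizes, weights, and filesystem types below are illustrative assumptions, not part of the change.

    # Hypothetical autopart defaults exercising the patched code paths;
    # names and values here are examples only.
    from storage.partspec import PartSpec

    def mirroredAutoPartRequests():
        return [
            # Non-LVM request: the useRAID branch in _schedulePartitions()
            # creates an "mdmember" partition on every disk and combines
            # them into a RAID1 array (/dev/md0) formatted for /boot.
            PartSpec(mountpoint="/boot", fstype="ext4", size=500,
                     weight=2000, useRAID=True),
            # LVM request: _createFreeSpacePartitions() sees useRAID on "/"
            # and builds the single LVM PV as a RAID1 array (minor="1", so
            # /dev/md1) instead of one PV partition per disk.
            PartSpec(mountpoint="/", fstype="ext4", size=1024,
                     maxSize=50 * 1024, grow=True, asVol=True, useRAID=True),
            PartSpec(fstype="swap", size=512, grow=True, asVol=True),
        ]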