diff -ruN anaconda-13.21.195.upstream/storage/devicelibs/mdraid.py updates/storage/devicelibs/mdraid.py
--- anaconda-13.21.195.upstream/storage/devicelibs/mdraid.py 2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/devicelibs/mdraid.py 2013-10-20 16:13:54.000000000 -0700
@@ -100,7 +100,7 @@
                         RAID6: 4,
                         RAID5: 3,
                         RAID4: 3,
-                        RAID1: 2,
+                        RAID1: 1,
                         RAID0: 2}
 
     for raid, min_members in raid_min_members.items():
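
This table change is what lets the rest of the patch ask for a RAID1 array with only one member present. A minimal sketch of the effect (plain strings stand in for the module's RAID level constants, and the helper below is illustrative, not installer code):

    raid_min_members = {"RAID6": 4, "RAID5": 3, "RAID4": 3, "RAID1": 1, "RAID0": 2}

    def members_ok(level, members):
        # stand-in for the minimum-members lookup done by the loop below the table
        return members >= raid_min_members[level]

    assert members_ok("RAID1", 1)       # a degraded (single-member) mirror now passes
    assert not members_ok("RAID5", 2)   # other levels keep their original minimums
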
diff -ruN anaconda-13.21.195.upstream/storage/devices.py updates/storage/devices.py
--- anaconda-13.21.195.upstream/storage/devices.py 2012-12-10 15:40:04.000000000 -0800
+++ updates/storage/devices.py 2013-12-02 20:19:32.000000000 -0800
@@ -3139,6 +3144,11 @@
 
         disks = [disk.path for disk in self.devices]
        spares = len(self.devices) - self.memberDevices
+
+        # allow creation of degraded arrays
+        if len(disks) == 1:
+            disks.append("missing")
+
         mdraid.mdcreate(self.path,
                         self.level,
                         disks,
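
A stand-alone sketch of what the added lines do (device names are examples only); mdadm itself accepts the literal keyword "missing" in place of a member when building a degraded mirror:

    disks = ["/dev/sda2"]              # only one real member is available
    if len(disks) == 1:
        disks.append("missing")        # reserve the second mirror slot
    # mdraid.mdcreate() then hands this list to mdadm, roughly equivalent to:
    #   mdadm --create /dev/mdX --level=1 --raid-devices=2 /dev/sda2 missing
    print(disks)                       # ['/dev/sda2', 'missing']
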
diff -ruN anaconda-13.21.195.upstream/storage/devicetree.py updates/storage/devicetree.py
--- anaconda-13.21.195.upstream/storage/devicetree.py 2012-12-18 17:17:19.000000000 -0800
+++ updates/storage/devicetree.py 2013-07-30 14:25:43.000000000 -0700
@@ -2051,7 +2051,11 @@
 
        # remove md array devices for which we did not find all members
        for array in self.getDevicesByType("mdarray"):
-            if array.memberDevices > len(array.parents):
+            # Exception: allow degraded RAID1 arrays to be detected
+            if (array.level == 1) and (array.memberDevices-1) == len(array.parents):
+                log.warning("RAID1 array %s is degraded - %d of %d members found." % \
+                            (array.name, len(array.parents), array.memberDevices))
+            elif array.memberDevices > len(array.parents):
                 self._recursiveRemove(array)
 
     def _recursiveRemove(self, device):
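
Read with illustrative numbers, the new branch behaves like this: a two-member RAID1 that shows up with a single parent is kept (with a warning) rather than pruned, while any other shortfall still falls through to _recursiveRemove(). A toy version of the check, not installer code:

    level, member_devices, parents_found = 1, 2, 1   # hypothetical degraded mirror
    if level == 1 and (member_devices - 1) == parents_found:
        print("degraded RAID1 - keep the array, warn only")
    elif member_devices > parents_found:
        print("too many members missing - remove the array")
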
diff -ruN anaconda-13.21.195.upstream/storage/partitioning.py updates/storage/partitioning.py
--- anaconda-13.21.195.upstream/storage/partitioning.py 2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/partitioning.py 2013-07-30 14:25:43.000000000 -0700
@@ -64,6 +64,41 @@
 
    # create a separate pv partition for each disk with free space
    devs = []
+
+    # if there's no partition requests with asVol==True, then there's no
+    # need to create any pv partitions
+    requested_lvm_partitions = False
+    for request in anaconda.id.storage.autoPartitionRequests:
+        if request.asVol:
+            requested_lvm_partitions = True
+            break
+
+    if not requested_lvm_partitions:
+        return (disks, devs)
+
+    # if using RAID1, create free space partitions as RAID1 members
+    # so they can later be combined into a single pv partition
+    requested_raid_partitions = False
+    for request in anaconda.id.storage.autoPartitionRequests:
+        if request.useRAID and request.mountpoint == '/':
+            requested_raid_partitions = True
+            break
+
+    if requested_raid_partitions:
+        raid_devs = []
+        for disk in disks:
+            dev = anaconda.id.storage.newPartition(fmt_type="mdmember", size=1, grow=True, disks=[disk])
+            anaconda.id.storage.createDevice(dev)
+            raid_devs.append(dev)
+
+        # use minor="1" so that /dev/md1 is the PV, and /dev/md0 is /boot
+        part = anaconda.id.storage.newMDArray(fmt_type="lvmpv", minor="1", level="raid1", parents=raid_devs, memberDevices=len(raid_devs))
+        anaconda.id.storage.createDevice(part)
+        devs.append(part)
+
+        return (disks, devs)
+
+    # original default case
    for disk in disks:
        if anaconda.id.storage.encryptedAutoPart:
            fmt_type = "luks"
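
The net effect of the RAID1 branch added above, sketched with plain data in place of the installer's storage objects (disk names and sizes are illustrative): each disk contributes one grow-able RAID member partition, and a single RAID1 array, /dev/md1, carries the LVM physical volume.

    def plan_pv_over_raid1(disks):
        # toy model only: dictionaries stand in for anaconda's device objects
        members = [{"disk": d, "fmt": "mdmember", "size": 1, "grow": True} for d in disks]
        return {"name": "md1", "level": "raid1", "fmt": "lvmpv", "members": members}

    print(plan_pv_over_raid1(["sda", "sdb"]))
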
@@ -94,6 +129,24 @@
        if request.asVol:
            continue
 
+        if request.useRAID:
+            log.info("partitioning: RAID1 requested for %s" % request.mountpoint)
+
+            raid_devs = []
+            for disk in disks:
+                dev = anaconda.id.storage.newPartition(fmt_type="mdmember",
+                          size=request.size, grow=request.grow, maxsize=request.maxSize,
+                          mountpoint=request.mountpoint, disks=[disk], weight=request.weight)
+                anaconda.id.storage.createDevice(dev)
+                raid_devs.append(dev)
+
+            dev = anaconda.id.storage.newMDArray(fmt_type=request.fstype,
+                      mountpoint=request.mountpoint, level="raid1",
+                      parents=raid_devs, memberDevices=len(raid_devs))
+            anaconda.id.storage.createDevice(dev)
+
+            continue
+
        if request.fstype is None:
            request.fstype = anaconda.id.storage.defaultFSType
        elif request.fstype == "prepboot":
@@ -140,6 +193,9 @@
    return
 
 def _scheduleLVs(anaconda, devs):
+    if not devs:
+        return
+
    if anaconda.id.storage.encryptedAutoPart:
        pvs = []
        for dev in devs:
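
Both partitioning.py branches key off a useRAID attribute on the autopartition requests, which the partspec.py change below introduces. A hypothetical request list (sizes, filesystems, and weights are examples only) that would exercise them, aiming for /boot on /dev/md0 and the LVM PV on /dev/md1 as the minor="1" comment describes:

    from partspec import PartSpec   # assuming anaconda's storage/ directory is on the path

    autorequests = [PartSpec(mountpoint="/boot", fstype="ext4", size=500,
                             weight=2000, useRAID=True),
                    PartSpec(mountpoint="/", fstype="ext4", size=1024,
                             grow=True, asVol=True, useRAID=True),
                    PartSpec(fstype="swap", size=1000, asVol=True)]
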
diff -ruN anaconda-13.21.195.upstream/storage/partspec.py updates/storage/partspec.py
--- anaconda-13.21.195.upstream/storage/partspec.py 2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/partspec.py 2013-07-30 14:25:43.000000000 -0700
@@ -22,7 +22,7 @@
 class PartSpec(object):
     def __init__(self, mountpoint=None, fstype=None, size=None, maxSize=None,
                  grow=False, asVol=False, singlePV=False, weight=0,
-                 requiredSpace=0):
+                 requiredSpace=0, useRAID=False):
         """ Create a new storage specification. These are used to specify
             the default partitioning layout as an object before we have the
             storage system up and running. The attributes are obvious
@@ -45,6 +45,8 @@
                              other LVs are created inside it. If not enough
                              space exists, this PartSpec will never get turned
                              into an LV.
+           useRAID -- Should a RAID1 array be created for this volume? If
+                      not, it will be allocated as a partition.
         """
 
         self.mountpoint = mountpoint
@@ -56,6 +58,7 @@
         self.singlePV = singlePV
         self.weight = weight
         self.requiredSpace = requiredSpace
+        self.useRAID = useRAID
 
         if self.singlePV and not self.asVol:
self.asVol = True |