
Contents of /rpms/anaconda/sme9/0005-DegradedRAID1.patch



Revision 1.2
Fri Jan 3 22:12:21 2014 UTC by charliebrady
Branch: MAIN
CVS Tags: anaconda-13_21_229-1_el6_sme_3, anaconda-13_21_215-1_el6_sme_7, anaconda-13_21_239-1_el6_sme_1, anaconda-13_21_215-1_el6_sme_9, anaconda-13_21_215-1_el6_sme_10, anaconda-13_21_215-1_el6_sme_11, anaconda-13_21_215-1_el6_sme_12, anaconda-13_21_263-1_el6_sme_1, anaconda-13_21_229-1_el6_sme_2, anaconda-13_21_229-1_el6_sme_1, anaconda-13_21_239-1_el6_sme, anaconda-13_21_215-1_el6_sme_8, anaconda-13_21_239_1-1_el6_sme, anaconda-13_21_215-1_el6_sme_6, anaconda-13_21_254-1_el6_sme_2, anaconda-13_21_254-1_el6_sme_1, anaconda-13_21_215-1_el6_sme_5, anaconda-13_21_215-1_el6_sme_4, anaconda-13_21_239-1_el6_sme_2, HEAD
Changes since 1.1: +36 -36 lines
Adjust SME specific patches to have zero offset to avoid .py.orig files. Fix changelog entry for [SME: 8038] change.

diff -ruN anaconda-13.21.195.upstream/storage/devicelibs/mdraid.py updates/storage/devicelibs/mdraid.py
--- anaconda-13.21.195.upstream/storage/devicelibs/mdraid.py	2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/devicelibs/mdraid.py	2013-10-20 16:13:54.000000000 -0700
@@ -100,7 +100,7 @@
                         RAID6: 4,
                         RAID5: 3,
                         RAID4: 3,
-                        RAID1: 2,
+                        RAID1: 1,
                         RAID0: 2}
 
     for raid, min_members in raid_min_members.items():
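
The hunk above lowers anaconda's minimum member count for RAID1 from two to one, which is what lets a single-disk (degraded) mirror pass validation. A minimal runnable sketch of the check being relaxed, with plain integers standing in for anaconda's level constants:

    # Sketch only: these constants stand in for the real ones defined in
    # storage/devicelibs/mdraid.py.
    RAID6, RAID5, RAID4, RAID1, RAID0 = 6, 5, 4, 1, 0

    raid_min_members = {RAID6: 4,
                        RAID5: 3,
                        RAID4: 3,
                        RAID1: 1,   # patched value; upstream requires 2
                        RAID0: 2}

    def get_raid_min_members(raidlevel):
        # mirrors the lookup loop shown at the end of the hunk
        for raid, min_members in raid_min_members.items():
            if raid == raidlevel:
                return min_members
        raise ValueError("invalid raid level %s" % raidlevel)

    assert get_raid_min_members(RAID1) == 1   # a one-member mirror now validates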
diff -ruN anaconda-13.21.215.upstream/storage/devices.py updates/storage/devices.py
--- anaconda-13.21.215.upstream/storage/devices.py	2012-12-10 15:40:04.000000000 -0800
+++ updates/storage/devices.py	2013-12-02 20:19:32.000000000 -0800
@@ -3145,6 +3150,11 @@
 
         disks = [disk.path for disk in self.devices]
         spares = len(self.devices) - self.memberDevices
+
+        # allow creation of degraded arrays
+        if len(disks) == 1:
+            disks.append("missing")
+
         mdraid.mdcreate(self.path,
                         self.level,
                         disks,
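
The appended "missing" entry works because mdadm itself accepts the literal keyword missing in place of a member device and creates the array degraded. A hypothetical sketch of the call that mdcreate() ultimately issues (argument layout and device names are illustrative, not anaconda's exact code):

    import subprocess

    def mdcreate_sketch(path, level, disks, spares=0):
        # "missing" entries pass straight through to mdadm, which leaves
        # those slots empty and assembles the array degraded
        argv = ["mdadm", "--create", path, "--run",
                "--level=%s" % level,
                "--raid-devices=%d" % (len(disks) - spares)]
        if spares:
            argv.append("--spare-devices=%d" % spares)
        argv.extend(disks)
        subprocess.check_call(argv)

    # With one real disk, the hunk above makes this equivalent to:
    #   mdadm --create /dev/md1 --run --level=1 --raid-devices=2 /dev/sda2 missing
    # The absent half can be attached later, for example:
    #   mdadm --manage /dev/md1 --add /dev/sdb2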
diff -ruN anaconda-13.21.195.upstream/storage/devicetree.py updates/storage/devicetree.py
--- anaconda-13.21.195.upstream/storage/devicetree.py	2012-12-18 17:17:19.000000000 -0800
+++ updates/storage/devicetree.py	2013-07-30 14:25:43.000000000 -0700
@@ -2051,7 +2051,11 @@
 
         # remove md array devices for which we did not find all members
         for array in self.getDevicesByType("mdarray"):
-            if array.memberDevices > len(array.parents):
+            # Exception: allow degraded RAID1 arrays to be detected
+            if (array.level == 1) and (array.memberDevices-1) == len(array.parents):
+                log.warning("RAID1 array %s is degraded - %d of %d members found." % \
+                            (array.name, len(array.parents), array.memberDevices))
+            elif array.memberDevices > len(array.parents):
                 self._recursiveRemove(array)
 
     def _recursiveRemove(self, device):
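
This hunk changes device-tree scanning so that a RAID1 array found with exactly one member short of its recorded count is kept (with a warning) instead of being recursively removed. A condensed, runnable sketch of the keep-or-remove decision, with plain values standing in for the MDRaidArrayDevice attributes used above:

    def should_remove(level, member_devices, parents_found):
        # degraded RAID1, exactly one member absent: keep it, warn only
        if level == 1 and (member_devices - 1) == parents_found:
            return False
        # any other array with missing members is still torn down
        return member_devices > parents_found

    assert should_remove(1, 2, 1) is False   # two-way mirror, one disk found
    assert should_remove(1, 2, 0) is True    # no members found at all
    assert should_remove(5, 3, 2) is True    # degraded RAID5 still removed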
diff -ruN anaconda-13.21.195.upstream/storage/partspec.py updates/storage/partspec.py
--- anaconda-13.21.195.upstream/storage/partspec.py	2012-11-26 12:42:03.000000000 -0800
+++ updates/storage/partspec.py	2013-07-30 14:25:43.000000000 -0700
@@ -22,7 +22,7 @@
 class PartSpec(object):
     def __init__(self, mountpoint=None, fstype=None, size=None, maxSize=None,
                  grow=False, asVol=False, singlePV=False, weight=0,
-                 requiredSpace=0):
+                 requiredSpace=0, useRAID=False):
         """ Create a new storage specification. These are used to specify
             the default partitioning layout as an object before we have the
             storage system up and running. The attributes are obvious
@@ -45,6 +45,8 @@
                            other LVs are created inside it. If not enough
                            space exists, this PartSpec will never get turned
                            into an LV.
+            useRAID -- Should a RAID1 array be created for this volume? If
+                       not, it will be allocated as a partition.
         """
 
         self.mountpoint = mountpoint
@@ -56,6 +58,7 @@
         self.singlePV = singlePV
         self.weight = weight
         self.requiredSpace = requiredSpace
+        self.useRAID = useRAID
 
         if self.singlePV and not self.asVol:
             self.asVol = True
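
With the new keyword in place, a default layout can opt individual requests into mirroring. A hypothetical example of how a request list might use it (mount points, sizes and filesystem types are illustrative; the patch does not show SME's actual request list):

    from storage.partspec import PartSpec   # module path as in the diff above

    autorequests = [
        PartSpec(mountpoint="/boot", fstype="ext4", size=250,
                 useRAID=True),                     # becomes its own md array
        PartSpec(mountpoint="/", fstype=None, size=1024, grow=True,
                 asVol=True, useRAID=True),         # an LV inside a RAID1-backed PV
    ]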
diff -ruN anaconda-13.21.195/storage/partitioning.py.orig anaconda-13.21.195/storage/partitioning.py
--- anaconda-13.21.215/storage/partitioning.py.orig	2013-08-02 09:47:00.000000000 -0400
+++ anaconda-13.21.215/storage/partitioning.py	2014-01-03 16:38:31.954668243 -0500
@@ -64,6 +64,41 @@
 
     # create a separate pv partition for each disk with free space
     devs = []
+
+    # if there's no partition requests with asVol==True, then there's no
+    # need to create any pv partitions
+    requested_lvm_partitions = False
+    for request in anaconda.id.storage.autoPartitionRequests:
+        if request.asVol:
+            requested_lvm_partitions = True
+            break
+
+    if not requested_lvm_partitions:
+        return (disks, devs)
+
+    # if using RAID1, create free space partitions as RAID1 members
+    # so they can later be combined into a single pv partition
+    requested_raid_partitions = False
+    for request in anaconda.id.storage.autoPartitionRequests:
+        if request.useRAID and request.mountpoint == '/':
+            requested_raid_partitions = True
+            break
+
+    if requested_raid_partitions:
+        raid_devs = []
+        for disk in disks:
+            dev = anaconda.id.storage.newPartition(fmt_type="mdmember", size=1, grow=True, disks=[disk])
+            anaconda.id.storage.createDevice(dev)
+            raid_devs.append(dev)
+
+        # use minor="1" so that /dev/md1 is the PV, and /dev/md0 is /boot
+        part = anaconda.id.storage.newMDArray(fmt_type="lvmpv", minor="1", level="raid1", parents=raid_devs, memberDevices=len(raid_devs))
+        anaconda.id.storage.createDevice(part)
+        devs.append(part)
+
+        return (disks, devs)
+
+    # original default case
     for disk in disks:
         if anaconda.id.storage.encryptedAutoPart:
             fmt_type = "luks"
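
The preamble added by this hunk gives the PV-scheduling code three outcomes: skip PV creation entirely when nothing asks to be an LV, build a single RAID1-backed PV when '/' requests RAID, or fall through to the untouched upstream default of one PV partition per disk. A condensed sketch of that control flow (the function itself is hypothetical; attribute names follow the hunk):

    def pv_strategy(requests):
        # 1) nothing wants to be an LV: schedule no pv partitions at all
        if not any(r.asVol for r in requests):
            return "no pv partitions"
        # 2) RAID1 requested for '/': one mdmember partition per disk,
        #    combined into a single pv on /dev/md1 (md0 stays free for /boot)
        if any(r.useRAID and r.mountpoint == "/" for r in requests):
            return "single raid1 pv on /dev/md1"
        # 3) upstream default: a separate pv partition on each disk
        return "one pv partition per disk"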
@@ -94,6 +129,24 @@
         if request.asVol:
             continue
 
+        if request.useRAID:
+            log.info("partitioning: RAID1 requested for %s" % request.mountpoint)
+
+            raid_devs = []
+            for disk in disks:
+                dev = anaconda.id.storage.newPartition(fmt_type="mdmember",
+                    size=request.size, grow=request.grow, maxsize=request.maxSize,
+                    mountpoint=request.mountpoint, disks=[disk], weight=request.weight)
+                anaconda.id.storage.createDevice(dev)
+                raid_devs.append(dev)
+
+            dev = anaconda.id.storage.newMDArray(fmt_type=request.fstype,
+                mountpoint=request.mountpoint, level="raid1",
+                parents=raid_devs, memberDevices=len(raid_devs))
+            anaconda.id.storage.createDevice(dev)
+
+            continue
+
         if request.fstype is None:
             request.fstype = anaconda.id.storage.defaultFSType
         elif request.fstype == "prepboot":
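
For a non-LVM request flagged useRAID, this hunk schedules one mdmember partition per disk plus an md array carrying the filesystem, and the trailing continue keeps the request away from the plain-partition path below. Combined with the PV hunk above, the intended end state on a two-disk machine looks roughly like this (device names, sizes and filesystem types are illustrative):

    # Hypothetical resulting layout for two disks, sda and sdb:
    layout = {
        "md0": {"level": "raid1", "members": ["sda1", "sdb1"],
                "fmt": "ext4", "mountpoint": "/boot"},
        "md1": {"level": "raid1", "members": ["sda2", "sdb2"],
                "fmt": "lvmpv"},   # the single PV; the VG and its LVs live here
    }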
@@ -145,6 +198,9 @@
         return
 
 def _scheduleLVs(anaconda, devs):
+    if not devs:
+        return
+
     if anaconda.id.storage.encryptedAutoPart:
         pvs = []
         for dev in devs:
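
The guard added here matters because the early returns introduced in the first partitioning.py hunk can now hand _scheduleLVs() an empty devs list; without it, the code below would try to build a volume group with no physical volumes. A sketch of the effect:

    def scheduleLVs_sketch(devs):
        # no PVs were scheduled, so there is no VG to create and no LVs to carve
        if not devs:
            return
        # ... original body continues: optionally wrap devs in LUKS, create
        # the volume group from devs, then allocate the requested LVs ...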
