1 |
charliebrady |
1.1 |
# partitioning.py |
2 |
|
|
# Disk partitioning functions. |
3 |
|
|
# |
4 |
|
|
# Copyright (C) 2009 Red Hat, Inc. |
5 |
|
|
# |
6 |
|
|
# This copyrighted material is made available to anyone wishing to use, |
7 |
|
|
# modify, copy, or redistribute it subject to the terms and conditions of |
8 |
|
|
# the GNU General Public License v.2, or (at your option) any later version. |
9 |
|
|
# This program is distributed in the hope that it will be useful, but WITHOUT |
10 |
|
|
# ANY WARRANTY expressed or implied, including the implied warranties of |
11 |
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General |
12 |
|
|
# Public License for more details. You should have received a copy of the |
13 |
|
|
# GNU General Public License along with this program; if not, write to the |
14 |
|
|
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
15 |
|
|
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the |
16 |
|
|
# source code or documentation are not subject to the GNU General Public |
17 |
|
|
# License and may only be used or replicated with the express permission of |
18 |
|
|
# Red Hat, Inc. |
19 |
|
|
# |
20 |
|
|
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com> |
21 |
|
|
# |
22 |
|
|
|
23 |
|
|
import sys |
24 |
|
|
import os |
25 |
|
|
from operator import add, sub, gt, lt |
26 |
|
|
|
27 |
|
|
import parted |
28 |
|
|
from pykickstart.constants import * |
29 |
|
|
|
30 |
|
|
from constants import * |
31 |
|
|
import platform |
32 |
|
|
|
33 |
|
|
from errors import * |
34 |
|
|
from deviceaction import * |
35 |
|
|
from devices import PartitionDevice, LUKSDevice, devicePathToName |
36 |
|
|
from formats import getFormat |
37 |
|
|
|
38 |
|
|
import gettext |
39 |
|
|
_ = lambda x: gettext.ldgettext("anaconda", x) |
40 |
|
|
|
41 |
|
|
import logging |
42 |
|
|
log = logging.getLogger("storage") |
43 |
|
|
|
44 |
|
|
def _createFreeSpacePartitions(anaconda):
    """ Create one growable PV-member partition per disk with free space.

        Scans each partitioned disk for a free-space region larger than
        100MB, then — if any autopart request has asVol set — schedules a
        growable partition on each such disk to later serve as an LVM
        physical volume (either directly, via RAID1, or LUKS-wrapped by
        the caller's later passes).

        Arguments:

            anaconda -- the Anaconda instance (provides id.storage)

        Returns:

            (disks, devs) -- the disks found with usable free space and
                             the devices scheduled for creation (may be
                             empty if no LVM requests exist)
    """
    # get a list of disks that have at least one free space region of at
    # least 100MB
    disks = []
    for disk in anaconda.id.storage.partitioned:
        # honor an explicit clearpart disk list: skip disks not on it
        if anaconda.id.storage.clearPartDisks and \
           (disk.name not in anaconda.id.storage.clearPartDisks):
            continue

        # walk the parted partition chain looking for free-space regions
        part = disk.format.firstPartition
        while part:
            if not part.type & parted.PARTITION_FREESPACE:
                part = part.nextPartition()
                continue

            if part.getSize(unit="MB") > 100:
                disks.append(disk)
                break

            part = part.nextPartition()

    # create a separate pv partition for each disk with free space
    devs = []

    # if there's no partition requests with asVol==True, then there's no
    # need to create any pv partitions
    requested_lvm_partitions = False
    for request in anaconda.id.storage.autoPartitionRequests:
        if request.asVol:
            requested_lvm_partitions = True
            break

    if not requested_lvm_partitions:
        return (disks, devs)

    # if using RAID1, create free space partitions as RAID1 members
    # so they can later be combined into a single pv partition
    requested_raid_partitions = False
    for request in anaconda.id.storage.autoPartitionRequests:
        if request.useRAID and request.mountpoint == '/':
            requested_raid_partitions = True
            break

    if requested_raid_partitions:
        raid_devs = []
        for disk in disks:
            # size=1 + grow=True: minimal base size, grown to fill free space
            dev = anaconda.id.storage.newPartition(fmt_type="mdmember", size=1, grow=True, disks=[disk])
            anaconda.id.storage.createDevice(dev)
            raid_devs.append(dev)

        # use minor="1" so that /dev/md1 is the PV, and /dev/md0 is /boot
        part = anaconda.id.storage.newMDArray(fmt_type="lvmpv", minor="1", level="raid1", parents=raid_devs, memberDevices=len(raid_devs))
        anaconda.id.storage.createDevice(part)
        devs.append(part)

        return (disks, devs)

    # original default case
    for disk in disks:
        if anaconda.id.storage.encryptedAutoPart:
            # LUKS container first; the PV layer is added in _scheduleLVs
            fmt_type = "luks"
            fmt_args = {"escrow_cert": anaconda.id.storage.autoPartEscrowCert,
                        "cipher": anaconda.id.storage.encryptionCipher,
                        "add_backup_passphrase": anaconda.id.storage.autoPartAddBackupPassphrase}
        else:
            fmt_type = "lvmpv"
            fmt_args = {}
        part = anaconda.id.storage.newPartition(fmt_type=fmt_type,
                                                fmt_args=fmt_args,
                                                size=1,
                                                grow=True,
                                                disks=[disk])
        anaconda.id.storage.createDevice(part)
        devs.append(part)

    return (disks, devs)
120 |
|
|
|
121 |
|
|
def _schedulePartitions(anaconda, disks):
    """ Schedule creation of plain-partition autopart requests.

        First pass of autopartitioning: converts each non-LV entry of
        storage.autoPartitionRequests into a PartitionDevice (or an MD
        RAID1 array of per-disk members when request.useRAID is set) and
        registers it for creation. LV requests (asVol) are handled later
        by _scheduleLVs.

        Arguments:

            anaconda -- the Anaconda instance
            disks    -- list of disks eligible for new partitions
    """
    #
    # Convert storage.autoPartitionRequests into Device instances and
    # schedule them for creation
    #
    # First pass is for partitions only. We'll do LVs later.
    #
    for request in anaconda.id.storage.autoPartitionRequests:
        if request.asVol:
            continue

        if request.useRAID:
            log.info("partitioning: RAID1 requested for %s" % request.mountpoint)

            # one mdmember partition per disk, then a RAID1 array on top
            raid_devs = []
            for disk in disks:
                dev = anaconda.id.storage.newPartition(fmt_type="mdmember",
                        size=request.size, grow=request.grow, maxsize=request.maxSize,
                        mountpoint=request.mountpoint, disks=[disk], weight=request.weight)
                anaconda.id.storage.createDevice(dev)
                raid_devs.append(dev)

            dev = anaconda.id.storage.newMDArray(fmt_type=request.fstype,
                    mountpoint=request.mountpoint, level="raid1",
                    parents=raid_devs, memberDevices=len(raid_devs))
            anaconda.id.storage.createDevice(dev)

            continue

        if request.fstype is None:
            request.fstype = anaconda.id.storage.defaultFSType
        elif request.fstype == "prepboot":
            # make sure there never is more than one prepboot per disk
            bootdev = anaconda.platform.bootDevice()
            if (bootdev and
                anaconda.id.bootloader.drivelist and
                anaconda.id.bootloader.drivelist[0] == bootdev.disk.name):
                # do not allow creating the new PReP boot on the same drive
                log.info("partitioning: skipping a PReP boot "
                         "partition request on %s" % bootdev.disk.name)
                continue
            log.debug("partitioning: allowing a PReP boot partition request")
        elif request.fstype == "efi":
            # make sure there never is more than one efi system partition per disk
            bootdev = anaconda.platform.bootDevice()
            if (bootdev and
                anaconda.id.bootloader.drivelist and
                anaconda.id.bootloader.drivelist[0] == bootdev.disk.name):
                log.info("partitioning: skipping a EFI System "
                         "Partition request on %s" % bootdev.disk.name)
                # reuse the existing ESP instead of making a second one
                bootdev.format.mountpoint = "/boot/efi"
                continue
            log.debug("partitioning: allowing a EFI System Partition request")

        # This is a little unfortunate but let the backend dictate the rootfstype
        # so that things like live installs can do the right thing
        if request.mountpoint == "/" and anaconda.backend.rootFsType != None:
            request.fstype = anaconda.backend.rootFsType

        dev = anaconda.id.storage.newPartition(fmt_type=request.fstype,
                                               size=request.size,
                                               grow=request.grow,
                                               maxsize=request.maxSize,
                                               mountpoint=request.mountpoint,
                                               disks=disks,
                                               weight=request.weight)

        # schedule the device for creation
        anaconda.id.storage.createDevice(dev)

    # make sure preexisting broken lvm/raid configs get out of the way
    return
194 |
|
|
|
195 |
|
|
def _scheduleLVs(anaconda, devs):
    """ Schedule creation of logical-volume autopart requests.

        Second pass of autopartitioning: builds a volume group on top of
        the PV devices created by _createFreeSpacePartitions (wrapping
        each in a LUKSDevice first when encryptedAutoPart is set), then
        converts each asVol request into an LV and registers it for
        creation.

        Arguments:

            anaconda -- the Anaconda instance
            devs     -- PV (or LUKS container) devices; no-op if empty
    """
    if not devs:
        return

    if anaconda.id.storage.encryptedAutoPart:
        # layer an lvmpv-formatted LUKS device over each container
        pvs = []
        for dev in devs:
            pv = LUKSDevice("luks-%s" % dev.name,
                            format=getFormat("lvmpv", device=dev.path),
                            size=dev.size,
                            parents=dev)
            pvs.append(pv)
            anaconda.id.storage.createDevice(pv)
    else:
        pvs = devs

    # create a vg containing all of the autopart pvs
    vg = anaconda.id.storage.newVG(pvs=pvs)
    anaconda.id.storage.createDevice(vg)

    # snapshot the VG size before LVs are added, for requiredSpace checks
    initialVGSize = vg.size

    #
    # Convert storage.autoPartitionRequests into Device instances and
    # schedule them for creation.
    #
    # Second pass, for LVs only.
    for request in anaconda.id.storage.autoPartitionRequests:
        if not request.asVol:
            continue

        # skip optional LVs that don't fit in the VG
        if request.requiredSpace and request.requiredSpace > initialVGSize:
            continue

        if request.fstype is None:
            request.fstype = anaconda.id.storage.defaultFSType

        # This is a little unfortunate but let the backend dictate the rootfstype
        # so that things like live installs can do the right thing
        if request.mountpoint == "/" and anaconda.backend.rootFsType != None:
            request.fstype = anaconda.backend.rootFsType

        # FIXME: move this to a function and handle exceptions
        dev = anaconda.id.storage.newLV(vg=vg,
                                        fmt_type=request.fstype,
                                        mountpoint=request.mountpoint,
                                        grow=request.grow,
                                        maxsize=request.maxSize,
                                        size=request.size,
                                        singlePV=request.singlePV)

        # schedule the device for creation
        anaconda.id.storage.createDevice(dev)
248 |
|
|
|
249 |
|
|
|
250 |
|
|
def doAutoPartition(anaconda):
    """ Drive automatic partitioning end to end.

        Clears existing partitions, creates free-space PV partitions,
        schedules partition and LV requests, runs allocation/growth, and
        sanity-checks the result. On failure, resets storage and either
        returns DISPATCH_BACK (interactive) or exits (kickstart).

        Arguments:

            anaconda -- the Anaconda instance

        Returns:

            None on success or when backing up through the dispatcher;
            DISPATCH_BACK on recoverable errors.
    """
    log.debug("doAutoPartition(%s)" % anaconda)
    log.debug("doAutoPart: %s" % anaconda.id.storage.doAutoPart)
    log.debug("clearPartType: %s" % anaconda.id.storage.clearPartType)
    log.debug("clearPartDisks: %s" % anaconda.id.storage.clearPartDisks)
    log.debug("autoPartitionRequests: %s" % anaconda.id.storage.autoPartitionRequests)
    log.debug("storage.disks: %s" % [d.name for d in anaconda.id.storage.disks])
    log.debug("storage.partitioned: %s" % [d.name for d in anaconda.id.storage.partitioned])
    log.debug("all names: %s" % [d.name for d in anaconda.id.storage.devices])
    if anaconda.dir == DISPATCH_BACK:
        # temporarily unset storage.clearPartType so that all devices will be
        # found during storage reset
        clearPartType = anaconda.id.storage.clearPartType
        anaconda.id.storage.clearPartType = None
        anaconda.id.storage.reset()
        anaconda.id.storage.clearPartType = clearPartType
        return

    disks = []
    devs = []

    if anaconda.id.storage.doAutoPart:
        clearPartitions(anaconda.id.storage)
        # update the bootloader's drive list to add disks which have their
        # whole disk format replaced by a disklabel. Make sure to keep any
        # previous boot order selection from clearpart_gui or kickstart
        anaconda.id.bootloader.updateDriveList(anaconda.id.bootloader.drivelist)

    if anaconda.id.storage.doAutoPart:
        (disks, devs) = _createFreeSpacePartitions(anaconda)

        if disks == []:
            # no disk had a usable free-space region; report and back out
            if anaconda.isKickstart:
                msg = _("Could not find enough free space for automatic "
                        "partitioning.  Press 'OK' to exit the installer.")
            else:
                msg = _("Could not find enough free space for automatic "
                        "partitioning, please use another partitioning method.")

            anaconda.intf.messageWindow(_("Error Partitioning"), msg,
                                        custom_icon='error')

            if anaconda.isKickstart:
                sys.exit(0)

            anaconda.id.storage.reset()
            return DISPATCH_BACK

        _schedulePartitions(anaconda, disks)

    # sanity check the individual devices
    log.warning("not sanity checking devices because I don't know how yet")

    # run the autopart function to allocate and grow partitions
    try:
        doPartitioning(anaconda.id.storage,
                       exclusiveDisks=anaconda.id.storage.clearPartDisks)

        if anaconda.id.storage.doAutoPart:
            _scheduleLVs(anaconda, devs)

            # grow LVs
            growLVM(anaconda.id.storage)
    except PartitioningWarning as msg:
        # non-fatal: show (or log, under kickstart) and keep going
        if not anaconda.isKickstart:
            anaconda.intf.messageWindow(_("Warnings During Automatic "
                                          "Partitioning"),
                           _("Following warnings occurred during automatic "
                           "partitioning:\n\n%s") % (msg,),
                           custom_icon='warning')
        else:
            log.warning(msg)
    except PartitioningError as msg:
        # restore drives to original state
        anaconda.id.storage.reset()
        if not anaconda.isKickstart:
            extra = ""

            if anaconda.id.displayMode != "t":
                # graphical mode: let the user retry via the partition step
                anaconda.dispatch.skipStep("partition", skip = 0)
            else:
                extra = _("\n\nPress 'OK' to exit the installer.")
            anaconda.intf.messageWindow(_("Error Partitioning"),
                   _("Could not allocate requested partitions: \n\n"
                     "%(msg)s.%(extra)s") % {'msg': msg, 'extra': extra},
                   custom_icon='error')

        if anaconda.isKickstart:
            sys.exit(0)
        else:
            return DISPATCH_BACK

    # sanity check the collection of devices
    log.warning("not sanity checking storage config because I don't know how yet")
    # now do a full check of the requests
    (errors, warnings) = anaconda.id.storage.sanityCheck()
    if warnings:
        for warning in warnings:
            log.warning(warning)
    if errors:
        errortxt = "\n".join(errors)
        if anaconda.isKickstart:
            extra = _("\n\nPress 'OK' to exit the installer.")
        else:
            extra = _("\n\nPress 'OK' to choose a different partitioning option.")

        anaconda.intf.messageWindow(_("Automatic Partitioning Errors"),
                           _("The following errors occurred with your "
                             "partitioning:\n\n%(errortxt)s\n\n"
                             "This can happen if there is not enough "
                             "space on your hard drive(s) for the "
                             "installation. %(extra)s")
                           % {'errortxt': errortxt, 'extra': extra},
                           custom_icon='error')
        #
        # XXX if in kickstart we reboot
        #
        if anaconda.isKickstart:
            anaconda.intf.messageWindow(_("Unrecoverable Error"),
                                        _("The system will now reboot."))
            sys.exit(0)
        anaconda.id.storage.reset()
        return DISPATCH_BACK
373 |
|
|
|
374 |
|
|
def shouldClear(device, clearPartType, clearPartDisks=None):
    """ Return True if the device may be cleared by clearpart.

        Applies the clearpart policy to a single device: partitions are
        checked against the Mac map partition, the disk list, partition
        type, and (for CLEARPART_TYPE_LINUX) whether they look Linux-
        native; bare unpartitioned disks are checked against the disk
        list, hidden formats, and linuxNative. Protected and immutable
        devices are never cleared.

        Arguments:

            device        -- the device being considered
            clearPartType -- one of the CLEARPART_TYPE_* constants

        Keyword arguments:

            clearPartDisks -- optional list of disk names to restrict
                              clearing to

        Returns:

            bool -- whether clearpart may destroy this device
    """
    if clearPartType not in [CLEARPART_TYPE_LINUX, CLEARPART_TYPE_ALL]:
        return False

    if isinstance(device, PartitionDevice):
        # Never clear the special first partition on a Mac disk label, as that
        # holds the partition table itself.
        if device.disk.format.partedDisk.type == "mac" and \
           device.partedPartition.number == 1 and \
           device.partedPartition.name == "Apple":
            return False

        # If we got a list of disks to clear, make sure this one's on it
        if clearPartDisks and device.disk.name not in clearPartDisks:
            return False

        # We don't want to fool with extended partitions, freespace, &c
        if device.partType not in [parted.PARTITION_NORMAL,
                                   parted.PARTITION_LOGICAL]:
            return False

        # "Linux" clearing also accepts LVM/RAID/swap-flagged partitions
        if clearPartType == CLEARPART_TYPE_LINUX and \
           not device.format.linuxNative and \
           not device.getFlag(parted.PARTITION_LVM) and \
           not device.getFlag(parted.PARTITION_RAID) and \
           not device.getFlag(parted.PARTITION_SWAP):
            return False
    elif device.isDisk and not device.partitioned:
        # If we got a list of disks to clear, make sure this one's on it
        if clearPartDisks and device.name not in clearPartDisks:
            return False

        # Never clear disks with hidden formats
        if device.format.hidden:
            return False

        if clearPartType == CLEARPART_TYPE_LINUX and \
           not device.format.linuxNative:
            return False

    # Don't clear devices holding install media.
    if device.protected:
        return False

    # Don't clear immutable devices.
    if device.immutable:
        return False

    # TODO: do platform-specific checks on ia64, pSeries, iSeries, mac

    return True
425 |
|
|
|
426 |
|
|
def clearPartitions(storage):
    """ Clear partitions and dependent devices from disks.

        Arguments:

            storage -- a storage.Storage instance

        Keyword arguments:

            None

        NOTES:

            - Needs some error handling, especially for the parted bits.

        Destroys every existing partition that shouldClear() approves
        (highest partition number first), tearing down each partition's
        dependent devices leaf-first. Afterwards it removes empty
        extended partitions and, for disks being fully cleared, replaces
        non-native disklabels with the platform's preferred label type.
    """
    if storage.clearPartType is None or storage.clearPartType == CLEARPART_TYPE_NONE:
        # not much to do
        return

    _platform = storage.anaconda.platform

    if not hasattr(_platform, "diskLabelTypes"):
        raise StorageError("can't clear partitions without platform data")

    # we are only interested in partitions that physically exist
    partitions = [p for p in storage.partitions if p.exists]
    # Sort partitions by descending partition number to minimize confusing
    # things like multiple "destroy sda5" actions due to parted renumbering
    # partitions. This can still happen through the UI but it makes sense to
    # avoid it where possible.
    partitions.sort(key=lambda p: p.partedPartition.number, reverse=True)
    for part in partitions:
        log.debug("clearpart: looking at %s" % part.name)
        if not shouldClear(part, storage.clearPartType, storage.clearPartDisks):
            continue

        log.debug("clearing %s" % part.name)

        # XXX is there any argument for not removing incomplete devices?
        #     -- maybe some RAID devices
        # repeatedly strip leaves until the dependency set is empty
        devices = storage.deviceDeps(part)
        while devices:
            log.debug("devices to remove: %s" % ([d.name for d in devices],))
            leaves = [d for d in devices if d.isleaf]
            log.debug("leaves to remove: %s" % ([d.name for d in leaves],))
            for leaf in leaves:
                storage.destroyDevice(leaf)
                devices.remove(leaf)

        log.debug("partitions: %s" % [p.getDeviceNodeName() for p in part.partedPartition.disk.partitions])
        storage.destroyDevice(part)

    # now remove any empty extended partitions
    removeEmptyExtendedPartitions(storage)

    # make sure that the the boot device, along with any other disk we are
    # supposed to reinitialize, has the correct disklabel type if we're going
    # to completely clear it.
    for disk in storage.partitioned:
        if not storage.anaconda.id.bootloader.drivelist and \
           not storage.reinitializeDisks:
            break

        # unless reinitializing everything, only touch the boot drive
        if not storage.reinitializeDisks and \
           disk.name != storage.anaconda.id.bootloader.drivelist[0]:
            continue

        if storage.clearPartType != CLEARPART_TYPE_ALL or \
           (storage.clearPartDisks and disk.name not in storage.clearPartDisks):
            continue

        # Don't touch immutable disks
        if disk.immutable:
            continue

        # don't reinitialize the disklabel if the disk contains install media
        if filter(lambda p: p.dependsOn(disk), storage.protectedDevices):
            continue

        nativeLabelType = _platform.bestDiskLabelType(disk)
        if disk.format.labelType == nativeLabelType:
            continue

        if disk.format.labelType == "mac":
            # remove the magic apple partition
            for part in storage.partitions:
                if part.disk == disk and part.partedPartition.number == 1:
                    log.debug("clearing %s" % part.name)
                    # We can't schedule the apple map partition for removal
                    # because parted will not allow us to remove it from the
                    # disk. Still, we need it out of the devicetree.
                    storage.devicetree._removeDevice(part, moddisk=False)

        destroy_action = ActionDestroyFormat(disk)
        newLabel = getFormat("disklabel", device=disk.path,
                             labelType=nativeLabelType)
        create_action = ActionCreateFormat(disk, format=newLabel)
        storage.devicetree.registerAction(destroy_action)
        storage.devicetree.registerAction(create_action)
526 |
|
|
|
527 |
|
|
def removeEmptyExtendedPartitions(storage):
    """ Remove extended partitions that contain no logical partitions.

        Also clears whole-disk formats from unpartitioned disks that
        shouldClear() approves, scheduling a fresh disklabel on each.

        Arguments:

            storage -- a storage.Storage instance
    """
    for disk in storage.partitioned:
        log.debug("checking whether disk %s has an empty extended" % disk.name)
        extended = disk.format.extendedPartition
        logical_parts = disk.format.logicalPartitions
        log.debug("extended is %s ; logicals is %s" % (extended, [p.getDeviceNodeName() for p in logical_parts]))
        if extended and not logical_parts:
            log.debug("removing empty extended partition from %s" % disk.name)
            # resolve the parted partition back to our devicetree device
            extended_name = devicePathToName(extended.getDeviceNodeName())
            extended = storage.devicetree.getDeviceByName(extended_name)
            storage.destroyDevice(extended)
            #disk.partedDisk.removePartition(extended.partedPartition)

    for disk in [d for d in storage.disks if d not in storage.partitioned]:
        # clear any whole-disk formats that need clearing
        if shouldClear(disk, storage.clearPartType, storage.clearPartDisks):
            log.debug("clearing %s" % disk.name)
            # strip dependent devices leaf-first before touching the disk
            devices = storage.deviceDeps(disk)
            while devices:
                log.debug("devices to remove: %s" % ([d.name for d in devices],))
                leaves = [d for d in devices if d.isleaf]
                log.debug("leaves to remove: %s" % ([d.name for d in leaves],))
                for leaf in leaves:
                    storage.destroyDevice(leaf)
                    devices.remove(leaf)

            destroy_action = ActionDestroyFormat(disk)
            newLabel = getFormat("disklabel", device=disk.path)
            create_action = ActionCreateFormat(disk, format=newLabel)
            storage.devicetree.registerAction(destroy_action)
            storage.devicetree.registerAction(create_action)
558 |
|
|
|
559 |
|
|
def partitionCompare(part1, part2):
    """ More specifically defined partitions come first.

        < 1 => x < y
          0 => x == y
        > 1 => x > y

        A cmp-style comparator for ordering partition requests before
        allocation: weighted sum of base weight, disk-list specificity,
        primary-only, fixed-vs-growable, base size, max growth size, and
        mountpoint, then clamped to -1/0/1. (Python 2 cmp() is used.)
    """
    ret = 0

    # explicit request weights dominate (negative => earlier)
    if part1.req_base_weight:
        ret -= part1.req_base_weight

    if part2.req_base_weight:
        ret += part2.req_base_weight

    # more specific disk specs to the front of the list
    # req_disks being empty is equivalent to it being an infinitely long list
    if part1.req_disks and not part2.req_disks:
        ret -= 500
    elif not part1.req_disks and part2.req_disks:
        ret += 500
    else:
        ret += cmp(len(part1.req_disks), len(part2.req_disks)) * 500

    # primary-only to the front of the list
    ret -= cmp(part1.req_primary, part2.req_primary) * 200

    # fixed size requests to the front
    ret += cmp(part1.req_grow, part2.req_grow) * 100

    # larger requests go to the front of the list
    ret -= cmp(part1.req_base_size, part2.req_base_size) * 50

    # potentially larger growable requests go to the front
    if part1.req_grow and part2.req_grow:
        if not part1.req_max_size and part2.req_max_size:
            ret -= 25
        elif part1.req_max_size and not part2.req_max_size:
            ret += 25
        else:
            ret -= cmp(part1.req_max_size, part2.req_max_size) * 25

    # give a little bump based on mountpoint
    if hasattr(part1.format, "mountpoint") and \
       hasattr(part2.format, "mountpoint"):
        ret += cmp(part1.format.mountpoint, part2.format.mountpoint) * 10

    # clamp the weighted sum to a canonical cmp result
    if ret > 0:
        ret = 1
    elif ret < 0:
        ret = -1

    return ret
612 |
|
|
|
613 |
|
|
def getNextPartitionType(disk, no_primary=None):
    """ Find the type of partition to create next on a disk.

        Return a parted partition type value representing the type of the
        next partition we will create on this disk.

        If there is only one free primary partition and we can create an
        extended partition, we do that.

        If there are free primary slots and an extended partition we will
        recommend creating a primary partition. This can be overridden
        with the keyword argument no_primary.

        Arguments:

            disk -- a parted.Disk instance representing the disk

        Keyword arguments:

            no_primary -- given a choice between primary and logical
                          partitions, prefer logical

        Returns None when no partition of any type can be created.
    """
    part_type = None
    extended = disk.getExtendedPartition()
    supports_extended = disk.supportsFeature(parted.DISK_TYPE_EXTENDED)
    logical_count = len(disk.getLogicalPartitions())
    max_logicals = disk.getMaxLogicalPartitions()
    primary_count = disk.primaryPartitionCount

    if primary_count < disk.maxPrimaryPartitionCount:
        if primary_count == disk.maxPrimaryPartitionCount - 1:
            # can we make an extended partition? now's our chance.
            if not extended and supports_extended:
                part_type = parted.PARTITION_EXTENDED
            elif not extended:
                # extended partitions not supported. primary or nothing.
                if not no_primary:
                    part_type = parted.PARTITION_NORMAL
            else:
                # there is an extended and a free primary
                if not no_primary:
                    part_type = parted.PARTITION_NORMAL
                elif logical_count < max_logicals:
                    # we have an extended with logical slots, so use one.
                    part_type = parted.PARTITION_LOGICAL
        else:
            # there are two or more primary slots left. use one unless we're
            # not supposed to make primaries.
            if not no_primary:
                part_type = parted.PARTITION_NORMAL
            elif extended and logical_count < max_logicals:
                part_type = parted.PARTITION_LOGICAL
    elif extended and logical_count < max_logicals:
        # no primary slots at all; fall back to a logical if one fits
        part_type = parted.PARTITION_LOGICAL

    return part_type
670 |
|
|
|
671 |
|
|
def getBestFreeSpaceRegion(disk, part_type, req_size,
                           boot=None, best_free=None, grow=None):
    """ Return the "best" free region on the specified disk.

        For non-boot partitions, we return the largest free region on the
        disk. For boot partitions, we return the first region that is
        large enough to hold the partition.

        Partition type (parted's PARTITION_NORMAL, PARTITION_LOGICAL) is
        taken into account when locating a suitable free region.

        For locating the best region from among several disks, the keyword
        argument best_free allows the specification of a current "best"
        free region with which to compare the best from this disk. The
        overall best region is returned.

        Arguments:

            disk -- the disk (a parted.Disk instance)
            part_type -- the type of partition we want to allocate
                         (one of parted's partition type constants)
            req_size -- the requested size of the partition (in MB)

        Keyword arguments:

            boot -- indicates whether this will be a bootable partition
                    (boolean)
            best_free -- current best free region for this partition
            grow -- indicates whether this is a growable request

    """
    log.debug("getBestFreeSpaceRegion: disk=%s part_type=%d req_size=%dMB "
              "boot=%s best=%s grow=%s" %
              (disk.device.path, part_type, req_size, boot, best_free, grow))
    extended = disk.getExtendedPartition()

    for _range in disk.getFreeSpaceRegions():
        if extended:
            # find out if there is any overlap between this region and the
            # extended partition
            log.debug("looking for intersection between extended (%d-%d) and free (%d-%d)" %
                    (extended.geometry.start, extended.geometry.end, _range.start, _range.end))

            # parted.Geometry.overlapsWith can handle this
            try:
                free_geom = extended.geometry.intersect(_range)
            except ArithmeticError, e:
                # this freespace region does not lie within the extended
                # partition's geometry
                free_geom = None

            # reject: primaries inside the extended, logicals outside it
            if (free_geom and part_type == parted.PARTITION_NORMAL) or \
               (not free_geom and part_type == parted.PARTITION_LOGICAL):
                log.debug("free region not suitable for request")
                continue

            if part_type == parted.PARTITION_NORMAL:
                # we're allocating a primary and the region is not within
                # the extended, so we use the original region
                free_geom = _range
        else:
            free_geom = _range

        if free_geom.start > disk.maxPartitionStartSector:
            log.debug("free range start sector beyond max for new partitions")
            continue

        if boot:
            # keep bootable requests fully below 2TB (2*1024*1024 MB)
            free_start_mb = sectorsToSize(free_geom.start,
                                          disk.device.sectorSize)
            req_end_mb = free_start_mb + req_size
            if req_end_mb > 2*1024*1024:
                log.debug("free range position would place boot req above 2TB")
                continue

        log.debug("current free range is %d-%d (%dMB)" % (free_geom.start,
                                                          free_geom.end,
                                                          free_geom.getSize()))
        free_size = free_geom.getSize()

        # For boot partitions, we want the first suitable region we find.
        # For growable or extended partitions, we want the largest possible
        # free region.
        # For all others, we want the smallest suitable free region.
        if grow or part_type == parted.PARTITION_EXTENDED:
            op = gt
        else:
            op = lt
        if req_size <= free_size:
            if not best_free or op(free_geom.length, best_free.length):
                best_free = free_geom

                if boot:
                    # if this is a bootable partition we want to
                    # use the first freespace region large enough
                    # to satisfy the request
                    break

    return best_free
770 |
|
|
|
771 |
|
|
def sectorsToSize(sectors, sectorSize):
    """ Convert length in sectors to size in MB.

        Arguments:

            sectors - sector count
            sectorSize - sector size for the device, in bytes
    """
    # multiply out to bytes first, then scale down to megabytes as a float
    byte_count = sectors * sectorSize
    return byte_count / (1024.0 * 1024.0)
780 |
|
|
|
781 |
|
|
def sizeToSectors(size, sectorSize):
    """ Convert size in MB to length in sectors.

        Arguments:

            size - size in MB
            sectorSize - sector size for the device, in bytes
    """
    # scale the megabyte count up to bytes, then divide by the sector size;
    # the result is a float, matching the original behavior
    byte_count = size * 1024.0 * 1024.0
    return byte_count / sectorSize
790 |
|
|
|
791 |
|
|
def removeNewPartitions(disks, partitions):
    """ Remove newly added input partitions from input disks.

        Arguments:

            disks -- list of StorageDevice instances with DiskLabel format
            partitions -- list of PartitionDevice instances

        Only partitions that do not physically exist on disk are removed
        from their parted disklabels; preexisting partitions are untouched.
        Removal happens in two passes: ordinary partitions first, then any
        extended partition that is left without logicals.
    """
    log.debug("removing all non-preexisting partitions %s from disk(s) %s"
                % (["%s(id %d)" % (p.name, p.id) for p in partitions
                                                    if not p.exists],
                   [d.name for d in disks]))
    # first pass: remove new normal/logical partitions from the parted disks
    for part in partitions:
        if part.partedPartition and part.disk in disks:
            if part.exists:
                # we're only removing partitions that don't physically exist
                continue

            if part.isExtended:
                # these get removed last
                continue

            part.disk.format.partedDisk.removePartition(part.partedPartition)
            # detach the request from the disk so it can be re-allocated
            part.partedPartition = None
            part.disk = None

    # second pass: drop any extended partition that now holds no logicals
    for disk in disks:
        # remove empty extended so it doesn't interfere
        extended = disk.format.extendedPartition
        if extended and not disk.format.logicalPartitions:
            log.debug("removing empty extended partition from %s" % disk.name)
            disk.format.partedDisk.removePartition(extended)
824 |
|
|
|
825 |
|
|
def addPartition(disklabel, free, part_type, size):
    """ Return new partition after adding it to the specified disk.

        Arguments:

            disklabel -- disklabel instance to add partition to
            free -- where to add the partition (parted.Geometry instance)
            part_type -- partition type (parted.PARTITION_* constant)
            size -- size (in MB) of the new partition; ignored for
                    extended partitions, which consume the whole region

        The new partition will be aligned.

        Return value is a parted.Partition instance.

        Raises PartitioningError if alignment leaves no usable range or
        the requested size exceeds the disklabel's maximum.
    """
    # align the start sector within the free region
    start = free.start
    if not disklabel.alignment.isAligned(free, start):
        start = disklabel.alignment.alignNearest(free, start)

    if part_type == parted.PARTITION_LOGICAL:
        # make room for logical partition's metadata
        start += disklabel.alignment.grainSize

    if start != free.start:
        log.debug("adjusted start sector from %d to %d" % (free.start, start))

    if part_type == parted.PARTITION_EXTENDED:
        # an extended partition consumes the entire free region
        end = free.end
        length = end - start + 1
    else:
        # size is in MB
        length = sizeToSectors(size, disklabel.partedDevice.sectorSize)
        end = start + length - 1

    if not disklabel.endAlignment.isAligned(free, end):
        end = disklabel.endAlignment.alignNearest(free, end)
        log.debug("adjusted length from %d to %d" % (length, end - start + 1))
        if start > end:
            # end alignment pushed the end before the start -- no room
            raise PartitioningError("unable to allocate aligned partition")

    new_geom = parted.Geometry(device=disklabel.partedDevice,
                               start=start,
                               end=end)

    max_length = disklabel.partedDisk.maxPartitionLength
    if max_length and new_geom.length > max_length:
        raise PartitioningError("requested size exceeds maximum allowed")

    # create the partition and add it to the disk
    partition = parted.Partition(disk=disklabel.partedDisk,
                                 type=part_type,
                                 geometry=new_geom)
    # exactGeom pins the partition to the geometry we just computed
    constraint = parted.Constraint(exactGeom=new_geom)
    disklabel.partedDisk.addPartition(partition=partition,
                                      constraint=constraint)
    return partition
881 |
|
|
|
882 |
|
|
def getFreeRegions(disks):
    """ Return a list of free regions on the specified disks.

        Arguments:

            disks -- list of StorageDevice instances with DiskLabel format

        Return value is a list of unaligned parted.Geometry instances.
    """
    regions = []
    for disk in disks:
        # collect only non-empty free-space regions from each disklabel
        regions.extend(region for region in
                       disk.format.partedDisk.getFreeSpaceRegions()
                       if region.length > 0)

    return regions
899 |
|
|
|
900 |
|
|
def updateExtendedPartitions(storage, disks):
    """ Reconcile the device tree with each disk's extended partition.

        Arguments:

            storage -- Storage instance owning the devicetree
            disks -- list of StorageDevice instances with DiskLabel format

        Removes obsolete extended-partition devices from the tree and adds
        a PartitionDevice for any extended partition that exists on disk
        but is not yet represented in the tree.
    """
    # XXX hack -- if we created any extended partitions we need to add
    #             them to the tree now
    for disk in disks:
        extended = disk.format.extendedPartition
        if not extended:
            # remove any obsolete extended partitions
            for part in storage.partitions:
                if part.disk == disk and part.isExtended:
                    if part.exists:
                        storage.destroyDevice(part)
                    else:
                        storage.devicetree._removeDevice(part, moddisk=False)
            continue

        extendedName = devicePathToName(extended.getDeviceNodeName())
        # remove any obsolete extended partitions
        for part in storage.partitions:
            if part.disk == disk and part.isExtended and \
               part.partedPartition not in disk.format.partitions:
                if part.exists:
                    storage.destroyDevice(part)
                else:
                    storage.devicetree._removeDevice(part, moddisk=False)

        device = storage.devicetree.getDeviceByName(extendedName)
        if device:
            if not device.exists:
                # created by us, update partedPartition
                device.partedPartition = extended
            continue

        # This is a little odd because normally instantiating a partition
        # that does not exist means leaving self.parents empty and instead
        # populating self.req_disks.  In this case, we need to skip past
        # that since this partition is already defined.
        device = PartitionDevice(extendedName, parents=disk)
        device.parents = [disk]
        device.partedPartition = extended
        # just add the device for now -- we'll handle actions at the last
        # moment to simplify things
        storage.devicetree._addDevice(device)
942 |
|
|
|
943 |
|
|
def doPartitioning(storage, exclusiveDisks=None):
    """ Allocate and grow partitions.

        When this function returns without error, all PartitionDevice
        instances must have their parents set to the disk they are
        allocated on, and their partedPartition attribute set to the
        appropriate parted.Partition instance from their containing
        disk. All req_xxxx attributes must be unchanged.

        Arguments:

            storage - Main anaconda Storage instance

        Keyword arguments:

            exclusiveDisks -- list of names of disks to use

        Raises StorageError if platform data is unavailable and
        PartitioningError if a disk cannot be set up or allocation fails.
    """
    anaconda = storage.anaconda
    if not hasattr(anaconda.platform, "diskLabelTypes"):
        raise StorageError("can't allocate partitions without platform data")

    disks = storage.partitioned
    if exclusiveDisks:
        disks = [d for d in disks if d.name in exclusiveDisks]

    # make sure each disk's device node is accessible before touching it
    for disk in disks:
        try:
            disk.setup()
        except DeviceError as (msg, name):
            log.error("failed to set up disk %s: %s" % (name, msg))
            raise PartitioningError("disk %s inaccessible" % disk.name)

    partitions = storage.partitions[:]
    for part in storage.partitions:
        # req_bootable is cleared on every partition here and re-derived
        # below from anaconda.platform.bootDevice()
        part.req_bootable = False

        if part.exists or \
           (storage.deviceImmutable(part) and part.partedPartition):
            # if the partition is preexisting or part of a complex device
            # then we shouldn't modify it
            partitions.remove(part)
            continue

        if not part.exists:
            # start over with flexible-size requests
            part.req_size = part.req_base_size

    # FIXME: isn't there a better place for this to happen?
    try:
        bootDev = anaconda.platform.bootDevice()
    except DeviceError:
        bootDev = None

    if bootDev:
        bootDev.req_bootable = True

    # turn off cylinder alignment
    for partedDisk in [d.format.partedDisk for d in disks]:
        if partedDisk.isFlagAvailable(parted.DISK_CYLINDER_ALIGNMENT):
            partedDisk.unsetFlag(parted.DISK_CYLINDER_ALIGNMENT)

    # clear any new (non-preexisting) partitions before re-allocating
    removeNewPartitions(disks, partitions)
    free = getFreeRegions(disks)

    try:
        allocatePartitions(storage, disks, partitions, free)
        growPartitions(disks, partitions, free)
    finally:
        # The number and thus the name of partitions may have changed now,
        # allocatePartitions() takes care of this for new partitions, but not
        # for pre-existing ones, so we update the name of all partitions here
        for part in storage.partitions:
            # leave extended partitions as-is -- we'll handle them separately
            if part.isExtended:
                continue
            part.updateName()

        updateExtendedPartitions(storage, disks)
1022 |
|
|
|
1023 |
|
|
def allocatePartitions(storage, disks, partitions, freespace):
    """ Allocate partitions based on requested features.

        Arguments:

            storage -- Main anaconda Storage instance
            disks -- list of candidate StorageDevice disks
            partitions -- list of PartitionDevice requests
            freespace -- list of parted.Geometry free regions

        Non-existing partitions are sorted according to their requested
        attributes, and then allocated.

        The basic approach to sorting is that the more specifically-
        defined a request is, the earlier it will be allocated. See
        the function partitionCompare for details on the sorting
        criteria.

        The PartitionDevice instances will have their name and parents
        attributes set once they have been allocated.
    """
    log.debug("allocatePartitions: disks=%s ; partitions=%s" %
                ([d.name for d in disks],
                 ["%s(id %d)" % (p.name, p.id) for p in partitions]))

    new_partitions = [p for p in partitions if not p.exists]
    new_partitions.sort(cmp=partitionCompare)

    # the following dicts all use device path strings as keys
    disklabels = {}     # DiskLabel instances for each disk
    all_disks = {}      # StorageDevice for each disk
    for disk in disks:
        if disk.path not in disklabels.keys():
            disklabels[disk.path] = disk.format
            all_disks[disk.path] = disk

    removeNewPartitions(disks, new_partitions)

    for _part in new_partitions:
        if _part.partedPartition and _part.isExtended:
            # ignore new extendeds as they are implicit requests
            continue

        # obtain the set of candidate disks
        req_disks = []
        if _part.disk:
            # we have a already selected a disk for this request
            req_disks = [_part.disk]
        elif _part.req_disks:
            # use the requested disk set
            req_disks = _part.req_disks
        else:
            # no disks specified means any disk will do
            req_disks = disks

        # sort the disks, making sure the boot disk is first
        req_disks.sort(key=lambda d: d.name, cmp=storage.compareDisks)
        boot_index = None
        for disk in req_disks:
            if disk.name in storage.anaconda.id.bootloader.drivelist and \
               disk.name == storage.anaconda.id.bootloader.drivelist[0]:
                boot_index = req_disks.index(disk)

        if boot_index is not None and len(req_disks) > 1:
            boot_disk = req_disks.pop(boot_index)
            req_disks.insert(0, boot_disk)

        # requests with weight above 1000 are treated as boot partitions
        boot = _part.req_base_weight > 1000

        log.debug("allocating partition: %s ; id: %d ; disks: %s ;\n"
                  "boot: %s ; primary: %s ; size: %dMB ; grow: %s ; "
                  "max_size: %s" % (_part.name, _part.id,
                                    [d.name for d in req_disks],
                                    boot, _part.req_primary,
                                    _part.req_size, _part.req_grow,
                                    _part.req_max_size))
        free = None
        use_disk = None
        part_type = None
        growth = 0
        # loop through disks
        for _disk in req_disks:
            disklabel = disklabels[_disk.path]
            sectorSize = disklabel.partedDevice.sectorSize
            best = None
            current_free = free

            # for growable requests, we don't want to pass the current free
            # geometry to getBestFreeRegion -- this allows us to try the
            # best region from each disk and choose one based on the total
            # growth it allows
            if _part.req_grow:
                current_free = None

            problem = _part.checkSize()
            if problem:
                raise PartitioningError("partition is too %s for %s formatting "
                                        "(allowable size is %d MB to %d MB)"
                                        % (problem, _part.format.name,
                                           _part.format.minSize,
                                           _part.format.maxSize))

            log.debug("checking freespace on %s" % _disk.name)

            new_part_type = getNextPartitionType(disklabel.partedDisk)
            if new_part_type is None:
                # can't allocate any more partitions on this disk
                log.debug("no free partition slots on %s" % _disk.name)
                continue

            if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
                if (disklabel.partedDisk.primaryPartitionCount <
                    disklabel.partedDisk.maxPrimaryPartitionCount):
                    # don't fail to create a primary if there are only three
                    # primary partitions on the disk (#505269)
                    new_part_type = parted.PARTITION_NORMAL
                else:
                    # we need a primary slot and none are free on this disk
                    log.debug("no primary slots available on %s" % _disk.name)
                    continue

            best = getBestFreeSpaceRegion(disklabel.partedDisk,
                                          new_part_type,
                                          _part.req_size,
                                          best_free=current_free,
                                          boot=boot,
                                          grow=_part.req_grow)

            if best == free and not _part.req_primary and \
               new_part_type == parted.PARTITION_NORMAL:
                # see if we can do better with a logical partition
                log.debug("not enough free space for primary -- trying logical")
                new_part_type = getNextPartitionType(disklabel.partedDisk,
                                                     no_primary=True)
                if new_part_type:
                    best = getBestFreeSpaceRegion(disklabel.partedDisk,
                                                  new_part_type,
                                                  _part.req_size,
                                                  best_free=current_free,
                                                  boot=boot,
                                                  grow=_part.req_grow)

            if best and free != best:
                update = True
                if _part.req_grow:
                    log.debug("evaluating growth potential for new layout")
                    new_growth = 0
                    for disk_path in disklabels.keys():
                        log.debug("calculating growth for disk %s" % disk_path)
                        # Now we check, for growable requests, which of the two
                        # free regions will allow for more growth.

                        # set up chunks representing the disks' layouts
                        temp_parts = []
                        for _p in new_partitions[:new_partitions.index(_part)]:
                            if _p.disk.path == disk_path:
                                temp_parts.append(_p)

                        # add the current request to the temp disk to set up
                        # its partedPartition attribute with a base geometry
                        if disk_path == _disk.path:
                            temp_part = addPartition(disklabel,
                                                     best,
                                                     new_part_type,
                                                     _part.req_size)
                            _part.partedPartition = temp_part
                            _part.disk = _disk
                            temp_parts.append(_part)

                        chunks = getDiskChunks(all_disks[disk_path],
                                               temp_parts, freespace)

                        # grow all growable requests
                        disk_growth = 0
                        disk_sector_size = disklabels[disk_path].partedDevice.sectorSize
                        for chunk in chunks:
                            chunk.growRequests()
                            # record the growth for this layout
                            new_growth += chunk.growth
                            disk_growth += chunk.growth
                            for req in chunk.requests:
                                log.debug("request %d (%s) growth: %d (%dMB) "
                                          "size: %dMB" %
                                          (req.partition.id,
                                           req.partition.name,
                                           req.growth,
                                           sectorsToSize(req.growth,
                                                         disk_sector_size),
                                           sectorsToSize(req.growth + req.base,
                                                         disk_sector_size)))
                        log.debug("disk %s growth: %d (%dMB)" %
                                  (disk_path, disk_growth,
                                   sectorsToSize(disk_growth,
                                                 disk_sector_size)))

                    # the trial allocation was only for sizing -- undo it
                    disklabel.partedDisk.removePartition(temp_part)
                    _part.partedPartition = None
                    _part.disk = None

                    log.debug("total growth: %d sectors" % new_growth)

                    # update the chosen free region unless the previous
                    # choice yielded greater total growth
                    if new_growth < growth:
                        log.debug("keeping old free: %d < %d" % (new_growth,
                                                                 growth))
                        update = False
                    else:
                        growth = new_growth

                if update:
                    # now we know we are choosing a new free space,
                    # so update the disk and part type
                    log.debug("updating use_disk to %s (%s), type: %s"
                                % (_disk, _disk.name, new_part_type))
                    part_type = new_part_type
                    use_disk = _disk
                    log.debug("new free: %s (%d-%d / %dMB)" % (best,
                                                               best.start,
                                                               best.end,
                                                               best.getSize()))
                    log.debug("new free allows for %d sectors of growth" %
                                growth)
                    free = best

            if free and boot:
                # if this is a bootable partition we want to
                # use the first freespace region large enough
                # to satisfy the request
                log.debug("found free space for bootable request")
                break

        if free is None:
            raise PartitioningError("not enough free space on disks")

        _disk = use_disk
        disklabel = _disk.format

        # create the extended partition if needed
        if part_type == parted.PARTITION_EXTENDED:
            log.debug("creating extended partition")
            addPartition(disklabel, free, part_type, None)

            # now the extended partition exists, so set type to logical
            part_type = parted.PARTITION_LOGICAL

            # recalculate freespace
            log.debug("recalculating free space")
            free = getBestFreeSpaceRegion(disklabel.partedDisk,
                                          part_type,
                                          _part.req_size,
                                          boot=boot,
                                          grow=_part.req_grow)
            if not free:
                raise PartitioningError("not enough free space after "
                                        "creating extended partition")

        partition = addPartition(disklabel, free, part_type, _part.req_size)
        log.debug("created partition %s of %dMB and added it to %s" %
                (partition.getDeviceNodeName(), partition.getSize(),
                 disklabel.device))

        # this one sets the name
        _part.partedPartition = partition
        _part.disk = _disk

        # parted modifies the partition in the process of adding it to
        # the disk, so we need to grab the latest version...
        _part.partedPartition = disklabel.partedDisk.getPartitionByPath(_part.path)
1285 |
|
|
|
1286 |
|
|
|
1287 |
|
|
class Request(object):
    """ A partition request.

        Request instances are used for calculating how much to grow
        partitions.
    """
    def __init__(self, partition):
        """ Create a Request instance.

            Arguments:

                partition -- a PartitionDevice instance

        """
        self.partition = partition          # storage.devices.PartitionDevice
        self.growth = 0                     # growth in sectors
        self.max_growth = 0                 # max growth in sectors
        self.done = not partition.req_grow  # can we grow this request more?
        self.base = partition.partedPartition.geometry.length   # base sectors

        sector_size = partition.partedPartition.disk.device.sectorSize

        if partition.req_grow:
            # Collect all applicable upper bounds on the final size, in
            # sectors, ignoring unset (non-positive) limits. A list
            # comprehension is used instead of filter() so the truthiness
            # test below also behaves correctly under Python 3, where
            # filter() returns an always-truthy iterator.
            limits = [l for l in
                      [sizeToSectors(partition.req_max_size, sector_size),
                       sizeToSectors(partition.format.maxSize, sector_size),
                       partition.partedPartition.disk.maxPartitionLength]
                      if l > 0]

            if limits:
                max_sectors = min(limits)
                self.max_growth = max_sectors - self.base
                if self.max_growth <= 0:
                    # max size is less than or equal to base, so we're done
                    self.done = True

    @property
    def growable(self):
        """ True if this request is growable. """
        return self.partition.req_grow

    @property
    def id(self):
        """ The id of the PartitionDevice this request corresponds to. """
        return self.partition.id

    def __str__(self):
        s = ("%(type)s instance --\n"
             "id = %(id)s name = %(name)s growable = %(growable)s\n"
             "base = %(base)d growth = %(growth)d max_grow = %(max_grow)d\n"
             "done = %(done)s" %
             {"type": self.__class__.__name__, "id": self.id,
              "name": self.partition.name, "growable": self.growable,
              "base": self.base, "growth": self.growth,
              "max_grow": self.max_growth, "done": self.done})
        return s
1342 |
|
|
|
1343 |
|
|
|
1344 |
|
|
class Chunk(object): |
1345 |
|
|
""" A free region on disk from which partitions will be allocated """ |
1346 |
|
|
    def __init__(self, geometry, requests=None):
        """ Create a Chunk instance.

            Arguments:

                geometry -- parted.Geometry instance describing the free space


            Keyword Arguments:

                requests -- list of Request instances allocated from this chunk


            Note: We will limit partition growth based on disklabel
                  limitations for partition end sector, so a 10TB disk with an
                  msdos disklabel will be treated like a 2TB disk.

        """
        self.geometry = geometry            # parted.Geometry
        self.pool = self.geometry.length    # free sector count
        self.sectorSize = self.geometry.device.sectorSize
        self.base = 0                       # sum of growable requests' base
                                            # sizes, in sectors
        self.requests = []                  # list of Request instances
        # adding via addRequest keeps pool/base accounting consistent
        if isinstance(requests, list):
            for req in requests:
                self.addRequest(req)
1373 |
|
|
|
1374 |
|
|
def __str__(self): |
1375 |
|
|
s = ("%(type)s instance --\n" |
1376 |
|
|
"device = %(device)s start = %(start)d end = %(end)d\n" |
1377 |
|
|
"length = %(length)d size = %(size)d pool = %(pool)d\n" |
1378 |
|
|
"remaining = %(rem)d sectorSize = %(sectorSize)d" % |
1379 |
|
|
{"type": self.__class__.__name__, |
1380 |
|
|
"device": self.geometry.device.path, |
1381 |
|
|
"start": self.geometry.start, "end": self.geometry.end, |
1382 |
|
|
"length": self.geometry.length, "size": self.geometry.getSize(), |
1383 |
|
|
"pool": self.pool, "rem": self.remaining, |
1384 |
|
|
"sectorSize": self.sectorSize}) |
1385 |
|
|
|
1386 |
|
|
return s |
1387 |
|
|
|
1388 |
|
|
    def addRequest(self, req):
        """ Add a Request to this chunk.

            Arguments:

                req -- a Request instance

            The request's base size is claimed from the pool immediately;
            only growth beyond base competes for the remaining pool.
        """
        log.debug("adding request %d to chunk %s" % (req.partition.id, self))
        if not self.requests:
            # when adding the first request to the chunk, adjust the pool
            # size to reflect any disklabel-specific limits on end sector
            max_sector = req.partition.partedPartition.disk.maxPartitionStartSector
            chunk_end = min(max_sector, self.geometry.end)
            if chunk_end <= self.geometry.start:
                # this should clearly never be possible, but if the chunk's
                # start sector is beyond the maximum allowed end sector, we
                # cannot continue
                log.error("chunk start sector is beyond disklabel maximum")
                raise PartitioningError("partitions allocated outside "
                                        "disklabel limits")

            new_pool = chunk_end - self.geometry.start + 1
            if new_pool != self.pool:
                log.debug("adjusting pool to %d based on disklabel limits"
                            % new_pool)
                self.pool = new_pool

        self.requests.append(req)
        self.pool -= req.base

        # only still-growable requests contribute to base, which is used
        # in growRequests to compute each request's share of the pool
        if not req.done:
            self.base += req.base
1415 |
|
|
|
1416 |
|
|
def getRequestByID(self, id): |
1417 |
|
|
""" Retrieve a request from this chunk based on its id. """ |
1418 |
|
|
for request in self.requests: |
1419 |
|
|
if request.id == id: |
1420 |
|
|
return request |
1421 |
|
|
|
1422 |
|
|
@property |
1423 |
|
|
def growth(self): |
1424 |
|
|
""" Sum of growth in sectors for all requests in this chunk. """ |
1425 |
|
|
return sum(r.growth for r in self.requests) |
1426 |
|
|
|
1427 |
|
|
@property |
1428 |
|
|
def hasGrowable(self): |
1429 |
|
|
""" True if this chunk contains at least one growable request. """ |
1430 |
|
|
for req in self.requests: |
1431 |
|
|
if req.growable: |
1432 |
|
|
return True |
1433 |
|
|
return False |
1434 |
|
|
|
1435 |
|
|
@property |
1436 |
|
|
def remaining(self): |
1437 |
|
|
""" Number of requests still being grown in this chunk. """ |
1438 |
|
|
return len([d for d in self.requests if not d.done]) |
1439 |
|
|
|
1440 |
|
|
@property |
1441 |
|
|
def done(self): |
1442 |
|
|
""" True if we are finished growing all requests in this chunk. """ |
1443 |
|
|
return self.remaining == 0 |
1444 |
|
|
|
1445 |
|
|
    def trimOverGrownRequest(self, req, base=None):
        """ Enforce max growth and return extra sectors to the pool.

            Arguments:

                req -- the Request to check against its growth limits

            Keyword Arguments:

                base -- running total of growable base sectors, or None

            Returns the (possibly reduced) base: when req hits its maximum
            it is marked done and its base no longer counts toward the
            growable total.
        """
        req_end = req.partition.partedPartition.geometry.end
        req_start = req.partition.partedPartition.geometry.start

        # Establish the current total number of sectors of growth for requests
        # that lie before this one within this chunk. We add the total count
        # to this request's end sector to obtain the end sector for this
        # request, including growth of earlier requests but not including
        # growth of this request. Maximum growth values are obtained using
        # this end sector and various values for maximum end sector.
        growth = 0
        for request in self.requests:
            if request.partition.partedPartition.geometry.start < req_start:
                growth += request.growth
        req_end += growth

        # obtain the set of possible maximum sectors-of-growth values for this
        # request and use the smallest
        limits = []

        # disklabel-specific maximum sector
        max_sector = req.partition.partedPartition.disk.maxPartitionStartSector
        limits.append(max_sector - req_end)

        # 2TB limit on bootable partitions, regardless of disklabel
        if req.partition.req_bootable:
            limits.append(sizeToSectors(2*1024*1024, self.sectorSize) - req_end)

        # request-specific maximum (see Request.__init__, above, for details)
        if req.max_growth:
            limits.append(req.max_growth)

        max_growth = min(limits)

        if max_growth and req.growth >= max_growth:
            if req.growth > max_growth:
                # we've grown beyond the maximum. put some back.
                extra = req.growth - max_growth
                log.debug("taking back %d (%dMB) from %d (%s)" %
                            (extra,
                             sectorsToSize(extra, self.sectorSize),
                             req.partition.id, req.partition.name))
                self.pool += extra
                req.growth = max_growth

            # We're done growing this partition, so it no longer
            # factors into the growable base used to determine
            # what fraction of the pool each request gets.
            if base is not None:
                base -= req.base
            req.done = True

        return base
1499 |
|
|
|
1500 |
|
|
def growRequests(self):
    """ Calculate growth amounts for requests in this chunk.

        Distributes this chunk's free-sector pool (self.pool) among its
        growable requests.  Each pass gives every unfinished request a
        share of the pool proportional to its base size, then trims any
        request that exceeded its maximum via trimOverGrownRequest
        (which returns trimmed sectors to the pool).  Passes repeat
        until all requests are done, the pool is empty, or a pass makes
        no progress.  Any residual sectors are handed to the first
        request that can still accept them.
    """
    log.debug("Chunk.growRequests: %s" % self)

    # sort the partitions by start sector
    self.requests.sort(key=lambda r: r.partition.partedPartition.geometry.start)

    # we use this to hold the base for the next loop through the
    # chunk's requests since we want the base to be the same for
    # all requests in any given growth iteration
    new_base = self.base
    last_pool = 0 # used to track changes to the pool across iterations
    # Loop guard: stop when everything is done, the pool is exhausted,
    # or the pool didn't change in the previous pass (no progress).
    while not self.done and self.pool and last_pool != self.pool:
        last_pool = self.pool    # to keep from getting stuck
        self.base = new_base
        log.debug("%d partitions and %d (%dMB) left in chunk" %
                    (self.remaining, self.pool,
                     sectorsToSize(self.pool, self.sectorSize)))
        for p in self.requests:
            if p.done:
                continue

            # Each partition is allocated free sectors from the pool
            # based on the relative _base_ sizes of the remaining
            # growable partitions.
            share = p.base / float(self.base)
            growth = int(share * last_pool) # truncate, don't round
            p.growth += growth
            self.pool -= growth
            log.debug("adding %d (%dMB) to %d (%s)" %
                        (growth,
                         sectorsToSize(growth, self.sectorSize),
                         p.partition.id, p.partition.name))

            # Trim back any over-growth; returns the adjusted growable
            # base for the NEXT pass (requests that hit their max stop
            # counting toward the proportional shares).
            new_base = self.trimOverGrownRequest(p, base=new_base)
            log.debug("new grow amount for partition %d (%s) is %d "
                      "sectors, or %dMB" %
                        (p.partition.id, p.partition.name, p.growth,
                         sectorsToSize(p.growth, self.sectorSize)))

    if self.pool:
        # allocate any leftovers in pool to the first partition
        # that can still grow
        for p in self.requests:
            if p.done:
                continue

            # Give this request the whole remaining pool; the trim call
            # below may push some of it back into self.pool, in which
            # case the next request gets a shot at the remainder.
            p.growth += self.pool
            self.pool = 0

            self.trimOverGrownRequest(p)
            if self.pool == 0:
                break
1555 |
|
|
def getDiskChunks(disk, partitions, free):
    """ Return a list of Chunk instances representing a disk.

        Arguments:

            disk -- a StorageDevice with a DiskLabel format
            partitions -- list of PartitionDevice instances
            free -- list of parted.Geometry instances representing free space

        Partitions and free regions not on the specified disk are ignored.

    """
    # free regions residing on this disk; one Chunk is created per region,
    # with chunks[i] corresponding to regions[i]
    regions = [f for f in free if f.device.path == disk.path]
    chunks = []
    for region in regions:
        chunks.append(Chunk(region))

    # hand each new (non-existing) partition on this disk to the chunk
    # whose free region contains the partition's geometry
    for part in partitions:
        if not (part.disk == disk and not part.exists):
            continue

        if part.isExtended:
            # handle extended partitions specially since they are
            # indeed very special
            continue

        for region, chunk in zip(regions, chunks):
            if region.contains(part.partedPartition.geometry):
                chunk.addRequest(Request(part))
                break

    return chunks
1587 |
|
|
def growPartitions(disks, partitions, free):
    """ Grow all growable partition requests.

        Partitions have already been allocated from chunks of free space on
        the disks. This function does not modify the ordering of partitions
        or the free chunks from which they are allocated.

        Free space within a given chunk is allocated to each growable
        partition allocated from that chunk in an amount corresponding to
        the ratio of that partition's base size to the sum of the base sizes
        of all growable partitions allocated from the chunk.

        Arguments:

            disks -- a list of all usable disks (DiskDevice instances)
            partitions -- a list of all partitions (PartitionDevice instances)
            free -- a list of all free regions (parted.Geometry instances)
    """
    log.debug("growPartitions: disks=%s, partitions=%s" %
                ([d.name for d in disks],
                 ["%s(id %d)" % (p.name, p.id) for p in partitions]))
    all_growable = [p for p in partitions if p.req_grow]
    if not all_growable:
        log.debug("no growable partitions")
        return

    log.debug("growable partitions are %s" % [p.name for p in all_growable])

    for disk in disks:
        log.debug("growing partitions on %s" % disk.name)
        # NOTE(review): sector_size is computed but never used below.
        sector_size = disk.format.partedDevice.sectorSize

        # find any extended partition on this disk
        extended_geometry = getattr(disk.format.extendedPartition,
                                    "geometry",
                                    None)  # parted.Geometry

        # list of free space regions on this disk prior to partition allocation
        disk_free = [f for f in free if f.device.path == disk.path]
        if not disk_free:
            log.debug("no free space on %s" % disk.name)
            continue

        chunks = getDiskChunks(disk, partitions, disk_free)
        log.debug("disk %s has %d chunks" % (disk.name, len(chunks)))
        # grow the partitions in each chunk as a group
        for chunk in chunks:
            if not chunk.hasGrowable:
                # no growable partitions in this chunk
                continue

            # compute per-request growth amounts (in sectors)
            chunk.growRequests()

            # recalculate partition geometries, packing them end-to-end
            # from the start of the chunk
            disklabel = disk.format
            start = chunk.geometry.start
            # align start sector as needed
            if not disklabel.alignment.isAligned(chunk.geometry, start):
                start = disklabel.alignment.alignUp(chunk.geometry, start)
            new_partitions = []   # list of (parted.Partition, PartitionDevice)
            for p in chunk.requests:
                ptype = p.partition.partedPartition.type
                log.debug("partition %s (%d): %s" % (p.partition.name,
                                                     p.partition.id, ptype))
                if ptype == parted.PARTITION_EXTENDED:
                    continue

                # XXX since we need one metadata sector before each
                # logical partition we burn one logical block to
                # safely align the start of each logical partition
                if ptype == parted.PARTITION_LOGICAL:
                    start += disklabel.alignment.grainSize

                # NOTE(review): old_geometry is never used after this point.
                old_geometry = p.partition.partedPartition.geometry
                new_length = p.base + p.growth
                end = start + new_length - 1
                # align end sector as needed
                if not disklabel.endAlignment.isAligned(chunk.geometry, end):
                    end = disklabel.endAlignment.alignDown(chunk.geometry, end)
                new_geometry = parted.Geometry(device=disklabel.partedDevice,
                                               start=start,
                                               end=end)
                log.debug("new geometry for %s: %s" % (p.partition.name,
                                                       new_geometry))
                # next partition begins right after this one
                start = end + 1
                new_partition = parted.Partition(disk=disklabel.partedDisk,
                                                 type=ptype,
                                                 geometry=new_geometry)
                new_partitions.append((new_partition, p.partition))

            # remove all new partitions from this chunk
            removeNewPartitions([disk], [r.partition for r in chunk.requests])
            log.debug("back from removeNewPartitions")

            # adjust the extended partition as needed
            # we will only resize an extended partition that we created
            log.debug("extended: %s" % extended_geometry)
            if extended_geometry and \
               chunk.geometry.contains(extended_geometry):
                log.debug("setting up new geometry for extended on %s" % disk.name)
                # find the lowest start sector among the logical partitions
                ext_start = 0
                for (partition, device) in new_partitions:
                    if partition.type != parted.PARTITION_LOGICAL:
                        continue

                    if not ext_start or partition.geometry.start < ext_start:
                        # account for the logical block difference in start
                        # sector for the extended -v- first logical
                        # (partition.geometry.start is already aligned)
                        ext_start = partition.geometry.start - disklabel.alignment.grainSize

                # extended spans from just before the first logical to the
                # end of the chunk
                new_geometry = parted.Geometry(device=disklabel.partedDevice,
                                               start=ext_start,
                                               end=chunk.geometry.end)
                log.debug("new geometry for extended: %s" % new_geometry)
                new_extended = parted.Partition(disk=disklabel.partedDisk,
                                                type=parted.PARTITION_EXTENDED,
                                                geometry=new_geometry)
                # insert the extended partition immediately before the first
                # logical so it is added to the disk first, below
                ptypes = [p.type for (p, d) in new_partitions]
                for pt_idx, ptype in enumerate(ptypes):
                    if ptype == parted.PARTITION_LOGICAL:
                        new_partitions.insert(pt_idx, (new_extended, None))
                        break

            # add the partitions with their new geometries to the disk
            for (partition, device) in new_partitions:
                if device:
                    name = device.name
                else:
                    # If there was no extended partition on this disk when
                    # doPartitioning was called we won't have a
                    # PartitionDevice instance for it.
                    name = partition.getDeviceNodeName()

                log.debug("setting %s new geometry: %s" % (name,
                                                           partition.geometry))
                constraint = parted.Constraint(exactGeom=partition.geometry)
                disklabel.partedDisk.addPartition(partition=partition,
                                                  constraint=constraint)
                path = partition.path
                if device:
                    # set the device's name
                    device.partedPartition = partition
                    # without this, the path attr will be a basename. eek.
                    device.disk = disk

                    # make sure we store the disk's version of the partition
                    newpart = disklabel.partedDisk.getPartitionByPath(path)
                    device.partedPartition = newpart
1738 |
|
|
def lvCompare(lv1, lv2):
    """ Sort comparator: more specifically defined lvs come first.

        Returns the conventional three-way comparison result:

            -1 => lv1 sorts before lv2
             0 => lv1 and lv2 are equivalent
             1 => lv1 sorts after lv2

        Ordering criteria, from strongest to weakest: larger requests
        first, then fixed-size (non-growable) requests, then growable
        requests with a larger (or no) maximum size.
    """
    def _cmp(a, b):
        # portable three-way compare; equivalent to the Python 2
        # builtin cmp() but also works on Python 3
        return (a > b) - (a < b)

    ret = 0

    # larger requests go to the front of the list
    ret -= _cmp(lv1.size, lv2.size) * 100

    # fixed size requests to the front
    ret += _cmp(lv1.req_grow, lv2.req_grow) * 50

    # potentially larger growable requests go to the front
    if lv1.req_grow and lv2.req_grow:
        if not lv1.req_max_size and lv2.req_max_size:
            # unbounded growth beats bounded growth
            ret -= 25
        elif lv1.req_max_size and not lv2.req_max_size:
            ret += 25
        else:
            ret -= _cmp(lv1.req_max_size, lv2.req_max_size) * 25

    # collapse the weighted score to -1/0/1
    if ret > 0:
        ret = 1
    elif ret < 0:
        ret = -1

    return ret
1769 |
|
|
def growLVM(storage):
    """ Grow LVs according to the sizes of the PVs.

        For each VG: percentage-based LV requests are satisfied first
        (as a fraction of total VG free space), then the remaining free
        space is distributed among other growable LVs in proportion to
        their requested sizes, and finally any residual free space is
        given to the first LV that can still grow.
    """
    for vg in storage.vgs:
        total_free = vg.freeSpace
        if total_free < 0:
            # by now we have allocated the PVs so if there isn't enough
            # space in the VG we have a real problem
            raise PartitioningError("not enough space for LVM requests")
        elif not total_free:
            log.debug("vg %s has no free space" % vg.name)
            continue

        log.debug("vg %s: %dMB free ; lvs: %s" % (vg.name, vg.freeSpace,
                                                  [l.lvname for l in vg.lvs]))

        # figure out how much to grow each LV
        grow_amounts = {}   # maps lv.name -> extent-aligned growth (MB)
        lv_total = vg.size - total_free
        log.debug("used: %dMB ; vg.size: %dMB" % (lv_total, vg.size))

        # This first loop is to calculate percentage-based growth
        # amounts. These are based on total free space.
        lvs = vg.lvs
        # NOTE(review): cmp= keyword is Python 2 only.
        lvs.sort(cmp=lvCompare)
        for lv in lvs:
            if not lv.req_grow or not lv.req_percent:
                continue

            portion = (lv.req_percent * 0.01)
            grow = portion * vg.freeSpace
            new_size = lv.req_size + grow
            # cap growth at the request's maximum, if any
            if lv.req_max_size and new_size > lv.req_max_size:
                grow -= (new_size - lv.req_max_size)

            # cap growth at the filesystem's maximum size, if any
            if lv.format.maxSize and lv.format.maxSize < new_size:
                grow -= (new_size - lv.format.maxSize)

            # clamp growth amount to a multiple of vg extent size
            grow_amounts[lv.name] = vg.align(grow)
            # NOTE(review): the bookkeeping below uses the unaligned
            # grow value while the recorded amount is aligned.
            total_free -= grow
            lv_total += grow

        # This second loop is to calculate non-percentage-based growth
        # amounts. These are based on free space remaining after
        # calculating percentage-based growth amounts.

        # keep a tab on space not allocated due to format or requested
        # maximums -- we'll dole it out to subsequent requests
        leftover = 0
        for lv in lvs:
            log.debug("checking lv %s: req_grow: %s ; req_percent: %s"
                      % (lv.name, lv.req_grow, lv.req_percent))
            if not lv.req_grow or lv.req_percent:
                continue

            # share of the remaining free space proportional to this
            # lv's requested size relative to total allocated space
            portion = float(lv.req_size) / float(lv_total)
            grow = portion * total_free
            log.debug("grow is %dMB" % grow)

            # requests not yet processed (lv itself included, so the
            # denominator below is always > 0 for a growable lv)
            todo = lvs[lvs.index(lv):]
            unallocated = reduce(lambda x,y: x+y,
                                 [l.req_size for l in todo
                                    if l.req_grow and not l.req_percent])
            extra_portion = float(lv.req_size) / float(unallocated)
            extra = extra_portion * leftover
            log.debug("%s getting %dMB (%d%%) of %dMB leftover space"
                      % (lv.name, extra, extra_portion * 100, leftover))
            leftover -= extra
            grow += extra
            log.debug("grow is now %dMB" % grow)
            max_size = lv.req_size + grow
            if lv.req_max_size and max_size > lv.req_max_size:
                max_size = lv.req_max_size

            if lv.format.maxSize and max_size > lv.format.maxSize:
                max_size = lv.format.maxSize

            log.debug("max size is %dMB" % max_size)
            # NOTE(review): the next line is a no-op self-assignment,
            # most likely a leftover from an earlier edit.
            max_size = max_size
            # space we couldn't use due to the caps goes back into the
            # leftover pot for subsequent requests
            leftover += (lv.req_size + grow) - max_size
            grow = max_size - lv.req_size
            log.debug("lv %s gets %dMB" % (lv.name, vg.align(grow)))
            grow_amounts[lv.name] = vg.align(grow)

        if not grow_amounts:
            log.debug("no growable lvs in vg %s" % vg.name)
            continue

        # now grow the lvs by the amounts we've calculated above
        for lv in lvs:
            if lv.name not in grow_amounts.keys():
                continue
            lv.size += grow_amounts[lv.name]

        # now there shouldn't be any free space left, but if there is we
        # should allocate it to one of the LVs
        vg_free = vg.freeSpace
        log.debug("vg %s has %dMB free" % (vg.name, vg_free))
        if vg_free:
            for lv in lvs:
                if not lv.req_grow:
                    continue

                if lv.req_percent > 0:
                    continue

                # already at its requested maximum
                if lv.req_max_size and lv.size == lv.req_max_size:
                    continue

                # already at the filesystem's maximum
                if lv.format.maxSize and lv.size == lv.format.maxSize:
                    continue

                # first come, first served
                projected = lv.size + vg.freeSpace
                if lv.req_max_size and projected > lv.req_max_size:
                    projected = lv.req_max_size

                if lv.format.maxSize and projected > lv.format.maxSize:
                    projected = lv.format.maxSize

                log.debug("giving leftover %dMB to %s" % (projected - lv.size,
                                                          lv.name))
                lv.size = projected