// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991-1998  Linus Torvalds
 *  Re-organised Feb 1998 Russell King
 *  Copyright (C) 2020 Christoph Hellwig
 */
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/vmalloc.h>
#include <linux/raid/detect.h>
#include "check.h"
static int (*const check_part[])(struct parsed_partitions *) = {
	/*
	 * Probe partition formats with tables at disk address 0
	 * that also have an ADFS boot block at 0xdc0.
	 */
#ifdef CONFIG_ACORN_PARTITION_ICS
	adfspart_check_ICS,
#endif
#ifdef CONFIG_ACORN_PARTITION_POWERTEC
	adfspart_check_POWERTEC,
#endif
#ifdef CONFIG_ACORN_PARTITION_EESOX
	adfspart_check_EESOX,
#endif

	/*
	 * Now move on to formats that only have partition info at
	 * disk address 0xdc0.  Since these may also have stale
	 * PC/BIOS partition tables, they need to come before
	 * the msdos entry.
	 */
#ifdef CONFIG_ACORN_PARTITION_CUMANA
	adfspart_check_CUMANA,
#endif
#ifdef CONFIG_ACORN_PARTITION_ADFS
	adfspart_check_ADFS,
#endif

#ifdef CONFIG_CMDLINE_PARTITION
	cmdline_partition,
#endif
#ifdef CONFIG_EFI_PARTITION
	efi_partition,		/* this must come before msdos */
#endif
#ifdef CONFIG_SGI_PARTITION
	sgi_partition,
#endif
#ifdef CONFIG_LDM_PARTITION
	ldm_partition,		/* this must come before msdos */
#endif
#ifdef CONFIG_MSDOS_PARTITION
	msdos_partition,
#endif
#ifdef CONFIG_OSF_PARTITION
	osf_partition,
#endif
#ifdef CONFIG_SUN_PARTITION
	sun_partition,
#endif
#ifdef CONFIG_AMIGA_PARTITION
	amiga_partition,
#endif
#ifdef CONFIG_ATARI_PARTITION
	atari_partition,
#endif
#ifdef CONFIG_MAC_PARTITION
	mac_partition,
#endif
#ifdef CONFIG_ULTRIX_PARTITION
	ultrix_partition,
#endif
#ifdef CONFIG_IBM_PARTITION
	ibm_partition,
#endif
#ifdef CONFIG_KARMA_PARTITION
	karma_partition,
#endif
#ifdef CONFIG_SYSV68_PARTITION
	sysv68_partition,
#endif
	NULL
};
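/*
 * The table above is NULL-terminated and probed strictly in order by
 * check_partition() below: the first parser that returns a positive
 * value wins, a negative return is recorded as an I/O error, and zero
 * means "not my format, try the next one".
 */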
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
{
	struct parsed_partitions *state;
	int nr = DISK_MAX_PARTS;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->parts = vzalloc(array_size(nr, sizeof(state->parts[0])));
	if (!state->parts) {
		kfree(state);
		return NULL;
	}

	state->limit = nr;

	return state;
}
static void free_partitions(struct parsed_partitions *state)
{
	vfree(state->parts);
	kfree(state);
}
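/*
 * Run all compiled-in partition parsers against @hd.  Returns NULL on
 * allocation failure, a valid parsed_partitions state when a parser
 * recognized the table, or an ERR_PTR() when nothing was recognized
 * and an I/O error (or an access beyond EOD) occurred along the way.
 */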
static struct parsed_partitions *check_partition(struct gendisk *hd)
{
	struct parsed_partitions *state;
	int i, res, err;

	state = allocate_partitions(hd);
	if (!state)
		return NULL;
	state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!state->pp_buf) {
		free_partitions(state);
		return NULL;
	}
	state->pp_buf[0] = '\0';

	state->disk = hd;
	snprintf(state->name, BDEVNAME_SIZE, "%s", hd->disk_name);
	snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
	if (isdigit(state->name[strlen(state->name)-1]))
		sprintf(state->name, "p");

	i = res = err = 0;
	while (!res && check_part[i]) {
		memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
		res = check_part[i++](state);
		if (res < 0) {
			/*
			 * We have hit an I/O error which we don't report now.
			 * But record it, and let the others do their job.
			 */
			err = res;
			res = 0;
		}
	}
	if (res > 0) {
		printk(KERN_INFO "%s", state->pp_buf);

		free_page((unsigned long)state->pp_buf);
		return state;
	}
	if (state->access_beyond_eod)
		err = -ENOSPC;
	/*
	 * The partition table is unrecognized. Report I/O errors if there
	 * were any.
	 */
	if (err)
		res = err;
	if (res) {
		strlcat(state->pp_buf,
			" unable to read partition table\n", PAGE_SIZE);
		printk(KERN_INFO "%s", state->pp_buf);
	}

	free_page((unsigned long)state->pp_buf);
	free_partitions(state);
	return ERR_PTR(res);
}
static ssize_t part_partition_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno);
}

static ssize_t part_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", dev_to_bdev(dev)->bd_start_sect);
}

static ssize_t part_ro_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", bdev_read_only(dev_to_bdev(dev)));
}

static ssize_t part_alignment_offset_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bdev_alignment_offset(dev_to_bdev(dev)));
}

static ssize_t part_discard_alignment_show(struct device *dev,
					   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bdev_discard_alignment(dev_to_bdev(dev)));
}
static DEVICE_ATTR(partition, 0444, part_partition_show, NULL);
static DEVICE_ATTR(start, 0444, part_start_show, NULL);
static DEVICE_ATTR(size, 0444, part_size_show, NULL);
static DEVICE_ATTR(ro, 0444, part_ro_show, NULL);
static DEVICE_ATTR(alignment_offset, 0444, part_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, 0444, part_discard_alignment_show, NULL);
static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
#endif
static struct attribute *part_attrs[] = {
	&dev_attr_partition.attr,
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_ro.attr,
	&dev_attr_alignment_offset.attr,
	&dev_attr_discard_alignment.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
	NULL
};
static const struct attribute_group part_attr_group = {
	.attrs = part_attrs,
};

static const struct attribute_group *part_attr_groups[] = {
	&part_attr_group,
#ifdef CONFIG_BLK_DEV_IO_TRACE
	&blk_trace_attr_group,
#endif
	NULL
};
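/*
 * Device release callback: runs once the last reference to the partition
 * device is gone, dropping the whole-disk reference taken in
 * add_partition() and the backing block device inode.
 */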
static void part_release(struct device *dev)
{
	put_disk(dev_to_bdev(dev)->bd_disk);
	iput(dev_to_bdev(dev)->bd_inode);
}
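/*
 * Expose the partition number, and the volume name when the partition
 * table provides one, to userspace (e.g. udev rules) as PARTN= and
 * PARTNAME= uevent environment variables.
 */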
static int part_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct block_device *part = dev_to_bdev(dev);

	add_uevent_var(env, "PARTN=%u", part->bd_partno);
	if (part->bd_meta_info && part->bd_meta_info->volname[0])
		add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname);
	return 0;
}
const struct device_type part_type = {
	.name		= "partition",
	.groups		= part_attr_groups,
	.release	= part_release,
	.uevent		= part_uevent,
};
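/*
 * Unhook a partition from its disk: remove it from the partition table
 * xarray and tear down its sysfs presence.  The caller must hold the
 * disk's open_mutex.
 */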
void drop_partition(struct block_device *part)
{
	lockdep_assert_held(&part->bd_disk->open_mutex);

	xa_erase(&part->bd_disk->part_tbl, part->bd_partno);
	kobject_put(part->bd_holder_dir);

	device_del(&part->bd_device);
	put_device(&part->bd_device);
}
static void delete_partition(struct block_device *part)
{
	/*
	 * Remove the block device from the inode hash, so that it cannot be
	 * looked up any more even when openers still hold references.
	 */
	remove_inode_hash(part->bd_inode);

	fsync_bdev(part);
	__invalidate_device(part, true);

	drop_partition(part);
}
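/*
 * Empty "whole_disk" sysfs attribute: its mere presence marks partitions
 * that a parser flagged with ADDPART_FLAG_WHOLEDISK (such as the
 * whole-disk entry of a Sun disk label); reading it returns nothing.
 */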
static ssize_t whole_disk_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return 0;
}
static const DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
/*
 * Must be called either with open_mutex held, before a disk can be opened or
 * after all disk users are gone.
 */
static struct block_device *add_partition(struct gendisk *disk, int partno,
				sector_t start, sector_t len, int flags,
				struct partition_meta_info *info)
{
	dev_t devt = MKDEV(0, 0);
	struct device *ddev = disk_to_dev(disk);
	struct device *pdev;
	struct block_device *bdev;
	const char *dname;
	int err;

	lockdep_assert_held(&disk->open_mutex);
	if (partno >= DISK_MAX_PARTS)
		return ERR_PTR(-EINVAL);

	/*
	 * Partitions are not supported on zoned block devices that are used as
	 * such.
	 */
	switch (disk->queue->limits.zoned) {
	case BLK_ZONED_HM:
		pr_warn("%s: partitions not supported on host managed zoned block device\n",
			disk->disk_name);
		return ERR_PTR(-ENXIO);
	case BLK_ZONED_HA:
		pr_info("%s: disabling host aware zoned block device support due to partitions\n",
			disk->disk_name);
		disk_set_zoned(disk, BLK_ZONED_NONE);
		break;
	case BLK_ZONED_NONE:
		break;
	}

	if (xa_load(&disk->part_tbl, partno))
		return ERR_PTR(-EBUSY);

	/* ensure we always have a reference to the whole disk */
	get_device(disk_to_dev(disk));
	err = -ENOMEM;
	bdev = bdev_alloc(disk, partno);
	if (!bdev)
		goto out_put_disk;
	bdev->bd_start_sect = start;
	bdev_set_nr_sectors(bdev, len);

	pdev = &bdev->bd_device;
	dname = dev_name(ddev);
	if (isdigit(dname[strlen(dname) - 1]))
		dev_set_name(pdev, "%sp%d", dname, partno);
	else
		dev_set_name(pdev, "%s%d", dname, partno);
	device_initialize(pdev);
	pdev->class = &block_class;
	pdev->type = &part_type;
	pdev->parent = ddev;

	/* in consecutive minor range? */
	if (bdev->bd_partno < disk->minors) {
		devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
	} else {
		err = blk_alloc_ext_minor();
		if (err < 0)
			goto out_put;
		devt = MKDEV(BLOCK_EXT_MAJOR, err);
	}
	pdev->devt = devt;

	if (info) {
		err = -ENOMEM;
		bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
		if (!bdev->bd_meta_info)
			goto out_put;
	}

	/* delay uevent until 'holders' subdir is created */
	dev_set_uevent_suppress(pdev, 1);
	err = device_add(pdev);
	if (err)
		goto out_put;
	err = -ENOMEM;
	bdev->bd_holder_dir = kobject_create_and_add("holders", &pdev->kobj);
	if (!bdev->bd_holder_dir)
		goto out_del;
	dev_set_uevent_suppress(pdev, 0);
	if (flags & ADDPART_FLAG_WHOLEDISK) {
		err = device_create_file(pdev, &dev_attr_whole_disk);
		if (err)
			goto out_del;
	}

	/* everything is up and running, commence */
	err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
	if (err)
		goto out_del;
	bdev_add(bdev, devt);

	/* suppress uevent if the disk suppresses it */
	if (!dev_get_uevent_suppress(ddev))
		kobject_uevent(&pdev->kobj, KOBJ_ADD);
	return bdev;

out_del:
	kobject_put(bdev->bd_holder_dir);
	device_del(pdev);
out_put:
	put_device(pdev);
	return ERR_PTR(err);
out_put_disk:
	put_disk(disk);
	return ERR_PTR(err);
}
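/*
 * Check whether the sector range [start, start + length) intersects any
 * existing partition on @disk.  @skip_partno excludes one partition from
 * the check (used when resizing in place); pass -1 to check against all.
 */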
static bool partition_overlaps(struct gendisk *disk, sector_t start,
		sector_t length, int skip_partno)
{
	struct block_device *part;
	bool overlap = false;
	unsigned long idx;

	rcu_read_lock();
	xa_for_each_start(&disk->part_tbl, idx, part, 1) {
		if (part->bd_partno != skip_partno &&
		    start < part->bd_start_sect + bdev_nr_sectors(part) &&
		    start + length > part->bd_start_sect) {
			overlap = true;
			break;
		}
	}
	rcu_read_unlock();

	return overlap;
}
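/*
 * Create a user-requested partition, validating that the range does not
 * overflow, fits within the disk, and does not overlap an existing
 * partition.  This is the backend of the BLKPG_ADD_PARTITION ioctl.
 */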
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length)
{
	sector_t capacity = get_capacity(disk), end;
	struct block_device *part;
	int ret;

	mutex_lock(&disk->open_mutex);
	if (check_add_overflow(start, length, &end)) {
		ret = -EINVAL;
		goto out;
	}

	if (start >= capacity || end > capacity) {
		ret = -EINVAL;
		goto out;
	}

	if (!disk_live(disk)) {
		ret = -ENXIO;
		goto out;
	}

	if (partition_overlaps(disk, start, length, -1)) {
		ret = -EBUSY;
		goto out;
	}

	part = add_partition(disk, partno, start, length,
			ADDPART_FLAG_NONE, NULL);
	ret = PTR_ERR_OR_ZERO(part);
out:
	mutex_unlock(&disk->open_mutex);
	return ret;
}
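/*
 * Remove a partition on behalf of userspace (BLKPG_DEL_PARTITION):
 * fails with -ENXIO if it does not exist and -EBUSY while it is open.
 */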
int bdev_del_partition(struct gendisk *disk, int partno)
{
	struct block_device *part = NULL;
	int ret = -ENXIO;

	mutex_lock(&disk->open_mutex);
	part = xa_load(&disk->part_tbl, partno);
	if (!part)
		goto out_unlock;

	ret = -EBUSY;
	if (atomic_read(&part->bd_openers))
		goto out_unlock;

	delete_partition(part);
	ret = 0;
out_unlock:
	mutex_unlock(&disk->open_mutex);
	return ret;
}
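/*
 * Resize a partition in place (BLKPG_RESIZE_PARTITION): the start sector
 * must stay the same, and the new range must not overlap any neighbour.
 */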
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length)
{
	struct block_device *part = NULL;
	int ret = -ENXIO;

	mutex_lock(&disk->open_mutex);
	part = xa_load(&disk->part_tbl, partno);
	if (!part)
		goto out_unlock;

	ret = -EINVAL;
	if (start != part->bd_start_sect)
		goto out_unlock;

	ret = -EBUSY;
	if (partition_overlaps(disk, start, length, partno))
		goto out_unlock;

	bdev_set_nr_sectors(part, length);

	ret = 0;
out_unlock:
	mutex_unlock(&disk->open_mutex);
	return ret;
}
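/*
 * Ask the driver to unlock the disk's native capacity after a partition
 * was found to extend beyond EOD.  GD_NATIVE_CAPACITY ensures this is
 * attempted at most once; returns true if the caller should rescan.
 */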
static bool disk_unlock_native_capacity(struct gendisk *disk)
{
	if (!disk->fops->unlock_native_capacity ||
	    test_and_set_bit(GD_NATIVE_CAPACITY, &disk->state)) {
		printk(KERN_CONT "truncated\n");
		return false;
	}

	printk(KERN_CONT "enabling native capacity\n");
	disk->fops->unlock_native_capacity(disk);
	return true;
}
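/*
 * Instantiate parsed partition @p from @state.  Returns false only when
 * the native capacity was unlocked, in which case the caller restarts
 * the whole scan; all other failures are logged and skipped.
 */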
static bool blk_add_partition(struct gendisk *disk,
		struct parsed_partitions *state, int p)
{
	sector_t size = state->parts[p].size;
	sector_t from = state->parts[p].from;
	struct block_device *part;

	if (!size)
		return true;

	if (from >= get_capacity(disk)) {
		printk(KERN_WARNING
		       "%s: p%d start %llu is beyond EOD, ",
		       disk->disk_name, p, (unsigned long long) from);
		if (disk_unlock_native_capacity(disk))
			return false;
		return true;
	}

	if (from + size > get_capacity(disk)) {
		printk(KERN_WARNING
		       "%s: p%d size %llu extends beyond EOD, ",
		       disk->disk_name, p, (unsigned long long) size);

		if (disk_unlock_native_capacity(disk))
			return false;

		/*
		 * We cannot ignore partitions of broken tables created by,
		 * for example, camera firmware, but we limit them to the
		 * end of the disk to avoid creating invalid block devices.
		 */
		size = get_capacity(disk) - from;
	}

	part = add_partition(disk, p, from, size, state->parts[p].flags,
			     &state->parts[p].info);
	if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
		printk(KERN_ERR " %s: p%d could not be added: %ld\n",
		       disk->disk_name, p, -PTR_ERR(part));
		return true;
	}

	if (IS_BUILTIN(CONFIG_BLK_DEV_MD) &&
	    (state->parts[p].flags & ADDPART_FLAG_RAID))
		md_autodetect_dev(part->bd_dev);

	return true;
}
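/*
 * Scan the partition table and instantiate every partition found.
 * Returns 0 on success or when scanning is disabled, -EAGAIN when the
 * caller should retry after a native capacity unlock, or -EIO on a
 * fatal read error.
 */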
static int blk_add_partitions(struct gendisk *disk)
{
	struct parsed_partitions *state;
	int ret = -EAGAIN, p;

	if (disk->flags & GENHD_FL_NO_PART)
		return 0;

	if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
		return 0;

	state = check_partition(disk);
	if (!state)
		return 0;
	if (IS_ERR(state)) {
		/*
		 * I/O error reading the partition table.  If we tried to read
		 * beyond EOD, retry after unlocking the native capacity.
		 */
		if (PTR_ERR(state) == -ENOSPC) {
			printk(KERN_WARNING "%s: partition table beyond EOD, ",
			       disk->disk_name);
			if (disk_unlock_native_capacity(disk))
				return -EAGAIN;
		}
		return -EIO;
	}

	/*
	 * Partitions are not supported on host managed zoned block devices.
	 */
	if (disk->queue->limits.zoned == BLK_ZONED_HM) {
		pr_warn("%s: ignoring partition table on host managed zoned block device\n",
			disk->disk_name);
		ret = 0;
		goto out_free_state;
	}

	/*
	 * If we read beyond EOD, try unlocking native capacity even if the
	 * partition table was successfully read as we could be missing some
	 * partitions.
	 */
	if (state->access_beyond_eod) {
		printk(KERN_WARNING
		       "%s: partition table partially beyond EOD, ",
		       disk->disk_name);
		if (disk_unlock_native_capacity(disk))
			goto out_free_state;
	}

	/* tell userspace that the media / partition table may have changed */
	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);

	for (p = 1; p < state->limit; p++)
		if (!blk_add_partition(disk, state, p))
			goto out_free_state;

	ret = 0;
out_free_state:
	free_partitions(state);
	return ret;
}
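/*
 * Drop all existing partitions and rescan the partition table.  Fails
 * with -EBUSY while any partition is open; when @invalidate is set the
 * cached media state is thrown away as well.
 */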
int bdev_disk_changed(struct gendisk *disk, bool invalidate)
{
	struct block_device *part;
	unsigned long idx;
	int ret = 0;

	lockdep_assert_held(&disk->open_mutex);

	if (!disk_live(disk))
		return -ENXIO;

rescan:
	if (disk->open_partitions)
		return -EBUSY;
	sync_blockdev(disk->part0);
	invalidate_bdev(disk->part0);

	xa_for_each_start(&disk->part_tbl, idx, part, 1)
		delete_partition(part);
	clear_bit(GD_NEED_PART_SCAN, &disk->state);

	/*
	 * Historically we only set the capacity to zero for devices that
	 * support partitions (independent of actually having partitions
	 * created). Doing that is rather inconsistent, but changing it
	 * broke legacy udisks polling for legacy ide-cdrom devices. Use
	 * the crude check below to get the sane behavior for most devices
	 * while not breaking userspace for this particular setup.
	 */
	if (invalidate) {
		if (!(disk->flags & GENHD_FL_NO_PART) ||
		    !(disk->flags & GENHD_FL_REMOVABLE))
			set_capacity(disk, 0);
	}

	if (get_capacity(disk)) {
		ret = blk_add_partitions(disk);
		if (ret == -EAGAIN)
			goto rescan;
	} else if (invalidate) {
		/*
		 * Tell userspace that the media / partition table may have
		 * changed.
		 */
		kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
	}

	return ret;
}
/*
 * Only exported for loop and dasd for historic reasons. Don't use in new
 * code!
 */
EXPORT_SYMBOL_GPL(bdev_disk_changed);
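/*
 * Read 512-byte sector @n of the whole disk through the page cache and
 * return a pointer into the backing folio, or NULL on error (reads past
 * EOD additionally set state->access_beyond_eod).  The caller releases
 * the folio reference held in *p with put_dev_sector(), e.g.:
 *
 *	Sector sect;
 *	unsigned char *data = read_part_sector(state, 0, &sect);
 *	if (!data)
 *		return -1;
 *	... parse the sector ...
 *	put_dev_sector(sect);
 */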
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
	struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
	struct folio *folio;

	if (n >= get_capacity(state->disk)) {
		state->access_beyond_eod = true;
		goto out;
	}

	folio = read_mapping_folio(mapping, n >> PAGE_SECTORS_SHIFT, NULL);
	if (IS_ERR(folio))
		goto out;

	p->v = folio;
	return folio_address(folio) + offset_in_folio(folio, n * SECTOR_SIZE);
out:
	p->v = NULL;
	return NULL;
}