// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

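/*
 * Zoned (host-managed) mode emulation for null_blk: zone state tracking,
 * zone I/O checks and zone management operations (open/close/finish/reset).
 *
 * Usage sketch (assuming the standard null_blk module parameters, sizes in
 * MB):
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_nr_conv=4
 */
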
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

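/*
 * Zone resource accounting (nr_zones_imp_open, nr_zones_exp_open and
 * nr_zones_closed) only needs serialization when an open or active zone
 * limit is configured, i.e. when need_zone_res_mgmt is set. Otherwise these
 * helpers are no-ops and the fast path takes no lock.
 */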
static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}

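/*
 * Per-zone locking: a memory backed device uses a mutex, as processing a
 * command may sleep (e.g. when allocating backing pages); otherwise a
 * spinlock is sufficient.
 */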
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

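/*
 * Device layout: zone_nr_conv conventional zones first, followed by
 * sequential write required zones. The device capacity is rounded up to a
 * whole number of zones, so the last sequential zone may be smaller than
 * zone_size, and a zone's usable capacity never exceeds its length.
 */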
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}

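/*
 * Note: for blk-mq, blk_revalidate_disk_zones() checks the zone
 * configuration and updates the queue zone information; for the BIO-based
 * queue mode this has to be done by hand.
 */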
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}

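/*
 * Pick one implicitly open zone to close in order to free up an open zone
 * resource. The search is round-robin, starting from the zone following the
 * last one implicitly closed, so closures are spread over all sequential
 * zones.
 */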
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

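/*
 * Per the ZBC/ZNS definitions, a zone is active when it is implicitly open,
 * explicitly open or closed, which is exactly the sum checked against
 * zone_max_active below.
 */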
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
	    dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

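/*
 * Worked example for the checks above (hypothetical limits): with
 * zone_max_open = 2 and zone_max_active = 3, two implicitly open zones and
 * one closed zone put the device at the active limit (0 + 2 + 1 = 3).
 * Writing to an empty zone then fails null_check_active() with
 * BLK_STS_ZONE_ACTIVE_RESOURCE: closing an implicitly open zone would not
 * help, since a closed zone still counts as active.
 */
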
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL) {
		/* Cannot write to a full zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (dev->queue_mode == NULL_Q_BIO)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

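/*
 * REQ_OP_ZONE_RESET_ALL is handled by iterating over the sequential zones
 * only: conventional zones have no write pointer and cannot be reset. Zones
 * that are already empty are skipped, which avoids needless discard work on
 * memory backed devices.
 */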
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	null_unlock_zone(dev, zone);

	return ret;
}

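/*
 * Entry point for all commands issued to a zoned null_blk device. Writes and
 * zone management operations get special handling; everything else (reads in
 * particular) is processed under the zone lock, so it is serialized against
 * ongoing writes to the same zone.
 */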
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}