/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BLK_NULL_BLK_H
#define __BLK_NULL_BLK_H

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

18struct nullb_cmd {
6dad38d3
MB
19 struct request *rq;
20 struct bio *bio;
21 unsigned int tag;
22 blk_status_t error;
23 struct nullb_queue *nq;
24 struct hrtimer timer;
25};
26
27struct nullb_queue {
28 unsigned long *tag_map;
29 wait_queue_head_t wait;
30 unsigned int queue_depth;
31 struct nullb_device *dev;
32 unsigned int requeue_selection;
33
34 struct nullb_cmd *cmds;
35};
36
2b8b7ed7
DLM
37struct nullb_zone {
38 /*
39 * Zone lock to prevent concurrent modification of a zone write
40 * pointer position and condition: with memory backing, a write
41 * command execution may sleep on memory allocation. For this case,
42 * use mutex as the zone lock. Otherwise, use the spinlock for
43 * locking the zone.
44 */
45 union {
46 spinlock_t spinlock;
47 struct mutex mutex;
48 };
49 enum blk_zone_type type;
50 enum blk_zone_cond cond;
51 sector_t start;
52 sector_t wp;
53 unsigned int len;
54 unsigned int capacity;
55};
56
6dad38d3
MB
57struct nullb_device {
58 struct nullb *nullb;
59 struct config_item item;
60 struct radix_tree_root data; /* data stored in the disk */
61 struct radix_tree_root cache; /* disk cache data */
62 unsigned long flags; /* device flags */
63 unsigned int curr_cache;
64 struct badblocks badblocks;
65
ca4b2a01 66 unsigned int nr_zones;
dc4d137e
NC
67 unsigned int nr_zones_imp_open;
68 unsigned int nr_zones_exp_open;
69 unsigned int nr_zones_closed;
2e8c6e0e 70 unsigned int imp_close_zone_no;
2b8b7ed7 71 struct nullb_zone *zones;
ca4b2a01 72 sector_t zone_size_sects;
2b8b7ed7
DLM
73 bool need_zone_res_mgmt;
74 spinlock_t zone_res_lock;
ca4b2a01 75
6dad38d3
MB
76 unsigned long size; /* device size in MB */
77 unsigned long completion_nsec; /* time in ns to complete a request */
78 unsigned long cache_size; /* disk cache size in MB */
ca4b2a01 79 unsigned long zone_size; /* zone size in MB if device is zoned */
089565fb 80 unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
ea2c18e1 81 unsigned int zone_nr_conv; /* number of conventional zones */
dc4d137e
NC
82 unsigned int zone_max_open; /* max number of open zones */
83 unsigned int zone_max_active; /* max number of active zones */
6dad38d3
MB
84 unsigned int submit_queues; /* number of submission queues */
85 unsigned int home_node; /* home node for the device */
86 unsigned int queue_mode; /* block interface */
87 unsigned int blocksize; /* block size */
ea17fd35 88 unsigned int max_sectors; /* Max sectors per command */
6dad38d3
MB
89 unsigned int irqmode; /* IRQ completion handler */
90 unsigned int hw_queue_depth; /* queue depth */
91 unsigned int index; /* index of the disk, only valid with a disk */
92 unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
93 bool blocking; /* blocking blk-mq device */
94 bool use_per_node_hctx; /* use per-node allocation for hardware context */
95 bool power; /* power on/off the device */
96 bool memory_backed; /* if data is stored in memory */
97 bool discard; /* if support discard */
ca4b2a01 98 bool zoned; /* if device is zoned */
6dad38d3
MB
99};
100
101struct nullb {
102 struct nullb_device *dev;
103 struct list_head list;
104 unsigned int index;
105 struct request_queue *q;
106 struct gendisk *disk;
107 struct blk_mq_tag_set *tag_set;
108 struct blk_mq_tag_set __tag_set;
109 unsigned int queue_depth;
110 atomic_long_t cur_bytes;
111 struct hrtimer bw_timer;
112 unsigned long cache_flush_pos;
113 spinlock_t lock;
114
115 struct nullb_queue *queues;
116 unsigned int nr_queues;
117 char disk_name[DISK_NAME_LEN];
118};
ca4b2a01 119
0ec4d913
DLM
120blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
121 sector_t nr_sectors);
9dd44c7e
DLM
122blk_status_t null_process_cmd(struct nullb_cmd *cmd,
123 enum req_opf op, sector_t sector,
124 unsigned int nr_sectors);
125
ca4b2a01 126#ifdef CONFIG_BLK_DEV_ZONED
d205bde7
DLM
127int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
128int null_register_zoned_dev(struct nullb *nullb);
129void null_free_zoned_dev(struct nullb_device *dev);
7fc8fb51 130int null_report_zones(struct gendisk *disk, sector_t sector,
d4100351 131 unsigned int nr_zones, report_zones_cb cb, void *data);
9dd44c7e
DLM
132blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
133 enum req_opf op, sector_t sector,
134 sector_t nr_sectors);
dd85b492
AJ
135size_t null_zone_valid_read_len(struct nullb *nullb,
136 sector_t sector, unsigned int len);
ca4b2a01 137#else
d205bde7
DLM
138static inline int null_init_zoned_dev(struct nullb_device *dev,
139 struct request_queue *q)
ca4b2a01 140{
9c7eddf1 141 pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
ca4b2a01
MB
142 return -EINVAL;
143}
d205bde7
DLM
144static inline int null_register_zoned_dev(struct nullb *nullb)
145{
146 return -ENODEV;
147}
148static inline void null_free_zoned_dev(struct nullb_device *dev) {}
9dd44c7e
DLM
149static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
150 enum req_opf op, sector_t sector, sector_t nr_sectors)
b228ba1c 151{
fceb5d1b 152 return BLK_STS_NOTSUPP;
b228ba1c 153}
dd85b492
AJ
154static inline size_t null_zone_valid_read_len(struct nullb *nullb,
155 sector_t sector,
156 unsigned int len)
157{
158 return len;
159}
7fc8fb51 160#define null_report_zones NULL
ca4b2a01 161#endif /* CONFIG_BLK_DEV_ZONED */
6dad38d3 162#endif /* __NULL_BLK_H */