/*
 * drivers/block/null_blk/null_blk.h
 *
 * Shared definitions for the null_blk test block driver: per-command,
 * per-queue, per-zone and per-device state, plus the zoned-device API.
 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BLK_NULL_BLK_H
#define __BLK_NULL_BLK_H

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
#include <linux/fault-inject.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

/*
 * Per-command context, carried alongside each in-flight request or bio.
 */
struct nullb_cmd {
	/*
	 * Exactly one member is valid, depending on the queue mode:
	 * rq for blk-mq dispatch, bio for NULL_Q_BIO submission.
	 */
	union {
		struct request *rq;
		struct bio *bio;
	};
	unsigned int tag;	/* command tag */
	blk_status_t error;	/* completion status reported to the block layer */
	bool fake_timeout;	/* NOTE(review): presumably set by timeout fault
				 * injection so completion is suppressed — confirm
				 * against null_blk main.c */
	struct nullb_queue *nq;	/* owning submission queue */
	struct hrtimer timer;	/* timer used for delayed/timer-based completion */
};
29
/*
 * Per-hardware-queue state.
 */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of allocated tags */
	wait_queue_head_t wait;		/* waiters for a free tag */
	unsigned int queue_depth;	/* number of tags in tag_map */
	struct nullb_device *dev;	/* owning device configuration */
	unsigned int requeue_selection;	/* NOTE(review): presumably a counter used
					 * by requeue fault injection to pick a
					 * requeue path — confirm in main.c */

	struct list_head poll_list;	/* commands pending IOPOLL completion */
	spinlock_t poll_lock;		/* protects poll_list */

	struct nullb_cmd *cmds;		/* per-tag command array (queue_depth entries) */
};
42
/*
 * State of a single emulated zone of a zoned null_blk device.
 */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential-write-required */
	enum blk_zone_cond cond;	/* current zone condition (empty/open/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* current write pointer position */
	unsigned int len;		/* zone length in sectors */
	unsigned int capacity;		/* writable capacity in sectors (<= len) */
};
62
/* Queue modes (nullb_device.queue_mode): how I/O is submitted to the driver. */
enum {
	NULL_Q_BIO	= 0,	/* bio-based, bypasses blk-mq */
	NULL_Q_RQ	= 1,	/* legacy request mode (kept for config compatibility) */
	NULL_Q_MQ	= 2,	/* blk-mq based */
};
69
/*
 * Per-device configuration and state. Instances are created and tuned
 * through configfs; most fields below are exposed as configfs attributes.
 */
struct nullb_device {
	struct nullb *nullb;		/* live driver instance backed by this config */
	struct config_group group;	/* configfs group representing this device */
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	struct fault_config timeout_config;		/* request timeout injection */
	struct fault_config requeue_config;		/* request requeue injection */
	struct fault_config init_hctx_fault_config;	/* hctx init failure injection */
#endif
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage */
	struct badblocks badblocks;	/* simulated bad-block ranges */

	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	unsigned int imp_close_zone_no;	/* NOTE(review): presumably the next candidate
					 * zone for implicit close when open-zone
					 * resources run out — confirm in zoned.c */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in 512B sectors */
	bool need_zone_res_mgmt;	/* enforce max open/active zone limits */
	spinlock_t zone_res_lock;	/* protects the zone resource counters above */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface (NULL_Q_*) */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
	bool no_sched; /* no IO scheduler for the device */
	bool shared_tag_bitmap; /* use hostwide shared tags */
};
124
/*
 * A live null_blk device instance: the gendisk, request queue and runtime
 * state created when a nullb_device configuration is powered on.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was built from */
	struct list_head list;		/* entry in the global list of devices */
	unsigned int index;		/* disk index */
	struct request_queue *q;	/* request queue of the disk */
	struct gendisk *disk;		/* the exposed block device */
	struct blk_mq_tag_set *tag_set;	/* tag set in use (may point at __tag_set) */
	struct blk_mq_tag_set __tag_set; /* per-device tag set storage */
	unsigned int queue_depth;	/* per-queue depth */
	atomic_long_t cur_bytes;	/* bytes remaining in current throttle window */
	struct hrtimer bw_timer;	/* timer refilling the bandwidth budget */
	unsigned long cache_flush_pos;	/* NOTE(review): presumably the next cache
					 * index to write back — confirm in main.c */
	spinlock_t lock;		/* protects device state */

	struct nullb_queue *queues;	/* array of nr_queues submission queues */
	unsigned int nr_queues;		/* number of entries in queues */
	char disk_name[DISK_NAME_LEN];	/* name of the exposed disk */
};
ca4b2a01 143
/* Discard handling; zeroes/frees backing pages for the given range. */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
/* Execute a regular (non-zoned) command against the device. */
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors);

/*
 * Zoned-device support. With CONFIG_BLK_DEV_ZONED disabled, the stubs
 * below fail device setup (-EINVAL/-ENODEV) or fall back to pass-through
 * behaviour so the rest of the driver needs no conditional code.
 */
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond);
#else
static inline int null_init_zoned_dev(struct nullb_device *dev,
				      struct request_queue *q)
{
	/* A zoned device cannot be configured without zoned block support. */
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_op op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/* Without zones, the whole device range is readable. */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
static inline ssize_t zone_cond_store(struct nullb_device *dev,
				      const char *page, size_t count,
				      enum blk_zone_cond cond)
{
	return -EOPNOTSUPP;
}
/* NULL report_zones makes the block layer treat the disk as unzoned. */
#define null_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */