// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned"

#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
        struct dmz_target *target;
        struct dm_zone *zone;
        struct bio *bio;
        refcount_t ref;
};

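/*
 * Note on the bioctx reference count (inferred from the code below):
 * dmz_map() initializes ref to 1, dmz_submit_bio() takes one extra
 * reference per issued clone, and dmz_bio_endio() completes the target
 * BIO only when the last reference is dropped. The zone recorded in
 * bioctx->zone stays activated until that final completion.
 */
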
/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
        struct work_struct work;
        refcount_t refcount;
        struct dmz_target *target;
        unsigned int chunk;
        struct bio_list bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
        struct dm_dev *ddev;

        unsigned long flags;

        /* Zoned block device information */
        struct dmz_dev *dev;

        /* For metadata handling */
        struct dmz_metadata *metadata;

        /* For reclaim */
        struct dmz_reclaim *reclaim;

        /* For chunk work */
        struct radix_tree_root chunk_rxtree;
        struct workqueue_struct *chunk_wq;
        struct mutex chunk_lock;

        /* For cloned BIOs to zones */
        struct bio_set bio_set;

        /* For flush */
        spinlock_t flush_lock;
        struct bio_list flush_list;
        struct delayed_work flush_work;
        struct workqueue_struct *flush_wq;
};

/*
 * Flush period (10 seconds, expressed in jiffies).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

        if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
                bio->bi_status = status;
        if (bio->bi_status != BLK_STS_OK)
                bioctx->target->dev->flags |= DMZ_CHECK_BDEV;

        if (refcount_dec_and_test(&bioctx->ref)) {
                struct dm_zone *zone = bioctx->zone;

                if (zone) {
                        if (bio->bi_status != BLK_STS_OK &&
                            bio_op(bio) == REQ_OP_WRITE &&
                            dmz_is_seq(zone))
                                set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
                        dmz_deactivate_zone(zone);
                }
                bio_endio(bio);
        }
}

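/*
 * Note: a failed write to a sequential zone sets DMZ_SEQ_WRITE_ERR,
 * presumably so that the zone write pointer can be resynchronized with
 * the device before the zone is written again. Any error also flags the
 * backing device for a health check via DMZ_CHECK_BDEV.
 */
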
/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
        struct dmz_bioctx *bioctx = clone->bi_private;
        blk_status_t status = clone->bi_status;

        bio_put(clone);
        dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
                          struct bio *bio, sector_t chunk_block,
                          unsigned int nr_blocks)
{
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct bio *clone;

        clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;

        bio_set_dev(clone, dmz->dev->bdev);
        clone->bi_iter.bi_sector =
                dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
        clone->bi_end_io = dmz_clone_endio;
        clone->bi_private = bioctx;

        bio_advance(bio, clone->bi_iter.bi_size);

        refcount_inc(&bioctx->ref);
        generic_make_request(clone);

        if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
                zone->wp_block += nr_blocks;

        return 0;
}

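/*
 * Note: dmz_submit_bio() advances the parent BIO past the cloned range so
 * that callers can keep issuing clones for the remainder. For a write to a
 * sequential zone, zone->wp_block is advanced as soon as the clone is
 * issued; the caller presumably holds the zone write-locked so that writes
 * are issued in write pointer order.
 */
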
/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
                                 sector_t chunk_block, unsigned int nr_blocks)
{
        unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

        /* Clear nr_blocks */
        swap(bio->bi_iter.bi_size, size);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, size);

        bio_advance(bio, size);
}

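/*
 * The swap() pair above temporarily shrinks bio->bi_iter.bi_size to the
 * byte count of the blocks being cleared, so that zero_fill_bio() only
 * touches that range, then restores the original size before advancing
 * the BIO past the zeroed blocks.
 */
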
/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
                           struct bio *bio)
{
        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
        unsigned int nr_blocks = dmz_bio_blocks(bio);
        sector_t end_block = chunk_block + nr_blocks;
        struct dm_zone *rzone, *bzone;
        int ret;

        /* Reads to unmapped chunks need only zero out the BIO buffer */
        if (!zone) {
                zero_fill_bio(bio);
                return 0;
        }

        dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
                      dmz_id(dmz->metadata, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        /* Check block validity to determine the read location */
        bzone = zone->bzone;
        while (chunk_block < end_block) {
                nr_blocks = 0;
                if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
                        /* Test block validity in the data zone */
                        ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
                        if (ret < 0)
                                return ret;
                        if (ret > 0) {
                                /* Read data zone blocks */
                                nr_blocks = ret;
                                rzone = zone;
                        }
                }

                /*
                 * No valid blocks found in the data zone.
                 * Check the buffer zone, if there is one.
                 */
                if (!nr_blocks && bzone) {
                        ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
                        if (ret < 0)
                                return ret;
                        if (ret > 0) {
                                /* Read buffer zone blocks */
                                nr_blocks = ret;
                                rzone = bzone;
                        }
                }

                if (nr_blocks) {
                        /* Valid blocks found: read them */
                        nr_blocks = min_t(unsigned int, nr_blocks,
                                          end_block - chunk_block);
                        ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
                        if (ret)
                                return ret;
                        chunk_block += nr_blocks;
                } else {
                        /* No valid block: zero out the current BIO block */
                        dmz_handle_read_zero(dmz, bio, chunk_block, 1);
                        chunk_block++;
                }
        }

        return 0;
}

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
                                   struct dm_zone *zone, struct bio *bio,
                                   sector_t chunk_block,
                                   unsigned int nr_blocks)
{
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *bzone = zone->bzone;
        int ret;

        if (dmz_is_readonly(zone))
                return -EROFS;

        /* Submit write */
        ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
        if (ret)
                return ret;

        /*
         * Validate the blocks in the data zone and invalidate
         * in the buffer zone, if there is one.
         */
        ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
        if (ret == 0 && bzone)
                ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

        return ret;
}

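/*
 * Note: the block validity bitmaps updated here are what dmz_handle_read()
 * consults. A direct write validates the blocks in the data zone and
 * invalidates any stale copies in the buffer zone, so reads always resolve
 * to the most recently written copy.
 */
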
/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
                                     struct dm_zone *zone, struct bio *bio,
                                     sector_t chunk_block,
                                     unsigned int nr_blocks)
{
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *bzone;
        int ret;

        /* Get the buffer zone. One will be allocated if needed */
        bzone = dmz_get_chunk_buffer(zmd, zone);
        if (IS_ERR(bzone))
                return PTR_ERR(bzone);

        if (dmz_is_readonly(bzone))
                return -EROFS;

        /* Submit write */
        ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
        if (ret)
                return ret;

        /*
         * Validate the blocks in the buffer zone
         * and invalidate in the data zone.
         */
        ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
        if (ret == 0 && chunk_block < zone->wp_block)
                ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

        return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
                            struct bio *bio)
{
        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
        unsigned int nr_blocks = dmz_bio_blocks(bio);

        if (!zone)
                return -ENOSPC;

        dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
                      dmz_id(dmz->metadata, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
                /*
                 * The zone is a random zone, or it is a sequential zone and
                 * the BIO is aligned to the zone write pointer: write
                 * directly to the zone.
                 */
                return dmz_handle_direct_write(dmz, zone, bio, chunk_block,
                                               nr_blocks);
        }

        /*
         * This is an unaligned write in a sequential zone:
         * use a buffered write.
         */
        return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
                              struct bio *bio)
{
        struct dmz_metadata *zmd = dmz->metadata;
        sector_t block = dmz_bio_block(bio);
        unsigned int nr_blocks = dmz_bio_blocks(bio);
        sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
        int ret = 0;

        /* For unmapped chunks, there is nothing to do */
        if (!zone)
                return 0;

        if (dmz_is_readonly(zone))
                return -EROFS;

        dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      dmz_id(zmd, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        /*
         * Invalidate blocks in the data zone and its
         * buffer zone if one is mapped.
         */
        if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
                ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
        if (ret == 0 && zone->bzone)
                ret = dmz_invalidate_blocks(zmd, zone->bzone,
                                            chunk_block, nr_blocks);
        return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
                           struct bio *bio)
{
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *zone;
        int ret;

        /*
         * Writes may trigger a zone allocation, so make sure the
         * allocation can succeed.
         */
        if (bio_op(bio) == REQ_OP_WRITE)
                dmz_schedule_reclaim(dmz->reclaim);

        dmz_lock_metadata(zmd);

        if (dmz->dev->flags & DMZ_BDEV_DYING) {
                ret = -EIO;
                goto out;
        }

        /*
         * Get the data zone mapping the chunk. There may be no
         * mapping for read and discard. If a mapping is obtained,
         * the zone returned will be set to active state.
         */
        zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
                                     bio_op(bio));
        if (IS_ERR(zone)) {
                ret = PTR_ERR(zone);
                goto out;
        }

        /* Process the BIO */
        if (zone) {
                dmz_activate_zone(zone);
                bioctx->zone = zone;
        }

        switch (bio_op(bio)) {
        case REQ_OP_READ:
                ret = dmz_handle_read(dmz, zone, bio);
                break;
        case REQ_OP_WRITE:
                ret = dmz_handle_write(dmz, zone, bio);
                break;
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                ret = dmz_handle_discard(dmz, zone, bio);
                break;
        default:
                dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
                            bio_op(bio));
                ret = -EIO;
        }

        /*
         * Release the chunk mapping. This will check that the mapping
         * is still valid, that is, that the zone used still has valid blocks.
         */
        if (zone)
                dmz_put_chunk_mapping(zmd, zone);
out:
        dmz_bio_endio(bio, errno_to_blk_status(ret));

        dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
        refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
        if (refcount_dec_and_test(&cw->refcount)) {
                WARN_ON(!bio_list_empty(&cw->bio_list));
                radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
                kfree(cw);
        }
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
        struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
        struct dmz_target *dmz = cw->target;
        struct bio *bio;

        mutex_lock(&dmz->chunk_lock);

        /* Process the chunk BIOs */
        while ((bio = bio_list_pop(&cw->bio_list))) {
                mutex_unlock(&dmz->chunk_lock);
                dmz_handle_bio(dmz, cw, bio);
                mutex_lock(&dmz->chunk_lock);
                dmz_put_chunk_work(cw);
        }

        /* Queueing the work incremented the work refcount */
        dmz_put_chunk_work(cw);

        mutex_unlock(&dmz->chunk_lock);
}

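/*
 * Note: chunk_lock is dropped around dmz_handle_bio() because BIO handling
 * takes the metadata lock and may sleep. The chunk work cannot disappear in
 * the meantime: each queued BIO holds a reference on it, taken in
 * dmz_queue_chunk_work() and only dropped here after the BIO is handled.
 */
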
/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
        struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
        struct bio *bio;
        int ret;

        /* Flush dirty metadata blocks */
        ret = dmz_flush_metadata(dmz->metadata);
        if (ret)
                dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);

        /* Process queued flush requests */
        while (1) {
                spin_lock(&dmz->flush_lock);
                bio = bio_list_pop(&dmz->flush_list);
                spin_unlock(&dmz->flush_lock);

                if (!bio)
                        break;

                dmz_bio_endio(bio, errno_to_blk_status(ret));
        }

        queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
        unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
        struct dm_chunk_work *cw;
        int ret = 0;

        mutex_lock(&dmz->chunk_lock);

        /* Get the BIO chunk work. If one is not active yet, create one */
        cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
        if (!cw) {
                /* Create a new chunk work */
                cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
                if (unlikely(!cw)) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_WORK(&cw->work, dmz_chunk_work);
                refcount_set(&cw->refcount, 0);
                cw->target = dmz;
                cw->chunk = chunk;
                bio_list_init(&cw->bio_list);

                ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
                if (unlikely(ret)) {
                        kfree(cw);
                        goto out;
                }
        }

        bio_list_add(&cw->bio_list, bio);
        dmz_get_chunk_work(cw);

        dmz_reclaim_bio_acc(dmz->reclaim);
        if (queue_work(dmz->chunk_wq, &cw->work))
                dmz_get_chunk_work(cw);
out:
        mutex_unlock(&dmz->chunk_lock);
        return ret;
}

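/*
 * Reference scheme for chunk works, as implemented above: every BIO added
 * to cw->bio_list takes one reference, and a successful queue_work() takes
 * one more that dmz_chunk_work() drops when it finishes. Once the count
 * reaches zero the work is removed from the radix tree and freed.
 */
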
/*
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
        if (dmz_dev->flags & DMZ_BDEV_DYING)
                return true;

        if (dmz_dev->flags & DMZ_CHECK_BDEV)
                return !dmz_check_bdev(dmz_dev);

        if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
                dmz_dev_warn(dmz_dev, "Backing device queue dying");
                dmz_dev->flags |= DMZ_BDEV_DYING;
        }

        return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
        struct gendisk *disk;

        dmz_dev->flags &= ~DMZ_CHECK_BDEV;

        if (dmz_bdev_is_dying(dmz_dev))
                return false;

        disk = dmz_dev->bdev->bd_disk;
        if (disk->fops->check_events &&
            disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
                dmz_dev_warn(dmz_dev, "Backing device offline");
                dmz_dev->flags |= DMZ_BDEV_DYING;
        }

        return !(dmz_dev->flags & DMZ_BDEV_DYING);
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
        struct dmz_target *dmz = ti->private;
        struct dmz_dev *dev = dmz->dev;
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        sector_t sector = bio->bi_iter.bi_sector;
        unsigned int nr_sectors = bio_sectors(bio);
        sector_t chunk_sector;
        int ret;

        if (dmz_bdev_is_dying(dmz->dev))
                return DM_MAPIO_KILL;

        dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
                      bio_op(bio), (unsigned long long)sector, nr_sectors,
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
                      (unsigned int)dmz_bio_blocks(bio));

        bio_set_dev(bio, dev->bdev);

        if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
                return DM_MAPIO_REMAPPED;

        /* The BIO should be block aligned */
        if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
                return DM_MAPIO_KILL;

        /* Initialize the BIO context */
        bioctx->target = dmz;
        bioctx->zone = NULL;
        bioctx->bio = bio;
        refcount_set(&bioctx->ref, 1);

        /* Queue zero-length flush BIOs on the flush list */
        if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
                spin_lock(&dmz->flush_lock);
                bio_list_add(&dmz->flush_list, bio);
                spin_unlock(&dmz->flush_lock);
                mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
                return DM_MAPIO_SUBMITTED;
        }

        /* Split zone BIOs to fit entirely into a zone */
        chunk_sector = sector & (dev->zone_nr_sectors - 1);
        if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
                dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

        /* Now ready to handle this BIO */
        ret = dmz_queue_chunk_work(dmz, bio);
        if (ret) {
                dmz_dev_debug(dmz->dev,
                              "BIO op %d, can't process chunk %llu, err %i\n",
                              bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
                              ret);
                return DM_MAPIO_REQUEUE;
        }

        return DM_MAPIO_SUBMITTED;
}

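/*
 * Note: dmz_map() itself never performs I/O. Zero-length flushes are parked
 * on the flush list for dmz_flush_work(), and all other BIOs are handed to
 * the chunk work after dm_accept_partial_bio() has trimmed them so they
 * never cross a zone boundary.
 */
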
/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
        struct dmz_target *dmz = ti->private;
        struct request_queue *q;
        struct dmz_dev *dev;
        sector_t aligned_capacity;
        int ret;

        /* Get the target device */
        ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
        if (ret) {
                ti->error = "Get target device failed";
                dmz->ddev = NULL;
                return ret;
        }

        dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
        if (!dev) {
                ret = -ENOMEM;
                goto err;
        }

        dev->bdev = dmz->ddev->bdev;
        (void)bdevname(dev->bdev, dev->name);

        if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
                ti->error = "Not a zoned block device";
                ret = -EINVAL;
                goto err;
        }

        q = bdev_get_queue(dev->bdev);
        dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
        aligned_capacity = dev->capacity &
                           ~((sector_t)blk_queue_zone_sectors(q) - 1);
        if (ti->begin ||
            ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
                ti->error = "Partial mapping not supported";
                ret = -EINVAL;
                goto err;
        }

        dev->zone_nr_sectors = blk_queue_zone_sectors(q);
        dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

        dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
        dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

        dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);

        dmz->dev = dev;

        return 0;
err:
        dm_put_device(ti, dmz->ddev);
        kfree(dev);

        return ret;
}

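/*
 * Note: the aligned_capacity check above accepts a table length equal to
 * either the full device capacity or the capacity rounded down to a zone
 * boundary, presumably to accommodate devices whose capacity is not a
 * multiple of the zone size (a smaller runt last zone).
 */
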
/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        dm_put_device(ti, dmz->ddev);
        kfree(dmz->dev);
        dmz->dev = NULL;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dmz_target *dmz;
        struct dmz_dev *dev;
        int ret;

        /* Check arguments */
        if (argc != 1) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        /* Allocate and initialize the target descriptor */
        dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
        if (!dmz) {
                ti->error = "Unable to allocate the zoned target descriptor";
                return -ENOMEM;
        }
        ti->private = dmz;

        /* Get the target zoned block device */
        ret = dmz_get_zoned_device(ti, argv[0]);
        if (ret) {
                dmz->ddev = NULL;
                goto err;
        }

        /* Initialize metadata */
        dev = dmz->dev;
        ret = dmz_ctr_metadata(dev, &dmz->metadata);
        if (ret) {
                ti->error = "Metadata initialization failed";
                goto err_dev;
        }

        /* Set target (no write same support) */
        ti->max_io_len = dev->zone_nr_sectors;  /* max_io_len is in 512B sectors */
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_zeroes_bios = 1;
        ti->per_io_data_size = sizeof(struct dmz_bioctx);
        ti->flush_supported = true;
        ti->discards_supported = true;

        /* The exposed capacity is the number of chunks that can be mapped */
        ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

        /* Zone BIO */
        ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
        if (ret) {
                ti->error = "Create BIO set failed";
                goto err_meta;
        }

        /* Chunk BIO work */
        mutex_init(&dmz->chunk_lock);
        INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
                ti->error = "Create chunk workqueue failed";
                ret = -ENOMEM;
                goto err_bio;
        }

        /* Flush work */
        spin_lock_init(&dmz->flush_lock);
        bio_list_init(&dmz->flush_list);
        INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
        dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
                                                dev->name);
        if (!dmz->flush_wq) {
                ti->error = "Create flush workqueue failed";
                ret = -ENOMEM;
                goto err_cwq;
        }
        mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

        /* Initialize reclaim */
        ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
        if (ret) {
                ti->error = "Zone reclaim initialization failed";
                goto err_fwq;
        }

        dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
                     (unsigned long long)ti->len,
                     (unsigned long long)dmz_sect2blk(ti->len));

        return 0;
err_fwq:
        destroy_workqueue(dmz->flush_wq);
err_cwq:
        destroy_workqueue(dmz->chunk_wq);
err_bio:
        mutex_destroy(&dmz->chunk_lock);
        bioset_exit(&dmz->bio_set);
err_meta:
        dmz_dtr_metadata(dmz->metadata);
err_dev:
        dmz_put_zoned_device(ti);
err:
        kfree(dmz);

        return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        flush_workqueue(dmz->chunk_wq);
        destroy_workqueue(dmz->chunk_wq);

        dmz_dtr_reclaim(dmz->reclaim);

        cancel_delayed_work_sync(&dmz->flush_work);
        destroy_workqueue(dmz->flush_wq);

        (void) dmz_flush_metadata(dmz->metadata);

        dmz_dtr_metadata(dmz->metadata);

        bioset_exit(&dmz->bio_set);

        dmz_put_zoned_device(ti);

        mutex_destroy(&dmz->chunk_lock);

        kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct dmz_target *dmz = ti->private;
        unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

        limits->logical_block_size = DMZ_BLOCK_SIZE;
        limits->physical_block_size = DMZ_BLOCK_SIZE;

        blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
        blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

        limits->discard_alignment = DMZ_BLOCK_SIZE;
        limits->discard_granularity = DMZ_BLOCK_SIZE;
        limits->max_discard_sectors = chunk_sectors;
        limits->max_hw_discard_sectors = chunk_sectors;
        limits->max_write_zeroes_sectors = chunk_sectors;

        /* FS hint to try to align to the device zone size */
        limits->chunk_sectors = chunk_sectors;
        limits->max_sectors = chunk_sectors;

        /* We are exposing a drive-managed zoned block device */
        limits->zoned = BLK_ZONED_NONE;
}

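/*
 * Note: capping max_sectors to the zone size, together with the
 * dm_accept_partial_bio() call in dmz_map(), ensures that no BIO handed to
 * a chunk work ever spans two zones. Exposing BLK_ZONED_NONE makes the
 * target look like a regular (drive-managed) block device to upper layers.
 */
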
/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
        struct dmz_target *dmz = ti->private;

        if (!dmz_check_bdev(dmz->dev))
                return -EIO;

        *bdev = dmz->dev->bdev;

        return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        flush_workqueue(dmz->chunk_wq);
        dmz_suspend_reclaim(dmz->reclaim);
        cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
        dmz_resume_reclaim(dmz->reclaim);
}

static int dmz_iterate_devices(struct dm_target *ti,
                               iterate_devices_callout_fn fn, void *data)
{
        struct dmz_target *dmz = ti->private;
        struct dmz_dev *dev = dmz->dev;
        sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

        return fn(ti, dmz->ddev, 0, capacity, data);
}

static struct target_type dmz_type = {
        .name            = "zoned",
        .version         = {1, 0, 0},
        .features        = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
        .module          = THIS_MODULE,
        .ctr             = dmz_ctr,
        .dtr             = dmz_dtr,
        .map             = dmz_map,
        .io_hints        = dmz_io_hints,
        .prepare_ioctl   = dmz_prepare_ioctl,
        .postsuspend     = dmz_suspend,
        .resume          = dmz_resume,
        .iterate_devices = dmz_iterate_devices,
};

static int __init dmz_init(void)
{
        return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
        dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");