/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX	"thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect,
 * and will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = b + 1ULL;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block_begin = b;
	key->block_end = b + 1ULL;
}

/*----------------------------------------------------------------*/

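/*
 * Worker throttling.  Submitters take the semaphore below for reading and
 * so proceed freely, but once a round of worker activity has been running
 * for longer than THROTTLE_THRESHOLD the worker takes it for writing,
 * stalling new submitters until that round of work completes.
 */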
#define THROTTLE_THRESHOLD (1 * HZ)

struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */
	bool suspended:1;

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	struct mapped_device *thin_md;

	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

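/*
 * Wrappers around the bio prison.  bio_detain() hides the cell
 * preallocation, and the release/error helpers below free the cell once
 * the prison operation has consumed it.
 */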
static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

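/*
 * Per-bio context, carved out of each bio's per-bio data area with
 * dm_per_bio_data().  It records which thin device the bio belongs to,
 * any deferred-set entries taken on its behalf, the mapping being
 * overwritten (if any), and the rb_node used when sorting deferred bios.
 */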
struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
{
	bio_list_merge(bios, master);
	bio_list_init(master);
}

static void error_bio_list(struct bio_list *bios, int error)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios)))
		bio_endio(bio, error);
}

static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, master);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, error);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	__merge_bio_list(&bios, &tc->deferred_bio_list);
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	error_bio_list(&bios, DM_ENDIO_REQUEUE);
	requeue_deferred_cells(tc);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

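/*
 * Record this bio in the pool's all_io deferred set so that prepared
 * discards (which add work to the same set) can quiesce against it.
 * Discard bios themselves are deliberately excluded.
 */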
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool definitely_not_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions.  When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying.  Instead this bio is hooked.  The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

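/*
 * Called from the worker once all preparation actions (quiescing,
 * copying, zeroing) have completed: insert the new mapping into the
 * btree and release any bios that were held in the cell meanwhile.
 */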
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_block,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_block);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path.  Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_block, m);

	else
		ll_zero(tc, m,
			data_block * pool->sectors_per_block,
			(data_block + 1) * pool->sectors_per_block);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static void check_for_space(struct pool *pool)
{
	int r;
	dm_block_t nr_free;

	if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
		return;

	r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
	if (r)
		return;

	if (nr_free)
		set_pool_mode(pool, PM_WRITE);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
	else
		check_for_space(pool);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

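/*
 * Allocate a new data block for a thin device.  If the pool appears to
 * be out of space, try a commit first in case that frees some blocks;
 * if there is still nothing free, switch to PM_OUT_OF_DATA_SPACE and
 * return -ENOSPC.
 */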
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct bio *bio = cell->holder;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell2;
	struct dm_cell_key key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block.  This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block.  We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
				pool->process_prepared_discard(m);

		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary.  So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	dm_block_t block = get_bio_block(tc, bio);

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	process_discard_cell(tc, cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
			inc_all_io_entry(info->tc->pool, bio);
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(tc, bio, block);
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell)) {
		cell_defer_no_holder(tc, virt_cell);
		return;
	}

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
		cell_defer_no_holder(tc, virt_cell);
	} else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, lookup_result->block);

		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct pool *pool = tc->pool;
	struct bio *bio = cell->holder;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result, cell);
		else {
			inc_all_io_entry(pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			if (bio_end_sector(bio) <= tc->origin_size)
				remap_to_origin_and_issue(tc, bio);

			else if (bio->bi_iter.bi_sector < tc->origin_size) {
				zero_fill_bio(bio);
				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
				remap_to_origin_and_issue(tc, bio);

			} else {
				zero_fill_bio(bio);
				bio_endio(bio, 0);
			}
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	process_cell(tc, cell);
}

static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
				    struct dm_bio_prison_cell *cell)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
			handle_unserviceable_bio(tc->pool, bio);
			if (cell)
				cell_defer_no_holder(tc, cell);
		} else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			if (cell)
				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (cell)
			cell_defer_no_holder(tc, cell);
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		if (cell)
			cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	__process_bio_read_only(tc, bio, NULL);
}

static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	__process_bio_read_only(tc, cell->holder, cell);
}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
	bio_endio(bio, 0);
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_success(tc->pool, cell);
}

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_error(tc->pool, cell);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return !time_in_range(jiffies, pool->last_commit_jiffies,
			      pool->last_commit_jiffies + COMMIT_PERIOD);
}

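/*
 * Deferred bios are sorted into ascending sector order before being
 * processed.  An rb-tree threaded through each bio's per-bio data is
 * used for the sort, and the result is transferred back onto
 * deferred_bio_list so submission itself needs no extra locking.
 */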
#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{
	struct rb_node **rbp, *parent;
	struct dm_thin_endio_hook *pbd;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	rbp = &tc->sort_bio_list.rb_node;
	parent = NULL;
	while (*rbp) {
		parent = *rbp;
		pbd = thin_pbd(parent);

		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}

	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	rb_link_node(&pbd->rb_node, parent, rbp);
	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}

static void __extract_sorted_bios(struct thin_c *tc)
{
	struct rb_node *node;
	struct dm_thin_endio_hook *pbd;
	struct bio *bio;

	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
		pbd = thin_pbd(node);
		bio = thin_bio(pbd);

		bio_list_add(&tc->deferred_bio_list, bio);
		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
	}

	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	/* Sort deferred_bio_list using rb-tree */
	while ((bio = bio_list_pop(&bios)))
		__thin_bio_rb_add(tc, bio);

	/*
	 * Transfer the sorted bios in sort_bio_list back to
	 * deferred_bio_list to allow lockless submission of
	 * all bios.
	 */
	__extract_sorted_bios(tc);
}

static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	unsigned count = 0;

	if (tc->requeue_mode) {
		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);

		if ((count++ & 127) == 0) {
			throttle_work_update(&pool->throttle);
			dm_pool_issue_prefetches(pool->pmd);
		}
	}
	blk_finish_plug(&plug);
}

ac4c3f34
JT
1830static int cmp_cells(const void *lhs, const void *rhs)
1831{
1832 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
1833 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
1834
1835 BUG_ON(!lhs_cell->holder);
1836 BUG_ON(!rhs_cell->holder);
1837
1838 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
1839 return -1;
1840
1841 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
1842 return 1;
1843
1844 return 0;
1845}
1846
1847static unsigned sort_cells(struct pool *pool, struct list_head *cells)
1848{
1849 unsigned count = 0;
1850 struct dm_bio_prison_cell *cell, *tmp;
1851
1852 list_for_each_entry_safe(cell, tmp, cells, user_list) {
1853 if (count >= CELL_SORT_ARRAY_SIZE)
1854 break;
1855
1856 pool->cell_sort_array[count++] = cell;
1857 list_del(&cell->user_list);
1858 }
1859
1860 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1861
1862 return count;
1863}
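/*
 * Note on the call above: cell_sort_array holds pointers to cells, so the
 * element size passed to sort() is sizeof(cell), i.e. the size of a
 * pointer, and cmp_cells() orders the cells by their holder bio's
 * starting sector so the worker can submit them roughly sequentially.
 */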
1864
a374bb21
JT
1865static void process_thin_deferred_cells(struct thin_c *tc)
1866{
1867 struct pool *pool = tc->pool;
1868 unsigned long flags;
1869 struct list_head cells;
ac4c3f34
JT
1870 struct dm_bio_prison_cell *cell;
1871 unsigned i, j, count;
a374bb21
JT
1872
1873 INIT_LIST_HEAD(&cells);
1874
1875 spin_lock_irqsave(&tc->lock, flags);
1876 list_splice_init(&tc->deferred_cells, &cells);
1877 spin_unlock_irqrestore(&tc->lock, flags);
1878
1879 if (list_empty(&cells))
1880 return;
1881
ac4c3f34
JT
1882 do {
1883 count = sort_cells(tc->pool, &cells);
a374bb21 1884
ac4c3f34
JT
1885 for (i = 0; i < count; i++) {
1886 cell = pool->cell_sort_array[i];
1887 BUG_ON(!cell->holder);
a374bb21 1888
ac4c3f34
JT
1889 /*
1890 * If we've got no free new_mapping structs, and processing
1891 * the bios in this cell might require one, we pause until
1892 * there are some prepared mappings to process.
1893 */
1894 if (ensure_next_mapping(pool)) {
1895 for (j = i; j < count; j++)
1896 list_add(&pool->cell_sort_array[j]->user_list, &cells);
1897
1898 spin_lock_irqsave(&tc->lock, flags);
1899 list_splice(&cells, &tc->deferred_cells);
1900 spin_unlock_irqrestore(&tc->lock, flags);
1901 return;
1902 }
1903
1904 if (cell->holder->bi_rw & REQ_DISCARD)
1905 pool->process_discard_cell(tc, cell);
1906 else
1907 pool->process_cell(tc, cell);
1908 }
1909 } while (!list_empty(&cells));
a374bb21
JT
1910}
1911
b10ebd34
JT
1912static void thin_get(struct thin_c *tc);
1913static void thin_put(struct thin_c *tc);
1914
1915/*
1916 * We can't hold rcu_read_lock() around code that can block. So we
1917 * find a thin with the rcu lock held; bump a refcount; then drop
1918 * the lock.
1919 */
1920static struct thin_c *get_first_thin(struct pool *pool)
1921{
1922 struct thin_c *tc = NULL;
1923
1924 rcu_read_lock();
1925 if (!list_empty(&pool->active_thins)) {
1926 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1927 thin_get(tc);
1928 }
1929 rcu_read_unlock();
1930
1931 return tc;
1932}
1933
1934static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1935{
1936 struct thin_c *old_tc = tc;
1937
1938 rcu_read_lock();
1939 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1940 thin_get(tc);
1941 thin_put(old_tc);
1942 rcu_read_unlock();
1943 return tc;
1944 }
1945 thin_put(old_tc);
1946 rcu_read_unlock();
1947
1948 return NULL;
1949}
1950
c140e1c4
MS
1951static void process_deferred_bios(struct pool *pool)
1952{
1953 unsigned long flags;
1954 struct bio *bio;
1955 struct bio_list bios;
1956 struct thin_c *tc;
1957
b10ebd34
JT
1958 tc = get_first_thin(pool);
1959 while (tc) {
a374bb21 1960 process_thin_deferred_cells(tc);
c140e1c4 1961 process_thin_deferred_bios(tc);
b10ebd34
JT
1962 tc = get_next_thin(pool, tc);
1963 }
991d9fa0
JT
1964
1965 /*
1966 * If there are any deferred flush bios, we must commit
1967 * the metadata before issuing them.
1968 */
1969 bio_list_init(&bios);
1970 spin_lock_irqsave(&pool->lock, flags);
1971 bio_list_merge(&bios, &pool->deferred_flush_bios);
1972 bio_list_init(&pool->deferred_flush_bios);
1973 spin_unlock_irqrestore(&pool->lock, flags);
1974
4d1662a3
MS
1975 if (bio_list_empty(&bios) &&
1976 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
991d9fa0
JT
1977 return;
1978
020cc3b5 1979 if (commit(pool)) {
991d9fa0
JT
1980 while ((bio = bio_list_pop(&bios)))
1981 bio_io_error(bio);
1982 return;
1983 }
905e51b3 1984 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
1985
1986 while ((bio = bio_list_pop(&bios)))
1987 generic_make_request(bio);
1988}
1989
1990static void do_worker(struct work_struct *ws)
1991{
1992 struct pool *pool = container_of(ws, struct pool, worker);
1993
7d327fe0 1994 throttle_work_start(&pool->throttle);
8a01a6af 1995 dm_pool_issue_prefetches(pool->pmd);
7d327fe0 1996 throttle_work_update(&pool->throttle);
e49e5829 1997 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
7d327fe0 1998 throttle_work_update(&pool->throttle);
e49e5829 1999 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
7d327fe0 2000 throttle_work_update(&pool->throttle);
991d9fa0 2001 process_deferred_bios(pool);
7d327fe0 2002 throttle_work_complete(&pool->throttle);
991d9fa0
JT
2003}
2004
905e51b3
JT
2005/*
2006 * We want to commit periodically so that not too much
2007 * unwritten data builds up.
2008 */
2009static void do_waker(struct work_struct *ws)
2010{
2011 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2012 wake_worker(pool);
2013 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2014}
2015
85ad643b
JT
2016/*
2017 * We're holding onto IO to allow userland time to react. After the
2018 * timeout either the pool will have been resized (and thus back in
2019 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
2020 */
2021static void do_no_space_timeout(struct work_struct *ws)
2022{
2023 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2024 no_space_timeout);
2025
2026 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
2027 set_pool_mode(pool, PM_READ_ONLY);
2028}
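/*
 * Tuning sketch (assumption: no_space_timeout_secs is exposed as the
 * 'no_space_timeout' module parameter of dm_thin_pool, as in mainline).
 * The default of NO_SPACE_TIMEOUT_SECS (60s) can then be changed at
 * runtime, e.g.:
 *
 *	echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * A value of 0 disables the timeout: see the
 * "if (!pool->pf.error_if_no_space && no_space_timeout)" check in
 * set_pool_mode() below.
 */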
2029
991d9fa0
JT
2030/*----------------------------------------------------------------*/
2031
e7a3e871 2032struct pool_work {
738211f7 2033 struct work_struct worker;
e7a3e871
JT
2034 struct completion complete;
2035};
2036
2037static struct pool_work *to_pool_work(struct work_struct *ws)
2038{
2039 return container_of(ws, struct pool_work, worker);
2040}
2041
2042static void pool_work_complete(struct pool_work *pw)
2043{
2044 complete(&pw->complete);
2045}
738211f7 2046
e7a3e871
JT
2047static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2048 void (*fn)(struct work_struct *))
2049{
2050 INIT_WORK_ONSTACK(&pw->worker, fn);
2051 init_completion(&pw->complete);
2052 queue_work(pool->wq, &pw->worker);
2053 wait_for_completion(&pw->complete);
2054}
2055
2056/*----------------------------------------------------------------*/
2057
2058struct noflush_work {
2059 struct pool_work pw;
2060 struct thin_c *tc;
738211f7
JT
2061};
2062
e7a3e871 2063static struct noflush_work *to_noflush(struct work_struct *ws)
738211f7 2064{
e7a3e871 2065 return container_of(to_pool_work(ws), struct noflush_work, pw);
738211f7
JT
2066}
2067
2068static void do_noflush_start(struct work_struct *ws)
2069{
e7a3e871 2070 struct noflush_work *w = to_noflush(ws);
738211f7
JT
2071 w->tc->requeue_mode = true;
2072 requeue_io(w->tc);
e7a3e871 2073 pool_work_complete(&w->pw);
738211f7
JT
2074}
2075
2076static void do_noflush_stop(struct work_struct *ws)
2077{
e7a3e871 2078 struct noflush_work *w = to_noflush(ws);
738211f7 2079 w->tc->requeue_mode = false;
e7a3e871 2080 pool_work_complete(&w->pw);
738211f7
JT
2081}
2082
2083static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2084{
2085 struct noflush_work w;
2086
738211f7 2087 w.tc = tc;
e7a3e871 2088 pool_work_wait(&w.pw, tc->pool, fn);
738211f7
JT
2089}
2090
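/*
 * Usage sketch for the helpers above; in mainline they are driven from
 * thin_presuspend() and thin_postsuspend() later in this file:
 *
 *	noflush_work(tc, do_noflush_start);	enters requeue mode
 *	noflush_work(tc, do_noflush_stop);	leaves requeue mode
 *
 * pool_work_wait() queues the work item on the pool's workqueue and
 * blocks the caller until the worker has run it, so the requeue_mode
 * change is synchronous with respect to the worker thread.
 */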
2091/*----------------------------------------------------------------*/
2092
e49e5829
JT
2093static enum pool_mode get_pool_mode(struct pool *pool)
2094{
2095 return pool->pf.mode;
2096}
2097
3e1a0699
JT
2098static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2099{
2100 dm_table_event(pool->ti->table);
2101 DMINFO("%s: switching pool to %s mode",
2102 dm_device_name(pool->pool_md), new_mode);
2103}
2104
8b64e881 2105static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
e49e5829 2106{
cdc2b415 2107 struct pool_c *pt = pool->ti->private;
07f2b6e0
MS
2108 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2109 enum pool_mode old_mode = get_pool_mode(pool);
80c57893 2110 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
07f2b6e0
MS
2111
2112 /*
2113 * Never allow the pool to transition to PM_WRITE mode if user
2114 * intervention is required to verify metadata and data consistency.
2115 */
2116 if (new_mode == PM_WRITE && needs_check) {
2117 DMERR("%s: unable to switch pool to write mode until repaired.",
2118 dm_device_name(pool->pool_md));
2119 if (old_mode != new_mode)
2120 new_mode = old_mode;
2121 else
2122 new_mode = PM_READ_ONLY;
2123 }
2124 /*
2125 * If we were in PM_FAIL mode, rollback of metadata failed. We're
2126 * not going to recover without a thin_repair. So we never let the
2127 * pool move out of the old mode.
2128 */
2129 if (old_mode == PM_FAIL)
2130 new_mode = old_mode;
e49e5829 2131
8b64e881 2132 switch (new_mode) {
e49e5829 2133 case PM_FAIL:
8b64e881 2134 if (old_mode != new_mode)
3e1a0699 2135 notify_of_pool_mode_change(pool, "failure");
5383ef3a 2136 dm_pool_metadata_read_only(pool->pmd);
e49e5829
JT
2137 pool->process_bio = process_bio_fail;
2138 pool->process_discard = process_bio_fail;
a374bb21
JT
2139 pool->process_cell = process_cell_fail;
2140 pool->process_discard_cell = process_cell_fail;
e49e5829
JT
2141 pool->process_prepared_mapping = process_prepared_mapping_fail;
2142 pool->process_prepared_discard = process_prepared_discard_fail;
3e1a0699
JT
2143
2144 error_retry_list(pool);
e49e5829
JT
2145 break;
2146
2147 case PM_READ_ONLY:
8b64e881 2148 if (old_mode != new_mode)
3e1a0699
JT
2149 notify_of_pool_mode_change(pool, "read-only");
2150 dm_pool_metadata_read_only(pool->pmd);
2151 pool->process_bio = process_bio_read_only;
2152 pool->process_discard = process_bio_success;
a374bb21
JT
2153 pool->process_cell = process_cell_read_only;
2154 pool->process_discard_cell = process_cell_success;
3e1a0699
JT
2155 pool->process_prepared_mapping = process_prepared_mapping_fail;
2156 pool->process_prepared_discard = process_prepared_discard_passdown;
2157
2158 error_retry_list(pool);
2159 break;
2160
2161 case PM_OUT_OF_DATA_SPACE:
2162 /*
2163 * Ideally we'd never hit this state; the low water mark
2164 * would trigger userland to extend the pool before we
2165 * completely run out of data space. However, many small
2166 * IOs to unprovisioned space can consume data space at an
2167 * alarming rate. Adjust your low water mark if you're
2168 * frequently seeing this mode.
2169 */
2170 if (old_mode != new_mode)
2171 notify_of_pool_mode_change(pool, "out-of-data-space");
2172 pool->process_bio = process_bio_read_only;
a374bb21
JT
2173 pool->process_discard = process_discard_bio;
2174 pool->process_cell = process_cell_read_only;
2175 pool->process_discard_cell = process_discard_cell;
3e1a0699 2176 pool->process_prepared_mapping = process_prepared_mapping;
45ec9bd0 2177 pool->process_prepared_discard = process_prepared_discard;
85ad643b 2178
80c57893
MS
2179 if (!pool->pf.error_if_no_space && no_space_timeout)
2180 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
e49e5829
JT
2181 break;
2182
2183 case PM_WRITE:
8b64e881 2184 if (old_mode != new_mode)
3e1a0699 2185 notify_of_pool_mode_change(pool, "write");
9b7aaa64 2186 dm_pool_metadata_read_write(pool->pmd);
e49e5829 2187 pool->process_bio = process_bio;
a374bb21
JT
2188 pool->process_discard = process_discard_bio;
2189 pool->process_cell = process_cell;
2190 pool->process_discard_cell = process_discard_cell;
e49e5829
JT
2191 pool->process_prepared_mapping = process_prepared_mapping;
2192 pool->process_prepared_discard = process_prepared_discard;
2193 break;
2194 }
8b64e881
MS
2195
2196 pool->pf.mode = new_mode;
cdc2b415
MS
2197 /*
2198 * The pool mode may have changed, sync it so bind_control_target()
2199 * doesn't cause an unexpected mode transition on resume.
2200 */
2201 pt->adjusted_pf.mode = new_mode;
e49e5829
JT
2202}
2203
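/*
 * A rough summary of the modes handled above:
 *
 *   PM_WRITE              normal operation; bios, cells and discards are
 *                         processed and new mappings are committed.
 *   PM_OUT_OF_DATA_SPACE  data space exhausted; reads are still serviced,
 *                         writes needing new blocks are queued (or errored
 *                         if error_if_no_space), and an optional timeout
 *                         degrades the pool to read-only.
 *   PM_READ_ONLY          metadata is read-only; prepared mappings fail,
 *                         discards are passed down or completed without
 *                         updating metadata.
 *   PM_FAIL               metadata is unusable; all IO is errored until
 *                         the pool has been repaired and reloaded.
 */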
07f2b6e0 2204static void abort_transaction(struct pool *pool)
b5330655 2205{
07f2b6e0
MS
2206 const char *dev_name = dm_device_name(pool->pool_md);
2207
2208 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2209 if (dm_pool_abort_metadata(pool->pmd)) {
2210 DMERR("%s: failed to abort metadata transaction", dev_name);
2211 set_pool_mode(pool, PM_FAIL);
2212 }
2213
2214 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2215 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2216 set_pool_mode(pool, PM_FAIL);
2217 }
2218}
399caddf 2219
07f2b6e0
MS
2220static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2221{
b5330655
JT
2222 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2223 dm_device_name(pool->pool_md), op, r);
2224
07f2b6e0 2225 abort_transaction(pool);
b5330655
JT
2226 set_pool_mode(pool, PM_READ_ONLY);
2227}
2228
e49e5829
JT
2229/*----------------------------------------------------------------*/
2230
991d9fa0
JT
2231/*
2232 * Mapping functions.
2233 */
2234
2235/*
2236 * Called only while mapping a thin bio to hand it over to the workqueue.
2237 */
2238static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2239{
2240 unsigned long flags;
2241 struct pool *pool = tc->pool;
2242
c140e1c4
MS
2243 spin_lock_irqsave(&tc->lock, flags);
2244 bio_list_add(&tc->deferred_bio_list, bio);
2245 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
2246
2247 wake_worker(pool);
2248}
2249
7d327fe0
JT
2250static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2251{
2252 struct pool *pool = tc->pool;
2253
2254 throttle_lock(&pool->throttle);
2255 thin_defer_bio(tc, bio);
2256 throttle_unlock(&pool->throttle);
2257}
2258
a374bb21
JT
2259static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2260{
2261 unsigned long flags;
2262 struct pool *pool = tc->pool;
2263
2264 throttle_lock(&pool->throttle);
2265 spin_lock_irqsave(&tc->lock, flags);
2266 list_add_tail(&cell->user_list, &tc->deferred_cells);
2267 spin_unlock_irqrestore(&tc->lock, flags);
2268 throttle_unlock(&pool->throttle);
2269
2270 wake_worker(pool);
2271}
2272
59c3d2c6 2273static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
eb2aa48d 2274{
59c3d2c6 2275 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d
JT
2276
2277 h->tc = tc;
2278 h->shared_read_entry = NULL;
e8088073 2279 h->all_io_entry = NULL;
eb2aa48d 2280 h->overwrite_mapping = NULL;
eb2aa48d
JT
2281}
2282
991d9fa0
JT
2283/*
2284 * Non-blocking function called from the thin target's map function.
2285 */
7de3ee57 2286static int thin_bio_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2287{
2288 int r;
2289 struct thin_c *tc = ti->private;
2290 dm_block_t block = get_bio_block(tc, bio);
2291 struct dm_thin_device *td = tc->td;
2292 struct dm_thin_lookup_result result;
a374bb21 2293 struct dm_bio_prison_cell *virt_cell, *data_cell;
e8088073 2294 struct dm_cell_key key;
991d9fa0 2295
59c3d2c6 2296 thin_hook_bio(tc, bio);
e49e5829 2297
738211f7
JT
2298 if (tc->requeue_mode) {
2299 bio_endio(bio, DM_ENDIO_REQUEUE);
2300 return DM_MAPIO_SUBMITTED;
2301 }
2302
e49e5829
JT
2303 if (get_pool_mode(tc->pool) == PM_FAIL) {
2304 bio_io_error(bio);
2305 return DM_MAPIO_SUBMITTED;
2306 }
2307
104655fd 2308 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
7d327fe0 2309 thin_defer_bio_with_throttle(tc, bio);
991d9fa0
JT
2310 return DM_MAPIO_SUBMITTED;
2311 }
2312
c822ed96
JT
2313 /*
2314 * We must hold the virtual cell before doing the lookup, otherwise
2315 * there's a race with discard.
2316 */
2317 build_virtual_key(tc->td, block, &key);
a374bb21 2318 if (bio_detain(tc->pool, &key, bio, &virt_cell))
c822ed96
JT
2319 return DM_MAPIO_SUBMITTED;
2320
991d9fa0
JT
2321 r = dm_thin_find_block(td, block, 0, &result);
2322
2323 /*
2324 * Note that we defer readahead too.
2325 */
2326 switch (r) {
2327 case 0:
2328 if (unlikely(result.shared)) {
2329 /*
2330 * We have a race condition here between the
2331 * result.shared value returned by the lookup and
2332 * snapshot creation, which may cause new
2333 * sharing.
2334 *
2335 * To avoid this always quiesce the origin before
2336 * taking the snap. You want to do this anyway to
2337 * ensure a consistent application view
2338 * (i.e. lockfs).
2339 *
2340 * More distant ancestors are irrelevant. The
2341 * shared flag will be set in their case.
2342 */
a374bb21 2343 thin_defer_cell(tc, virt_cell);
e8088073 2344 return DM_MAPIO_SUBMITTED;
991d9fa0 2345 }
e8088073 2346
e8088073 2347 build_data_key(tc->td, result.block, &key);
a374bb21
JT
2348 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2349 cell_defer_no_holder(tc, virt_cell);
e8088073
JT
2350 return DM_MAPIO_SUBMITTED;
2351 }
2352
2353 inc_all_io_entry(tc->pool, bio);
a374bb21
JT
2354 cell_defer_no_holder(tc, data_cell);
2355 cell_defer_no_holder(tc, virt_cell);
e8088073
JT
2356
2357 remap(tc, bio, result.block);
2358 return DM_MAPIO_REMAPPED;
991d9fa0
JT
2359
2360 case -ENODATA:
e49e5829 2361 case -EWOULDBLOCK:
a374bb21 2362 thin_defer_cell(tc, virt_cell);
2aab3850 2363 return DM_MAPIO_SUBMITTED;
e49e5829
JT
2364
2365 default:
2366 /*
2367 * Must always call bio_io_error on failure.
2368 * dm_thin_find_block can fail with -EINVAL if the
2369 * pool is switched to fail-io mode.
2370 */
2371 bio_io_error(bio);
a374bb21 2372 cell_defer_no_holder(tc, virt_cell);
2aab3850 2373 return DM_MAPIO_SUBMITTED;
991d9fa0 2374 }
991d9fa0
JT
2375}
2376
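/*
 * To summarise the fast path above: a read or write to a block that is
 * already provisioned and not shared is remapped to the data device here
 * without waking the worker; everything else (discards, flushes, shared
 * or unprovisioned blocks, lookups that would block) is deferred to the
 * workqueue and DM_MAPIO_SUBMITTED is returned.
 */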
2377static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2378{
991d9fa0 2379 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
760fe67e 2380 struct request_queue *q;
991d9fa0 2381
760fe67e
MS
2382 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2383 return 1;
991d9fa0 2384
760fe67e
MS
2385 q = bdev_get_queue(pt->data_dev->bdev);
2386 return bdi_congested(&q->backing_dev_info, bdi_bits);
991d9fa0
JT
2387}
2388
c140e1c4 2389static void requeue_bios(struct pool *pool)
991d9fa0 2390{
c140e1c4
MS
2391 unsigned long flags;
2392 struct thin_c *tc;
2393
2394 rcu_read_lock();
2395 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2396 spin_lock_irqsave(&tc->lock, flags);
2397 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2398 bio_list_init(&tc->retry_on_resume_list);
2399 spin_unlock_irqrestore(&tc->lock, flags);
2400 }
2401 rcu_read_unlock();
991d9fa0
JT
2402}
2403
2404/*----------------------------------------------------------------
2405 * Binding of control targets to a pool object
2406 *--------------------------------------------------------------*/
9bc142dd
MS
2407static bool data_dev_supports_discard(struct pool_c *pt)
2408{
2409 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2410
2411 return q && blk_queue_discard(q);
2412}
2413
58051b94
JT
2414static bool is_factor(sector_t block_size, uint32_t n)
2415{
2416 return !sector_div(block_size, n);
2417}
2418
9bc142dd
MS
2419/*
2420 * If discard_passdown was enabled verify that the data device
0424caa1 2421 * supports discards. Disable discard_passdown if not.
9bc142dd 2422 */
0424caa1 2423static void disable_passdown_if_not_supported(struct pool_c *pt)
9bc142dd 2424{
0424caa1
MS
2425 struct pool *pool = pt->pool;
2426 struct block_device *data_bdev = pt->data_dev->bdev;
2427 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2428 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
2429 const char *reason = NULL;
9bc142dd
MS
2430 char buf[BDEVNAME_SIZE];
2431
0424caa1 2432 if (!pt->adjusted_pf.discard_passdown)
9bc142dd
MS
2433 return;
2434
0424caa1
MS
2435 if (!data_dev_supports_discard(pt))
2436 reason = "discard unsupported";
2437
2438 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2439 reason = "max discard sectors smaller than a block";
9bc142dd 2440
0424caa1
MS
2441 else if (data_limits->discard_granularity > block_size)
2442 reason = "discard granularity larger than a block";
2443
58051b94 2444 else if (!is_factor(block_size, data_limits->discard_granularity))
0424caa1
MS
2445 reason = "discard granularity not a factor of block size";
2446
2447 if (reason) {
2448 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2449 pt->adjusted_pf.discard_passdown = false;
2450 }
9bc142dd
MS
2451}
2452
991d9fa0
JT
2453static int bind_control_target(struct pool *pool, struct dm_target *ti)
2454{
2455 struct pool_c *pt = ti->private;
2456
e49e5829 2457 /*
9b7aaa64 2458 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
e49e5829 2459 */
07f2b6e0 2460 enum pool_mode old_mode = get_pool_mode(pool);
0424caa1 2461 enum pool_mode new_mode = pt->adjusted_pf.mode;
e49e5829 2462
8b64e881
MS
2463 /*
2464 * Don't change the pool's mode until set_pool_mode() below.
2465 * Otherwise the pool's process_* function pointers may
2466 * not match the desired pool mode.
2467 */
2468 pt->adjusted_pf.mode = old_mode;
2469
2470 pool->ti = ti;
2471 pool->pf = pt->adjusted_pf;
2472 pool->low_water_blocks = pt->low_water_blocks;
2473
9bc142dd 2474 set_pool_mode(pool, new_mode);
f402693d 2475
991d9fa0
JT
2476 return 0;
2477}
2478
2479static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2480{
2481 if (pool->ti == ti)
2482 pool->ti = NULL;
2483}
2484
2485/*----------------------------------------------------------------
2486 * Pool creation
2487 *--------------------------------------------------------------*/
67e2e2b2
JT
2488/* Initialize pool features. */
2489static void pool_features_init(struct pool_features *pf)
2490{
e49e5829 2491 pf->mode = PM_WRITE;
9bc142dd
MS
2492 pf->zero_new_blocks = true;
2493 pf->discard_enabled = true;
2494 pf->discard_passdown = true;
787a996c 2495 pf->error_if_no_space = false;
67e2e2b2
JT
2496}
2497
991d9fa0
JT
2498static void __pool_destroy(struct pool *pool)
2499{
2500 __pool_table_remove(pool);
2501
2502 if (dm_pool_metadata_close(pool->pmd) < 0)
2503 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2504
44feb387 2505 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2506 dm_kcopyd_client_destroy(pool->copier);
2507
2508 if (pool->wq)
2509 destroy_workqueue(pool->wq);
2510
2511 if (pool->next_mapping)
2512 mempool_free(pool->next_mapping, pool->mapping_pool);
2513 mempool_destroy(pool->mapping_pool);
44feb387
MS
2514 dm_deferred_set_destroy(pool->shared_read_ds);
2515 dm_deferred_set_destroy(pool->all_io_ds);
991d9fa0
JT
2516 kfree(pool);
2517}
2518
a24c2569 2519static struct kmem_cache *_new_mapping_cache;
a24c2569 2520
991d9fa0
JT
2521static struct pool *pool_create(struct mapped_device *pool_md,
2522 struct block_device *metadata_dev,
e49e5829
JT
2523 unsigned long block_size,
2524 int read_only, char **error)
991d9fa0
JT
2525{
2526 int r;
2527 void *err_p;
2528 struct pool *pool;
2529 struct dm_pool_metadata *pmd;
e49e5829 2530 bool format_device = read_only ? false : true;
991d9fa0 2531
e49e5829 2532 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
991d9fa0
JT
2533 if (IS_ERR(pmd)) {
2534 *error = "Error creating metadata object";
2535 return (struct pool *)pmd;
2536 }
2537
2538 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2539 if (!pool) {
2540 *error = "Error allocating memory for pool";
2541 err_p = ERR_PTR(-ENOMEM);
2542 goto bad_pool;
2543 }
2544
2545 pool->pmd = pmd;
2546 pool->sectors_per_block = block_size;
f9a8e0cd
MP
2547 if (block_size & (block_size - 1))
2548 pool->sectors_per_block_shift = -1;
2549 else
2550 pool->sectors_per_block_shift = __ffs(block_size);
991d9fa0 2551 pool->low_water_blocks = 0;
67e2e2b2 2552 pool_features_init(&pool->pf);
a195db2d 2553 pool->prison = dm_bio_prison_create();
991d9fa0
JT
2554 if (!pool->prison) {
2555 *error = "Error creating pool's bio prison";
2556 err_p = ERR_PTR(-ENOMEM);
2557 goto bad_prison;
2558 }
2559
df5d2e90 2560 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
991d9fa0
JT
2561 if (IS_ERR(pool->copier)) {
2562 r = PTR_ERR(pool->copier);
2563 *error = "Error creating pool's kcopyd client";
2564 err_p = ERR_PTR(r);
2565 goto bad_kcopyd_client;
2566 }
2567
2568 /*
2569 * Create singlethreaded workqueue that will service all devices
2570 * that use this metadata.
2571 */
2572 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2573 if (!pool->wq) {
2574 *error = "Error creating pool's workqueue";
2575 err_p = ERR_PTR(-ENOMEM);
2576 goto bad_wq;
2577 }
2578
7d327fe0 2579 throttle_init(&pool->throttle);
991d9fa0 2580 INIT_WORK(&pool->worker, do_worker);
905e51b3 2581 INIT_DELAYED_WORK(&pool->waker, do_waker);
85ad643b 2582 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
991d9fa0 2583 spin_lock_init(&pool->lock);
991d9fa0
JT
2584 bio_list_init(&pool->deferred_flush_bios);
2585 INIT_LIST_HEAD(&pool->prepared_mappings);
104655fd 2586 INIT_LIST_HEAD(&pool->prepared_discards);
c140e1c4 2587 INIT_LIST_HEAD(&pool->active_thins);
88a6621b 2588 pool->low_water_triggered = false;
80e96c54 2589 pool->suspended = true;
44feb387
MS
2590
2591 pool->shared_read_ds = dm_deferred_set_create();
2592 if (!pool->shared_read_ds) {
2593 *error = "Error creating pool's shared read deferred set";
2594 err_p = ERR_PTR(-ENOMEM);
2595 goto bad_shared_read_ds;
2596 }
2597
2598 pool->all_io_ds = dm_deferred_set_create();
2599 if (!pool->all_io_ds) {
2600 *error = "Error creating pool's all io deferred set";
2601 err_p = ERR_PTR(-ENOMEM);
2602 goto bad_all_io_ds;
2603 }
991d9fa0
JT
2604
2605 pool->next_mapping = NULL;
a24c2569
MS
2606 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2607 _new_mapping_cache);
991d9fa0
JT
2608 if (!pool->mapping_pool) {
2609 *error = "Error creating pool's mapping mempool";
2610 err_p = ERR_PTR(-ENOMEM);
2611 goto bad_mapping_pool;
2612 }
2613
991d9fa0 2614 pool->ref_count = 1;
905e51b3 2615 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
2616 pool->pool_md = pool_md;
2617 pool->md_dev = metadata_dev;
2618 __pool_table_insert(pool);
2619
2620 return pool;
2621
991d9fa0 2622bad_mapping_pool:
44feb387
MS
2623 dm_deferred_set_destroy(pool->all_io_ds);
2624bad_all_io_ds:
2625 dm_deferred_set_destroy(pool->shared_read_ds);
2626bad_shared_read_ds:
991d9fa0
JT
2627 destroy_workqueue(pool->wq);
2628bad_wq:
2629 dm_kcopyd_client_destroy(pool->copier);
2630bad_kcopyd_client:
44feb387 2631 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2632bad_prison:
2633 kfree(pool);
2634bad_pool:
2635 if (dm_pool_metadata_close(pmd))
2636 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2637
2638 return err_p;
2639}
2640
2641static void __pool_inc(struct pool *pool)
2642{
2643 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2644 pool->ref_count++;
2645}
2646
2647static void __pool_dec(struct pool *pool)
2648{
2649 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2650 BUG_ON(!pool->ref_count);
2651 if (!--pool->ref_count)
2652 __pool_destroy(pool);
2653}
2654
2655static struct pool *__pool_find(struct mapped_device *pool_md,
2656 struct block_device *metadata_dev,
e49e5829
JT
2657 unsigned long block_size, int read_only,
2658 char **error, int *created)
991d9fa0
JT
2659{
2660 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2661
2662 if (pool) {
f09996c9
MS
2663 if (pool->pool_md != pool_md) {
2664 *error = "metadata device already in use by a pool";
991d9fa0 2665 return ERR_PTR(-EBUSY);
f09996c9 2666 }
991d9fa0
JT
2667 __pool_inc(pool);
2668
2669 } else {
2670 pool = __pool_table_lookup(pool_md);
2671 if (pool) {
f09996c9
MS
2672 if (pool->md_dev != metadata_dev) {
2673 *error = "different pool cannot replace a pool";
991d9fa0 2674 return ERR_PTR(-EINVAL);
f09996c9 2675 }
991d9fa0
JT
2676 __pool_inc(pool);
2677
67e2e2b2 2678 } else {
e49e5829 2679 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
67e2e2b2
JT
2680 *created = 1;
2681 }
991d9fa0
JT
2682 }
2683
2684 return pool;
2685}
2686
2687/*----------------------------------------------------------------
2688 * Pool target methods
2689 *--------------------------------------------------------------*/
2690static void pool_dtr(struct dm_target *ti)
2691{
2692 struct pool_c *pt = ti->private;
2693
2694 mutex_lock(&dm_thin_pool_table.mutex);
2695
2696 unbind_control_target(pt->pool, ti);
2697 __pool_dec(pt->pool);
2698 dm_put_device(ti, pt->metadata_dev);
2699 dm_put_device(ti, pt->data_dev);
2700 kfree(pt);
2701
2702 mutex_unlock(&dm_thin_pool_table.mutex);
2703}
2704
991d9fa0
JT
2705static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2706 struct dm_target *ti)
2707{
2708 int r;
2709 unsigned argc;
2710 const char *arg_name;
2711
2712 static struct dm_arg _args[] = {
74aa45c3 2713 {0, 4, "Invalid number of pool feature arguments"},
991d9fa0
JT
2714 };
2715
2716 /*
2717 * No feature arguments supplied.
2718 */
2719 if (!as->argc)
2720 return 0;
2721
2722 r = dm_read_arg_group(_args, as, &argc, &ti->error);
2723 if (r)
2724 return -EINVAL;
2725
2726 while (argc && !r) {
2727 arg_name = dm_shift_arg(as);
2728 argc--;
2729
e49e5829 2730 if (!strcasecmp(arg_name, "skip_block_zeroing"))
9bc142dd 2731 pf->zero_new_blocks = false;
e49e5829
JT
2732
2733 else if (!strcasecmp(arg_name, "ignore_discard"))
9bc142dd 2734 pf->discard_enabled = false;
e49e5829
JT
2735
2736 else if (!strcasecmp(arg_name, "no_discard_passdown"))
9bc142dd 2737 pf->discard_passdown = false;
991d9fa0 2738
e49e5829
JT
2739 else if (!strcasecmp(arg_name, "read_only"))
2740 pf->mode = PM_READ_ONLY;
2741
787a996c
MS
2742 else if (!strcasecmp(arg_name, "error_if_no_space"))
2743 pf->error_if_no_space = true;
2744
e49e5829
JT
2745 else {
2746 ti->error = "Unrecognised pool feature requested";
2747 r = -EINVAL;
2748 break;
2749 }
991d9fa0
JT
2750 }
2751
2752 return r;
2753}
2754
ac8c3f3d
JT
2755static void metadata_low_callback(void *context)
2756{
2757 struct pool *pool = context;
2758
2759 DMWARN("%s: reached low water mark for metadata device: sending event.",
2760 dm_device_name(pool->pool_md));
2761
2762 dm_table_event(pool->ti->table);
2763}
2764
7d48935e
MS
2765static sector_t get_dev_size(struct block_device *bdev)
2766{
2767 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2768}
2769
2770static void warn_if_metadata_device_too_big(struct block_device *bdev)
b17446df 2771{
7d48935e 2772 sector_t metadata_dev_size = get_dev_size(bdev);
b17446df
JT
2773 char buffer[BDEVNAME_SIZE];
2774
7d48935e 2775 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
b17446df
JT
2776 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2777 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
7d48935e
MS
2778}
2779
2780static sector_t get_metadata_dev_size(struct block_device *bdev)
2781{
2782 sector_t metadata_dev_size = get_dev_size(bdev);
2783
2784 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2785 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
b17446df
JT
2786
2787 return metadata_dev_size;
2788}
2789
24347e95
JT
2790static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2791{
2792 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2793
7d48935e 2794 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
24347e95
JT
2795
2796 return metadata_dev_size;
2797}
2798
ac8c3f3d
JT
2799/*
2800 * When a metadata threshold is crossed a dm event is triggered, and
2801 * userland should respond by growing the metadata device. We could let
2802 * userland set the threshold, like we do with the data threshold, but I'm
2803 * not sure they know enough to do this well.
2804 */
2805static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2806{
2807 /*
2808 * 4M is ample for all ops with the possible exception of thin
2809 * device deletion which is harmless if it fails (just retry the
2810 * delete after you've grown the device).
2811 */
2812 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2813 return min((dm_block_t)1024ULL /* 4M */, quarter);
2814}
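/*
 * Worked example (assuming the usual 4KiB metadata block size implied by
 * the "1024 blocks == 4M" comment above): a 1GiB metadata device has
 * 262144 blocks, so quarter == 65536 and the threshold is capped at
 * 1024 blocks; only metadata devices smaller than 16MiB end up limited
 * by the quarter term.
 */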
2815
991d9fa0
JT
2816/*
2817 * thin-pool <metadata dev> <data dev>
2818 * <data block size (sectors)>
2819 * <low water mark (blocks)>
2820 * [<#feature args> [<arg>]*]
2821 *
2822 * Optional feature arguments are:
2823 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
67e2e2b2
JT
2824 * ignore_discard: disable discard
2825 * no_discard_passdown: don't pass discards down to the data device
787a996c
MS
2826 * read_only: Don't allow any changes to be made to the pool metadata.
2827 * error_if_no_space: error IOs, instead of queueing, if no space.
991d9fa0
JT
2828 */
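/*
 * Illustrative ctr example (device names and sizes are placeholders):
 * assuming a 40GiB data device (83886080 sectors), 64KiB blocks
 * (128 sectors) and a low water mark of 32768 blocks, a pool could be
 * created from userspace with:
 *
 *	dmsetup create pool --table \
 *	  "0 83886080 thin-pool /dev/sdc1 /dev/sdc2 128 32768 1 skip_block_zeroing"
 */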
2829static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2830{
67e2e2b2 2831 int r, pool_created = 0;
991d9fa0
JT
2832 struct pool_c *pt;
2833 struct pool *pool;
2834 struct pool_features pf;
2835 struct dm_arg_set as;
2836 struct dm_dev *data_dev;
2837 unsigned long block_size;
2838 dm_block_t low_water_blocks;
2839 struct dm_dev *metadata_dev;
5d0db96d 2840 fmode_t metadata_mode;
991d9fa0
JT
2841
2842 /*
2843 * FIXME Remove validation from scope of lock.
2844 */
2845 mutex_lock(&dm_thin_pool_table.mutex);
2846
2847 if (argc < 4) {
2848 ti->error = "Invalid argument count";
2849 r = -EINVAL;
2850 goto out_unlock;
2851 }
5d0db96d 2852
991d9fa0
JT
2853 as.argc = argc;
2854 as.argv = argv;
2855
5d0db96d
JT
2856 /*
2857 * Set default pool features.
2858 */
2859 pool_features_init(&pf);
2860
2861 dm_consume_args(&as, 4);
2862 r = parse_pool_features(&as, &pf, ti);
2863 if (r)
2864 goto out_unlock;
2865
2866 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2867 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
991d9fa0
JT
2868 if (r) {
2869 ti->error = "Error opening metadata block device";
2870 goto out_unlock;
2871 }
7d48935e 2872 warn_if_metadata_device_too_big(metadata_dev->bdev);
991d9fa0
JT
2873
2874 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2875 if (r) {
2876 ti->error = "Error getting data device";
2877 goto out_metadata;
2878 }
2879
2880 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2881 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2882 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
55f2b8bd 2883 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
991d9fa0
JT
2884 ti->error = "Invalid block size";
2885 r = -EINVAL;
2886 goto out;
2887 }
2888
2889 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2890 ti->error = "Invalid low water mark";
2891 r = -EINVAL;
2892 goto out;
2893 }
2894
991d9fa0
JT
2895 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2896 if (!pt) {
2897 r = -ENOMEM;
2898 goto out;
2899 }
2900
2901 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
e49e5829 2902 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
991d9fa0
JT
2903 if (IS_ERR(pool)) {
2904 r = PTR_ERR(pool);
2905 goto out_free_pt;
2906 }
2907
67e2e2b2
JT
2908 /*
2909 * 'pool_created' reflects whether this is the first table load.
2910 * Top level discard support is not allowed to be changed after
2911 * initial load. This would require a pool reload to trigger thin
2912 * device changes.
2913 */
2914 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2915 ti->error = "Discard support cannot be disabled once enabled";
2916 r = -EINVAL;
2917 goto out_flags_changed;
2918 }
2919
991d9fa0
JT
2920 pt->pool = pool;
2921 pt->ti = ti;
2922 pt->metadata_dev = metadata_dev;
2923 pt->data_dev = data_dev;
2924 pt->low_water_blocks = low_water_blocks;
0424caa1 2925 pt->adjusted_pf = pt->requested_pf = pf;
55a62eef 2926 ti->num_flush_bios = 1;
9bc142dd 2927
67e2e2b2
JT
2928 /*
2929 * Only need to enable discards if the pool should pass
2930 * them down to the data device. The thin device's discard
2931 * processing will cause mappings to be removed from the btree.
2932 */
b60ab990 2933 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 2934 if (pf.discard_enabled && pf.discard_passdown) {
55a62eef 2935 ti->num_discard_bios = 1;
9bc142dd 2936
67e2e2b2
JT
2937 /*
2938 * Setting 'discards_supported' circumvents the normal
2939 * stacking of discard limits (this keeps the pool and
2940 * thin devices' discard limits consistent).
2941 */
0ac55489 2942 ti->discards_supported = true;
67e2e2b2 2943 }
991d9fa0
JT
2944 ti->private = pt;
2945
ac8c3f3d
JT
2946 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2947 calc_metadata_threshold(pt),
2948 metadata_low_callback,
2949 pool);
2950 if (r)
2951 goto out_free_pt;
2952
991d9fa0
JT
2953 pt->callbacks.congested_fn = pool_is_congested;
2954 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2955
2956 mutex_unlock(&dm_thin_pool_table.mutex);
2957
2958 return 0;
2959
67e2e2b2
JT
2960out_flags_changed:
2961 __pool_dec(pool);
991d9fa0
JT
2962out_free_pt:
2963 kfree(pt);
2964out:
2965 dm_put_device(ti, data_dev);
2966out_metadata:
2967 dm_put_device(ti, metadata_dev);
2968out_unlock:
2969 mutex_unlock(&dm_thin_pool_table.mutex);
2970
2971 return r;
2972}
2973
7de3ee57 2974static int pool_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2975{
2976 int r;
2977 struct pool_c *pt = ti->private;
2978 struct pool *pool = pt->pool;
2979 unsigned long flags;
2980
2981 /*
2982 * As this is a singleton target, ti->begin is always zero.
2983 */
2984 spin_lock_irqsave(&pool->lock, flags);
2985 bio->bi_bdev = pt->data_dev->bdev;
2986 r = DM_MAPIO_REMAPPED;
2987 spin_unlock_irqrestore(&pool->lock, flags);
2988
2989 return r;
2990}
2991
b17446df 2992static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
991d9fa0
JT
2993{
2994 int r;
2995 struct pool_c *pt = ti->private;
2996 struct pool *pool = pt->pool;
55f2b8bd
MS
2997 sector_t data_size = ti->len;
2998 dm_block_t sb_data_size;
991d9fa0 2999
b17446df 3000 *need_commit = false;
991d9fa0 3001
55f2b8bd
MS
3002 (void) sector_div(data_size, pool->sectors_per_block);
3003
991d9fa0
JT
3004 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3005 if (r) {
4fa5971a
MS
3006 DMERR("%s: failed to retrieve data device size",
3007 dm_device_name(pool->pool_md));
991d9fa0
JT
3008 return r;
3009 }
3010
3011 if (data_size < sb_data_size) {
4fa5971a
MS
3012 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3013 dm_device_name(pool->pool_md),
55f2b8bd 3014 (unsigned long long)data_size, sb_data_size);
991d9fa0
JT
3015 return -EINVAL;
3016
3017 } else if (data_size > sb_data_size) {
07f2b6e0
MS
3018 if (dm_pool_metadata_needs_check(pool->pmd)) {
3019 DMERR("%s: unable to grow the data device until repaired.",
3020 dm_device_name(pool->pool_md));
3021 return 0;
3022 }
3023
6f7f51d4
MS
3024 if (sb_data_size)
3025 DMINFO("%s: growing the data device from %llu to %llu blocks",
3026 dm_device_name(pool->pool_md),
3027 sb_data_size, (unsigned long long)data_size);
991d9fa0
JT
3028 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3029 if (r) {
b5330655 3030 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
991d9fa0
JT
3031 return r;
3032 }
3033
b17446df 3034 *need_commit = true;
991d9fa0
JT
3035 }
3036
3037 return 0;
3038}
3039
24347e95
JT
3040static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3041{
3042 int r;
3043 struct pool_c *pt = ti->private;
3044 struct pool *pool = pt->pool;
3045 dm_block_t metadata_dev_size, sb_metadata_dev_size;
3046
3047 *need_commit = false;
3048
610bba8b 3049 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
24347e95
JT
3050
3051 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3052 if (r) {
4fa5971a
MS
3053 DMERR("%s: failed to retrieve metadata device size",
3054 dm_device_name(pool->pool_md));
24347e95
JT
3055 return r;
3056 }
3057
3058 if (metadata_dev_size < sb_metadata_dev_size) {
4fa5971a
MS
3059 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3060 dm_device_name(pool->pool_md),
24347e95
JT
3061 metadata_dev_size, sb_metadata_dev_size);
3062 return -EINVAL;
3063
3064 } else if (metadata_dev_size > sb_metadata_dev_size) {
07f2b6e0
MS
3065 if (dm_pool_metadata_needs_check(pool->pmd)) {
3066 DMERR("%s: unable to grow the metadata device until repaired.",
3067 dm_device_name(pool->pool_md));
3068 return 0;
3069 }
3070
7d48935e 3071 warn_if_metadata_device_too_big(pool->md_dev);
6f7f51d4
MS
3072 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3073 dm_device_name(pool->pool_md),
3074 sb_metadata_dev_size, metadata_dev_size);
24347e95
JT
3075 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3076 if (r) {
b5330655 3077 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
24347e95
JT
3078 return r;
3079 }
3080
3081 *need_commit = true;
3082 }
3083
3084 return 0;
3085}
3086
b17446df
JT
3087/*
3088 * Retrieves the number of blocks of the data device from
3089 * the superblock and compares it to the actual device size,
3090 * thus resizing the data device in case it has grown.
3091 *
3092 * This both copes with opening preallocated data devices in the ctr
3093 * being followed by a resume
3094 * -and-
3095 * calling the resume method individually after userspace has
3096 * grown the data device in reaction to a table event.
3097 */
3098static int pool_preresume(struct dm_target *ti)
3099{
3100 int r;
24347e95 3101 bool need_commit1, need_commit2;
b17446df
JT
3102 struct pool_c *pt = ti->private;
3103 struct pool *pool = pt->pool;
3104
3105 /*
3106 * Take control of the pool object.
3107 */
3108 r = bind_control_target(pool, ti);
3109 if (r)
3110 return r;
3111
3112 r = maybe_resize_data_dev(ti, &need_commit1);
3113 if (r)
3114 return r;
3115
24347e95
JT
3116 r = maybe_resize_metadata_dev(ti, &need_commit2);
3117 if (r)
3118 return r;
3119
3120 if (need_commit1 || need_commit2)
020cc3b5 3121 (void) commit(pool);
b17446df
JT
3122
3123 return 0;
3124}
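/*
 * Illustrative resize sequence (names and table contents are
 * placeholders): after enlarging the underlying data device, reload the
 * pool with the new length and resume it so pool_preresume() picks up
 * the extra space:
 *
 *	dmsetup suspend pool
 *	dmsetup reload pool --table "0 <new length> thin-pool ..."
 *	dmsetup resume pool
 */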
3125
583024d2
MS
3126static void pool_suspend_active_thins(struct pool *pool)
3127{
3128 struct thin_c *tc;
3129
3130 /* Suspend all active thin devices */
3131 tc = get_first_thin(pool);
3132 while (tc) {
3133 dm_internal_suspend_noflush(tc->thin_md);
3134 tc = get_next_thin(pool, tc);
3135 }
3136}
3137
3138static void pool_resume_active_thins(struct pool *pool)
3139{
3140 struct thin_c *tc;
3141
3142 /* Resume all active thin devices */
3143 tc = get_first_thin(pool);
3144 while (tc) {
3145 dm_internal_resume(tc->thin_md);
3146 tc = get_next_thin(pool, tc);
3147 }
3148}
3149
991d9fa0
JT
3150static void pool_resume(struct dm_target *ti)
3151{
3152 struct pool_c *pt = ti->private;
3153 struct pool *pool = pt->pool;
3154 unsigned long flags;
3155
583024d2
MS
3156 /*
3157 * Must requeue active_thins' bios and then resume
3158 * active_thins _before_ clearing 'suspend' flag.
3159 */
3160 requeue_bios(pool);
3161 pool_resume_active_thins(pool);
3162
991d9fa0 3163 spin_lock_irqsave(&pool->lock, flags);
88a6621b 3164 pool->low_water_triggered = false;
80e96c54 3165 pool->suspended = false;
991d9fa0 3166 spin_unlock_irqrestore(&pool->lock, flags);
80e96c54 3167
905e51b3 3168 do_waker(&pool->waker.work);
991d9fa0
JT
3169}
3170
80e96c54
MS
3171static void pool_presuspend(struct dm_target *ti)
3172{
3173 struct pool_c *pt = ti->private;
3174 struct pool *pool = pt->pool;
3175 unsigned long flags;
3176
3177 spin_lock_irqsave(&pool->lock, flags);
3178 pool->suspended = true;
3179 spin_unlock_irqrestore(&pool->lock, flags);
583024d2
MS
3180
3181 pool_suspend_active_thins(pool);
80e96c54
MS
3182}
3183
3184static void pool_presuspend_undo(struct dm_target *ti)
3185{
3186 struct pool_c *pt = ti->private;
3187 struct pool *pool = pt->pool;
3188 unsigned long flags;
3189
583024d2
MS
3190 pool_resume_active_thins(pool);
3191
80e96c54
MS
3192 spin_lock_irqsave(&pool->lock, flags);
3193 pool->suspended = false;
3194 spin_unlock_irqrestore(&pool->lock, flags);
3195}
3196
991d9fa0
JT
3197static void pool_postsuspend(struct dm_target *ti)
3198{
991d9fa0
JT
3199 struct pool_c *pt = ti->private;
3200 struct pool *pool = pt->pool;
3201
905e51b3 3202 cancel_delayed_work(&pool->waker);
85ad643b 3203 cancel_delayed_work(&pool->no_space_timeout);
991d9fa0 3204 flush_workqueue(pool->wq);
020cc3b5 3205 (void) commit(pool);
991d9fa0
JT
3206}
3207
3208static int check_arg_count(unsigned argc, unsigned args_required)
3209{
3210 if (argc != args_required) {
3211 DMWARN("Message received with %u arguments instead of %u.",
3212 argc, args_required);
3213 return -EINVAL;
3214 }
3215
3216 return 0;
3217}
3218
3219static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3220{
3221 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3222 *dev_id <= MAX_DEV_ID)
3223 return 0;
3224
3225 if (warning)
3226 DMWARN("Message received with invalid device id: %s", arg);
3227
3228 return -EINVAL;
3229}
3230
3231static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3232{
3233 dm_thin_id dev_id;
3234 int r;
3235
3236 r = check_arg_count(argc, 2);
3237 if (r)
3238 return r;
3239
3240 r = read_dev_id(argv[1], &dev_id, 1);
3241 if (r)
3242 return r;
3243
3244 r = dm_pool_create_thin(pool->pmd, dev_id);
3245 if (r) {
3246 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3247 argv[1]);
3248 return r;
3249 }
3250
3251 return 0;
3252}
3253
3254static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3255{
3256 dm_thin_id dev_id;
3257 dm_thin_id origin_dev_id;
3258 int r;
3259
3260 r = check_arg_count(argc, 3);
3261 if (r)
3262 return r;
3263
3264 r = read_dev_id(argv[1], &dev_id, 1);
3265 if (r)
3266 return r;
3267
3268 r = read_dev_id(argv[2], &origin_dev_id, 1);
3269 if (r)
3270 return r;
3271
3272 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3273 if (r) {
3274 DMWARN("Creation of new snapshot %s of device %s failed.",
3275 argv[1], argv[2]);
3276 return r;
3277 }
3278
3279 return 0;
3280}
3281
3282static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3283{
3284 dm_thin_id dev_id;
3285 int r;
3286
3287 r = check_arg_count(argc, 2);
3288 if (r)
3289 return r;
3290
3291 r = read_dev_id(argv[1], &dev_id, 1);
3292 if (r)
3293 return r;
3294
3295 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3296 if (r)
3297 DMWARN("Deletion of thin device %s failed.", argv[1]);
3298
3299 return r;
3300}
3301
3302static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3303{
3304 dm_thin_id old_id, new_id;
3305 int r;
3306
3307 r = check_arg_count(argc, 3);
3308 if (r)
3309 return r;
3310
3311 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3312 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3313 return -EINVAL;
3314 }
3315
3316 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3317 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3318 return -EINVAL;
3319 }
3320
3321 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3322 if (r) {
3323 DMWARN("Failed to change transaction id from %s to %s.",
3324 argv[1], argv[2]);
3325 return r;
3326 }
3327
3328 return 0;
3329}
3330
cc8394d8
JT
3331static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3332{
3333 int r;
3334
3335 r = check_arg_count(argc, 1);
3336 if (r)
3337 return r;
3338
020cc3b5 3339 (void) commit(pool);
0d200aef 3340
cc8394d8
JT
3341 r = dm_pool_reserve_metadata_snap(pool->pmd);
3342 if (r)
3343 DMWARN("reserve_metadata_snap message failed.");
3344
3345 return r;
3346}
3347
3348static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3349{
3350 int r;
3351
3352 r = check_arg_count(argc, 1);
3353 if (r)
3354 return r;
3355
3356 r = dm_pool_release_metadata_snap(pool->pmd);
3357 if (r)
3358 DMWARN("release_metadata_snap message failed.");
3359
3360 return r;
3361}
3362
991d9fa0
JT
3363/*
3364 * Messages supported:
3365 * create_thin <dev_id>
3366 * create_snap <dev_id> <origin_id>
3367 * delete <dev_id>
991d9fa0 3368 * set_transaction_id <current_trans_id> <new_trans_id>
cc8394d8
JT
3369 * reserve_metadata_snap
3370 * release_metadata_snap
991d9fa0
JT
3371 */
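/*
 * Illustrative usage from userspace (device name and ids are placeholders):
 *
 *	dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *	dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *	dmsetup message /dev/mapper/pool 0 "delete 1"
 *
 * pool_message() below rejects all of these while the pool is in
 * READ_ONLY or FAIL mode.
 */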
3372static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3373{
3374 int r = -EINVAL;
3375 struct pool_c *pt = ti->private;
3376 struct pool *pool = pt->pool;
3377
2a7eaea0
JT
3378 if (get_pool_mode(pool) >= PM_READ_ONLY) {
3379 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3380 dm_device_name(pool->pool_md));
3381 return -EINVAL;
3382 }
3383
991d9fa0
JT
3384 if (!strcasecmp(argv[0], "create_thin"))
3385 r = process_create_thin_mesg(argc, argv, pool);
3386
3387 else if (!strcasecmp(argv[0], "create_snap"))
3388 r = process_create_snap_mesg(argc, argv, pool);
3389
3390 else if (!strcasecmp(argv[0], "delete"))
3391 r = process_delete_mesg(argc, argv, pool);
3392
3393 else if (!strcasecmp(argv[0], "set_transaction_id"))
3394 r = process_set_transaction_id_mesg(argc, argv, pool);
3395
cc8394d8
JT
3396 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3397 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3398
3399 else if (!strcasecmp(argv[0], "release_metadata_snap"))
3400 r = process_release_metadata_snap_mesg(argc, argv, pool);
3401
991d9fa0
JT
3402 else
3403 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3404
e49e5829 3405 if (!r)
020cc3b5 3406 (void) commit(pool);
991d9fa0
JT
3407
3408 return r;
3409}
3410
e49e5829
JT
3411static void emit_flags(struct pool_features *pf, char *result,
3412 unsigned sz, unsigned maxlen)
3413{
3414 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
787a996c
MS
3415 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3416 pf->error_if_no_space;
e49e5829
JT
3417 DMEMIT("%u ", count);
3418
3419 if (!pf->zero_new_blocks)
3420 DMEMIT("skip_block_zeroing ");
3421
3422 if (!pf->discard_enabled)
3423 DMEMIT("ignore_discard ");
3424
3425 if (!pf->discard_passdown)
3426 DMEMIT("no_discard_passdown ");
3427
3428 if (pf->mode == PM_READ_ONLY)
3429 DMEMIT("read_only ");
787a996c
MS
3430
3431 if (pf->error_if_no_space)
3432 DMEMIT("error_if_no_space ");
e49e5829
JT
3433}
3434
991d9fa0
JT
3435/*
3436 * Status line is:
3437 * <transaction id> <used metadata blocks>/<total metadata blocks>
3438 * <used data blocks>/<total data blocks> <held metadata root>
3439 */
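/*
 * Illustrative STATUSTYPE_INFO output (all values are placeholders):
 *
 *	0 149/4161600 4/655360 - rw discard_passdown queue_if_no_space
 *
 * i.e. transaction id 0, 149 of 4161600 metadata blocks used, 4 of
 * 655360 data blocks used, no held metadata root, read-write mode,
 * discards passed down, and IO queued rather than errored when data
 * space runs out.
 */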
fd7c092e
MP
3440static void pool_status(struct dm_target *ti, status_type_t type,
3441 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0 3442{
e49e5829 3443 int r;
991d9fa0
JT
3444 unsigned sz = 0;
3445 uint64_t transaction_id;
3446 dm_block_t nr_free_blocks_data;
3447 dm_block_t nr_free_blocks_metadata;
3448 dm_block_t nr_blocks_data;
3449 dm_block_t nr_blocks_metadata;
3450 dm_block_t held_root;
3451 char buf[BDEVNAME_SIZE];
3452 char buf2[BDEVNAME_SIZE];
3453 struct pool_c *pt = ti->private;
3454 struct pool *pool = pt->pool;
3455
3456 switch (type) {
3457 case STATUSTYPE_INFO:
e49e5829
JT
3458 if (get_pool_mode(pool) == PM_FAIL) {
3459 DMEMIT("Fail");
3460 break;
3461 }
3462
1f4e0ff0
AK
3463 /* Commit to ensure statistics aren't out-of-date */
3464 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
020cc3b5 3465 (void) commit(pool);
1f4e0ff0 3466
fd7c092e
MP
3467 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3468 if (r) {
4fa5971a
MS
3469 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3470 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3471 goto err;
3472 }
991d9fa0 3473
fd7c092e
MP
3474 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3475 if (r) {
4fa5971a
MS
3476 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3477 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3478 goto err;
3479 }
991d9fa0
JT
3480
3481 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
fd7c092e 3482 if (r) {
4fa5971a
MS
3483 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3484 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3485 goto err;
3486 }
991d9fa0 3487
fd7c092e
MP
3488 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3489 if (r) {
4fa5971a
MS
3490 DMERR("%s: dm_pool_get_free_block_count returned %d",
3491 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3492 goto err;
3493 }
991d9fa0
JT
3494
3495 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
fd7c092e 3496 if (r) {
4fa5971a
MS
3497 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3498 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3499 goto err;
3500 }
991d9fa0 3501
cc8394d8 3502 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
fd7c092e 3503 if (r) {
4fa5971a
MS
3504 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3505 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3506 goto err;
3507 }
991d9fa0
JT
3508
3509 DMEMIT("%llu %llu/%llu %llu/%llu ",
3510 (unsigned long long)transaction_id,
3511 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3512 (unsigned long long)nr_blocks_metadata,
3513 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3514 (unsigned long long)nr_blocks_data);
3515
3516 if (held_root)
e49e5829
JT
3517 DMEMIT("%llu ", held_root);
3518 else
3519 DMEMIT("- ");
3520
3e1a0699
JT
3521 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3522 DMEMIT("out_of_data_space ");
3523 else if (pool->pf.mode == PM_READ_ONLY)
e49e5829 3524 DMEMIT("ro ");
991d9fa0 3525 else
e49e5829
JT
3526 DMEMIT("rw ");
3527
018debea 3528 if (!pool->pf.discard_enabled)
787a996c 3529 DMEMIT("ignore_discard ");
018debea 3530 else if (pool->pf.discard_passdown)
787a996c
MS
3531 DMEMIT("discard_passdown ");
3532 else
3533 DMEMIT("no_discard_passdown ");
3534
3535 if (pool->pf.error_if_no_space)
3536 DMEMIT("error_if_no_space ");
e49e5829 3537 else
787a996c 3538 DMEMIT("queue_if_no_space ");
991d9fa0
JT
3539
3540 break;
3541
3542 case STATUSTYPE_TABLE:
3543 DMEMIT("%s %s %lu %llu ",
3544 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3545 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3546 (unsigned long)pool->sectors_per_block,
3547 (unsigned long long)pt->low_water_blocks);
0424caa1 3548 emit_flags(&pt->requested_pf, result, sz, maxlen);
991d9fa0
JT
3549 break;
3550 }
fd7c092e 3551 return;
991d9fa0 3552
fd7c092e
MP
3553err:
3554 DMEMIT("Error");
991d9fa0
JT
3555}
3556
3557static int pool_iterate_devices(struct dm_target *ti,
3558 iterate_devices_callout_fn fn, void *data)
3559{
3560 struct pool_c *pt = ti->private;
3561
3562 return fn(ti, pt->data_dev, 0, ti->len, data);
3563}
3564
3565static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3566 struct bio_vec *biovec, int max_size)
3567{
3568 struct pool_c *pt = ti->private;
3569 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3570
3571 if (!q->merge_bvec_fn)
3572 return max_size;
3573
3574 bvm->bi_bdev = pt->data_dev->bdev;
3575
3576 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3577}
3578
0424caa1 3579static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
104655fd 3580{
0424caa1
MS
3581 struct pool *pool = pt->pool;
3582 struct queue_limits *data_limits;
3583
104655fd
JT
3584 limits->max_discard_sectors = pool->sectors_per_block;
3585
3586 /*
0424caa1 3587 * discard_granularity is just a hint, and not enforced.
104655fd 3588 */
0424caa1
MS
3589 if (pt->adjusted_pf.discard_passdown) {
3590 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
09869de5
LC
3591 limits->discard_granularity = max(data_limits->discard_granularity,
3592 pool->sectors_per_block << SECTOR_SHIFT);
f13945d7 3593 } else
0424caa1 3594 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
104655fd
JT
3595}
3596
991d9fa0
JT
3597static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3598{
3599 struct pool_c *pt = ti->private;
3600 struct pool *pool = pt->pool;
3601 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3602
3603 /*
3604 * If max_sectors is smaller than pool->sectors_per_block, adjust it down
3605 * to the highest power-of-2 factor of pool->sectors_per_block.
3606 * This is especially beneficial when the pool's data device is a RAID
3607 * device whose full stripe width matches pool->sectors_per_block:
3608 * although each IO is then smaller than a full RAID stripe, the
3609 * aggregated IOs still end on a full RAID stripe boundary, which
3610 * avoids cascading additional partial-stripe writes.
604ea906 3611 */
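/*
 * Worked example (illustrative numbers): with sectors_per_block = 384
 * (a 192KiB block) and a stacked max_sectors of 256, the loop below sees
 * that 256 is not a factor of 384, decrements the power of two to 255,
 * rounds down to 128, and stops because 128 divides 384 exactly.
 */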
3612 if (limits->max_sectors < pool->sectors_per_block) {
3613 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3614 if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3615 limits->max_sectors--;
3616 limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3617 }
604ea906 3618 }
991d9fa0 3619
3620 /*
3621 * If the system-determined stacked limits are compatible with the
3622 * pool's blocksize (io_opt is a factor) do not override them.
3623 */
3624 if (io_opt_sectors < pool->sectors_per_block ||
3625 !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3626 if (is_factor(pool->sectors_per_block, limits->max_sectors))
3627 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3628 else
3629 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3630 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3631 }
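/*
 * For example (illustrative numbers): with a 192KiB pool block and a
 * stacked io_opt of 64KiB, io_opt is smaller than the block size, so the
 * branch above overrides the hints: io_opt becomes the 192KiB block size
 * and io_min becomes max_sectors (when that divides the block size) or
 * the block size itself.
 */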
3632
3633 /*
3634 * pt->adjusted_pf is a staging area for the actual features to use.
3635 * They get transferred to the live pool in bind_control_target()
3636 * called from pool_preresume().
3637 */
3638 if (!pt->adjusted_pf.discard_enabled) {
3639 /*
3640 * Must explicitly disallow stacking discard limits, otherwise the
3641 * block layer will stack them if the pool's data device has support.
3642 * QUEUE_FLAG_DISCARD wouldn't be set, but there is no way for the
3643 * user to see that, so make sure to set all discard limits to 0.
3644 */
3645 limits->discard_granularity = 0;
0424caa1 3646 return;
b60ab990 3647 }
3648
3649 disable_passdown_if_not_supported(pt);
3650
3651 set_discard_limits(pt, limits);
3652}
3653
3654static struct target_type pool_target = {
3655 .name = "thin-pool",
3656 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3657 DM_TARGET_IMMUTABLE,
36f12aeb 3658 .version = {1, 14, 0},
3659 .module = THIS_MODULE,
3660 .ctr = pool_ctr,
3661 .dtr = pool_dtr,
3662 .map = pool_map,
3663 .presuspend = pool_presuspend,
3664 .presuspend_undo = pool_presuspend_undo,
3665 .postsuspend = pool_postsuspend,
3666 .preresume = pool_preresume,
3667 .resume = pool_resume,
3668 .message = pool_message,
3669 .status = pool_status,
3670 .merge = pool_merge,
3671 .iterate_devices = pool_iterate_devices,
3672 .io_hints = pool_io_hints,
3673};
3674
3675/*----------------------------------------------------------------
3676 * Thin target methods
3677 *--------------------------------------------------------------*/
3678static void thin_get(struct thin_c *tc)
3679{
3680 atomic_inc(&tc->refcount);
3681}
3682
3683static void thin_put(struct thin_c *tc)
3684{
3685 if (atomic_dec_and_test(&tc->refcount))
3686 complete(&tc->can_destroy);
3687}
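/*
 * Lifetime sketch: tc->refcount is initialised to 1 in thin_ctr(), and
 * other parts of the driver (e.g. the pool worker walking the pool's
 * active_thins list) are expected to hold a temporary reference via
 * thin_get() while they use the thin device.  thin_dtr() drops the
 * initial reference and waits on tc->can_destroy, so teardown cannot
 * complete while any such reference is still held.
 */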
3688
3689static void thin_dtr(struct dm_target *ti)
3690{
3691 struct thin_c *tc = ti->private;
3692 unsigned long flags;
3693
3694 spin_lock_irqsave(&tc->pool->lock, flags);
3695 list_del_rcu(&tc->list);
3696 spin_unlock_irqrestore(&tc->pool->lock, flags);
3697 synchronize_rcu();
991d9fa0 3698
3699 thin_put(tc);
3700 wait_for_completion(&tc->can_destroy);
3701
3702 mutex_lock(&dm_thin_pool_table.mutex);
3703
3704 __pool_dec(tc->pool);
3705 dm_pool_close_thin_device(tc->td);
3706 dm_put_device(ti, tc->pool_dev);
3707 if (tc->origin_dev)
3708 dm_put_device(ti, tc->origin_dev);
3709 kfree(tc);
3710
3711 mutex_unlock(&dm_thin_pool_table.mutex);
3712}
3713
3714/*
3715 * Thin target parameters:
3716 *
2dd9c257 3717 * <pool_dev> <dev_id> [origin_dev]
3718 *
3719 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
3720 * dev_id: the internal device identifier
2dd9c257 3721 * origin_dev: a device external to the pool that should act as the origin
3722 *
3723 * If the pool device has discards disabled, they get disabled for the thin
3724 * device as well.
3725 */
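/*
 * Illustrative example only (device name and sizes are made up):
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/my_pool 0"
 *
 * activates a 1GiB (2097152-sector) thin device using internal device id 0
 * from the pool at /dev/mapper/my_pool; an optional third argument would
 * name an external origin device, as described above.
 */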
3726static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3727{
3728 int r;
3729 struct thin_c *tc;
2dd9c257 3730 struct dm_dev *pool_dev, *origin_dev;
991d9fa0 3731 struct mapped_device *pool_md;
5e3283e2 3732 unsigned long flags;
3733
3734 mutex_lock(&dm_thin_pool_table.mutex);
3735
2dd9c257 3736 if (argc != 2 && argc != 3) {
3737 ti->error = "Invalid argument count";
3738 r = -EINVAL;
3739 goto out_unlock;
3740 }
3741
3742 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3743 if (!tc) {
3744 ti->error = "Out of memory";
3745 r = -ENOMEM;
3746 goto out_unlock;
3747 }
583024d2 3748 tc->thin_md = dm_table_get_md(ti->table);
c140e1c4 3749 spin_lock_init(&tc->lock);
a374bb21 3750 INIT_LIST_HEAD(&tc->deferred_cells);
3751 bio_list_init(&tc->deferred_bio_list);
3752 bio_list_init(&tc->retry_on_resume_list);
67324ea1 3753 tc->sort_bio_list = RB_ROOT;
991d9fa0 3754
3755 if (argc == 3) {
3756 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3757 if (r) {
3758 ti->error = "Error opening origin device";
3759 goto bad_origin_dev;
3760 }
3761 tc->origin_dev = origin_dev;
3762 }
3763
3764 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
3765 if (r) {
3766 ti->error = "Error opening pool device";
3767 goto bad_pool_dev;
3768 }
3769 tc->pool_dev = pool_dev;
3770
3771 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
3772 ti->error = "Invalid device id";
3773 r = -EINVAL;
3774 goto bad_common;
3775 }
3776
3777 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
3778 if (!pool_md) {
3779 ti->error = "Couldn't get pool mapped device";
3780 r = -EINVAL;
3781 goto bad_common;
3782 }
3783
3784 tc->pool = __pool_table_lookup(pool_md);
3785 if (!tc->pool) {
3786 ti->error = "Couldn't find pool object";
3787 r = -EINVAL;
3788 goto bad_pool_lookup;
3789 }
3790 __pool_inc(tc->pool);
3791
3792 if (get_pool_mode(tc->pool) == PM_FAIL) {
3793 ti->error = "Couldn't open thin device, Pool is in fail mode";
1acacc07 3794 r = -EINVAL;
80e96c54 3795 goto bad_pool;
3796 }
3797
3798 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
3799 if (r) {
3800 ti->error = "Couldn't open thin internal device";
80e96c54 3801 goto bad_pool;
3802 }
3803
3804 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
3805 if (r)
80e96c54 3806 goto bad;
542f9038 3807
55a62eef 3808 ti->num_flush_bios = 1;
16ad3d10 3809 ti->flush_supported = true;
59c3d2c6 3810 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
3811
3812 /* In case the pool supports discards, pass them on. */
b60ab990 3813 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 3814 if (tc->pool->pf.discard_enabled) {
0ac55489 3815 ti->discards_supported = true;
55a62eef 3816 ti->num_discard_bios = 1;
3817 /* Discard bios must be split on a block boundary */
3818 ti->split_discard_bios = true;
67e2e2b2 3819 }
991d9fa0 3820
3821 mutex_unlock(&dm_thin_pool_table.mutex);
3822
5e3283e2 3823 spin_lock_irqsave(&tc->pool->lock, flags);
3824 if (tc->pool->suspended) {
3825 spin_unlock_irqrestore(&tc->pool->lock, flags);
3826 mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
3827 ti->error = "Unable to activate thin device while pool is suspended";
3828 r = -EINVAL;
3829 goto bad;
3830 }
3831 atomic_set(&tc->refcount, 1);
3832 init_completion(&tc->can_destroy);
c140e1c4 3833 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
5e3283e2 3834 spin_unlock_irqrestore(&tc->pool->lock, flags);
3835 /*
3836 * This synchronize_rcu() call is needed here, otherwise we risk a
3837 * wake_worker() call finding no bios to process (because the newly
3838 * added tc isn't yet visible). This reduces latency, since we
3839 * aren't then dependent on the periodic commit to call wake_worker().
3840 */
3841 synchronize_rcu();
3842
3843 dm_put(pool_md);
3844
3845 return 0;
3846
80e96c54 3847bad:
1acacc07 3848 dm_pool_close_thin_device(tc->td);
80e96c54 3849bad_pool:
3850 __pool_dec(tc->pool);
3851bad_pool_lookup:
3852 dm_put(pool_md);
3853bad_common:
3854 dm_put_device(ti, tc->pool_dev);
3855bad_pool_dev:
3856 if (tc->origin_dev)
3857 dm_put_device(ti, tc->origin_dev);
3858bad_origin_dev:
3859 kfree(tc);
3860out_unlock:
3861 mutex_unlock(&dm_thin_pool_table.mutex);
3862
3863 return r;
3864}
3865
7de3ee57 3866static int thin_map(struct dm_target *ti, struct bio *bio)
991d9fa0 3867{
4f024f37 3868 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
991d9fa0 3869
7de3ee57 3870 return thin_bio_map(ti, bio);
3871}
3872
7de3ee57 3873static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
3874{
3875 unsigned long flags;
59c3d2c6 3876 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d 3877 struct list_head work;
a24c2569 3878 struct dm_thin_new_mapping *m, *tmp;
3879 struct pool *pool = h->tc->pool;
3880
3881 if (h->shared_read_entry) {
3882 INIT_LIST_HEAD(&work);
44feb387 3883 dm_deferred_entry_dec(h->shared_read_entry, &work);
3884
3885 spin_lock_irqsave(&pool->lock, flags);
3886 list_for_each_entry_safe(m, tmp, &work, list) {
3887 list_del(&m->list);
50f3c3ef 3888 __complete_mapping_preparation(m);
3889 }
3890 spin_unlock_irqrestore(&pool->lock, flags);
3891 }
3892
3893 if (h->all_io_entry) {
3894 INIT_LIST_HEAD(&work);
44feb387 3895 dm_deferred_entry_dec(h->all_io_entry, &work);
3896 if (!list_empty(&work)) {
3897 spin_lock_irqsave(&pool->lock, flags);
3898 list_for_each_entry_safe(m, tmp, &work, list)
daec338b 3899 list_add_tail(&m->list, &pool->prepared_discards);
3900 spin_unlock_irqrestore(&pool->lock, flags);
3901 wake_worker(pool);
3902 }
3903 }
3904
3905 return 0;
3906}
3907
738211f7 3908static void thin_presuspend(struct dm_target *ti)
991d9fa0 3909{
3910 struct thin_c *tc = ti->private;
3911
991d9fa0 3912 if (dm_noflush_suspending(ti))
3913 noflush_work(tc, do_noflush_start);
3914}
3915
3916static void thin_postsuspend(struct dm_target *ti)
3917{
3918 struct thin_c *tc = ti->private;
3919
3920 /*
3921 * The dm_noflush_suspending flag has been cleared by now, so
3922 * unfortunately we must always run this.
3923 */
3924 noflush_work(tc, do_noflush_stop);
3925}
3926
3927static int thin_preresume(struct dm_target *ti)
3928{
3929 struct thin_c *tc = ti->private;
3930
3931 if (tc->origin_dev)
3932 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
3933
3934 return 0;
3935}
3936
3937/*
3938 * <nr mapped sectors> <highest mapped sector>
3939 */
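/*
 * Illustrative example only (values are made up): a thin device with
 * 131072 sectors mapped whose highest mapped sector is 1048575 reports
 * "131072 1048575"; a device with no mappings yet reports "0 -".
 */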
3940static void thin_status(struct dm_target *ti, status_type_t type,
3941 unsigned status_flags, char *result, unsigned maxlen)
3942{
3943 int r;
3944 ssize_t sz = 0;
3945 dm_block_t mapped, highest;
3946 char buf[BDEVNAME_SIZE];
3947 struct thin_c *tc = ti->private;
3948
3949 if (get_pool_mode(tc->pool) == PM_FAIL) {
3950 DMEMIT("Fail");
fd7c092e 3951 return;
3952 }
3953
3954 if (!tc->td)
3955 DMEMIT("-");
3956 else {
3957 switch (type) {
3958 case STATUSTYPE_INFO:
3959 r = dm_thin_get_mapped_count(tc->td, &mapped);
3960 if (r) {
3961 DMERR("dm_thin_get_mapped_count returned %d", r);
3962 goto err;
3963 }
3964
3965 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
3966 if (r < 0) {
3967 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3968 goto err;
3969 }
3970
3971 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3972 if (r)
3973 DMEMIT("%llu", ((highest + 1) *
3974 tc->pool->sectors_per_block) - 1);
3975 else
3976 DMEMIT("-");
3977 break;
3978
3979 case STATUSTYPE_TABLE:
3980 DMEMIT("%s %lu",
3981 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3982 (unsigned long) tc->dev_id);
3983 if (tc->origin_dev)
3984 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
3985 break;
3986 }
3987 }
3988
3989 return;
3990
3991err:
3992 DMEMIT("Error");
3993}
3994
3995static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3996 struct bio_vec *biovec, int max_size)
3997{
3998 struct thin_c *tc = ti->private;
3999 struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
4000
4001 if (!q->merge_bvec_fn)
4002 return max_size;
4003
4004 bvm->bi_bdev = tc->pool_dev->bdev;
4005 bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
4006
4007 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
4008}
4009
4010static int thin_iterate_devices(struct dm_target *ti,
4011 iterate_devices_callout_fn fn, void *data)
4012{
55f2b8bd 4013 sector_t blocks;
991d9fa0 4014 struct thin_c *tc = ti->private;
55f2b8bd 4015 struct pool *pool = tc->pool;
4016
4017 /*
4018 * We can't call dm_pool_get_data_dev_size() since that blocks. So
4019 * we follow a more convoluted path through to the pool's target.
4020 */
55f2b8bd 4021 if (!pool->ti)
4022 return 0; /* nothing is bound */
4023
4024 blocks = pool->ti->len;
4025 (void) sector_div(blocks, pool->sectors_per_block);
991d9fa0 4026 if (blocks)
55f2b8bd 4027 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4028
4029 return 0;
4030}
4031
4032static struct target_type thin_target = {
4033 .name = "thin",
36f12aeb 4034 .version = {1, 14, 0},
4035 .module = THIS_MODULE,
4036 .ctr = thin_ctr,
4037 .dtr = thin_dtr,
4038 .map = thin_map,
eb2aa48d 4039 .end_io = thin_endio,
e5aea7b4 4040 .preresume = thin_preresume,
738211f7 4041 .presuspend = thin_presuspend,
4042 .postsuspend = thin_postsuspend,
4043 .status = thin_status,
36f12aeb 4044 .merge = thin_merge,
991d9fa0 4045 .iterate_devices = thin_iterate_devices,
4046};
4047
4048/*----------------------------------------------------------------*/
4049
4050static int __init dm_thin_init(void)
4051{
4052 int r;
4053
4054 pool_table_init();
4055
4056 r = dm_register_target(&thin_target);
4057 if (r)
4058 return r;
4059
4060 r = dm_register_target(&pool_target);
4061 if (r)
4062 goto bad_pool_target;
4063
4064 r = -ENOMEM;
4065
4066 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4067 if (!_new_mapping_cache)
4068 goto bad_new_mapping_cache;
4069
4070 return 0;
4071
a24c2569 4072bad_new_mapping_cache:
4073 dm_unregister_target(&pool_target);
4074bad_pool_target:
4075 dm_unregister_target(&thin_target);
4076
4077 return r;
4078}
4079
4080static void dm_thin_exit(void)
4081{
4082 dm_unregister_target(&thin_target);
4083 dm_unregister_target(&pool_target);
a24c2569 4084
a24c2569 4085 kmem_cache_destroy(_new_mapping_cache);
4086}
4087
4088module_init(dm_thin_init);
4089module_exit(dm_thin_exit);
4090
4091module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4092MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
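/*
 * Usage sketch (assumes this file builds into a module named dm_thin_pool):
 * the timeout can be changed at runtime with e.g.
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * and a value of 0 disables the timeout, so a pool configured to queue IO
 * when out of data space will queue it indefinitely.
 */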
4093
7cab8bf1 4094MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
4095MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4096MODULE_LICENSE("GPL");