| 1 | /* |
| 2 | * raid5.c : Multiple Devices driver for Linux |
| 3 | * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman |
| 4 | * Copyright (C) 1999, 2000 Ingo Molnar |
| 5 | * Copyright (C) 2002, 2003 H. Peter Anvin |
| 6 | * |
| 7 | * RAID-4/5/6 management functions. |
| 8 | * Thanks to Penguin Computing for making the RAID-6 development possible |
| 9 | * by donating a test server! |
| 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of the GNU General Public License as published by |
| 13 | * the Free Software Foundation; either version 2, or (at your option) |
| 14 | * any later version. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * (for example /usr/src/linux/COPYING); if not, write to the Free |
| 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 19 | */ |
| 20 | |
| 21 | /* |
| 22 | * BITMAP UNPLUGGING: |
| 23 | * |
| 24 | * The sequencing for updating the bitmap reliably is a little |
| 25 | * subtle (and I got it wrong the first time) so it deserves some |
| 26 | * explanation. |
| 27 | * |
| 28 | * We group bitmap updates into batches. Each batch has a number. |
| 29 | * We may write out several batches at once, but that isn't very important. |
| 30 | * conf->seq_write is the number of the last batch successfully written. |
| 31 | * conf->seq_flush is the number of the last batch that was closed to |
| 32 | * new additions. |
| 33 | * When we discover that we will need to write to any block in a stripe |
| 34 | * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq |
| 35 | * the number of the batch it will be in. This is seq_flush+1. |
| 36 | * When we are ready to do a write, if that batch hasn't been written yet, |
| 37 | * we plug the array and queue the stripe for later. |
 * When an unplug happens, we increment seq_flush, thus closing the current
 * batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
| 42 | * This may occasionally write a bit out twice, but is sure never to |
| 43 | * miss any bits. |
| 44 | */ |
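
/*
 * An illustrative walk-through of the scheme above (batch numbers are
 * arbitrary):
 *   seq_flush == seq_write == 5: a new write records sh->bm_seq = 6
 *       (it belongs to the still-open batch 6) and waits for that batch.
 *   unplug: seq_flush becomes 6, closing batch 6 to new additions.
 *   seq_flush > seq_write: pending bitmap updates are written out and
 *       seq_write becomes 6, so stripes with bm_seq <= 6 may proceed.
 */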
| 45 | |
| 46 | #include <linux/blkdev.h> |
| 47 | #include <linux/kthread.h> |
| 48 | #include <linux/raid/pq.h> |
| 49 | #include <linux/async_tx.h> |
| 50 | #include <linux/module.h> |
| 51 | #include <linux/async.h> |
| 52 | #include <linux/seq_file.h> |
| 53 | #include <linux/cpu.h> |
| 54 | #include <linux/slab.h> |
| 55 | #include <linux/ratelimit.h> |
| 56 | #include <linux/nodemask.h> |
| 57 | #include <linux/flex_array.h> |
| 58 | #include <linux/sched/signal.h> |
| 59 | |
| 60 | #include <trace/events/block.h> |
| 61 | #include <linux/list_sort.h> |
| 62 | |
| 63 | #include "md.h" |
| 64 | #include "raid5.h" |
| 65 | #include "raid0.h" |
| 66 | #include "bitmap.h" |
| 67 | #include "raid5-log.h" |
| 68 | |
| 69 | #define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED) |
| 70 | |
| 71 | #define cpu_to_group(cpu) cpu_to_node(cpu) |
| 72 | #define ANY_GROUP NUMA_NO_NODE |
| 73 | |
| 74 | static bool devices_handle_discard_safely = false; |
| 75 | module_param(devices_handle_discard_safely, bool, 0644); |
| 76 | MODULE_PARM_DESC(devices_handle_discard_safely, |
| 77 | "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); |
| 78 | static struct workqueue_struct *raid5_wq; |
| 79 | |
| 80 | static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) |
| 81 | { |
| 82 | int hash = (sect >> STRIPE_SHIFT) & HASH_MASK; |
| 83 | return &conf->stripe_hashtbl[hash]; |
| 84 | } |
| 85 | |
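/*
 * Map a sector to the index of the hash lock (and matching inactive list)
 * that protects its stripe; see NR_STRIPE_HASH_LOCKS.
 */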
| 86 | static inline int stripe_hash_locks_hash(sector_t sect) |
| 87 | { |
| 88 | return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; |
| 89 | } |
| 90 | |
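/*
 * Lock ordering: take the per-hash lock first, then conf->device_lock;
 * unlock_device_hash_lock() releases them in the opposite order.
 */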
| 91 | static inline void lock_device_hash_lock(struct r5conf *conf, int hash) |
| 92 | { |
| 93 | spin_lock_irq(conf->hash_locks + hash); |
| 94 | spin_lock(&conf->device_lock); |
| 95 | } |
| 96 | |
| 97 | static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) |
| 98 | { |
| 99 | spin_unlock(&conf->device_lock); |
| 100 | spin_unlock_irq(conf->hash_locks + hash); |
| 101 | } |
| 102 | |
| 103 | static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) |
| 104 | { |
| 105 | int i; |
| 106 | local_irq_disable(); |
| 107 | spin_lock(conf->hash_locks); |
| 108 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) |
| 109 | spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); |
| 110 | spin_lock(&conf->device_lock); |
| 111 | } |
| 112 | |
| 113 | static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) |
| 114 | { |
| 115 | int i; |
| 116 | spin_unlock(&conf->device_lock); |
| 117 | for (i = NR_STRIPE_HASH_LOCKS; i; i--) |
| 118 | spin_unlock(conf->hash_locks + i - 1); |
| 119 | local_irq_enable(); |
| 120 | } |
| 121 | |
| 122 | /* Find first data disk in a raid6 stripe */ |
| 123 | static inline int raid6_d0(struct stripe_head *sh) |
| 124 | { |
| 125 | if (sh->ddf_layout) |
/* ddf always starts from the first device */
| 127 | return 0; |
| 128 | /* md starts just after Q block */ |
| 129 | if (sh->qd_idx == sh->disks - 1) |
| 130 | return 0; |
| 131 | else |
| 132 | return sh->qd_idx + 1; |
| 133 | } |
| 134 | static inline int raid6_next_disk(int disk, int raid_disks) |
| 135 | { |
| 136 | disk++; |
| 137 | return (disk < raid_disks) ? disk : 0; |
| 138 | } |
| 139 | |
/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
| 145 | static int raid6_idx_to_slot(int idx, struct stripe_head *sh, |
| 146 | int *count, int syndrome_disks) |
| 147 | { |
| 148 | int slot = *count; |
| 149 | |
| 150 | if (sh->ddf_layout) |
| 151 | (*count)++; |
| 152 | if (idx == sh->pd_idx) |
| 153 | return syndrome_disks; |
| 154 | if (idx == sh->qd_idx) |
| 155 | return syndrome_disks + 1; |
| 156 | if (!sh->ddf_layout) |
| 157 | (*count)++; |
| 158 | return slot; |
| 159 | } |
| 160 | |
| 161 | static void print_raid5_conf (struct r5conf *conf); |
| 162 | |
| 163 | static int stripe_operations_active(struct stripe_head *sh) |
| 164 | { |
| 165 | return sh->check_state || sh->reconstruct_state || |
| 166 | test_bit(STRIPE_BIOFILL_RUN, &sh->state) || |
| 167 | test_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 168 | } |
| 169 | |
| 170 | static bool stripe_is_lowprio(struct stripe_head *sh) |
| 171 | { |
| 172 | return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || |
| 173 | test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && |
| 174 | !test_bit(STRIPE_R5C_CACHING, &sh->state); |
| 175 | } |
| 176 | |
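/*
 * Queue the stripe for the worker group that serves its CPU (falling back
 * to any online CPU) and wake enough workers - or the main raid5d thread
 * when no worker groups are configured - to handle it.
 */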
| 177 | static void raid5_wakeup_stripe_thread(struct stripe_head *sh) |
| 178 | { |
| 179 | struct r5conf *conf = sh->raid_conf; |
| 180 | struct r5worker_group *group; |
| 181 | int thread_cnt; |
| 182 | int i, cpu = sh->cpu; |
| 183 | |
| 184 | if (!cpu_online(cpu)) { |
| 185 | cpu = cpumask_any(cpu_online_mask); |
| 186 | sh->cpu = cpu; |
| 187 | } |
| 188 | |
| 189 | if (list_empty(&sh->lru)) { |
| 190 | struct r5worker_group *group; |
| 191 | group = conf->worker_groups + cpu_to_group(cpu); |
| 192 | if (stripe_is_lowprio(sh)) |
| 193 | list_add_tail(&sh->lru, &group->loprio_list); |
| 194 | else |
| 195 | list_add_tail(&sh->lru, &group->handle_list); |
| 196 | group->stripes_cnt++; |
| 197 | sh->group = group; |
| 198 | } |
| 199 | |
| 200 | if (conf->worker_cnt_per_group == 0) { |
| 201 | md_wakeup_thread(conf->mddev->thread); |
| 202 | return; |
| 203 | } |
| 204 | |
| 205 | group = conf->worker_groups + cpu_to_group(sh->cpu); |
| 206 | |
| 207 | group->workers[0].working = true; |
| 208 | /* at least one worker should run to avoid race */ |
| 209 | queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); |
| 210 | |
| 211 | thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1; |
| 212 | /* wakeup more workers */ |
| 213 | for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { |
| 214 | if (group->workers[i].working == false) { |
| 215 | group->workers[i].working = true; |
| 216 | queue_work_on(sh->cpu, raid5_wq, |
| 217 | &group->workers[i].work); |
| 218 | thread_cnt--; |
| 219 | } |
| 220 | } |
| 221 | } |
| 222 | |
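/*
 * Drop the last reference to a stripe: either re-queue it for further
 * handling or move it towards the inactive/cached lists.
 * Called with conf->device_lock held.
 */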
| 223 | static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, |
| 224 | struct list_head *temp_inactive_list) |
| 225 | { |
| 226 | int i; |
int injournal = 0; /* number of data pages with R5_InJournal */
| 228 | |
| 229 | BUG_ON(!list_empty(&sh->lru)); |
| 230 | BUG_ON(atomic_read(&conf->active_stripes)==0); |
| 231 | |
| 232 | if (r5c_is_writeback(conf->log)) |
| 233 | for (i = sh->disks; i--; ) |
| 234 | if (test_bit(R5_InJournal, &sh->dev[i].flags)) |
| 235 | injournal++; |
/*
 * When quiescing an r5c write-back array, set STRIPE_HANDLE for stripes
 * with data in the journal, so they are not released to the cached lists
 */
| 240 | if (conf->quiesce && r5c_is_writeback(conf->log) && |
| 241 | !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { |
| 242 | if (test_bit(STRIPE_R5C_CACHING, &sh->state)) |
| 243 | r5c_make_stripe_write_out(sh); |
| 244 | set_bit(STRIPE_HANDLE, &sh->state); |
| 245 | } |
| 246 | |
| 247 | if (test_bit(STRIPE_HANDLE, &sh->state)) { |
| 248 | if (test_bit(STRIPE_DELAYED, &sh->state) && |
| 249 | !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 250 | list_add_tail(&sh->lru, &conf->delayed_list); |
| 251 | else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && |
| 252 | sh->bm_seq - conf->seq_write > 0) |
| 253 | list_add_tail(&sh->lru, &conf->bitmap_list); |
| 254 | else { |
| 255 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 256 | clear_bit(STRIPE_BIT_DELAY, &sh->state); |
| 257 | if (conf->worker_cnt_per_group == 0) { |
| 258 | if (stripe_is_lowprio(sh)) |
| 259 | list_add_tail(&sh->lru, |
| 260 | &conf->loprio_list); |
| 261 | else |
| 262 | list_add_tail(&sh->lru, |
| 263 | &conf->handle_list); |
| 264 | } else { |
| 265 | raid5_wakeup_stripe_thread(sh); |
| 266 | return; |
| 267 | } |
| 268 | } |
| 269 | md_wakeup_thread(conf->mddev->thread); |
| 270 | } else { |
| 271 | BUG_ON(stripe_operations_active(sh)); |
| 272 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 273 | if (atomic_dec_return(&conf->preread_active_stripes) |
| 274 | < IO_THRESHOLD) |
| 275 | md_wakeup_thread(conf->mddev->thread); |
| 276 | atomic_dec(&conf->active_stripes); |
| 277 | if (!test_bit(STRIPE_EXPANDING, &sh->state)) { |
| 278 | if (!r5c_is_writeback(conf->log)) |
| 279 | list_add_tail(&sh->lru, temp_inactive_list); |
| 280 | else { |
| 281 | WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); |
| 282 | if (injournal == 0) |
| 283 | list_add_tail(&sh->lru, temp_inactive_list); |
| 284 | else if (injournal == conf->raid_disks - conf->max_degraded) { |
| 285 | /* full stripe */ |
| 286 | if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) |
| 287 | atomic_inc(&conf->r5c_cached_full_stripes); |
| 288 | if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) |
| 289 | atomic_dec(&conf->r5c_cached_partial_stripes); |
| 290 | list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); |
| 291 | r5c_check_cached_full_stripe(conf); |
| 292 | } else |
| 293 | /* |
| 294 | * STRIPE_R5C_PARTIAL_STRIPE is set in |
| 295 | * r5c_try_caching_write(). No need to |
| 296 | * set it again. |
| 297 | */ |
| 298 | list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); |
| 299 | } |
| 300 | } |
| 301 | } |
| 302 | } |
| 303 | |
| 304 | static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, |
| 305 | struct list_head *temp_inactive_list) |
| 306 | { |
| 307 | if (atomic_dec_and_test(&sh->count)) |
| 308 | do_release_stripe(conf, sh, temp_inactive_list); |
| 309 | } |
| 310 | |
/*
 * @hash may be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is an
 * array with one list per hash lock.
 *
 * Be careful: only one task can add/delete stripes from temp_inactive_list at
 * a given time. Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
| 318 | static void release_inactive_stripe_list(struct r5conf *conf, |
| 319 | struct list_head *temp_inactive_list, |
| 320 | int hash) |
| 321 | { |
| 322 | int size; |
| 323 | bool do_wakeup = false; |
| 324 | unsigned long flags; |
| 325 | |
| 326 | if (hash == NR_STRIPE_HASH_LOCKS) { |
| 327 | size = NR_STRIPE_HASH_LOCKS; |
| 328 | hash = NR_STRIPE_HASH_LOCKS - 1; |
| 329 | } else |
| 330 | size = 1; |
| 331 | while (size) { |
| 332 | struct list_head *list = &temp_inactive_list[size - 1]; |
| 333 | |
/*
 * We don't hold any lock here yet, so raid5_get_active_stripe() might
 * still remove stripes from the list
 */
| 338 | if (!list_empty_careful(list)) { |
| 339 | spin_lock_irqsave(conf->hash_locks + hash, flags); |
| 340 | if (list_empty(conf->inactive_list + hash) && |
| 341 | !list_empty(list)) |
| 342 | atomic_dec(&conf->empty_inactive_list_nr); |
| 343 | list_splice_tail_init(list, conf->inactive_list + hash); |
| 344 | do_wakeup = true; |
| 345 | spin_unlock_irqrestore(conf->hash_locks + hash, flags); |
| 346 | } |
| 347 | size--; |
| 348 | hash--; |
| 349 | } |
| 350 | |
| 351 | if (do_wakeup) { |
| 352 | wake_up(&conf->wait_for_stripe); |
| 353 | if (atomic_read(&conf->active_stripes) == 0) |
| 354 | wake_up(&conf->wait_for_quiescent); |
| 355 | if (conf->retry_read_aligned) |
| 356 | md_wakeup_thread(conf->mddev->thread); |
| 357 | } |
| 358 | } |
| 359 | |
| 360 | /* should hold conf->device_lock already */ |
| 361 | static int release_stripe_list(struct r5conf *conf, |
| 362 | struct list_head *temp_inactive_list) |
| 363 | { |
| 364 | struct stripe_head *sh, *t; |
| 365 | int count = 0; |
| 366 | struct llist_node *head; |
| 367 | |
| 368 | head = llist_del_all(&conf->released_stripes); |
| 369 | head = llist_reverse_order(head); |
| 370 | llist_for_each_entry_safe(sh, t, head, release_list) { |
| 371 | int hash; |
| 372 | |
/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
| 374 | smp_mb(); |
| 375 | clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); |
/*
 * Don't worry if the bit is set here, because if it is set
 * again, the count is always > 1. This is true for the
 * STRIPE_ON_UNPLUG_LIST bit too.
 */
| 381 | hash = sh->hash_lock_index; |
| 382 | __release_stripe(conf, sh, &temp_inactive_list[hash]); |
| 383 | count++; |
| 384 | } |
| 385 | |
| 386 | return count; |
| 387 | } |
| 388 | |
| 389 | void raid5_release_stripe(struct stripe_head *sh) |
| 390 | { |
| 391 | struct r5conf *conf = sh->raid_conf; |
| 392 | unsigned long flags; |
| 393 | struct list_head list; |
| 394 | int hash; |
| 395 | bool wakeup; |
| 396 | |
| 397 | /* Avoid release_list until the last reference. |
| 398 | */ |
| 399 | if (atomic_add_unless(&sh->count, -1, 1)) |
| 400 | return; |
| 401 | |
| 402 | if (unlikely(!conf->mddev->thread) || |
| 403 | test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) |
| 404 | goto slow_path; |
| 405 | wakeup = llist_add(&sh->release_list, &conf->released_stripes); |
| 406 | if (wakeup) |
| 407 | md_wakeup_thread(conf->mddev->thread); |
| 408 | return; |
| 409 | slow_path: |
| 410 | local_irq_save(flags); |
/* we are OK here whether STRIPE_ON_RELEASE_LIST is set or not */
| 412 | if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { |
| 413 | INIT_LIST_HEAD(&list); |
| 414 | hash = sh->hash_lock_index; |
| 415 | do_release_stripe(conf, sh, &list); |
| 416 | spin_unlock(&conf->device_lock); |
| 417 | release_inactive_stripe_list(conf, &list, hash); |
| 418 | } |
| 419 | local_irq_restore(flags); |
| 420 | } |
| 421 | |
| 422 | static inline void remove_hash(struct stripe_head *sh) |
| 423 | { |
| 424 | pr_debug("remove_hash(), stripe %llu\n", |
| 425 | (unsigned long long)sh->sector); |
| 426 | |
| 427 | hlist_del_init(&sh->hash); |
| 428 | } |
| 429 | |
| 430 | static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) |
| 431 | { |
| 432 | struct hlist_head *hp = stripe_hash(conf, sh->sector); |
| 433 | |
| 434 | pr_debug("insert_hash(), stripe %llu\n", |
| 435 | (unsigned long long)sh->sector); |
| 436 | |
| 437 | hlist_add_head(&sh->hash, hp); |
| 438 | } |
| 439 | |
| 440 | /* find an idle stripe, make sure it is unhashed, and return it. */ |
| 441 | static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) |
| 442 | { |
| 443 | struct stripe_head *sh = NULL; |
| 444 | struct list_head *first; |
| 445 | |
| 446 | if (list_empty(conf->inactive_list + hash)) |
| 447 | goto out; |
| 448 | first = (conf->inactive_list + hash)->next; |
| 449 | sh = list_entry(first, struct stripe_head, lru); |
| 450 | list_del_init(first); |
| 451 | remove_hash(sh); |
| 452 | atomic_inc(&conf->active_stripes); |
| 453 | BUG_ON(hash != sh->hash_lock_index); |
| 454 | if (list_empty(conf->inactive_list + hash)) |
| 455 | atomic_inc(&conf->empty_inactive_list_nr); |
| 456 | out: |
| 457 | return sh; |
| 458 | } |
| 459 | |
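/* Free the per-device cache pages (and the PPL page) attached to a stripe. */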
| 460 | static void shrink_buffers(struct stripe_head *sh) |
| 461 | { |
| 462 | struct page *p; |
| 463 | int i; |
| 464 | int num = sh->raid_conf->pool_size; |
| 465 | |
| 466 | for (i = 0; i < num ; i++) { |
| 467 | WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); |
| 468 | p = sh->dev[i].page; |
| 469 | if (!p) |
| 470 | continue; |
| 471 | sh->dev[i].page = NULL; |
| 472 | put_page(p); |
| 473 | } |
| 474 | |
| 475 | if (sh->ppl_page) { |
| 476 | put_page(sh->ppl_page); |
| 477 | sh->ppl_page = NULL; |
| 478 | } |
| 479 | } |
| 480 | |
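/*
 * Allocate a cache page for each device (and a PPL page if needed);
 * returns 1 on allocation failure, 0 on success.
 */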
| 481 | static int grow_buffers(struct stripe_head *sh, gfp_t gfp) |
| 482 | { |
| 483 | int i; |
| 484 | int num = sh->raid_conf->pool_size; |
| 485 | |
| 486 | for (i = 0; i < num; i++) { |
| 487 | struct page *page; |
| 488 | |
| 489 | if (!(page = alloc_page(gfp))) { |
| 490 | return 1; |
| 491 | } |
| 492 | sh->dev[i].page = page; |
| 493 | sh->dev[i].orig_page = page; |
| 494 | } |
| 495 | |
| 496 | if (raid5_has_ppl(sh->raid_conf)) { |
| 497 | sh->ppl_page = alloc_page(gfp); |
| 498 | if (!sh->ppl_page) |
| 499 | return 1; |
| 500 | } |
| 501 | |
| 502 | return 0; |
| 503 | } |
| 504 | |
| 505 | static void raid5_build_block(struct stripe_head *sh, int i, int previous); |
| 506 | static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, |
| 507 | struct stripe_head *sh); |
| 508 | |
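/*
 * (Re)initialise an idle stripe_head for a new sector under conf->gen_lock,
 * using the old or new geometry depending on 'previous', and insert it into
 * the hash table.
 */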
| 509 | static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) |
| 510 | { |
| 511 | struct r5conf *conf = sh->raid_conf; |
| 512 | int i, seq; |
| 513 | |
| 514 | BUG_ON(atomic_read(&sh->count) != 0); |
| 515 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); |
| 516 | BUG_ON(stripe_operations_active(sh)); |
| 517 | BUG_ON(sh->batch_head); |
| 518 | |
| 519 | pr_debug("init_stripe called, stripe %llu\n", |
| 520 | (unsigned long long)sector); |
| 521 | retry: |
| 522 | seq = read_seqcount_begin(&conf->gen_lock); |
| 523 | sh->generation = conf->generation - previous; |
| 524 | sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; |
| 525 | sh->sector = sector; |
| 526 | stripe_set_idx(sector, conf, previous, sh); |
| 527 | sh->state = 0; |
| 528 | |
| 529 | for (i = sh->disks; i--; ) { |
| 530 | struct r5dev *dev = &sh->dev[i]; |
| 531 | |
| 532 | if (dev->toread || dev->read || dev->towrite || dev->written || |
| 533 | test_bit(R5_LOCKED, &dev->flags)) { |
| 534 | pr_err("sector=%llx i=%d %p %p %p %p %d\n", |
| 535 | (unsigned long long)sh->sector, i, dev->toread, |
| 536 | dev->read, dev->towrite, dev->written, |
| 537 | test_bit(R5_LOCKED, &dev->flags)); |
| 538 | WARN_ON(1); |
| 539 | } |
| 540 | dev->flags = 0; |
| 541 | raid5_build_block(sh, i, previous); |
| 542 | } |
| 543 | if (read_seqcount_retry(&conf->gen_lock, seq)) |
| 544 | goto retry; |
| 545 | sh->overwrite_disks = 0; |
| 546 | insert_hash(conf, sh); |
| 547 | sh->cpu = smp_processor_id(); |
| 548 | set_bit(STRIPE_BATCH_READY, &sh->state); |
| 549 | } |
| 550 | |
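/* Look up a stripe in the hash table by sector and generation. */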
| 551 | static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, |
| 552 | short generation) |
| 553 | { |
| 554 | struct stripe_head *sh; |
| 555 | |
| 556 | pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); |
| 557 | hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) |
| 558 | if (sh->sector == sector && sh->generation == generation) |
| 559 | return sh; |
| 560 | pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); |
| 561 | return NULL; |
| 562 | } |
| 563 | |
| 564 | /* |
| 565 | * Need to check if array has failed when deciding whether to: |
| 566 | * - start an array |
| 567 | * - remove non-faulty devices |
| 568 | * - add a spare |
| 569 | * - allow a reshape |
| 570 | * This determination is simple when no reshape is happening. |
| 571 | * However if there is a reshape, we need to carefully check |
| 572 | * both the before and after sections. |
| 573 | * This is because some failed devices may only affect one |
| 574 | * of the two sections, and some non-in_sync devices may |
| 575 | * be insync in the section most affected by failed devices. |
| 576 | */ |
| 577 | int raid5_calc_degraded(struct r5conf *conf) |
| 578 | { |
| 579 | int degraded, degraded2; |
| 580 | int i; |
| 581 | |
| 582 | rcu_read_lock(); |
| 583 | degraded = 0; |
| 584 | for (i = 0; i < conf->previous_raid_disks; i++) { |
| 585 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
| 586 | if (rdev && test_bit(Faulty, &rdev->flags)) |
| 587 | rdev = rcu_dereference(conf->disks[i].replacement); |
| 588 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
| 589 | degraded++; |
| 590 | else if (test_bit(In_sync, &rdev->flags)) |
| 591 | ; |
| 592 | else |
| 593 | /* not in-sync or faulty. |
| 594 | * If the reshape increases the number of devices, |
| 595 | * this is being recovered by the reshape, so |
| 596 | * this 'previous' section is not in_sync. |
| 597 | * If the number of devices is being reduced however, |
| 598 | * the device can only be part of the array if |
| 599 | * we are reverting a reshape, so this section will |
| 600 | * be in-sync. |
| 601 | */ |
| 602 | if (conf->raid_disks >= conf->previous_raid_disks) |
| 603 | degraded++; |
| 604 | } |
| 605 | rcu_read_unlock(); |
| 606 | if (conf->raid_disks == conf->previous_raid_disks) |
| 607 | return degraded; |
| 608 | rcu_read_lock(); |
| 609 | degraded2 = 0; |
| 610 | for (i = 0; i < conf->raid_disks; i++) { |
| 611 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
| 612 | if (rdev && test_bit(Faulty, &rdev->flags)) |
| 613 | rdev = rcu_dereference(conf->disks[i].replacement); |
| 614 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
| 615 | degraded2++; |
| 616 | else if (test_bit(In_sync, &rdev->flags)) |
| 617 | ; |
| 618 | else |
| 619 | /* not in-sync or faulty. |
| 620 | * If reshape increases the number of devices, this |
| 621 | * section has already been recovered, else it |
| 622 | * almost certainly hasn't. |
| 623 | */ |
| 624 | if (conf->raid_disks <= conf->previous_raid_disks) |
| 625 | degraded2++; |
| 626 | } |
| 627 | rcu_read_unlock(); |
| 628 | if (degraded2 > degraded) |
| 629 | return degraded2; |
| 630 | return degraded; |
| 631 | } |
| 632 | |
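/* Return 1 if more devices have failed than the array can tolerate. */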
| 633 | static int has_failed(struct r5conf *conf) |
| 634 | { |
| 635 | int degraded; |
| 636 | |
| 637 | if (conf->mddev->reshape_position == MaxSector) |
| 638 | return conf->mddev->degraded > conf->max_degraded; |
| 639 | |
| 640 | degraded = raid5_calc_degraded(conf); |
| 641 | if (degraded > conf->max_degraded) |
| 642 | return 1; |
| 643 | return 0; |
| 644 | } |
| 645 | |
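/*
 * Get an active reference to the stripe_head covering 'sector', allocating
 * or reclaiming an inactive stripe if necessary. May block waiting for an
 * inactive stripe unless 'noblock' is set, in which case NULL can be
 * returned.
 */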
| 646 | struct stripe_head * |
| 647 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, |
| 648 | int previous, int noblock, int noquiesce) |
| 649 | { |
| 650 | struct stripe_head *sh; |
| 651 | int hash = stripe_hash_locks_hash(sector); |
| 652 | int inc_empty_inactive_list_flag; |
| 653 | |
| 654 | pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); |
| 655 | |
| 656 | spin_lock_irq(conf->hash_locks + hash); |
| 657 | |
| 658 | do { |
| 659 | wait_event_lock_irq(conf->wait_for_quiescent, |
| 660 | conf->quiesce == 0 || noquiesce, |
| 661 | *(conf->hash_locks + hash)); |
| 662 | sh = __find_stripe(conf, sector, conf->generation - previous); |
| 663 | if (!sh) { |
| 664 | if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { |
| 665 | sh = get_free_stripe(conf, hash); |
| 666 | if (!sh && !test_bit(R5_DID_ALLOC, |
| 667 | &conf->cache_state)) |
| 668 | set_bit(R5_ALLOC_MORE, |
| 669 | &conf->cache_state); |
| 670 | } |
| 671 | if (noblock && sh == NULL) |
| 672 | break; |
| 673 | |
| 674 | r5c_check_stripe_cache_usage(conf); |
| 675 | if (!sh) { |
| 676 | set_bit(R5_INACTIVE_BLOCKED, |
| 677 | &conf->cache_state); |
| 678 | r5l_wake_reclaim(conf->log, 0); |
| 679 | wait_event_lock_irq( |
| 680 | conf->wait_for_stripe, |
| 681 | !list_empty(conf->inactive_list + hash) && |
| 682 | (atomic_read(&conf->active_stripes) |
| 683 | < (conf->max_nr_stripes * 3 / 4) |
| 684 | || !test_bit(R5_INACTIVE_BLOCKED, |
| 685 | &conf->cache_state)), |
| 686 | *(conf->hash_locks + hash)); |
| 687 | clear_bit(R5_INACTIVE_BLOCKED, |
| 688 | &conf->cache_state); |
| 689 | } else { |
| 690 | init_stripe(sh, sector, previous); |
| 691 | atomic_inc(&sh->count); |
| 692 | } |
| 693 | } else if (!atomic_inc_not_zero(&sh->count)) { |
| 694 | spin_lock(&conf->device_lock); |
| 695 | if (!atomic_read(&sh->count)) { |
| 696 | if (!test_bit(STRIPE_HANDLE, &sh->state)) |
| 697 | atomic_inc(&conf->active_stripes); |
| 698 | BUG_ON(list_empty(&sh->lru) && |
| 699 | !test_bit(STRIPE_EXPANDING, &sh->state)); |
| 700 | inc_empty_inactive_list_flag = 0; |
| 701 | if (!list_empty(conf->inactive_list + hash)) |
| 702 | inc_empty_inactive_list_flag = 1; |
| 703 | list_del_init(&sh->lru); |
| 704 | if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) |
| 705 | atomic_inc(&conf->empty_inactive_list_nr); |
| 706 | if (sh->group) { |
| 707 | sh->group->stripes_cnt--; |
| 708 | sh->group = NULL; |
| 709 | } |
| 710 | } |
| 711 | atomic_inc(&sh->count); |
| 712 | spin_unlock(&conf->device_lock); |
| 713 | } |
| 714 | } while (sh == NULL); |
| 715 | |
| 716 | spin_unlock_irq(conf->hash_locks + hash); |
| 717 | return sh; |
| 718 | } |
| 719 | |
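/* A full stripe write has a write pending for every data block of the stripe. */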
| 720 | static bool is_full_stripe_write(struct stripe_head *sh) |
| 721 | { |
| 722 | BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); |
| 723 | return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); |
| 724 | } |
| 725 | |
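/*
 * Always take the two stripe locks in pointer order so that concurrent
 * callers cannot deadlock against each other.
 */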
| 726 | static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
| 727 | { |
| 728 | local_irq_disable(); |
| 729 | if (sh1 > sh2) { |
| 730 | spin_lock(&sh2->stripe_lock); |
| 731 | spin_lock_nested(&sh1->stripe_lock, 1); |
| 732 | } else { |
| 733 | spin_lock(&sh1->stripe_lock); |
| 734 | spin_lock_nested(&sh2->stripe_lock, 1); |
| 735 | } |
| 736 | } |
| 737 | |
| 738 | static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
| 739 | { |
| 740 | spin_unlock(&sh1->stripe_lock); |
| 741 | spin_unlock(&sh2->stripe_lock); |
| 742 | local_irq_enable(); |
| 743 | } |
| 744 | |
/* Only a freshly created, full-stripe, normal-write stripe can be added to a batch list */
| 746 | static bool stripe_can_batch(struct stripe_head *sh) |
| 747 | { |
| 748 | struct r5conf *conf = sh->raid_conf; |
| 749 | |
| 750 | if (conf->log || raid5_has_ppl(conf)) |
| 751 | return false; |
| 752 | return test_bit(STRIPE_BATCH_READY, &sh->state) && |
| 753 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && |
| 754 | is_full_stripe_write(sh); |
| 755 | } |
| 756 | |
/* we only search backwards */
| 758 | static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) |
| 759 | { |
| 760 | struct stripe_head *head; |
| 761 | sector_t head_sector, tmp_sec; |
| 762 | int hash; |
| 763 | int dd_idx; |
| 764 | int inc_empty_inactive_list_flag; |
| 765 | |
/* Don't cross chunk boundaries, so the stripe's pd_idx/qd_idx stays the same */
| 767 | tmp_sec = sh->sector; |
| 768 | if (!sector_div(tmp_sec, conf->chunk_sectors)) |
| 769 | return; |
| 770 | head_sector = sh->sector - STRIPE_SECTORS; |
| 771 | |
| 772 | hash = stripe_hash_locks_hash(head_sector); |
| 773 | spin_lock_irq(conf->hash_locks + hash); |
| 774 | head = __find_stripe(conf, head_sector, conf->generation); |
| 775 | if (head && !atomic_inc_not_zero(&head->count)) { |
| 776 | spin_lock(&conf->device_lock); |
| 777 | if (!atomic_read(&head->count)) { |
| 778 | if (!test_bit(STRIPE_HANDLE, &head->state)) |
| 779 | atomic_inc(&conf->active_stripes); |
| 780 | BUG_ON(list_empty(&head->lru) && |
| 781 | !test_bit(STRIPE_EXPANDING, &head->state)); |
| 782 | inc_empty_inactive_list_flag = 0; |
| 783 | if (!list_empty(conf->inactive_list + hash)) |
| 784 | inc_empty_inactive_list_flag = 1; |
| 785 | list_del_init(&head->lru); |
| 786 | if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) |
| 787 | atomic_inc(&conf->empty_inactive_list_nr); |
| 788 | if (head->group) { |
| 789 | head->group->stripes_cnt--; |
| 790 | head->group = NULL; |
| 791 | } |
| 792 | } |
| 793 | atomic_inc(&head->count); |
| 794 | spin_unlock(&conf->device_lock); |
| 795 | } |
| 796 | spin_unlock_irq(conf->hash_locks + hash); |
| 797 | |
| 798 | if (!head) |
| 799 | return; |
| 800 | if (!stripe_can_batch(head)) |
| 801 | goto out; |
| 802 | |
| 803 | lock_two_stripes(head, sh); |
/* clear_batch_ready() clears the flag */
| 805 | if (!stripe_can_batch(head) || !stripe_can_batch(sh)) |
| 806 | goto unlock_out; |
| 807 | |
| 808 | if (sh->batch_head) |
| 809 | goto unlock_out; |
| 810 | |
| 811 | dd_idx = 0; |
| 812 | while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) |
| 813 | dd_idx++; |
| 814 | if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || |
| 815 | bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) |
| 816 | goto unlock_out; |
| 817 | |
| 818 | if (head->batch_head) { |
| 819 | spin_lock(&head->batch_head->batch_lock); |
| 820 | /* This batch list is already running */ |
| 821 | if (!stripe_can_batch(head)) { |
| 822 | spin_unlock(&head->batch_head->batch_lock); |
| 823 | goto unlock_out; |
| 824 | } |
| 825 | |
/*
 * at this point, head's BATCH_READY could be cleared, but we
 * can still add the stripe to the batch list
 */
| 830 | list_add(&sh->batch_list, &head->batch_list); |
| 831 | spin_unlock(&head->batch_head->batch_lock); |
| 832 | |
| 833 | sh->batch_head = head->batch_head; |
| 834 | } else { |
| 835 | head->batch_head = head; |
| 836 | sh->batch_head = head->batch_head; |
| 837 | spin_lock(&head->batch_lock); |
| 838 | list_add_tail(&sh->batch_list, &head->batch_list); |
| 839 | spin_unlock(&head->batch_lock); |
| 840 | } |
| 841 | |
| 842 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 843 | if (atomic_dec_return(&conf->preread_active_stripes) |
| 844 | < IO_THRESHOLD) |
| 845 | md_wakeup_thread(conf->mddev->thread); |
| 846 | |
| 847 | if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { |
| 848 | int seq = sh->bm_seq; |
| 849 | if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && |
| 850 | sh->batch_head->bm_seq > seq) |
| 851 | seq = sh->batch_head->bm_seq; |
| 852 | set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); |
| 853 | sh->batch_head->bm_seq = seq; |
| 854 | } |
| 855 | |
| 856 | atomic_inc(&sh->count); |
| 857 | unlock_out: |
| 858 | unlock_two_stripes(head, sh); |
| 859 | out: |
| 860 | raid5_release_stripe(head); |
| 861 | } |
| 862 | |
| 863 | /* Determine if 'data_offset' or 'new_data_offset' should be used |
| 864 | * in this stripe_head. |
| 865 | */ |
| 866 | static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) |
| 867 | { |
| 868 | sector_t progress = conf->reshape_progress; |
| 869 | /* Need a memory barrier to make sure we see the value |
| 870 | * of conf->generation, or ->data_offset that was set before |
| 871 | * reshape_progress was updated. |
| 872 | */ |
| 873 | smp_rmb(); |
| 874 | if (progress == MaxSector) |
| 875 | return 0; |
| 876 | if (sh->generation == conf->generation - 1) |
| 877 | return 0; |
| 878 | /* We are in a reshape, and this is a new-generation stripe, |
| 879 | * so use new_data_offset. |
| 880 | */ |
| 881 | return 1; |
| 882 | } |
| 883 | |
| 884 | static void dispatch_bio_list(struct bio_list *tmp) |
| 885 | { |
| 886 | struct bio *bio; |
| 887 | |
| 888 | while ((bio = bio_list_pop(tmp))) |
| 889 | generic_make_request(bio); |
| 890 | } |
| 891 | |
| 892 | static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) |
| 893 | { |
| 894 | const struct r5pending_data *da = list_entry(a, |
| 895 | struct r5pending_data, sibling); |
| 896 | const struct r5pending_data *db = list_entry(b, |
| 897 | struct r5pending_data, sibling); |
| 898 | if (da->sector > db->sector) |
| 899 | return 1; |
| 900 | if (da->sector < db->sector) |
| 901 | return -1; |
| 902 | return 0; |
| 903 | } |
| 904 | |
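/*
 * Collect the bios of up to 'target' pending entries (kept sorted by
 * sector, resuming after the last dispatched entry) onto 'list' and
 * recycle the entries to the free list.
 */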
| 905 | static void dispatch_defer_bios(struct r5conf *conf, int target, |
| 906 | struct bio_list *list) |
| 907 | { |
| 908 | struct r5pending_data *data; |
| 909 | struct list_head *first, *next = NULL; |
| 910 | int cnt = 0; |
| 911 | |
| 912 | if (conf->pending_data_cnt == 0) |
| 913 | return; |
| 914 | |
| 915 | list_sort(NULL, &conf->pending_list, cmp_stripe); |
| 916 | |
| 917 | first = conf->pending_list.next; |
| 918 | |
| 919 | /* temporarily move the head */ |
| 920 | if (conf->next_pending_data) |
| 921 | list_move_tail(&conf->pending_list, |
| 922 | &conf->next_pending_data->sibling); |
| 923 | |
| 924 | while (!list_empty(&conf->pending_list)) { |
| 925 | data = list_first_entry(&conf->pending_list, |
| 926 | struct r5pending_data, sibling); |
| 927 | if (&data->sibling == first) |
| 928 | first = data->sibling.next; |
| 929 | next = data->sibling.next; |
| 930 | |
| 931 | bio_list_merge(list, &data->bios); |
| 932 | list_move(&data->sibling, &conf->free_list); |
| 933 | cnt++; |
| 934 | if (cnt >= target) |
| 935 | break; |
| 936 | } |
| 937 | conf->pending_data_cnt -= cnt; |
| 938 | BUG_ON(conf->pending_data_cnt < 0 || cnt < target); |
| 939 | |
| 940 | if (next != &conf->pending_list) |
| 941 | conf->next_pending_data = list_entry(next, |
| 942 | struct r5pending_data, sibling); |
| 943 | else |
| 944 | conf->next_pending_data = NULL; |
| 945 | /* list isn't empty */ |
| 946 | if (first != &conf->pending_list) |
| 947 | list_move_tail(&conf->pending_list, first); |
| 948 | } |
| 949 | |
| 950 | static void flush_deferred_bios(struct r5conf *conf) |
| 951 | { |
| 952 | struct bio_list tmp = BIO_EMPTY_LIST; |
| 953 | |
| 954 | if (conf->pending_data_cnt == 0) |
| 955 | return; |
| 956 | |
| 957 | spin_lock(&conf->pending_bios_lock); |
| 958 | dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); |
| 959 | BUG_ON(conf->pending_data_cnt != 0); |
| 960 | spin_unlock(&conf->pending_bios_lock); |
| 961 | |
| 962 | dispatch_bio_list(&tmp); |
| 963 | } |
| 964 | |
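/*
 * Add a stripe's bios to the deferred-dispatch list; once PENDING_IO_MAX
 * entries have accumulated, dispatch a batch of PENDING_IO_ONE_FLUSH
 * entries immediately.
 */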
| 965 | static void defer_issue_bios(struct r5conf *conf, sector_t sector, |
| 966 | struct bio_list *bios) |
| 967 | { |
| 968 | struct bio_list tmp = BIO_EMPTY_LIST; |
| 969 | struct r5pending_data *ent; |
| 970 | |
| 971 | spin_lock(&conf->pending_bios_lock); |
| 972 | ent = list_first_entry(&conf->free_list, struct r5pending_data, |
| 973 | sibling); |
| 974 | list_move_tail(&ent->sibling, &conf->pending_list); |
| 975 | ent->sector = sector; |
| 976 | bio_list_init(&ent->bios); |
| 977 | bio_list_merge(&ent->bios, bios); |
| 978 | conf->pending_data_cnt++; |
| 979 | if (conf->pending_data_cnt >= PENDING_IO_MAX) |
| 980 | dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); |
| 981 | |
| 982 | spin_unlock(&conf->pending_bios_lock); |
| 983 | |
| 984 | dispatch_bio_list(&tmp); |
| 985 | } |
| 986 | |
| 987 | static void |
| 988 | raid5_end_read_request(struct bio *bi); |
| 989 | static void |
| 990 | raid5_end_write_request(struct bio *bi); |
| 991 | |
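/*
 * Issue the I/O that the stripe state machine decided on for each device of
 * the stripe (and of any batched stripes): reads, writes and replacement
 * writes, honouring bad-block ranges and deferring writes when batch
 * dispatch is enabled.
 */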
| 992 | static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) |
| 993 | { |
| 994 | struct r5conf *conf = sh->raid_conf; |
| 995 | int i, disks = sh->disks; |
| 996 | struct stripe_head *head_sh = sh; |
| 997 | struct bio_list pending_bios = BIO_EMPTY_LIST; |
| 998 | bool should_defer; |
| 999 | |
| 1000 | might_sleep(); |
| 1001 | |
| 1002 | if (log_stripe(sh, s) == 0) |
| 1003 | return; |
| 1004 | |
| 1005 | should_defer = conf->batch_bio_dispatch && conf->group_cnt; |
| 1006 | |
| 1007 | for (i = disks; i--; ) { |
| 1008 | int op, op_flags = 0; |
| 1009 | int replace_only = 0; |
| 1010 | struct bio *bi, *rbi; |
| 1011 | struct md_rdev *rdev, *rrdev = NULL; |
| 1012 | |
| 1013 | sh = head_sh; |
| 1014 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { |
| 1015 | op = REQ_OP_WRITE; |
| 1016 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) |
| 1017 | op_flags = REQ_FUA; |
| 1018 | if (test_bit(R5_Discard, &sh->dev[i].flags)) |
| 1019 | op = REQ_OP_DISCARD; |
| 1020 | } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) |
| 1021 | op = REQ_OP_READ; |
| 1022 | else if (test_and_clear_bit(R5_WantReplace, |
| 1023 | &sh->dev[i].flags)) { |
| 1024 | op = REQ_OP_WRITE; |
| 1025 | replace_only = 1; |
| 1026 | } else |
| 1027 | continue; |
| 1028 | if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) |
| 1029 | op_flags |= REQ_SYNC; |
| 1030 | |
| 1031 | again: |
| 1032 | bi = &sh->dev[i].req; |
| 1033 | rbi = &sh->dev[i].rreq; /* For writing to replacement */ |
| 1034 | |
| 1035 | rcu_read_lock(); |
| 1036 | rrdev = rcu_dereference(conf->disks[i].replacement); |
| 1037 | smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */ |
| 1038 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 1039 | if (!rdev) { |
| 1040 | rdev = rrdev; |
| 1041 | rrdev = NULL; |
| 1042 | } |
| 1043 | if (op_is_write(op)) { |
| 1044 | if (replace_only) |
| 1045 | rdev = NULL; |
| 1046 | if (rdev == rrdev) |
| 1047 | /* We raced and saw duplicates */ |
| 1048 | rrdev = NULL; |
| 1049 | } else { |
| 1050 | if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev) |
| 1051 | rdev = rrdev; |
| 1052 | rrdev = NULL; |
| 1053 | } |
| 1054 | |
| 1055 | if (rdev && test_bit(Faulty, &rdev->flags)) |
| 1056 | rdev = NULL; |
| 1057 | if (rdev) |
| 1058 | atomic_inc(&rdev->nr_pending); |
| 1059 | if (rrdev && test_bit(Faulty, &rrdev->flags)) |
| 1060 | rrdev = NULL; |
| 1061 | if (rrdev) |
| 1062 | atomic_inc(&rrdev->nr_pending); |
| 1063 | rcu_read_unlock(); |
| 1064 | |
/* We have already checked bad blocks for reads. Now we
 * need to check for writes. We never accept write errors
 * on the replacement, so we don't need to check rrdev.
 */
| 1069 | while (op_is_write(op) && rdev && |
| 1070 | test_bit(WriteErrorSeen, &rdev->flags)) { |
| 1071 | sector_t first_bad; |
| 1072 | int bad_sectors; |
| 1073 | int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, |
| 1074 | &first_bad, &bad_sectors); |
| 1075 | if (!bad) |
| 1076 | break; |
| 1077 | |
| 1078 | if (bad < 0) { |
| 1079 | set_bit(BlockedBadBlocks, &rdev->flags); |
| 1080 | if (!conf->mddev->external && |
| 1081 | conf->mddev->sb_flags) { |
/* It is very unlikely, but we might
 * still need to write out the
 * bad block log - better give it
 * a chance */
| 1086 | md_check_recovery(conf->mddev); |
| 1087 | } |
| 1088 | /* |
| 1089 | * Because md_wait_for_blocked_rdev |
| 1090 | * will dec nr_pending, we must |
| 1091 | * increment it first. |
| 1092 | */ |
| 1093 | atomic_inc(&rdev->nr_pending); |
| 1094 | md_wait_for_blocked_rdev(rdev, conf->mddev); |
| 1095 | } else { |
| 1096 | /* Acknowledged bad block - skip the write */ |
| 1097 | rdev_dec_pending(rdev, conf->mddev); |
| 1098 | rdev = NULL; |
| 1099 | } |
| 1100 | } |
| 1101 | |
| 1102 | if (rdev) { |
| 1103 | if (s->syncing || s->expanding || s->expanded |
| 1104 | || s->replacing) |
| 1105 | md_sync_acct(rdev->bdev, STRIPE_SECTORS); |
| 1106 | |
| 1107 | set_bit(STRIPE_IO_STARTED, &sh->state); |
| 1108 | |
| 1109 | bi->bi_bdev = rdev->bdev; |
| 1110 | bio_set_op_attrs(bi, op, op_flags); |
| 1111 | bi->bi_end_io = op_is_write(op) |
| 1112 | ? raid5_end_write_request |
| 1113 | : raid5_end_read_request; |
| 1114 | bi->bi_private = sh; |
| 1115 | |
| 1116 | pr_debug("%s: for %llu schedule op %d on disc %d\n", |
| 1117 | __func__, (unsigned long long)sh->sector, |
| 1118 | bi->bi_opf, i); |
| 1119 | atomic_inc(&sh->count); |
| 1120 | if (sh != head_sh) |
| 1121 | atomic_inc(&head_sh->count); |
| 1122 | if (use_new_offset(conf, sh)) |
| 1123 | bi->bi_iter.bi_sector = (sh->sector |
| 1124 | + rdev->new_data_offset); |
| 1125 | else |
| 1126 | bi->bi_iter.bi_sector = (sh->sector |
| 1127 | + rdev->data_offset); |
| 1128 | if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) |
| 1129 | bi->bi_opf |= REQ_NOMERGE; |
| 1130 | |
| 1131 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) |
| 1132 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
| 1133 | |
| 1134 | if (!op_is_write(op) && |
| 1135 | test_bit(R5_InJournal, &sh->dev[i].flags)) |
/*
 * we are issuing a read for a page in the journal; this
 * must be preparing for prexor in rmw, so read the
 * data into orig_page
 */
| 1141 | sh->dev[i].vec.bv_page = sh->dev[i].orig_page; |
| 1142 | else |
| 1143 | sh->dev[i].vec.bv_page = sh->dev[i].page; |
| 1144 | bi->bi_vcnt = 1; |
| 1145 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
| 1146 | bi->bi_io_vec[0].bv_offset = 0; |
| 1147 | bi->bi_iter.bi_size = STRIPE_SIZE; |
/*
 * If this is a discard request, set bi_vcnt to 0. We don't
 * want to confuse SCSI because SCSI will replace the payload
 */
| 1152 | if (op == REQ_OP_DISCARD) |
| 1153 | bi->bi_vcnt = 0; |
| 1154 | if (rrdev) |
| 1155 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); |
| 1156 | |
| 1157 | if (conf->mddev->gendisk) |
| 1158 | trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), |
| 1159 | bi, disk_devt(conf->mddev->gendisk), |
| 1160 | sh->dev[i].sector); |
| 1161 | if (should_defer && op_is_write(op)) |
| 1162 | bio_list_add(&pending_bios, bi); |
| 1163 | else |
| 1164 | generic_make_request(bi); |
| 1165 | } |
| 1166 | if (rrdev) { |
| 1167 | if (s->syncing || s->expanding || s->expanded |
| 1168 | || s->replacing) |
| 1169 | md_sync_acct(rrdev->bdev, STRIPE_SECTORS); |
| 1170 | |
| 1171 | set_bit(STRIPE_IO_STARTED, &sh->state); |
| 1172 | |
| 1173 | rbi->bi_bdev = rrdev->bdev; |
| 1174 | bio_set_op_attrs(rbi, op, op_flags); |
| 1175 | BUG_ON(!op_is_write(op)); |
| 1176 | rbi->bi_end_io = raid5_end_write_request; |
| 1177 | rbi->bi_private = sh; |
| 1178 | |
| 1179 | pr_debug("%s: for %llu schedule op %d on " |
| 1180 | "replacement disc %d\n", |
| 1181 | __func__, (unsigned long long)sh->sector, |
| 1182 | rbi->bi_opf, i); |
| 1183 | atomic_inc(&sh->count); |
| 1184 | if (sh != head_sh) |
| 1185 | atomic_inc(&head_sh->count); |
| 1186 | if (use_new_offset(conf, sh)) |
| 1187 | rbi->bi_iter.bi_sector = (sh->sector |
| 1188 | + rrdev->new_data_offset); |
| 1189 | else |
| 1190 | rbi->bi_iter.bi_sector = (sh->sector |
| 1191 | + rrdev->data_offset); |
| 1192 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) |
| 1193 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
| 1194 | sh->dev[i].rvec.bv_page = sh->dev[i].page; |
| 1195 | rbi->bi_vcnt = 1; |
| 1196 | rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
| 1197 | rbi->bi_io_vec[0].bv_offset = 0; |
| 1198 | rbi->bi_iter.bi_size = STRIPE_SIZE; |
/*
 * If this is a discard request, set bi_vcnt to 0. We don't
 * want to confuse SCSI because SCSI will replace the payload
 */
| 1203 | if (op == REQ_OP_DISCARD) |
| 1204 | rbi->bi_vcnt = 0; |
| 1205 | if (conf->mddev->gendisk) |
| 1206 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), |
| 1207 | rbi, disk_devt(conf->mddev->gendisk), |
| 1208 | sh->dev[i].sector); |
| 1209 | if (should_defer && op_is_write(op)) |
| 1210 | bio_list_add(&pending_bios, rbi); |
| 1211 | else |
| 1212 | generic_make_request(rbi); |
| 1213 | } |
| 1214 | if (!rdev && !rrdev) { |
| 1215 | if (op_is_write(op)) |
| 1216 | set_bit(STRIPE_DEGRADED, &sh->state); |
| 1217 | pr_debug("skip op %d on disc %d for sector %llu\n", |
| 1218 | bi->bi_opf, i, (unsigned long long)sh->sector); |
| 1219 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1220 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1221 | } |
| 1222 | |
| 1223 | if (!head_sh->batch_head) |
| 1224 | continue; |
| 1225 | sh = list_first_entry(&sh->batch_list, struct stripe_head, |
| 1226 | batch_list); |
| 1227 | if (sh != head_sh) |
| 1228 | goto again; |
| 1229 | } |
| 1230 | |
| 1231 | if (should_defer && !bio_list_empty(&pending_bios)) |
| 1232 | defer_issue_bios(conf, head_sh->sector, &pending_bios); |
| 1233 | } |
| 1234 | |
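/*
 * Copy data between a bio and a stripe cache page (direction selected by
 * 'frombio') using the async_tx API, chaining onto 'tx'.
 */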
| 1235 | static struct dma_async_tx_descriptor * |
| 1236 | async_copy_data(int frombio, struct bio *bio, struct page **page, |
| 1237 | sector_t sector, struct dma_async_tx_descriptor *tx, |
| 1238 | struct stripe_head *sh, int no_skipcopy) |
| 1239 | { |
| 1240 | struct bio_vec bvl; |
| 1241 | struct bvec_iter iter; |
| 1242 | struct page *bio_page; |
| 1243 | int page_offset; |
| 1244 | struct async_submit_ctl submit; |
| 1245 | enum async_tx_flags flags = 0; |
| 1246 | |
| 1247 | if (bio->bi_iter.bi_sector >= sector) |
| 1248 | page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; |
| 1249 | else |
| 1250 | page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; |
| 1251 | |
| 1252 | if (frombio) |
| 1253 | flags |= ASYNC_TX_FENCE; |
| 1254 | init_async_submit(&submit, flags, tx, NULL, NULL, NULL); |
| 1255 | |
| 1256 | bio_for_each_segment(bvl, bio, iter) { |
| 1257 | int len = bvl.bv_len; |
| 1258 | int clen; |
| 1259 | int b_offset = 0; |
| 1260 | |
| 1261 | if (page_offset < 0) { |
| 1262 | b_offset = -page_offset; |
| 1263 | page_offset += b_offset; |
| 1264 | len -= b_offset; |
| 1265 | } |
| 1266 | |
| 1267 | if (len > 0 && page_offset + len > STRIPE_SIZE) |
| 1268 | clen = STRIPE_SIZE - page_offset; |
| 1269 | else |
| 1270 | clen = len; |
| 1271 | |
| 1272 | if (clen > 0) { |
| 1273 | b_offset += bvl.bv_offset; |
| 1274 | bio_page = bvl.bv_page; |
| 1275 | if (frombio) { |
| 1276 | if (sh->raid_conf->skip_copy && |
| 1277 | b_offset == 0 && page_offset == 0 && |
| 1278 | clen == STRIPE_SIZE && |
| 1279 | !no_skipcopy) |
| 1280 | *page = bio_page; |
| 1281 | else |
| 1282 | tx = async_memcpy(*page, bio_page, page_offset, |
| 1283 | b_offset, clen, &submit); |
| 1284 | } else |
| 1285 | tx = async_memcpy(bio_page, *page, b_offset, |
| 1286 | page_offset, clen, &submit); |
| 1287 | } |
| 1288 | /* chain the operations */ |
| 1289 | submit.depend_tx = tx; |
| 1290 | |
| 1291 | if (clen < len) /* hit end of page */ |
| 1292 | break; |
| 1293 | page_offset += len; |
| 1294 | } |
| 1295 | |
| 1296 | return tx; |
| 1297 | } |
| 1298 | |
| 1299 | static void ops_complete_biofill(void *stripe_head_ref) |
| 1300 | { |
| 1301 | struct stripe_head *sh = stripe_head_ref; |
| 1302 | int i; |
| 1303 | |
| 1304 | pr_debug("%s: stripe %llu\n", __func__, |
| 1305 | (unsigned long long)sh->sector); |
| 1306 | |
| 1307 | /* clear completed biofills */ |
| 1308 | for (i = sh->disks; i--; ) { |
| 1309 | struct r5dev *dev = &sh->dev[i]; |
| 1310 | |
/* acknowledge completion of a biofill operation and check
 * whether we need to reply to a read request; new R5_Wantfill
 * requests are held off until !STRIPE_BIOFILL_RUN
 */
| 1316 | if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { |
| 1317 | struct bio *rbi, *rbi2; |
| 1318 | |
| 1319 | BUG_ON(!dev->read); |
| 1320 | rbi = dev->read; |
| 1321 | dev->read = NULL; |
| 1322 | while (rbi && rbi->bi_iter.bi_sector < |
| 1323 | dev->sector + STRIPE_SECTORS) { |
| 1324 | rbi2 = r5_next_bio(rbi, dev->sector); |
| 1325 | bio_endio(rbi); |
| 1326 | rbi = rbi2; |
| 1327 | } |
| 1328 | } |
| 1329 | } |
| 1330 | clear_bit(STRIPE_BIOFILL_RUN, &sh->state); |
| 1331 | |
| 1332 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1333 | raid5_release_stripe(sh); |
| 1334 | } |
| 1335 | |
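/*
 * Copy data from the stripe cache pages into the waiting read bios
 * (devices flagged R5_Wantfill); the bios are completed in
 * ops_complete_biofill().
 */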
| 1336 | static void ops_run_biofill(struct stripe_head *sh) |
| 1337 | { |
| 1338 | struct dma_async_tx_descriptor *tx = NULL; |
| 1339 | struct async_submit_ctl submit; |
| 1340 | int i; |
| 1341 | |
| 1342 | BUG_ON(sh->batch_head); |
| 1343 | pr_debug("%s: stripe %llu\n", __func__, |
| 1344 | (unsigned long long)sh->sector); |
| 1345 | |
| 1346 | for (i = sh->disks; i--; ) { |
| 1347 | struct r5dev *dev = &sh->dev[i]; |
| 1348 | if (test_bit(R5_Wantfill, &dev->flags)) { |
| 1349 | struct bio *rbi; |
| 1350 | spin_lock_irq(&sh->stripe_lock); |
| 1351 | dev->read = rbi = dev->toread; |
| 1352 | dev->toread = NULL; |
| 1353 | spin_unlock_irq(&sh->stripe_lock); |
| 1354 | while (rbi && rbi->bi_iter.bi_sector < |
| 1355 | dev->sector + STRIPE_SECTORS) { |
| 1356 | tx = async_copy_data(0, rbi, &dev->page, |
| 1357 | dev->sector, tx, sh, 0); |
| 1358 | rbi = r5_next_bio(rbi, dev->sector); |
| 1359 | } |
| 1360 | } |
| 1361 | } |
| 1362 | |
| 1363 | atomic_inc(&sh->count); |
| 1364 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); |
| 1365 | async_trigger_callback(&submit); |
| 1366 | } |
| 1367 | |
| 1368 | static void mark_target_uptodate(struct stripe_head *sh, int target) |
| 1369 | { |
| 1370 | struct r5dev *tgt; |
| 1371 | |
| 1372 | if (target < 0) |
| 1373 | return; |
| 1374 | |
| 1375 | tgt = &sh->dev[target]; |
| 1376 | set_bit(R5_UPTODATE, &tgt->flags); |
| 1377 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); |
| 1378 | clear_bit(R5_Wantcompute, &tgt->flags); |
| 1379 | } |
| 1380 | |
| 1381 | static void ops_complete_compute(void *stripe_head_ref) |
| 1382 | { |
| 1383 | struct stripe_head *sh = stripe_head_ref; |
| 1384 | |
| 1385 | pr_debug("%s: stripe %llu\n", __func__, |
| 1386 | (unsigned long long)sh->sector); |
| 1387 | |
| 1388 | /* mark the computed target(s) as uptodate */ |
| 1389 | mark_target_uptodate(sh, sh->ops.target); |
| 1390 | mark_target_uptodate(sh, sh->ops.target2); |
| 1391 | |
| 1392 | clear_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 1393 | if (sh->check_state == check_state_compute_run) |
| 1394 | sh->check_state = check_state_compute_result; |
| 1395 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1396 | raid5_release_stripe(sh); |
| 1397 | } |
| 1398 | |
| 1399 | /* return a pointer to the address conversion region of the scribble buffer */ |
| 1400 | static addr_conv_t *to_addr_conv(struct stripe_head *sh, |
| 1401 | struct raid5_percpu *percpu, int i) |
| 1402 | { |
| 1403 | void *addr; |
| 1404 | |
| 1405 | addr = flex_array_get(percpu->scribble, i); |
| 1406 | return addr + sizeof(struct page *) * (sh->disks + 2); |
| 1407 | } |
| 1408 | |
| 1409 | /* return a pointer to the address conversion region of the scribble buffer */ |
| 1410 | static struct page **to_addr_page(struct raid5_percpu *percpu, int i) |
| 1411 | { |
| 1412 | void *addr; |
| 1413 | |
| 1414 | addr = flex_array_get(percpu->scribble, i); |
| 1415 | return addr; |
| 1416 | } |
| 1417 | |
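/*
 * RAID5: compute the requested block (sh->ops.target) as the XOR of all
 * the other blocks in the stripe.
 */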
| 1418 | static struct dma_async_tx_descriptor * |
| 1419 | ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) |
| 1420 | { |
| 1421 | int disks = sh->disks; |
| 1422 | struct page **xor_srcs = to_addr_page(percpu, 0); |
| 1423 | int target = sh->ops.target; |
| 1424 | struct r5dev *tgt = &sh->dev[target]; |
| 1425 | struct page *xor_dest = tgt->page; |
| 1426 | int count = 0; |
| 1427 | struct dma_async_tx_descriptor *tx; |
| 1428 | struct async_submit_ctl submit; |
| 1429 | int i; |
| 1430 | |
| 1431 | BUG_ON(sh->batch_head); |
| 1432 | |
| 1433 | pr_debug("%s: stripe %llu block: %d\n", |
| 1434 | __func__, (unsigned long long)sh->sector, target); |
| 1435 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); |
| 1436 | |
| 1437 | for (i = disks; i--; ) |
| 1438 | if (i != target) |
| 1439 | xor_srcs[count++] = sh->dev[i].page; |
| 1440 | |
| 1441 | atomic_inc(&sh->count); |
| 1442 | |
| 1443 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, |
| 1444 | ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); |
| 1445 | if (unlikely(count == 1)) |
| 1446 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); |
| 1447 | else |
| 1448 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
| 1449 | |
| 1450 | return tx; |
| 1451 | } |
| 1452 | |
| 1453 | /* set_syndrome_sources - populate source buffers for gen_syndrome |
| 1454 | * @srcs - (struct page *) array of size sh->disks |
| 1455 | * @sh - stripe_head to parse |
| 1456 | * |
| 1457 | * Populates srcs in proper layout order for the stripe and returns the |
| 1458 | * 'count' of sources to be used in a call to async_gen_syndrome. The P |
| 1459 | * destination buffer is recorded in srcs[count] and the Q destination |
 * is recorded in srcs[count+1].
| 1461 | */ |
| 1462 | static int set_syndrome_sources(struct page **srcs, |
| 1463 | struct stripe_head *sh, |
| 1464 | int srctype) |
| 1465 | { |
| 1466 | int disks = sh->disks; |
| 1467 | int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); |
| 1468 | int d0_idx = raid6_d0(sh); |
| 1469 | int count; |
| 1470 | int i; |
| 1471 | |
| 1472 | for (i = 0; i < disks; i++) |
| 1473 | srcs[i] = NULL; |
| 1474 | |
| 1475 | count = 0; |
| 1476 | i = d0_idx; |
| 1477 | do { |
| 1478 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); |
| 1479 | struct r5dev *dev = &sh->dev[i]; |
| 1480 | |
| 1481 | if (i == sh->qd_idx || i == sh->pd_idx || |
| 1482 | (srctype == SYNDROME_SRC_ALL) || |
| 1483 | (srctype == SYNDROME_SRC_WANT_DRAIN && |
| 1484 | (test_bit(R5_Wantdrain, &dev->flags) || |
| 1485 | test_bit(R5_InJournal, &dev->flags))) || |
| 1486 | (srctype == SYNDROME_SRC_WRITTEN && |
| 1487 | (dev->written || |
| 1488 | test_bit(R5_InJournal, &dev->flags)))) { |
| 1489 | if (test_bit(R5_InJournal, &dev->flags)) |
| 1490 | srcs[slot] = sh->dev[i].orig_page; |
| 1491 | else |
| 1492 | srcs[slot] = sh->dev[i].page; |
| 1493 | } |
| 1494 | i = raid6_next_disk(i, disks); |
| 1495 | } while (i != d0_idx); |
| 1496 | |
| 1497 | return syndrome_disks; |
| 1498 | } |
| 1499 | |
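/*
 * RAID6: recompute a single missing block - either regenerate Q via
 * syndrome generation, or rebuild a data/P block as the XOR of the others.
 */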
| 1500 | static struct dma_async_tx_descriptor * |
| 1501 | ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) |
| 1502 | { |
| 1503 | int disks = sh->disks; |
| 1504 | struct page **blocks = to_addr_page(percpu, 0); |
| 1505 | int target; |
| 1506 | int qd_idx = sh->qd_idx; |
| 1507 | struct dma_async_tx_descriptor *tx; |
| 1508 | struct async_submit_ctl submit; |
| 1509 | struct r5dev *tgt; |
| 1510 | struct page *dest; |
| 1511 | int i; |
| 1512 | int count; |
| 1513 | |
| 1514 | BUG_ON(sh->batch_head); |
| 1515 | if (sh->ops.target < 0) |
| 1516 | target = sh->ops.target2; |
| 1517 | else if (sh->ops.target2 < 0) |
| 1518 | target = sh->ops.target; |
| 1519 | else |
| 1520 | /* we should only have one valid target */ |
| 1521 | BUG(); |
| 1522 | BUG_ON(target < 0); |
| 1523 | pr_debug("%s: stripe %llu block: %d\n", |
| 1524 | __func__, (unsigned long long)sh->sector, target); |
| 1525 | |
| 1526 | tgt = &sh->dev[target]; |
| 1527 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); |
| 1528 | dest = tgt->page; |
| 1529 | |
| 1530 | atomic_inc(&sh->count); |
| 1531 | |
| 1532 | if (target == qd_idx) { |
| 1533 | count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); |
| 1534 | blocks[count] = NULL; /* regenerating p is not necessary */ |
| 1535 | BUG_ON(blocks[count+1] != dest); /* q should already be set */ |
| 1536 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, |
| 1537 | ops_complete_compute, sh, |
| 1538 | to_addr_conv(sh, percpu, 0)); |
| 1539 | tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); |
| 1540 | } else { |
| 1541 | /* Compute any data- or p-drive using XOR */ |
| 1542 | count = 0; |
| 1543 | for (i = disks; i-- ; ) { |
| 1544 | if (i == target || i == qd_idx) |
| 1545 | continue; |
| 1546 | blocks[count++] = sh->dev[i].page; |
| 1547 | } |
| 1548 | |
| 1549 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, |
| 1550 | NULL, ops_complete_compute, sh, |
| 1551 | to_addr_conv(sh, percpu, 0)); |
| 1552 | tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); |
| 1553 | } |
| 1554 | |
| 1555 | return tx; |
| 1556 | } |
| 1557 | |
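/*
 * RAID6: recompute two missing blocks, choosing between syndrome
 * regeneration and the async raid6 recovery helpers depending on which
 * combination of data, P and Q is missing.
 */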
| 1558 | static struct dma_async_tx_descriptor * |
| 1559 | ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) |
| 1560 | { |
| 1561 | int i, count, disks = sh->disks; |
| 1562 | int syndrome_disks = sh->ddf_layout ? disks : disks-2; |
| 1563 | int d0_idx = raid6_d0(sh); |
| 1564 | int faila = -1, failb = -1; |
| 1565 | int target = sh->ops.target; |
| 1566 | int target2 = sh->ops.target2; |
| 1567 | struct r5dev *tgt = &sh->dev[target]; |
| 1568 | struct r5dev *tgt2 = &sh->dev[target2]; |
| 1569 | struct dma_async_tx_descriptor *tx; |
| 1570 | struct page **blocks = to_addr_page(percpu, 0); |
| 1571 | struct async_submit_ctl submit; |
| 1572 | |
| 1573 | BUG_ON(sh->batch_head); |
| 1574 | pr_debug("%s: stripe %llu block1: %d block2: %d\n", |
| 1575 | __func__, (unsigned long long)sh->sector, target, target2); |
| 1576 | BUG_ON(target < 0 || target2 < 0); |
| 1577 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); |
| 1578 | BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); |
| 1579 | |
| 1580 | /* we need to open-code set_syndrome_sources to handle the |
| 1581 | * slot number conversion for 'faila' and 'failb' |
| 1582 | */ |
| 1583 | for (i = 0; i < disks ; i++) |
| 1584 | blocks[i] = NULL; |
| 1585 | count = 0; |
| 1586 | i = d0_idx; |
| 1587 | do { |
| 1588 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); |
| 1589 | |
| 1590 | blocks[slot] = sh->dev[i].page; |
| 1591 | |
| 1592 | if (i == target) |
| 1593 | faila = slot; |
| 1594 | if (i == target2) |
| 1595 | failb = slot; |
| 1596 | i = raid6_next_disk(i, disks); |
| 1597 | } while (i != d0_idx); |
| 1598 | |
| 1599 | BUG_ON(faila == failb); |
| 1600 | if (failb < faila) |
| 1601 | swap(faila, failb); |
| 1602 | pr_debug("%s: stripe: %llu faila: %d failb: %d\n", |
| 1603 | __func__, (unsigned long long)sh->sector, faila, failb); |
| 1604 | |
| 1605 | atomic_inc(&sh->count); |
| 1606 | |
| 1607 | if (failb == syndrome_disks+1) { |
| 1608 | /* Q disk is one of the missing disks */ |
| 1609 | if (faila == syndrome_disks) { |
| 1610 | /* Missing P+Q, just recompute */ |
| 1611 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, |
| 1612 | ops_complete_compute, sh, |
| 1613 | to_addr_conv(sh, percpu, 0)); |
| 1614 | return async_gen_syndrome(blocks, 0, syndrome_disks+2, |
| 1615 | STRIPE_SIZE, &submit); |
| 1616 | } else { |
| 1617 | struct page *dest; |
| 1618 | int data_target; |
| 1619 | int qd_idx = sh->qd_idx; |
| 1620 | |
| 1621 | /* Missing D+Q: recompute D from P, then recompute Q */ |
| 1622 | if (target == qd_idx) |
| 1623 | data_target = target2; |
| 1624 | else |
| 1625 | data_target = target; |
| 1626 | |
| 1627 | count = 0; |
| 1628 | for (i = disks; i-- ; ) { |
| 1629 | if (i == data_target || i == qd_idx) |
| 1630 | continue; |
| 1631 | blocks[count++] = sh->dev[i].page; |
| 1632 | } |
| 1633 | dest = sh->dev[data_target].page; |
| 1634 | init_async_submit(&submit, |
| 1635 | ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, |
| 1636 | NULL, NULL, NULL, |
| 1637 | to_addr_conv(sh, percpu, 0)); |
| 1638 | tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, |
| 1639 | &submit); |
| 1640 | |
| 1641 | count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); |
| 1642 | init_async_submit(&submit, ASYNC_TX_FENCE, tx, |
| 1643 | ops_complete_compute, sh, |
| 1644 | to_addr_conv(sh, percpu, 0)); |
| 1645 | return async_gen_syndrome(blocks, 0, count+2, |
| 1646 | STRIPE_SIZE, &submit); |
| 1647 | } |
| 1648 | } else { |
| 1649 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, |
| 1650 | ops_complete_compute, sh, |
| 1651 | to_addr_conv(sh, percpu, 0)); |
| 1652 | if (failb == syndrome_disks) { |
| 1653 | /* We're missing D+P. */ |
| 1654 | return async_raid6_datap_recov(syndrome_disks+2, |
| 1655 | STRIPE_SIZE, faila, |
| 1656 | blocks, &submit); |
| 1657 | } else { |
| 1658 | /* We're missing D+D. */ |
| 1659 | return async_raid6_2data_recov(syndrome_disks+2, |
| 1660 | STRIPE_SIZE, faila, failb, |
| 1661 | blocks, &submit); |
| 1662 | } |
| 1663 | } |
| 1664 | } |
| 1665 | |
| 1666 | static void ops_complete_prexor(void *stripe_head_ref) |
| 1667 | { |
| 1668 | struct stripe_head *sh = stripe_head_ref; |
| 1669 | |
| 1670 | pr_debug("%s: stripe %llu\n", __func__, |
| 1671 | (unsigned long long)sh->sector); |
| 1672 | |
| 1673 | if (r5c_is_writeback(sh->raid_conf->log)) |
| 1674 | /* |
| 1675 | * raid5-cache write back uses orig_page during prexor. |
| 1676 | * After prexor, it is time to free orig_page |
| 1677 | */ |
| 1678 | r5c_release_extra_page(sh); |
| 1679 | } |
| 1680 | |
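/*
 * RAID-4/5 prexor: XOR the old contents of the blocks about to be
 * overwritten (and any data still cached in the journal) out of the
 * existing parity, as the first step of a read-modify-write update.
 */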
| 1681 | static struct dma_async_tx_descriptor * |
| 1682 | ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1683 | struct dma_async_tx_descriptor *tx) |
| 1684 | { |
| 1685 | int disks = sh->disks; |
| 1686 | struct page **xor_srcs = to_addr_page(percpu, 0); |
| 1687 | int count = 0, pd_idx = sh->pd_idx, i; |
| 1688 | struct async_submit_ctl submit; |
| 1689 | |
| 1690 | /* existing parity data subtracted */ |
| 1691 | struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; |
| 1692 | |
| 1693 | BUG_ON(sh->batch_head); |
| 1694 | pr_debug("%s: stripe %llu\n", __func__, |
| 1695 | (unsigned long long)sh->sector); |
| 1696 | |
| 1697 | for (i = disks; i--; ) { |
| 1698 | struct r5dev *dev = &sh->dev[i]; |
| 1699 | /* Only process blocks that are known to be uptodate */ |
| 1700 | if (test_bit(R5_InJournal, &dev->flags)) |
| 1701 | xor_srcs[count++] = dev->orig_page; |
| 1702 | else if (test_bit(R5_Wantdrain, &dev->flags)) |
| 1703 | xor_srcs[count++] = dev->page; |
| 1704 | } |
| 1705 | |
| 1706 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, |
| 1707 | ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); |
| 1708 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
| 1709 | |
| 1710 | return tx; |
| 1711 | } |
| 1712 | |
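/*
 * RAID-6 prexor: subtract the old contents of the drain blocks from both
 * P and Q (ASYNC_TX_PQ_XOR_DST) before the new data is drained in.
 */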
| 1713 | static struct dma_async_tx_descriptor * |
| 1714 | ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1715 | struct dma_async_tx_descriptor *tx) |
| 1716 | { |
| 1717 | struct page **blocks = to_addr_page(percpu, 0); |
| 1718 | int count; |
| 1719 | struct async_submit_ctl submit; |
| 1720 | |
| 1721 | pr_debug("%s: stripe %llu\n", __func__, |
| 1722 | (unsigned long long)sh->sector); |
| 1723 | |
| 1724 | count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); |
| 1725 | |
| 1726 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx, |
| 1727 | ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); |
| 1728 | tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); |
| 1729 | |
| 1730 | return tx; |
| 1731 | } |
| 1732 | |
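/*
 * Copy data from the queued write bios (dev->towrite) into the stripe
 * cache pages of every device flagged R5_Wantdrain, walking all members
 * of a batched stripe.  REQ_FUA/REQ_SYNC and discards are recorded in the
 * per-device flags so they can be honoured when the stripe is written out.
 */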
| 1733 | static struct dma_async_tx_descriptor * |
| 1734 | ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) |
| 1735 | { |
| 1736 | struct r5conf *conf = sh->raid_conf; |
| 1737 | int disks = sh->disks; |
| 1738 | int i; |
| 1739 | struct stripe_head *head_sh = sh; |
| 1740 | |
| 1741 | pr_debug("%s: stripe %llu\n", __func__, |
| 1742 | (unsigned long long)sh->sector); |
| 1743 | |
| 1744 | for (i = disks; i--; ) { |
| 1745 | struct r5dev *dev; |
| 1746 | struct bio *chosen; |
| 1747 | |
| 1748 | sh = head_sh; |
| 1749 | if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) { |
| 1750 | struct bio *wbi; |
| 1751 | |
| 1752 | again: |
| 1753 | dev = &sh->dev[i]; |
| 1754 | /* |
| 1755 | * clear R5_InJournal, so when rewriting a page in |
| 1756 | * journal, it is not skipped by r5l_log_stripe() |
| 1757 | */ |
| 1758 | clear_bit(R5_InJournal, &dev->flags); |
| 1759 | spin_lock_irq(&sh->stripe_lock); |
| 1760 | chosen = dev->towrite; |
| 1761 | dev->towrite = NULL; |
| 1762 | sh->overwrite_disks = 0; |
| 1763 | BUG_ON(dev->written); |
| 1764 | wbi = dev->written = chosen; |
| 1765 | spin_unlock_irq(&sh->stripe_lock); |
| 1766 | WARN_ON(dev->page != dev->orig_page); |
| 1767 | |
| 1768 | while (wbi && wbi->bi_iter.bi_sector < |
| 1769 | dev->sector + STRIPE_SECTORS) { |
| 1770 | if (wbi->bi_opf & REQ_FUA) |
| 1771 | set_bit(R5_WantFUA, &dev->flags); |
| 1772 | if (wbi->bi_opf & REQ_SYNC) |
| 1773 | set_bit(R5_SyncIO, &dev->flags); |
| 1774 | if (bio_op(wbi) == REQ_OP_DISCARD) |
| 1775 | set_bit(R5_Discard, &dev->flags); |
| 1776 | else { |
| 1777 | tx = async_copy_data(1, wbi, &dev->page, |
| 1778 | dev->sector, tx, sh, |
| 1779 | r5c_is_writeback(conf->log)); |
| 1780 | if (dev->page != dev->orig_page && |
| 1781 | !r5c_is_writeback(conf->log)) { |
| 1782 | set_bit(R5_SkipCopy, &dev->flags); |
| 1783 | clear_bit(R5_UPTODATE, &dev->flags); |
| 1784 | clear_bit(R5_OVERWRITE, &dev->flags); |
| 1785 | } |
| 1786 | } |
| 1787 | wbi = r5_next_bio(wbi, dev->sector); |
| 1788 | } |
| 1789 | |
| 1790 | if (head_sh->batch_head) { |
| 1791 | sh = list_first_entry(&sh->batch_list, |
| 1792 | struct stripe_head, |
| 1793 | batch_list); |
| 1794 | if (sh == head_sh) |
| 1795 | continue; |
| 1796 | goto again; |
| 1797 | } |
| 1798 | } |
| 1799 | } |
| 1800 | |
| 1801 | return tx; |
| 1802 | } |
| 1803 | |
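/*
 * Completion callback for parity reconstruction: mark the written blocks
 * and parity up to date, propagate FUA/SYNC hints to the parity devices,
 * advance reconstruct_state and hand the stripe back to the handler.
 */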
| 1804 | static void ops_complete_reconstruct(void *stripe_head_ref) |
| 1805 | { |
| 1806 | struct stripe_head *sh = stripe_head_ref; |
| 1807 | int disks = sh->disks; |
| 1808 | int pd_idx = sh->pd_idx; |
| 1809 | int qd_idx = sh->qd_idx; |
| 1810 | int i; |
| 1811 | bool fua = false, sync = false, discard = false; |
| 1812 | |
| 1813 | pr_debug("%s: stripe %llu\n", __func__, |
| 1814 | (unsigned long long)sh->sector); |
| 1815 | |
| 1816 | for (i = disks; i--; ) { |
| 1817 | fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); |
| 1818 | sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); |
| 1819 | discard |= test_bit(R5_Discard, &sh->dev[i].flags); |
| 1820 | } |
| 1821 | |
| 1822 | for (i = disks; i--; ) { |
| 1823 | struct r5dev *dev = &sh->dev[i]; |
| 1824 | |
| 1825 | if (dev->written || i == pd_idx || i == qd_idx) { |
| 1826 | if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) |
| 1827 | set_bit(R5_UPTODATE, &dev->flags); |
| 1828 | if (fua) |
| 1829 | set_bit(R5_WantFUA, &dev->flags); |
| 1830 | if (sync) |
| 1831 | set_bit(R5_SyncIO, &dev->flags); |
| 1832 | } |
| 1833 | } |
| 1834 | |
| 1835 | if (sh->reconstruct_state == reconstruct_state_drain_run) |
| 1836 | sh->reconstruct_state = reconstruct_state_drain_result; |
| 1837 | else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) |
| 1838 | sh->reconstruct_state = reconstruct_state_prexor_drain_result; |
| 1839 | else { |
| 1840 | BUG_ON(sh->reconstruct_state != reconstruct_state_run); |
| 1841 | sh->reconstruct_state = reconstruct_state_result; |
| 1842 | } |
| 1843 | |
| 1844 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1845 | raid5_release_stripe(sh); |
| 1846 | } |
| 1847 | |
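/*
 * Generate new RAID-4/5 parity for the stripe (and any stripes batched
 * with it): either XOR the drained data into the prexor'd parity
 * (read-modify-write) or recompute parity over all data blocks
 * (reconstruct-write).  A stripe that is entirely a discard short-circuits
 * straight to the completion callback.
 */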
| 1848 | static void |
| 1849 | ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1850 | struct dma_async_tx_descriptor *tx) |
| 1851 | { |
| 1852 | int disks = sh->disks; |
| 1853 | struct page **xor_srcs; |
| 1854 | struct async_submit_ctl submit; |
| 1855 | int count, pd_idx = sh->pd_idx, i; |
| 1856 | struct page *xor_dest; |
| 1857 | int prexor = 0; |
| 1858 | unsigned long flags; |
| 1859 | int j = 0; |
| 1860 | struct stripe_head *head_sh = sh; |
| 1861 | int last_stripe; |
| 1862 | |
| 1863 | pr_debug("%s: stripe %llu\n", __func__, |
| 1864 | (unsigned long long)sh->sector); |
| 1865 | |
| 1866 | for (i = 0; i < sh->disks; i++) { |
| 1867 | if (pd_idx == i) |
| 1868 | continue; |
| 1869 | if (!test_bit(R5_Discard, &sh->dev[i].flags)) |
| 1870 | break; |
| 1871 | } |
| 1872 | if (i >= sh->disks) { |
| 1873 | atomic_inc(&sh->count); |
| 1874 | set_bit(R5_Discard, &sh->dev[pd_idx].flags); |
| 1875 | ops_complete_reconstruct(sh); |
| 1876 | return; |
| 1877 | } |
| 1878 | again: |
| 1879 | count = 0; |
| 1880 | xor_srcs = to_addr_page(percpu, j); |
| 1881 | /* check if prexor is active which means only process blocks |
| 1882 | * that are part of a read-modify-write (written) |
| 1883 | */ |
| 1884 | if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) { |
| 1885 | prexor = 1; |
| 1886 | xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; |
| 1887 | for (i = disks; i--; ) { |
| 1888 | struct r5dev *dev = &sh->dev[i]; |
| 1889 | if (head_sh->dev[i].written || |
| 1890 | test_bit(R5_InJournal, &head_sh->dev[i].flags)) |
| 1891 | xor_srcs[count++] = dev->page; |
| 1892 | } |
| 1893 | } else { |
| 1894 | xor_dest = sh->dev[pd_idx].page; |
| 1895 | for (i = disks; i--; ) { |
| 1896 | struct r5dev *dev = &sh->dev[i]; |
| 1897 | if (i != pd_idx) |
| 1898 | xor_srcs[count++] = dev->page; |
| 1899 | } |
| 1900 | } |
| 1901 | |
	/* 1/ if we prexor'd then the dest is reused as a source, so set
	 *    ASYNC_TX_XOR_DROP_DST
	 * 2/ if we did not prexor then we are redoing the parity, so set
	 *    ASYNC_TX_XOR_ZERO_DST (needed for the synchronous xor case)
	 */
| 1907 | last_stripe = !head_sh->batch_head || |
| 1908 | list_first_entry(&sh->batch_list, |
| 1909 | struct stripe_head, batch_list) == head_sh; |
| 1910 | if (last_stripe) { |
| 1911 | flags = ASYNC_TX_ACK | |
| 1912 | (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); |
| 1913 | |
| 1914 | atomic_inc(&head_sh->count); |
| 1915 | init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh, |
| 1916 | to_addr_conv(sh, percpu, j)); |
| 1917 | } else { |
| 1918 | flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST; |
| 1919 | init_async_submit(&submit, flags, tx, NULL, NULL, |
| 1920 | to_addr_conv(sh, percpu, j)); |
| 1921 | } |
| 1922 | |
| 1923 | if (unlikely(count == 1)) |
| 1924 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); |
| 1925 | else |
| 1926 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
| 1927 | if (!last_stripe) { |
| 1928 | j++; |
| 1929 | sh = list_first_entry(&sh->batch_list, struct stripe_head, |
| 1930 | batch_list); |
| 1931 | goto again; |
| 1932 | } |
| 1933 | } |
| 1934 | |
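/*
 * RAID-6 counterpart of ops_run_reconstruct5(): regenerate the P/Q
 * syndrome for the stripe (and any batched stripes), either over the
 * written blocks only (after a prexor) or over all data blocks.
 */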
| 1935 | static void |
| 1936 | ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1937 | struct dma_async_tx_descriptor *tx) |
| 1938 | { |
| 1939 | struct async_submit_ctl submit; |
| 1940 | struct page **blocks; |
| 1941 | int count, i, j = 0; |
| 1942 | struct stripe_head *head_sh = sh; |
| 1943 | int last_stripe; |
| 1944 | int synflags; |
| 1945 | unsigned long txflags; |
| 1946 | |
| 1947 | pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); |
| 1948 | |
| 1949 | for (i = 0; i < sh->disks; i++) { |
| 1950 | if (sh->pd_idx == i || sh->qd_idx == i) |
| 1951 | continue; |
| 1952 | if (!test_bit(R5_Discard, &sh->dev[i].flags)) |
| 1953 | break; |
| 1954 | } |
| 1955 | if (i >= sh->disks) { |
| 1956 | atomic_inc(&sh->count); |
| 1957 | set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); |
| 1958 | set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); |
| 1959 | ops_complete_reconstruct(sh); |
| 1960 | return; |
| 1961 | } |
| 1962 | |
| 1963 | again: |
| 1964 | blocks = to_addr_page(percpu, j); |
| 1965 | |
| 1966 | if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { |
| 1967 | synflags = SYNDROME_SRC_WRITTEN; |
| 1968 | txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST; |
| 1969 | } else { |
| 1970 | synflags = SYNDROME_SRC_ALL; |
| 1971 | txflags = ASYNC_TX_ACK; |
| 1972 | } |
| 1973 | |
| 1974 | count = set_syndrome_sources(blocks, sh, synflags); |
| 1975 | last_stripe = !head_sh->batch_head || |
| 1976 | list_first_entry(&sh->batch_list, |
| 1977 | struct stripe_head, batch_list) == head_sh; |
| 1978 | |
| 1979 | if (last_stripe) { |
| 1980 | atomic_inc(&head_sh->count); |
| 1981 | init_async_submit(&submit, txflags, tx, ops_complete_reconstruct, |
| 1982 | head_sh, to_addr_conv(sh, percpu, j)); |
| 1983 | } else |
| 1984 | init_async_submit(&submit, 0, tx, NULL, NULL, |
| 1985 | to_addr_conv(sh, percpu, j)); |
| 1986 | tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); |
| 1987 | if (!last_stripe) { |
| 1988 | j++; |
| 1989 | sh = list_first_entry(&sh->batch_list, struct stripe_head, |
| 1990 | batch_list); |
| 1991 | goto again; |
| 1992 | } |
| 1993 | } |
| 1994 | |
| 1995 | static void ops_complete_check(void *stripe_head_ref) |
| 1996 | { |
| 1997 | struct stripe_head *sh = stripe_head_ref; |
| 1998 | |
| 1999 | pr_debug("%s: stripe %llu\n", __func__, |
| 2000 | (unsigned long long)sh->sector); |
| 2001 | |
| 2002 | sh->check_state = check_state_check_result; |
| 2003 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2004 | raid5_release_stripe(sh); |
| 2005 | } |
| 2006 | |
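/*
 * Verify the P parity of a stripe: XOR the stored parity together with
 * all data blocks and record whether the result is zero in
 * sh->ops.zero_sum_result; a non-zero result means the parity is bad.
 */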
| 2007 | static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) |
| 2008 | { |
| 2009 | int disks = sh->disks; |
| 2010 | int pd_idx = sh->pd_idx; |
| 2011 | int qd_idx = sh->qd_idx; |
| 2012 | struct page *xor_dest; |
| 2013 | struct page **xor_srcs = to_addr_page(percpu, 0); |
| 2014 | struct dma_async_tx_descriptor *tx; |
| 2015 | struct async_submit_ctl submit; |
| 2016 | int count; |
| 2017 | int i; |
| 2018 | |
| 2019 | pr_debug("%s: stripe %llu\n", __func__, |
| 2020 | (unsigned long long)sh->sector); |
| 2021 | |
| 2022 | BUG_ON(sh->batch_head); |
| 2023 | count = 0; |
| 2024 | xor_dest = sh->dev[pd_idx].page; |
| 2025 | xor_srcs[count++] = xor_dest; |
| 2026 | for (i = disks; i--; ) { |
| 2027 | if (i == pd_idx || i == qd_idx) |
| 2028 | continue; |
| 2029 | xor_srcs[count++] = sh->dev[i].page; |
| 2030 | } |
| 2031 | |
| 2032 | init_async_submit(&submit, 0, NULL, NULL, NULL, |
| 2033 | to_addr_conv(sh, percpu, 0)); |
| 2034 | tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, |
| 2035 | &sh->ops.zero_sum_result, &submit); |
| 2036 | |
| 2037 | atomic_inc(&sh->count); |
| 2038 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); |
| 2039 | tx = async_trigger_callback(&submit); |
| 2040 | } |
| 2041 | |
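/*
 * Verify the Q syndrome (and the P parity too when 'checkp' is set)
 * against the data blocks using async_syndrome_val(); the outcome is
 * reported through sh->ops.zero_sum_result.
 */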
| 2042 | static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) |
| 2043 | { |
| 2044 | struct page **srcs = to_addr_page(percpu, 0); |
| 2045 | struct async_submit_ctl submit; |
| 2046 | int count; |
| 2047 | |
| 2048 | pr_debug("%s: stripe %llu checkp: %d\n", __func__, |
| 2049 | (unsigned long long)sh->sector, checkp); |
| 2050 | |
| 2051 | BUG_ON(sh->batch_head); |
| 2052 | count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); |
| 2053 | if (!checkp) |
| 2054 | srcs[count] = NULL; |
| 2055 | |
| 2056 | atomic_inc(&sh->count); |
| 2057 | init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, |
| 2058 | sh, to_addr_conv(sh, percpu, 0)); |
| 2059 | async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, |
| 2060 | &sh->ops.zero_sum_result, percpu->spare_page, &submit); |
| 2061 | } |
| 2062 | |
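/*
 * Dispatch the asynchronous operations requested for this stripe
 * (biofill, compute, prexor, biodrain, reconstruct, check), chaining the
 * returned dma descriptors so that dependent steps execute in order, and
 * finally clear R5_Overlap and wake up waiters once the bios have been
 * absorbed.
 */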
| 2063 | static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
| 2064 | { |
| 2065 | int overlap_clear = 0, i, disks = sh->disks; |
| 2066 | struct dma_async_tx_descriptor *tx = NULL; |
| 2067 | struct r5conf *conf = sh->raid_conf; |
| 2068 | int level = conf->level; |
| 2069 | struct raid5_percpu *percpu; |
| 2070 | unsigned long cpu; |
| 2071 | |
| 2072 | cpu = get_cpu(); |
| 2073 | percpu = per_cpu_ptr(conf->percpu, cpu); |
| 2074 | if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { |
| 2075 | ops_run_biofill(sh); |
| 2076 | overlap_clear++; |
| 2077 | } |
| 2078 | |
| 2079 | if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { |
| 2080 | if (level < 6) |
| 2081 | tx = ops_run_compute5(sh, percpu); |
| 2082 | else { |
| 2083 | if (sh->ops.target2 < 0 || sh->ops.target < 0) |
| 2084 | tx = ops_run_compute6_1(sh, percpu); |
| 2085 | else |
| 2086 | tx = ops_run_compute6_2(sh, percpu); |
| 2087 | } |
| 2088 | /* terminate the chain if reconstruct is not set to be run */ |
| 2089 | if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) |
| 2090 | async_tx_ack(tx); |
| 2091 | } |
| 2092 | |
| 2093 | if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request)) |
| 2094 | tx = ops_run_partial_parity(sh, percpu, tx); |
| 2095 | |
| 2096 | if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { |
| 2097 | if (level < 6) |
| 2098 | tx = ops_run_prexor5(sh, percpu, tx); |
| 2099 | else |
| 2100 | tx = ops_run_prexor6(sh, percpu, tx); |
| 2101 | } |
| 2102 | |
| 2103 | if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { |
| 2104 | tx = ops_run_biodrain(sh, tx); |
| 2105 | overlap_clear++; |
| 2106 | } |
| 2107 | |
| 2108 | if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { |
| 2109 | if (level < 6) |
| 2110 | ops_run_reconstruct5(sh, percpu, tx); |
| 2111 | else |
| 2112 | ops_run_reconstruct6(sh, percpu, tx); |
| 2113 | } |
| 2114 | |
| 2115 | if (test_bit(STRIPE_OP_CHECK, &ops_request)) { |
| 2116 | if (sh->check_state == check_state_run) |
| 2117 | ops_run_check_p(sh, percpu); |
| 2118 | else if (sh->check_state == check_state_run_q) |
| 2119 | ops_run_check_pq(sh, percpu, 0); |
| 2120 | else if (sh->check_state == check_state_run_pq) |
| 2121 | ops_run_check_pq(sh, percpu, 1); |
| 2122 | else |
| 2123 | BUG(); |
| 2124 | } |
| 2125 | |
| 2126 | if (overlap_clear && !sh->batch_head) |
| 2127 | for (i = disks; i--; ) { |
| 2128 | struct r5dev *dev = &sh->dev[i]; |
| 2129 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) |
| 2130 | wake_up(&sh->raid_conf->wait_for_overlap); |
| 2131 | } |
| 2132 | put_cpu(); |
| 2133 | } |
| 2134 | |
| 2135 | static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, |
| 2136 | int disks) |
| 2137 | { |
| 2138 | struct stripe_head *sh; |
| 2139 | int i; |
| 2140 | |
| 2141 | sh = kmem_cache_zalloc(sc, gfp); |
| 2142 | if (sh) { |
| 2143 | spin_lock_init(&sh->stripe_lock); |
| 2144 | spin_lock_init(&sh->batch_lock); |
| 2145 | INIT_LIST_HEAD(&sh->batch_list); |
| 2146 | INIT_LIST_HEAD(&sh->lru); |
| 2147 | INIT_LIST_HEAD(&sh->r5c); |
| 2148 | INIT_LIST_HEAD(&sh->log_list); |
| 2149 | atomic_set(&sh->count, 1); |
| 2150 | sh->log_start = MaxSector; |
| 2151 | for (i = 0; i < disks; i++) { |
| 2152 | struct r5dev *dev = &sh->dev[i]; |
| 2153 | |
| 2154 | bio_init(&dev->req, &dev->vec, 1); |
| 2155 | bio_init(&dev->rreq, &dev->rvec, 1); |
| 2156 | } |
| 2157 | } |
| 2158 | return sh; |
| 2159 | } |
| 2160 | static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) |
| 2161 | { |
| 2162 | struct stripe_head *sh; |
| 2163 | |
| 2164 | sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size); |
| 2165 | if (!sh) |
| 2166 | return 0; |
| 2167 | |
| 2168 | sh->raid_conf = conf; |
| 2169 | |
| 2170 | if (grow_buffers(sh, gfp)) { |
| 2171 | shrink_buffers(sh); |
| 2172 | kmem_cache_free(conf->slab_cache, sh); |
| 2173 | return 0; |
| 2174 | } |
| 2175 | sh->hash_lock_index = |
| 2176 | conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; |
| 2177 | /* we just created an active stripe so... */ |
| 2178 | atomic_inc(&conf->active_stripes); |
| 2179 | |
| 2180 | raid5_release_stripe(sh); |
| 2181 | conf->max_nr_stripes++; |
| 2182 | return 1; |
| 2183 | } |
| 2184 | |
| 2185 | static int grow_stripes(struct r5conf *conf, int num) |
| 2186 | { |
| 2187 | struct kmem_cache *sc; |
| 2188 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
| 2189 | |
| 2190 | if (conf->mddev->gendisk) |
| 2191 | sprintf(conf->cache_name[0], |
| 2192 | "raid%d-%s", conf->level, mdname(conf->mddev)); |
| 2193 | else |
| 2194 | sprintf(conf->cache_name[0], |
| 2195 | "raid%d-%p", conf->level, conf->mddev); |
| 2196 | sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); |
| 2197 | |
| 2198 | conf->active_name = 0; |
| 2199 | sc = kmem_cache_create(conf->cache_name[conf->active_name], |
| 2200 | sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), |
| 2201 | 0, 0, NULL); |
| 2202 | if (!sc) |
| 2203 | return 1; |
| 2204 | conf->slab_cache = sc; |
| 2205 | conf->pool_size = devs; |
| 2206 | while (num--) |
| 2207 | if (!grow_one_stripe(conf, GFP_KERNEL)) |
| 2208 | return 1; |
| 2209 | |
| 2210 | return 0; |
| 2211 | } |
| 2212 | |
| 2213 | /** |
 * scribble_alloc - allocate the per-cpu scribble region
 * @num: total number of disks in the array
 * @cnt: number of scribble elements to preallocate
 * @flags: allocation flags
 *
 * Each element must be large enough to contain:
| 2218 | * 1/ a struct page pointer for each device in the array +2 |
| 2219 | * 2/ room to convert each entry in (1) to its corresponding dma |
| 2220 | * (dma_map_page()) or page (page_address()) address. |
| 2221 | * |
| 2222 | * Note: the +2 is for the destination buffers of the ddf/raid6 case where we |
| 2223 | * calculate over all devices (not just the data blocks), using zeros in place |
| 2224 | * of the P and Q blocks. |
| 2225 | */ |
| 2226 | static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) |
| 2227 | { |
| 2228 | struct flex_array *ret; |
| 2229 | size_t len; |
| 2230 | |
| 2231 | len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); |
| 2232 | ret = flex_array_alloc(len, cnt, flags); |
| 2233 | if (!ret) |
| 2234 | return NULL; |
| 2235 | /* always prealloc all elements, so no locking is required */ |
| 2236 | if (flex_array_prealloc(ret, 0, cnt, flags)) { |
| 2237 | flex_array_free(ret); |
| 2238 | return NULL; |
| 2239 | } |
| 2240 | return ret; |
| 2241 | } |
| 2242 | |
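/*
 * Grow the per-cpu scribble buffers when the array gains disks or a
 * larger chunk size.  The array is suspended while the buffers are
 * swapped; an old buffer is only freed once its replacement has been
 * allocated, so a failed allocation leaves the previous state intact.
 */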
| 2243 | static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) |
| 2244 | { |
| 2245 | unsigned long cpu; |
| 2246 | int err = 0; |
| 2247 | |
| 2248 | /* |
| 2249 | * Never shrink. And mddev_suspend() could deadlock if this is called |
| 2250 | * from raid5d. In that case, scribble_disks and scribble_sectors |
	 * should already equal new_disks and new_sectors
| 2252 | */ |
| 2253 | if (conf->scribble_disks >= new_disks && |
| 2254 | conf->scribble_sectors >= new_sectors) |
| 2255 | return 0; |
| 2256 | mddev_suspend(conf->mddev); |
| 2257 | get_online_cpus(); |
| 2258 | for_each_present_cpu(cpu) { |
| 2259 | struct raid5_percpu *percpu; |
| 2260 | struct flex_array *scribble; |
| 2261 | |
| 2262 | percpu = per_cpu_ptr(conf->percpu, cpu); |
| 2263 | scribble = scribble_alloc(new_disks, |
| 2264 | new_sectors / STRIPE_SECTORS, |
| 2265 | GFP_NOIO); |
| 2266 | |
| 2267 | if (scribble) { |
| 2268 | flex_array_free(percpu->scribble); |
| 2269 | percpu->scribble = scribble; |
| 2270 | } else { |
| 2271 | err = -ENOMEM; |
| 2272 | break; |
| 2273 | } |
| 2274 | } |
| 2275 | put_online_cpus(); |
| 2276 | mddev_resume(conf->mddev); |
| 2277 | if (!err) { |
| 2278 | conf->scribble_disks = new_disks; |
| 2279 | conf->scribble_sectors = new_sectors; |
| 2280 | } |
| 2281 | return err; |
| 2282 | } |
| 2283 | |
| 2284 | static int resize_stripes(struct r5conf *conf, int newsize) |
| 2285 | { |
| 2286 | /* Make all the stripes able to hold 'newsize' devices. |
| 2287 | * New slots in each stripe get 'page' set to a new page. |
| 2288 | * |
| 2289 | * This happens in stages: |
| 2290 | * 1/ create a new kmem_cache and allocate the required number of |
| 2291 | * stripe_heads. |
| 2292 | * 2/ gather all the old stripe_heads and transfer the pages across |
| 2293 | * to the new stripe_heads. This will have the side effect of |
| 2294 | * freezing the array as once all stripe_heads have been collected, |
| 2295 | * no IO will be possible. Old stripe heads are freed once their |
| 2296 | * pages have been transferred over, and the old kmem_cache is |
| 2297 | * freed when all stripes are done. |
 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
 *    we simply return a failure status - no need to clean anything up.
 * 4/ allocate new pages for the new slots in the new stripe_heads.
 *    If this fails, we don't bother trying to shrink the
| 2302 | * stripe_heads down again, we just leave them as they are. |
| 2303 | * As each stripe_head is processed the new one is released into |
| 2304 | * active service. |
| 2305 | * |
| 2306 | * Once step2 is started, we cannot afford to wait for a write, |
| 2307 | * so we use GFP_NOIO allocations. |
| 2308 | */ |
| 2309 | struct stripe_head *osh, *nsh; |
| 2310 | LIST_HEAD(newstripes); |
| 2311 | struct disk_info *ndisks; |
| 2312 | int err; |
| 2313 | struct kmem_cache *sc; |
| 2314 | int i; |
| 2315 | int hash, cnt; |
| 2316 | |
| 2317 | if (newsize <= conf->pool_size) |
| 2318 | return 0; /* never bother to shrink */ |
| 2319 | |
| 2320 | err = md_allow_write(conf->mddev); |
| 2321 | if (err) |
| 2322 | return err; |
| 2323 | |
| 2324 | /* Step 1 */ |
| 2325 | sc = kmem_cache_create(conf->cache_name[1-conf->active_name], |
| 2326 | sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), |
| 2327 | 0, 0, NULL); |
| 2328 | if (!sc) |
| 2329 | return -ENOMEM; |
| 2330 | |
| 2331 | /* Need to ensure auto-resizing doesn't interfere */ |
| 2332 | mutex_lock(&conf->cache_size_mutex); |
| 2333 | |
| 2334 | for (i = conf->max_nr_stripes; i; i--) { |
| 2335 | nsh = alloc_stripe(sc, GFP_KERNEL, newsize); |
| 2336 | if (!nsh) |
| 2337 | break; |
| 2338 | |
| 2339 | nsh->raid_conf = conf; |
| 2340 | list_add(&nsh->lru, &newstripes); |
| 2341 | } |
| 2342 | if (i) { |
| 2343 | /* didn't get enough, give up */ |
| 2344 | while (!list_empty(&newstripes)) { |
| 2345 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
| 2346 | list_del(&nsh->lru); |
| 2347 | kmem_cache_free(sc, nsh); |
| 2348 | } |
| 2349 | kmem_cache_destroy(sc); |
| 2350 | mutex_unlock(&conf->cache_size_mutex); |
| 2351 | return -ENOMEM; |
| 2352 | } |
| 2353 | /* Step 2 - Must use GFP_NOIO now. |
| 2354 | * OK, we have enough stripes, start collecting inactive |
| 2355 | * stripes and copying them over |
| 2356 | */ |
| 2357 | hash = 0; |
| 2358 | cnt = 0; |
| 2359 | list_for_each_entry(nsh, &newstripes, lru) { |
| 2360 | lock_device_hash_lock(conf, hash); |
| 2361 | wait_event_cmd(conf->wait_for_stripe, |
| 2362 | !list_empty(conf->inactive_list + hash), |
| 2363 | unlock_device_hash_lock(conf, hash), |
| 2364 | lock_device_hash_lock(conf, hash)); |
| 2365 | osh = get_free_stripe(conf, hash); |
| 2366 | unlock_device_hash_lock(conf, hash); |
| 2367 | |
| 2368 | for(i=0; i<conf->pool_size; i++) { |
| 2369 | nsh->dev[i].page = osh->dev[i].page; |
| 2370 | nsh->dev[i].orig_page = osh->dev[i].page; |
| 2371 | } |
| 2372 | nsh->hash_lock_index = hash; |
| 2373 | kmem_cache_free(conf->slab_cache, osh); |
| 2374 | cnt++; |
| 2375 | if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + |
| 2376 | !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { |
| 2377 | hash++; |
| 2378 | cnt = 0; |
| 2379 | } |
| 2380 | } |
| 2381 | kmem_cache_destroy(conf->slab_cache); |
| 2382 | |
| 2383 | /* Step 3. |
| 2384 | * At this point, we are holding all the stripes so the array |
| 2385 | * is completely stalled, so now is a good time to resize |
| 2386 | * conf->disks and the scribble region |
| 2387 | */ |
| 2388 | ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); |
| 2389 | if (ndisks) { |
| 2390 | for (i = 0; i < conf->pool_size; i++) |
| 2391 | ndisks[i] = conf->disks[i]; |
| 2392 | |
| 2393 | for (i = conf->pool_size; i < newsize; i++) { |
| 2394 | ndisks[i].extra_page = alloc_page(GFP_NOIO); |
| 2395 | if (!ndisks[i].extra_page) |
| 2396 | err = -ENOMEM; |
| 2397 | } |
| 2398 | |
| 2399 | if (err) { |
| 2400 | for (i = conf->pool_size; i < newsize; i++) |
| 2401 | if (ndisks[i].extra_page) |
| 2402 | put_page(ndisks[i].extra_page); |
| 2403 | kfree(ndisks); |
| 2404 | } else { |
| 2405 | kfree(conf->disks); |
| 2406 | conf->disks = ndisks; |
| 2407 | } |
| 2408 | } else |
| 2409 | err = -ENOMEM; |
| 2410 | |
| 2411 | mutex_unlock(&conf->cache_size_mutex); |
| 2412 | /* Step 4, return new stripes to service */ |
| 2413 | while(!list_empty(&newstripes)) { |
| 2414 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
| 2415 | list_del_init(&nsh->lru); |
| 2416 | |
| 2417 | for (i=conf->raid_disks; i < newsize; i++) |
| 2418 | if (nsh->dev[i].page == NULL) { |
| 2419 | struct page *p = alloc_page(GFP_NOIO); |
| 2420 | nsh->dev[i].page = p; |
| 2421 | nsh->dev[i].orig_page = p; |
| 2422 | if (!p) |
| 2423 | err = -ENOMEM; |
| 2424 | } |
| 2425 | raid5_release_stripe(nsh); |
| 2426 | } |
	/* critical section passed, GFP_NOIO no longer needed */
| 2428 | |
| 2429 | conf->slab_cache = sc; |
| 2430 | conf->active_name = 1-conf->active_name; |
| 2431 | if (!err) |
| 2432 | conf->pool_size = newsize; |
| 2433 | return err; |
| 2434 | } |
| 2435 | |
| 2436 | static int drop_one_stripe(struct r5conf *conf) |
| 2437 | { |
| 2438 | struct stripe_head *sh; |
| 2439 | int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; |
| 2440 | |
| 2441 | spin_lock_irq(conf->hash_locks + hash); |
| 2442 | sh = get_free_stripe(conf, hash); |
| 2443 | spin_unlock_irq(conf->hash_locks + hash); |
| 2444 | if (!sh) |
| 2445 | return 0; |
| 2446 | BUG_ON(atomic_read(&sh->count)); |
| 2447 | shrink_buffers(sh); |
| 2448 | kmem_cache_free(conf->slab_cache, sh); |
| 2449 | atomic_dec(&conf->active_stripes); |
| 2450 | conf->max_nr_stripes--; |
| 2451 | return 1; |
| 2452 | } |
| 2453 | |
| 2454 | static void shrink_stripes(struct r5conf *conf) |
| 2455 | { |
| 2456 | while (conf->max_nr_stripes && |
| 2457 | drop_one_stripe(conf)) |
| 2458 | ; |
| 2459 | |
| 2460 | kmem_cache_destroy(conf->slab_cache); |
| 2461 | conf->slab_cache = NULL; |
| 2462 | } |
| 2463 | |
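/*
 * Completion handler for a stripe read.  On success any earlier read
 * error on the device is considered corrected; on failure the read is
 * either retried (possibly without merging), or the sectors are marked
 * bad / the device is failed when the error cannot be corrected.
 */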
| 2464 | static void raid5_end_read_request(struct bio * bi) |
| 2465 | { |
| 2466 | struct stripe_head *sh = bi->bi_private; |
| 2467 | struct r5conf *conf = sh->raid_conf; |
| 2468 | int disks = sh->disks, i; |
| 2469 | char b[BDEVNAME_SIZE]; |
| 2470 | struct md_rdev *rdev = NULL; |
| 2471 | sector_t s; |
| 2472 | |
| 2473 | for (i=0 ; i<disks; i++) |
| 2474 | if (bi == &sh->dev[i].req) |
| 2475 | break; |
| 2476 | |
| 2477 | pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", |
| 2478 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
| 2479 | bi->bi_error); |
| 2480 | if (i == disks) { |
| 2481 | bio_reset(bi); |
| 2482 | BUG(); |
| 2483 | return; |
| 2484 | } |
| 2485 | if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) |
| 2486 | /* If replacement finished while this request was outstanding, |
| 2487 | * 'replacement' might be NULL already. |
| 2488 | * In that case it moved down to 'rdev'. |
| 2489 | * rdev is not removed until all requests are finished. |
| 2490 | */ |
| 2491 | rdev = conf->disks[i].replacement; |
| 2492 | if (!rdev) |
| 2493 | rdev = conf->disks[i].rdev; |
| 2494 | |
| 2495 | if (use_new_offset(conf, sh)) |
| 2496 | s = sh->sector + rdev->new_data_offset; |
| 2497 | else |
| 2498 | s = sh->sector + rdev->data_offset; |
| 2499 | if (!bi->bi_error) { |
| 2500 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
| 2501 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
| 2502 | /* Note that this cannot happen on a |
| 2503 | * replacement device. We just fail those on |
| 2504 | * any error |
| 2505 | */ |
| 2506 | pr_info_ratelimited( |
| 2507 | "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n", |
| 2508 | mdname(conf->mddev), STRIPE_SECTORS, |
| 2509 | (unsigned long long)s, |
| 2510 | bdevname(rdev->bdev, b)); |
| 2511 | atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); |
| 2512 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
| 2513 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
| 2514 | } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) |
| 2515 | clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); |
| 2516 | |
| 2517 | if (test_bit(R5_InJournal, &sh->dev[i].flags)) |
| 2518 | /* |
| 2519 | * end read for a page in journal, this |
| 2520 | * must be preparing for prexor in rmw |
| 2521 | */ |
| 2522 | set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); |
| 2523 | |
| 2524 | if (atomic_read(&rdev->read_errors)) |
| 2525 | atomic_set(&rdev->read_errors, 0); |
| 2526 | } else { |
| 2527 | const char *bdn = bdevname(rdev->bdev, b); |
| 2528 | int retry = 0; |
| 2529 | int set_bad = 0; |
| 2530 | |
| 2531 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); |
| 2532 | atomic_inc(&rdev->read_errors); |
| 2533 | if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) |
| 2534 | pr_warn_ratelimited( |
| 2535 | "md/raid:%s: read error on replacement device (sector %llu on %s).\n", |
| 2536 | mdname(conf->mddev), |
| 2537 | (unsigned long long)s, |
| 2538 | bdn); |
| 2539 | else if (conf->mddev->degraded >= conf->max_degraded) { |
| 2540 | set_bad = 1; |
| 2541 | pr_warn_ratelimited( |
| 2542 | "md/raid:%s: read error not correctable (sector %llu on %s).\n", |
| 2543 | mdname(conf->mddev), |
| 2544 | (unsigned long long)s, |
| 2545 | bdn); |
| 2546 | } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { |
| 2547 | /* Oh, no!!! */ |
| 2548 | set_bad = 1; |
| 2549 | pr_warn_ratelimited( |
| 2550 | "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n", |
| 2551 | mdname(conf->mddev), |
| 2552 | (unsigned long long)s, |
| 2553 | bdn); |
| 2554 | } else if (atomic_read(&rdev->read_errors) |
| 2555 | > conf->max_nr_stripes) |
| 2556 | pr_warn("md/raid:%s: Too many read errors, failing device %s.\n", |
| 2557 | mdname(conf->mddev), bdn); |
| 2558 | else |
| 2559 | retry = 1; |
| 2560 | if (set_bad && test_bit(In_sync, &rdev->flags) |
| 2561 | && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) |
| 2562 | retry = 1; |
| 2563 | if (retry) |
| 2564 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { |
| 2565 | set_bit(R5_ReadError, &sh->dev[i].flags); |
| 2566 | clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); |
| 2567 | } else |
| 2568 | set_bit(R5_ReadNoMerge, &sh->dev[i].flags); |
| 2569 | else { |
| 2570 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
| 2571 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
| 2572 | if (!(set_bad |
| 2573 | && test_bit(In_sync, &rdev->flags) |
| 2574 | && rdev_set_badblocks( |
| 2575 | rdev, sh->sector, STRIPE_SECTORS, 0))) |
| 2576 | md_error(conf->mddev, rdev); |
| 2577 | } |
| 2578 | } |
| 2579 | rdev_dec_pending(rdev, conf->mddev); |
| 2580 | bio_reset(bi); |
| 2581 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 2582 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2583 | raid5_release_stripe(sh); |
| 2584 | } |
| 2585 | |
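/*
 * Completion handler for a stripe write, covering both the primary
 * device and its replacement.  Write errors mark the device for
 * replacement (or fail the replacement), while writes that landed on a
 * known-bad range are flagged R5_MadeGood so the bad block can be
 * cleared later.
 */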
| 2586 | static void raid5_end_write_request(struct bio *bi) |
| 2587 | { |
| 2588 | struct stripe_head *sh = bi->bi_private; |
| 2589 | struct r5conf *conf = sh->raid_conf; |
| 2590 | int disks = sh->disks, i; |
| 2591 | struct md_rdev *uninitialized_var(rdev); |
| 2592 | sector_t first_bad; |
| 2593 | int bad_sectors; |
| 2594 | int replacement = 0; |
| 2595 | |
| 2596 | for (i = 0 ; i < disks; i++) { |
| 2597 | if (bi == &sh->dev[i].req) { |
| 2598 | rdev = conf->disks[i].rdev; |
| 2599 | break; |
| 2600 | } |
| 2601 | if (bi == &sh->dev[i].rreq) { |
| 2602 | rdev = conf->disks[i].replacement; |
| 2603 | if (rdev) |
| 2604 | replacement = 1; |
| 2605 | else |
| 2606 | /* rdev was removed and 'replacement' |
| 2607 | * replaced it. rdev is not removed |
| 2608 | * until all requests are finished. |
| 2609 | */ |
| 2610 | rdev = conf->disks[i].rdev; |
| 2611 | break; |
| 2612 | } |
| 2613 | } |
| 2614 | pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", |
| 2615 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
| 2616 | bi->bi_error); |
| 2617 | if (i == disks) { |
| 2618 | bio_reset(bi); |
| 2619 | BUG(); |
| 2620 | return; |
| 2621 | } |
| 2622 | |
| 2623 | if (replacement) { |
| 2624 | if (bi->bi_error) |
| 2625 | md_error(conf->mddev, rdev); |
| 2626 | else if (is_badblock(rdev, sh->sector, |
| 2627 | STRIPE_SECTORS, |
| 2628 | &first_bad, &bad_sectors)) |
| 2629 | set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); |
| 2630 | } else { |
| 2631 | if (bi->bi_error) { |
| 2632 | set_bit(STRIPE_DEGRADED, &sh->state); |
| 2633 | set_bit(WriteErrorSeen, &rdev->flags); |
| 2634 | set_bit(R5_WriteError, &sh->dev[i].flags); |
| 2635 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
| 2636 | set_bit(MD_RECOVERY_NEEDED, |
| 2637 | &rdev->mddev->recovery); |
| 2638 | } else if (is_badblock(rdev, sh->sector, |
| 2639 | STRIPE_SECTORS, |
| 2640 | &first_bad, &bad_sectors)) { |
| 2641 | set_bit(R5_MadeGood, &sh->dev[i].flags); |
| 2642 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) |
| 2643 | /* That was a successful write so make |
| 2644 | * sure it looks like we already did |
| 2645 | * a re-write. |
| 2646 | */ |
| 2647 | set_bit(R5_ReWrite, &sh->dev[i].flags); |
| 2648 | } |
| 2649 | } |
| 2650 | rdev_dec_pending(rdev, conf->mddev); |
| 2651 | |
| 2652 | if (sh->batch_head && bi->bi_error && !replacement) |
| 2653 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); |
| 2654 | |
| 2655 | bio_reset(bi); |
| 2656 | if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) |
| 2657 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 2658 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2659 | raid5_release_stripe(sh); |
| 2660 | |
| 2661 | if (sh->batch_head && sh != sh->batch_head) |
| 2662 | raid5_release_stripe(sh->batch_head); |
| 2663 | } |
| 2664 | |
| 2665 | static void raid5_build_block(struct stripe_head *sh, int i, int previous) |
| 2666 | { |
| 2667 | struct r5dev *dev = &sh->dev[i]; |
| 2668 | |
| 2669 | dev->flags = 0; |
| 2670 | dev->sector = raid5_compute_blocknr(sh, i, previous); |
| 2671 | } |
| 2672 | |
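/*
 * Handle the failure of a member device: mark it Faulty/Blocked, update
 * the degraded count, interrupt any resync in progress and request a
 * superblock update.
 */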
| 2673 | static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) |
| 2674 | { |
| 2675 | char b[BDEVNAME_SIZE]; |
| 2676 | struct r5conf *conf = mddev->private; |
| 2677 | unsigned long flags; |
| 2678 | pr_debug("raid456: error called\n"); |
| 2679 | |
| 2680 | spin_lock_irqsave(&conf->device_lock, flags); |
| 2681 | clear_bit(In_sync, &rdev->flags); |
| 2682 | mddev->degraded = raid5_calc_degraded(conf); |
| 2683 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 2684 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
| 2685 | |
| 2686 | set_bit(Blocked, &rdev->flags); |
| 2687 | set_bit(Faulty, &rdev->flags); |
| 2688 | set_mask_bits(&mddev->sb_flags, 0, |
| 2689 | BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); |
| 2690 | pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" |
| 2691 | "md/raid:%s: Operation continuing on %d devices.\n", |
| 2692 | mdname(mddev), |
| 2693 | bdevname(rdev->bdev, b), |
| 2694 | mdname(mddev), |
| 2695 | conf->raid_disks - mddev->degraded); |
| 2696 | r5c_update_on_rdev_error(mddev); |
| 2697 | } |
| 2698 | |
| 2699 | /* |
 * Input: a 'big' (array-relative) sector number.
 * Output: the index of the data disk and the sector number within it;
 * the parity (and Q) disk indices are recorded in *sh when provided.
| 2702 | */ |
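/*
 * For illustration, assuming a 4-drive RAID-5 with ALGORITHM_LEFT_SYMMETRIC
 * and 8-sector chunks: logical sector 100 is offset 4 into chunk 12;
 * chunk 12 is data chunk 0 of stripe 4; stripe 4 keeps its parity on
 * device 3, so the data maps to device (3 + 1 + 0) % 4 = 0 and the
 * returned sector within that device is 4 * 8 + 4 = 36.
 */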
| 2703 | sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, |
| 2704 | int previous, int *dd_idx, |
| 2705 | struct stripe_head *sh) |
| 2706 | { |
| 2707 | sector_t stripe, stripe2; |
| 2708 | sector_t chunk_number; |
| 2709 | unsigned int chunk_offset; |
| 2710 | int pd_idx, qd_idx; |
| 2711 | int ddf_layout = 0; |
| 2712 | sector_t new_sector; |
| 2713 | int algorithm = previous ? conf->prev_algo |
| 2714 | : conf->algorithm; |
| 2715 | int sectors_per_chunk = previous ? conf->prev_chunk_sectors |
| 2716 | : conf->chunk_sectors; |
| 2717 | int raid_disks = previous ? conf->previous_raid_disks |
| 2718 | : conf->raid_disks; |
| 2719 | int data_disks = raid_disks - conf->max_degraded; |
| 2720 | |
| 2721 | /* First compute the information on this sector */ |
| 2722 | |
| 2723 | /* |
| 2724 | * Compute the chunk number and the sector offset inside the chunk |
| 2725 | */ |
| 2726 | chunk_offset = sector_div(r_sector, sectors_per_chunk); |
| 2727 | chunk_number = r_sector; |
| 2728 | |
| 2729 | /* |
| 2730 | * Compute the stripe number |
| 2731 | */ |
| 2732 | stripe = chunk_number; |
| 2733 | *dd_idx = sector_div(stripe, data_disks); |
| 2734 | stripe2 = stripe; |
| 2735 | /* |
| 2736 | * Select the parity disk based on the user selected algorithm. |
| 2737 | */ |
| 2738 | pd_idx = qd_idx = -1; |
| 2739 | switch(conf->level) { |
| 2740 | case 4: |
| 2741 | pd_idx = data_disks; |
| 2742 | break; |
| 2743 | case 5: |
| 2744 | switch (algorithm) { |
| 2745 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 2746 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
| 2747 | if (*dd_idx >= pd_idx) |
| 2748 | (*dd_idx)++; |
| 2749 | break; |
| 2750 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 2751 | pd_idx = sector_div(stripe2, raid_disks); |
| 2752 | if (*dd_idx >= pd_idx) |
| 2753 | (*dd_idx)++; |
| 2754 | break; |
| 2755 | case ALGORITHM_LEFT_SYMMETRIC: |
| 2756 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
| 2757 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
| 2758 | break; |
| 2759 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 2760 | pd_idx = sector_div(stripe2, raid_disks); |
| 2761 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
| 2762 | break; |
| 2763 | case ALGORITHM_PARITY_0: |
| 2764 | pd_idx = 0; |
| 2765 | (*dd_idx)++; |
| 2766 | break; |
| 2767 | case ALGORITHM_PARITY_N: |
| 2768 | pd_idx = data_disks; |
| 2769 | break; |
| 2770 | default: |
| 2771 | BUG(); |
| 2772 | } |
| 2773 | break; |
| 2774 | case 6: |
| 2775 | |
| 2776 | switch (algorithm) { |
| 2777 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 2778 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 2779 | qd_idx = pd_idx + 1; |
| 2780 | if (pd_idx == raid_disks-1) { |
| 2781 | (*dd_idx)++; /* Q D D D P */ |
| 2782 | qd_idx = 0; |
| 2783 | } else if (*dd_idx >= pd_idx) |
| 2784 | (*dd_idx) += 2; /* D D P Q D */ |
| 2785 | break; |
| 2786 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 2787 | pd_idx = sector_div(stripe2, raid_disks); |
| 2788 | qd_idx = pd_idx + 1; |
| 2789 | if (pd_idx == raid_disks-1) { |
| 2790 | (*dd_idx)++; /* Q D D D P */ |
| 2791 | qd_idx = 0; |
| 2792 | } else if (*dd_idx >= pd_idx) |
| 2793 | (*dd_idx) += 2; /* D D P Q D */ |
| 2794 | break; |
| 2795 | case ALGORITHM_LEFT_SYMMETRIC: |
| 2796 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 2797 | qd_idx = (pd_idx + 1) % raid_disks; |
| 2798 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
| 2799 | break; |
| 2800 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 2801 | pd_idx = sector_div(stripe2, raid_disks); |
| 2802 | qd_idx = (pd_idx + 1) % raid_disks; |
| 2803 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
| 2804 | break; |
| 2805 | |
| 2806 | case ALGORITHM_PARITY_0: |
| 2807 | pd_idx = 0; |
| 2808 | qd_idx = 1; |
| 2809 | (*dd_idx) += 2; |
| 2810 | break; |
| 2811 | case ALGORITHM_PARITY_N: |
| 2812 | pd_idx = data_disks; |
| 2813 | qd_idx = data_disks + 1; |
| 2814 | break; |
| 2815 | |
| 2816 | case ALGORITHM_ROTATING_ZERO_RESTART: |
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
| 2819 | */ |
| 2820 | pd_idx = sector_div(stripe2, raid_disks); |
| 2821 | qd_idx = pd_idx + 1; |
| 2822 | if (pd_idx == raid_disks-1) { |
| 2823 | (*dd_idx)++; /* Q D D D P */ |
| 2824 | qd_idx = 0; |
| 2825 | } else if (*dd_idx >= pd_idx) |
| 2826 | (*dd_idx) += 2; /* D D P Q D */ |
| 2827 | ddf_layout = 1; |
| 2828 | break; |
| 2829 | |
| 2830 | case ALGORITHM_ROTATING_N_RESTART: |
			/* Same as left_asymmetric, but the first stripe is
| 2832 | * D D D P Q rather than |
| 2833 | * Q D D D P |
| 2834 | */ |
| 2835 | stripe2 += 1; |
| 2836 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 2837 | qd_idx = pd_idx + 1; |
| 2838 | if (pd_idx == raid_disks-1) { |
| 2839 | (*dd_idx)++; /* Q D D D P */ |
| 2840 | qd_idx = 0; |
| 2841 | } else if (*dd_idx >= pd_idx) |
| 2842 | (*dd_idx) += 2; /* D D P Q D */ |
| 2843 | ddf_layout = 1; |
| 2844 | break; |
| 2845 | |
| 2846 | case ALGORITHM_ROTATING_N_CONTINUE: |
| 2847 | /* Same as left_symmetric but Q is before P */ |
| 2848 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
| 2849 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; |
| 2850 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
| 2851 | ddf_layout = 1; |
| 2852 | break; |
| 2853 | |
| 2854 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 2855 | /* RAID5 left_asymmetric, with Q on last device */ |
| 2856 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
| 2857 | if (*dd_idx >= pd_idx) |
| 2858 | (*dd_idx)++; |
| 2859 | qd_idx = raid_disks - 1; |
| 2860 | break; |
| 2861 | |
| 2862 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 2863 | pd_idx = sector_div(stripe2, raid_disks-1); |
| 2864 | if (*dd_idx >= pd_idx) |
| 2865 | (*dd_idx)++; |
| 2866 | qd_idx = raid_disks - 1; |
| 2867 | break; |
| 2868 | |
| 2869 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 2870 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
| 2871 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 2872 | qd_idx = raid_disks - 1; |
| 2873 | break; |
| 2874 | |
| 2875 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 2876 | pd_idx = sector_div(stripe2, raid_disks-1); |
| 2877 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 2878 | qd_idx = raid_disks - 1; |
| 2879 | break; |
| 2880 | |
| 2881 | case ALGORITHM_PARITY_0_6: |
| 2882 | pd_idx = 0; |
| 2883 | (*dd_idx)++; |
| 2884 | qd_idx = raid_disks - 1; |
| 2885 | break; |
| 2886 | |
| 2887 | default: |
| 2888 | BUG(); |
| 2889 | } |
| 2890 | break; |
| 2891 | } |
| 2892 | |
| 2893 | if (sh) { |
| 2894 | sh->pd_idx = pd_idx; |
| 2895 | sh->qd_idx = qd_idx; |
| 2896 | sh->ddf_layout = ddf_layout; |
| 2897 | } |
| 2898 | /* |
| 2899 | * Finally, compute the new sector number |
| 2900 | */ |
| 2901 | new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; |
| 2902 | return new_sector; |
| 2903 | } |
| 2904 | |
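/*
 * The inverse of raid5_compute_sector(): given a stripe and the index of
 * one of its devices, recover the array-relative sector stored there.
 * The result is cross-checked by mapping it forward again; a mismatch
 * indicates a layout bug and 0 is returned.
 */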
| 2905 | sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) |
| 2906 | { |
| 2907 | struct r5conf *conf = sh->raid_conf; |
| 2908 | int raid_disks = sh->disks; |
| 2909 | int data_disks = raid_disks - conf->max_degraded; |
| 2910 | sector_t new_sector = sh->sector, check; |
| 2911 | int sectors_per_chunk = previous ? conf->prev_chunk_sectors |
| 2912 | : conf->chunk_sectors; |
| 2913 | int algorithm = previous ? conf->prev_algo |
| 2914 | : conf->algorithm; |
| 2915 | sector_t stripe; |
| 2916 | int chunk_offset; |
| 2917 | sector_t chunk_number; |
| 2918 | int dummy1, dd_idx = i; |
| 2919 | sector_t r_sector; |
| 2920 | struct stripe_head sh2; |
| 2921 | |
| 2922 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
| 2923 | stripe = new_sector; |
| 2924 | |
| 2925 | if (i == sh->pd_idx) |
| 2926 | return 0; |
| 2927 | switch(conf->level) { |
| 2928 | case 4: break; |
| 2929 | case 5: |
| 2930 | switch (algorithm) { |
| 2931 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 2932 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 2933 | if (i > sh->pd_idx) |
| 2934 | i--; |
| 2935 | break; |
| 2936 | case ALGORITHM_LEFT_SYMMETRIC: |
| 2937 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 2938 | if (i < sh->pd_idx) |
| 2939 | i += raid_disks; |
| 2940 | i -= (sh->pd_idx + 1); |
| 2941 | break; |
| 2942 | case ALGORITHM_PARITY_0: |
| 2943 | i -= 1; |
| 2944 | break; |
| 2945 | case ALGORITHM_PARITY_N: |
| 2946 | break; |
| 2947 | default: |
| 2948 | BUG(); |
| 2949 | } |
| 2950 | break; |
| 2951 | case 6: |
| 2952 | if (i == sh->qd_idx) |
| 2953 | return 0; /* It is the Q disk */ |
| 2954 | switch (algorithm) { |
| 2955 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 2956 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 2957 | case ALGORITHM_ROTATING_ZERO_RESTART: |
| 2958 | case ALGORITHM_ROTATING_N_RESTART: |
| 2959 | if (sh->pd_idx == raid_disks-1) |
| 2960 | i--; /* Q D D D P */ |
| 2961 | else if (i > sh->pd_idx) |
| 2962 | i -= 2; /* D D P Q D */ |
| 2963 | break; |
| 2964 | case ALGORITHM_LEFT_SYMMETRIC: |
| 2965 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 2966 | if (sh->pd_idx == raid_disks-1) |
| 2967 | i--; /* Q D D D P */ |
| 2968 | else { |
| 2969 | /* D D P Q D */ |
| 2970 | if (i < sh->pd_idx) |
| 2971 | i += raid_disks; |
| 2972 | i -= (sh->pd_idx + 2); |
| 2973 | } |
| 2974 | break; |
| 2975 | case ALGORITHM_PARITY_0: |
| 2976 | i -= 2; |
| 2977 | break; |
| 2978 | case ALGORITHM_PARITY_N: |
| 2979 | break; |
| 2980 | case ALGORITHM_ROTATING_N_CONTINUE: |
| 2981 | /* Like left_symmetric, but P is before Q */ |
| 2982 | if (sh->pd_idx == 0) |
| 2983 | i--; /* P D D D Q */ |
| 2984 | else { |
| 2985 | /* D D Q P D */ |
| 2986 | if (i < sh->pd_idx) |
| 2987 | i += raid_disks; |
| 2988 | i -= (sh->pd_idx + 1); |
| 2989 | } |
| 2990 | break; |
| 2991 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 2992 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 2993 | if (i > sh->pd_idx) |
| 2994 | i--; |
| 2995 | break; |
| 2996 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 2997 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 2998 | if (i < sh->pd_idx) |
| 2999 | i += data_disks + 1; |
| 3000 | i -= (sh->pd_idx + 1); |
| 3001 | break; |
| 3002 | case ALGORITHM_PARITY_0_6: |
| 3003 | i -= 1; |
| 3004 | break; |
| 3005 | default: |
| 3006 | BUG(); |
| 3007 | } |
| 3008 | break; |
| 3009 | } |
| 3010 | |
| 3011 | chunk_number = stripe * data_disks + i; |
| 3012 | r_sector = chunk_number * sectors_per_chunk + chunk_offset; |
| 3013 | |
| 3014 | check = raid5_compute_sector(conf, r_sector, |
| 3015 | previous, &dummy1, &sh2); |
| 3016 | if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx |
| 3017 | || sh2.qd_idx != sh->qd_idx) { |
| 3018 | pr_warn("md/raid:%s: compute_blocknr: map not correct\n", |
| 3019 | mdname(conf->mddev)); |
| 3020 | return 0; |
| 3021 | } |
| 3022 | return r_sector; |
| 3023 | } |
| 3024 | |
| 3025 | /* |
| 3026 | * There are cases where we want handle_stripe_dirtying() and |
| 3027 | * schedule_reconstruction() to delay towrite to some dev of a stripe. |
| 3028 | * |
| 3029 | * This function checks whether we want to delay the towrite. Specifically, |
| 3030 | * we delay the towrite when: |
| 3031 | * |
| 3032 | * 1. degraded stripe has a non-overwrite to the missing dev, AND this |
| 3033 | * stripe has data in journal (for other devices). |
| 3034 | * |
| 3035 | * In this case, when reading data for the non-overwrite dev, it is |
| 3036 | * necessary to handle complex rmw of write back cache (prexor with |
| 3037 | * orig_page, and xor with page). To keep read path simple, we would |
| 3038 | * like to flush data in journal to RAID disks first, so complex rmw |
 * is handled in the write path (handle_stripe_dirtying).
| 3040 | * |
| 3041 | * 2. when journal space is critical (R5C_LOG_CRITICAL=1) |
| 3042 | * |
| 3043 | * It is important to be able to flush all stripes in raid5-cache. |
 * Therefore, we need to reserve some space on the journal device for
 * these flushes. If the flush operation includes pending writes to the
 * stripe, we need to reserve (conf->raid_disks + 1) pages per stripe
 * for the flush out. If we exclude these pending writes from the flush
 * operation, we only need (conf->max_degraded + 1) pages per stripe.
| 3049 | * Therefore, excluding pending writes in these cases enables more |
| 3050 | * efficient use of the journal device. |
| 3051 | * |
| 3052 | * Note: To make sure the stripe makes progress, we only delay |
| 3053 | * towrite for stripes with data already in journal (injournal > 0). |
| 3054 | * When LOG_CRITICAL, stripes with injournal == 0 will be sent to |
| 3055 | * no_space_stripes list. |
| 3056 | * |
| 3057 | */ |
| 3058 | static inline bool delay_towrite(struct r5conf *conf, |
| 3059 | struct r5dev *dev, |
| 3060 | struct stripe_head_state *s) |
| 3061 | { |
| 3062 | /* case 1 above */ |
| 3063 | if (!test_bit(R5_OVERWRITE, &dev->flags) && |
| 3064 | !test_bit(R5_Insync, &dev->flags) && s->injournal) |
| 3065 | return true; |
| 3066 | /* case 2 above */ |
| 3067 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && |
| 3068 | s->injournal > 0) |
| 3069 | return true; |
| 3070 | return false; |
| 3071 | } |
| 3072 | |
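/*
 * Decide which blocks to lock and which asynchronous operations to
 * request for a parity update.  With 'rcw' set, the new data is drained
 * and parity recomputed over the whole stripe; otherwise the old data is
 * prexor'd out of the parity first (read-modify-write).  The parity
 * block(s) stay locked while the operations are in flight.
 */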
| 3073 | static void |
| 3074 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, |
| 3075 | int rcw, int expand) |
| 3076 | { |
| 3077 | int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; |
| 3078 | struct r5conf *conf = sh->raid_conf; |
| 3079 | int level = conf->level; |
| 3080 | |
| 3081 | if (rcw) { |
| 3082 | /* |
| 3083 | * In some cases, handle_stripe_dirtying initially decided to |
		 * run rmw and allocated an extra page for prexor. However, rcw
		 * turned out to be cheaper later on. We need to free the extra
		 * page now, because we won't be able to do that in
		 * ops_complete_prexor().
| 3087 | */ |
| 3088 | r5c_release_extra_page(sh); |
| 3089 | |
| 3090 | for (i = disks; i--; ) { |
| 3091 | struct r5dev *dev = &sh->dev[i]; |
| 3092 | |
| 3093 | if (dev->towrite && !delay_towrite(conf, dev, s)) { |
| 3094 | set_bit(R5_LOCKED, &dev->flags); |
| 3095 | set_bit(R5_Wantdrain, &dev->flags); |
| 3096 | if (!expand) |
| 3097 | clear_bit(R5_UPTODATE, &dev->flags); |
| 3098 | s->locked++; |
| 3099 | } else if (test_bit(R5_InJournal, &dev->flags)) { |
| 3100 | set_bit(R5_LOCKED, &dev->flags); |
| 3101 | s->locked++; |
| 3102 | } |
| 3103 | } |
| 3104 | /* if we are not expanding this is a proper write request, and |
| 3105 | * there will be bios with new data to be drained into the |
| 3106 | * stripe cache |
| 3107 | */ |
| 3108 | if (!expand) { |
| 3109 | if (!s->locked) |
| 3110 | /* False alarm, nothing to do */ |
| 3111 | return; |
| 3112 | sh->reconstruct_state = reconstruct_state_drain_run; |
| 3113 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
| 3114 | } else |
| 3115 | sh->reconstruct_state = reconstruct_state_run; |
| 3116 | |
| 3117 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); |
| 3118 | |
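| | /* if every data block is being written, this is a full-stripe write */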
| 3119 | if (s->locked + conf->max_degraded == disks) |
| 3120 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 3121 | atomic_inc(&conf->pending_full_writes); |
| 3122 | } else { |
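| | /*
| | * read-modify-write path: parity and the old data blocks are already
| | * up to date, so prexor the old data out of the parity, drain the new
| | * bios in, and then recompute the parity.
| | */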
| 3123 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || |
| 3124 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); |
| 3125 | BUG_ON(level == 6 && |
| 3126 | (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || |
| 3127 | test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); |
| 3128 | |
| 3129 | for (i = disks; i--; ) { |
| 3130 | struct r5dev *dev = &sh->dev[i]; |
| 3131 | if (i == pd_idx || i == qd_idx) |
| 3132 | continue; |
| 3133 | |
| 3134 | if (dev->towrite && |
| 3135 | (test_bit(R5_UPTODATE, &dev->flags) || |
| 3136 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 3137 | set_bit(R5_Wantdrain, &dev->flags); |
| 3138 | set_bit(R5_LOCKED, &dev->flags); |
| 3139 | clear_bit(R5_UPTODATE, &dev->flags); |
| 3140 | s->locked++; |
| 3141 | } else if (test_bit(R5_InJournal, &dev->flags)) { |
| 3142 | set_bit(R5_LOCKED, &dev->flags); |
| 3143 | s->locked++; |
| 3144 | } |
| 3145 | } |
| 3146 | if (!s->locked) |
| 3147 | /* False alarm - nothing to do */ |
| 3148 | return; |
| 3149 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; |
| 3150 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); |
| 3151 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
| 3152 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); |
| 3153 | } |
| 3154 | |
| 3155 | /* keep the parity disk(s) locked while asynchronous operations |
| 3156 | * are in flight |
| 3157 | */ |
| 3158 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); |
| 3159 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
| 3160 | s->locked++; |
| 3161 | |
| 3162 | if (level == 6) { |
| 3163 | int qd_idx = sh->qd_idx; |
| 3164 | struct r5dev *dev = &sh->dev[qd_idx]; |
| 3165 | |
| 3166 | set_bit(R5_LOCKED, &dev->flags); |
| 3167 | clear_bit(R5_UPTODATE, &dev->flags); |
| 3168 | s->locked++; |
| 3169 | } |
| 3170 | |
| 3171 | if (raid5_has_ppl(sh->raid_conf) && |
| 3172 | test_bit(STRIPE_OP_BIODRAIN, &s->ops_request) && |
| 3173 | !test_bit(STRIPE_FULL_WRITE, &sh->state) && |
| 3174 | test_bit(R5_Insync, &sh->dev[pd_idx].flags)) |
| 3175 | set_bit(STRIPE_OP_PARTIAL_PARITY, &s->ops_request); |
| 3176 | |
| 3177 | pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", |
| 3178 | __func__, (unsigned long long)sh->sector, |
| 3179 | s->locked, s->ops_request); |
| 3180 | } |
| 3181 | |
| 3182 | /* |
| 3183 | * Each stripe/dev can have one or more bion attached. |
| 3184 | * toread/towrite point to the first in a chain. |
| 3185 | * The bi_next chain must be in order. |
| 3186 | */ |
| 3187 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, |
| 3188 | int forwrite, int previous) |
| 3189 | { |
| 3190 | struct bio **bip; |
| 3191 | struct r5conf *conf = sh->raid_conf; |
| 3192 | int firstwrite = 0;
| 3193 | |
| 3194 | pr_debug("adding bi b#%llu to stripe s#%llu\n", |
| 3195 | (unsigned long long)bi->bi_iter.bi_sector, |
| 3196 | (unsigned long long)sh->sector); |
| 3197 | |
| 3198 | spin_lock_irq(&sh->stripe_lock); |
| 3199 | /* Don't allow new IO to be added to stripes in the batch list */
| 3200 | if (sh->batch_head) |
| 3201 | goto overlap; |
| 3202 | if (forwrite) { |
| 3203 | bip = &sh->dev[dd_idx].towrite; |
| 3204 | if (*bip == NULL) |
| 3205 | firstwrite = 1; |
| 3206 | } else |
| 3207 | bip = &sh->dev[dd_idx].toread; |
| 3208 | while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { |
| 3209 | if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) |
| 3210 | goto overlap; |
| 3211 | bip = &(*bip)->bi_next;
| 3212 | } |
| 3213 | if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) |
| 3214 | goto overlap; |
| 3215 | |
| 3216 | if (forwrite && raid5_has_ppl(conf)) { |
| 3217 | /* |
| 3218 | * With PPL, only writes to consecutive data chunks within a
| 3219 | * stripe are allowed because for a single stripe_head we can |
| 3220 | * only have one PPL entry at a time, which describes one data |
| 3221 | * range. Not really an overlap, but wait_for_overlap can be |
| 3222 | * used to handle this. |
| 3223 | */ |
| 3224 | sector_t sector; |
| 3225 | sector_t first = 0; |
| 3226 | sector_t last = 0; |
| 3227 | int count = 0; |
| 3228 | int i; |
| 3229 | |
| 3230 | for (i = 0; i < sh->disks; i++) { |
| 3231 | if (i != sh->pd_idx && |
| 3232 | (i == dd_idx || sh->dev[i].towrite)) { |
| 3233 | sector = sh->dev[i].sector; |
| 3234 | if (count == 0 || sector < first) |
| 3235 | first = sector; |
| 3236 | if (sector > last) |
| 3237 | last = sector; |
| 3238 | count++; |
| 3239 | } |
| 3240 | } |
| 3241 | |
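| | /*
| | * The written chunks are consecutive only if the range from first to
| | * last spans exactly (count - 1) chunk strides.
| | */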
| 3242 | if (first + conf->chunk_sectors * (count - 1) != last) |
| 3243 | goto overlap; |
| 3244 | } |
| 3245 | |
| 3246 | if (!forwrite || previous) |
| 3247 | clear_bit(STRIPE_BATCH_READY, &sh->state); |
| 3248 | |
| 3249 | BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); |
| 3250 | if (*bip) |
| 3251 | bi->bi_next = *bip; |
| 3252 | *bip = bi; |
| 3253 | bio_inc_remaining(bi); |
| 3254 | md_write_inc(conf->mddev, bi); |
| 3255 | |
| 3256 | if (forwrite) { |
| 3257 | /* check if page is covered */ |
| 3258 | sector_t sector = sh->dev[dd_idx].sector; |
| 3259 | for (bi = sh->dev[dd_idx].towrite;
| 3260 | sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && |
| 3261 | bi && bi->bi_iter.bi_sector <= sector; |
| 3262 | bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { |
| 3263 | if (bio_end_sector(bi) >= sector) |
| 3264 | sector = bio_end_sector(bi); |
| 3265 | } |
| 3266 | if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) |
| 3267 | if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) |
| 3268 | sh->overwrite_disks++; |
| 3269 | } |
| 3270 | |
| 3271 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", |
| 3272 | (unsigned long long)(*bip)->bi_iter.bi_sector, |
| 3273 | (unsigned long long)sh->sector, dd_idx); |
| 3274 | |
| 3275 | if (conf->mddev->bitmap && firstwrite) { |
| 3276 | /* Cannot hold spinlock over bitmap_startwrite, |
| 3277 | * but must ensure this isn't added to a batch until |
| 3278 | * we have added to the bitmap and set bm_seq. |
| 3279 | * So set STRIPE_BITMAP_PENDING to prevent |
| 3280 | * batching. |
| 3281 | * If multiple add_stripe_bio() calls race here they |
| 3282 | * must all set STRIPE_BITMAP_PENDING. So only the first one
| 3283 | * to complete "bitmap_startwrite" gets to set
| 3284 | * STRIPE_BIT_DELAY. This is important as once a stripe
| 3285 | * is added to a batch, STRIPE_BIT_DELAY cannot be changed
| 3286 | * any more.
| 3287 | */ |
| 3288 | set_bit(STRIPE_BITMAP_PENDING, &sh->state); |
| 3289 | spin_unlock_irq(&sh->stripe_lock); |
| 3290 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, |
| 3291 | STRIPE_SECTORS, 0); |
| 3292 | spin_lock_irq(&sh->stripe_lock); |
| 3293 | clear_bit(STRIPE_BITMAP_PENDING, &sh->state); |
| 3294 | if (!sh->batch_head) { |
| 3295 | sh->bm_seq = conf->seq_flush+1; |
| 3296 | set_bit(STRIPE_BIT_DELAY, &sh->state); |
| 3297 | } |
| 3298 | } |
| 3299 | spin_unlock_irq(&sh->stripe_lock); |
| 3300 | |
| 3301 | if (stripe_can_batch(sh)) |
| 3302 | stripe_add_to_batch_list(conf, sh); |
| 3303 | return 1; |
| 3304 | |
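| | /*
| | * The bio cannot be merged into this stripe right now: flag the device
| | * with R5_Overlap and return 0 so the caller waits on wait_for_overlap
| | * and retries.
| | */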
| 3305 | overlap: |
| 3306 | set_bit(R5_Overlap, &sh->dev[dd_idx].flags); |
| 3307 | spin_unlock_irq(&sh->stripe_lock); |
| 3308 | return 0; |
| 3309 | } |
| 3310 | |
| 3311 | static void end_reshape(struct r5conf *conf); |
| 3312 | |
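| | /*
| | * Given a stripe number, compute the array sector of its first data
| | * block and let raid5_compute_sector() fill in the parity indices of
| | * the supplied stripe_head.
| | */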
| 3313 | static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, |
| 3314 | struct stripe_head *sh) |
| 3315 | { |
| 3316 | int sectors_per_chunk = |
| 3317 | previous ? conf->prev_chunk_sectors : conf->chunk_sectors; |
| 3318 | int dd_idx; |
| 3319 | int chunk_offset = sector_div(stripe, sectors_per_chunk); |
| 3320 | int disks = previous ? conf->previous_raid_disks : conf->raid_disks; |
| 3321 | |
| 3322 | raid5_compute_sector(conf, |
| 3323 | stripe * (disks - conf->max_degraded) |
| 3324 | *sectors_per_chunk + chunk_offset, |
| 3325 | previous, |
| 3326 | &dd_idx, sh); |
| 3327 | } |
| 3328 | |
| 3329 | static void |
| 3330 | handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, |
| 3331 | struct stripe_head_state *s, int disks) |
| 3332 | { |
| 3333 | int i; |
| 3334 | BUG_ON(sh->batch_head); |
| 3335 | for (i = disks; i--; ) { |
| 3336 | struct bio *bi; |
| 3337 | int bitmap_end = 0; |
| 3338 | |
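| | /*
| | * If this device saw a read error, record a bad block there, or fail
| | * the whole device if the bad block cannot be recorded.
| | */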
| 3339 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
| 3340 | struct md_rdev *rdev; |
| 3341 | rcu_read_lock(); |
| 3342 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 3343 | if (rdev && test_bit(In_sync, &rdev->flags) && |
| 3344 | !test_bit(Faulty, &rdev->flags)) |
| 3345 | atomic_inc(&rdev->nr_pending); |
| 3346 | else |
| 3347 | rdev = NULL; |
| 3348 | rcu_read_unlock(); |
| 3349 | if (rdev) { |
| 3350 | if (!rdev_set_badblocks( |
| 3351 | rdev, |
| 3352 | sh->sector, |
| 3353 | STRIPE_SECTORS, 0)) |
| 3354 | md_error(conf->mddev, rdev); |
| 3355 | rdev_dec_pending(rdev, conf->mddev); |
| 3356 | } |
| 3357 | } |
| 3358 | spin_lock_irq(&sh->stripe_lock); |
| 3359 | /* fail all writes first */ |
| 3360 | bi = sh->dev[i].towrite; |
| 3361 | sh->dev[i].towrite = NULL; |
| 3362 | sh->overwrite_disks = 0; |
| 3363 | spin_unlock_irq(&sh->stripe_lock); |
| 3364 | if (bi) |
| 3365 | bitmap_end = 1; |
| 3366 | |
| 3367 | log_stripe_write_finished(sh); |
| 3368 | |
| 3369 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 3370 | wake_up(&conf->wait_for_overlap); |
| 3371 | |
| 3372 | while (bi && bi->bi_iter.bi_sector < |
| 3373 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 3374 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
| 3375 | |
| 3376 | bi->bi_error = -EIO; |
| 3377 | md_write_end(conf->mddev); |
| 3378 | bio_endio(bi); |
| 3379 | bi = nextbi; |
| 3380 | } |
| 3381 | if (bitmap_end) |
| 3382 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, |
| 3383 | STRIPE_SECTORS, 0, 0); |
| 3384 | bitmap_end = 0; |
| 3385 | /* and fail all 'written' */ |
| 3386 | bi = sh->dev[i].written; |
| 3387 | sh->dev[i].written = NULL; |
| 3388 | if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { |
| 3389 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
| 3390 | sh->dev[i].page = sh->dev[i].orig_page; |
| 3391 | } |
| 3392 | |
| 3393 | if (bi) bitmap_end = 1; |
| 3394 | while (bi && bi->bi_iter.bi_sector < |
| 3395 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 3396 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
| 3397 | |
| 3398 | bi->bi_error = -EIO; |
| 3399 | md_write_end(conf->mddev); |
| 3400 | bio_endio(bi); |
| 3401 | bi = bi2; |
| 3402 | } |
| 3403 | |
| 3404 | /* fail any reads if this device is non-operational and |
| 3405 | * the data has not reached the cache yet. |
| 3406 | */ |
| 3407 | if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && |
| 3408 | s->failed > conf->max_degraded && |
| 3409 | (!test_bit(R5_Insync, &sh->dev[i].flags) || |
| 3410 | test_bit(R5_ReadError, &sh->dev[i].flags))) { |
| 3411 | spin_lock_irq(&sh->stripe_lock); |
| 3412 | bi = sh->dev[i].toread; |
| 3413 | sh->dev[i].toread = NULL; |
| 3414 | spin_unlock_irq(&sh->stripe_lock); |
| 3415 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 3416 | wake_up(&conf->wait_for_overlap); |
| 3417 | if (bi) |
| 3418 | s->to_read--; |
| 3419 | while (bi && bi->bi_iter.bi_sector < |
| 3420 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 3421 | struct bio *nextbi = |
| 3422 | r5_next_bio(bi, sh->dev[i].sector); |
| 3423 | |
| 3424 | bi->bi_error = -EIO; |
| 3425 | bio_endio(bi); |
| 3426 | bi = nextbi; |
| 3427 | } |
| 3428 | } |
| 3429 | if (bitmap_end) |
| 3430 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, |
| 3431 | STRIPE_SECTORS, 0, 0); |
| 3432 | /* If we were in the middle of a write, the parity block might
| 3433 | * still be locked - so just clear all R5_LOCKED flags |
| 3434 | */ |
| 3435 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 3436 | } |
| 3437 | s->to_write = 0; |
| 3438 | s->written = 0; |
| 3439 | |
| 3440 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 3441 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 3442 | md_wakeup_thread(conf->mddev->thread); |
| 3443 | } |
| 3444 | |
| 3445 | static void |
| 3446 | handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, |
| 3447 | struct stripe_head_state *s) |
| 3448 | { |
| 3449 | int abort = 0; |
| 3450 | int i; |
| 3451 | |
| 3452 | BUG_ON(sh->batch_head); |
| 3453 | clear_bit(STRIPE_SYNCING, &sh->state); |
| 3454 | if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) |
| 3455 | wake_up(&conf->wait_for_overlap); |
| 3456 | s->syncing = 0; |
| 3457 | s->replacing = 0; |
| 3458 | /* There is nothing more to do for sync/check/repair. |
| 3459 | * Don't even need to abort as that is handled elsewhere |
| 3460 | * if needed, and not always wanted e.g. if there is a known |
| 3461 | * bad block here. |
| 3462 | * For recover/replace we need to record a bad block on all |
| 3463 | * non-sync devices, or abort the recovery |
| 3464 | */ |
| 3465 | if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { |
| 3466 | /* During recovery devices cannot be removed, so |
| 3467 | * locking and refcounting of rdevs is not needed |
| 3468 | */ |
| 3469 | rcu_read_lock(); |
| 3470 | for (i = 0; i < conf->raid_disks; i++) { |
| 3471 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
| 3472 | if (rdev |
| 3473 | && !test_bit(Faulty, &rdev->flags) |
| 3474 | && !test_bit(In_sync, &rdev->flags) |
| 3475 | && !rdev_set_badblocks(rdev, sh->sector, |
| 3476 | STRIPE_SECTORS, 0)) |
| 3477 | abort = 1; |
| 3478 | rdev = rcu_dereference(conf->disks[i].replacement); |
| 3479 | if (rdev |
| 3480 | && !test_bit(Faulty, &rdev->flags) |
| 3481 | && !test_bit(In_sync, &rdev->flags) |
| 3482 | && !rdev_set_badblocks(rdev, sh->sector, |
| 3483 | STRIPE_SECTORS, 0)) |
| 3484 | abort = 1; |
| 3485 | } |
| 3486 | rcu_read_unlock(); |
| 3487 | if (abort) |
| 3488 | conf->recovery_disabled = |
| 3489 | conf->mddev->recovery_disabled; |
| 3490 | } |
| 3491 | md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); |
| 3492 | } |
| 3493 | |
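| | /*
| | * A replacement is wanted for this slot if one exists, is neither
| | * Faulty nor In_sync, and has not been recovered past this sector
| | * (or the array itself has not been synced past it).
| | */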
| 3494 | static int want_replace(struct stripe_head *sh, int disk_idx) |
| 3495 | { |
| 3496 | struct md_rdev *rdev; |
| 3497 | int rv = 0; |
| 3498 | |
| 3499 | rcu_read_lock(); |
| 3500 | rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); |
| 3501 | if (rdev |
| 3502 | && !test_bit(Faulty, &rdev->flags) |
| 3503 | && !test_bit(In_sync, &rdev->flags) |
| 3504 | && (rdev->recovery_offset <= sh->sector |
| 3505 | || rdev->mddev->recovery_cp <= sh->sector)) |
| 3506 | rv = 1; |
| 3507 | rcu_read_unlock(); |
| 3508 | return rv; |
| 3509 | } |
| 3510 | |
| 3511 | static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, |
| 3512 | int disk_idx, int disks) |
| 3513 | { |
| 3514 | struct r5dev *dev = &sh->dev[disk_idx]; |
| 3515 | struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], |
| 3516 | &sh->dev[s->failed_num[1]] }; |
| 3517 | int i; |
| 3518 | |
| 3519 | |
| 3520 | if (test_bit(R5_LOCKED, &dev->flags) || |
| 3521 | test_bit(R5_UPTODATE, &dev->flags)) |
| 3522 | /* No point reading this as we already have it or have |
| 3523 | * decided to get it. |
| 3524 | */ |
| 3525 | return 0; |
| 3526 | |
| 3527 | if (dev->toread || |
| 3528 | (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags))) |
| 3529 | /* We need this block to directly satisfy a request */ |
| 3530 | return 1; |
| 3531 | |
| 3532 | if (s->syncing || s->expanding || |
| 3533 | (s->replacing && want_replace(sh, disk_idx))) |
| 3534 | /* When syncing or expanding, we read everything.
| 3535 | * When replacing, we need the replaced block. |
| 3536 | */ |
| 3537 | return 1; |
| 3538 | |
| 3539 | if ((s->failed >= 1 && fdev[0]->toread) || |
| 3540 | (s->failed >= 2 && fdev[1]->toread)) |
| 3541 | /* If we want to read from a failed device, then |
| 3542 | * we need to actually read every other device. |
| 3543 | */ |
| 3544 | return 1; |
| 3545 | |
| 3546 | /* Sometimes neither read-modify-write nor reconstruct-write |
| 3547 | * cycles can work. In those cases we read every block we |
| 3548 | * can. Then the parity-update is certain to have enough to |
| 3549 | * work with. |
| 3550 | * This can only be a problem when we need to write something, |
| 3551 | * and some device has failed. If either of those tests |
| 3552 | * fails, we need look no further.
| 3553 | */ |
| 3554 | if (!s->failed || !s->to_write) |
| 3555 | return 0; |
| 3556 | |
| 3557 | if (test_bit(R5_Insync, &dev->flags) && |
| 3558 | !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 3559 | /* Pre-reads are not permitted until after a short delay
| 3560 | * to gather multiple requests. However, if this
| 3561 | * device is not Insync, the block can only be computed
| 3562 | * and there is no need to delay that.
| 3563 | */ |
| 3564 | return 0; |
| 3565 | |
| 3566 | for (i = 0; i < s->failed && i < 2; i++) { |
| 3567 | if (fdev[i]->towrite && |
| 3568 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && |
| 3569 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) |
| 3570 | /* If we have a partial write to a failed |
| 3571 | * device, then we will need to reconstruct |
| 3572 | * the content of that device, so all other |
| 3573 | * devices must be read. |
| 3574 | */ |
| 3575 | return 1; |
| 3576 | } |
| 3577 | |
| 3578 | /* If we are forced to do a reconstruct-write, either because |
| 3579 | * the current RAID6 implementation only supports that, or |
| 3580 | * because parity cannot be trusted and we are currently |
| 3581 | * recovering it, there is extra need to be careful. |
| 3582 | * If one of the devices that we would need to read, because |
| 3583 | * it is not being overwritten (and maybe not written at all) |
| 3584 | * is missing/faulty, then we need to read everything we can. |
| 3585 | */ |
| 3586 | if (sh->raid_conf->level != 6 && |
| 3587 | sh->sector < sh->raid_conf->mddev->recovery_cp) |
| 3588 | /* reconstruct-write isn't being forced */ |
| 3589 | return 0; |
| 3590 | for (i = 0; i < s->failed && i < 2; i++) { |
| 3591 | if (s->failed_num[i] != sh->pd_idx && |
| 3592 | s->failed_num[i] != sh->qd_idx && |
| 3593 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && |
| 3594 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) |
| 3595 | return 1; |
| 3596 | } |
| 3597 | |
| 3598 | return 0; |
| 3599 | } |
| 3600 | |
| 3601 | /* fetch_block - checks the given member device to see if its data needs |
| 3602 | * to be read or computed to satisfy a request. |
| 3603 | * |
| 3604 | * Returns 1 when no more member devices need to be checked, otherwise returns |
| 3605 | * 0 to tell the loop in handle_stripe_fill to continue |
| 3606 | */ |
| 3607 | static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, |
| 3608 | int disk_idx, int disks) |
| 3609 | { |
| 3610 | struct r5dev *dev = &sh->dev[disk_idx]; |
| 3611 | |
| 3612 | /* is the data in this block needed, and can we get it? */ |
| 3613 | if (need_this_block(sh, s, disk_idx, disks)) { |
| 3614 | /* we would like to get this block, possibly by computing it, |
| 3615 | * otherwise read it if the backing disk is insync |
| 3616 | */ |
| 3617 | BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); |
| 3618 | BUG_ON(test_bit(R5_Wantread, &dev->flags)); |
| 3619 | BUG_ON(sh->batch_head); |
| 3620 | if ((s->uptodate == disks - 1) && |
| 3621 | (s->failed && (disk_idx == s->failed_num[0] || |
| 3622 | disk_idx == s->failed_num[1]))) { |
| 3623 | /* the disk has failed and we've been asked to fetch it;
| 3624 | * compute it from the other devices
| 3625 | */ |
| 3626 | pr_debug("Computing stripe %llu block %d\n", |
| 3627 | (unsigned long long)sh->sector, disk_idx); |
| 3628 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 3629 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 3630 | set_bit(R5_Wantcompute, &dev->flags); |
| 3631 | sh->ops.target = disk_idx; |
| 3632 | sh->ops.target2 = -1; /* no 2nd target */ |
| 3633 | s->req_compute = 1; |
| 3634 | /* Careful: from this point on 'uptodate' is in the eye |
| 3635 | * of raid_run_ops which services 'compute' operations |
| 3636 | * before writes. R5_Wantcompute flags a block that will |
| 3637 | * be R5_UPTODATE by the time it is needed for a |
| 3638 | * subsequent operation. |
| 3639 | */ |
| 3640 | s->uptodate++; |
| 3641 | return 1; |
| 3642 | } else if (s->uptodate == disks-2 && s->failed >= 2) { |
| 3643 | /* Computing 2-failure is *very* expensive; only |
| 3644 | * do it if failed >= 2 |
| 3645 | */ |
| 3646 | int other; |
| 3647 | for (other = disks; other--; ) { |
| 3648 | if (other == disk_idx) |
| 3649 | continue; |
| 3650 | if (!test_bit(R5_UPTODATE, |
| 3651 | &sh->dev[other].flags)) |
| 3652 | break; |
| 3653 | } |
| 3654 | BUG_ON(other < 0); |
| 3655 | pr_debug("Computing stripe %llu blocks %d,%d\n", |
| 3656 | (unsigned long long)sh->sector, |
| 3657 | disk_idx, other); |
| 3658 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 3659 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 3660 | set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); |
| 3661 | set_bit(R5_Wantcompute, &sh->dev[other].flags); |
| 3662 | sh->ops.target = disk_idx; |
| 3663 | sh->ops.target2 = other; |
| 3664 | s->uptodate += 2; |
| 3665 | s->req_compute = 1; |
| 3666 | return 1; |
| 3667 | } else if (test_bit(R5_Insync, &dev->flags)) { |
| 3668 | set_bit(R5_LOCKED, &dev->flags); |
| 3669 | set_bit(R5_Wantread, &dev->flags); |
| 3670 | s->locked++; |
| 3671 | pr_debug("Reading block %d (sync=%d)\n", |
| 3672 | disk_idx, s->syncing); |
| 3673 | } |
| 3674 | } |
| 3675 | |
| 3676 | return 0; |
| 3677 | } |
| 3678 | |
| 3679 | /** |
| 3680 | * handle_stripe_fill - read or compute data to satisfy pending requests. |
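| | * @sh: stripe to read or compute blocks for
| | * @s: stripe state, as filled in by analyse_stripe()
| | * @disks: number of devices in the stripe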
| 3681 | */ |
| 3682 | static void handle_stripe_fill(struct stripe_head *sh, |
| 3683 | struct stripe_head_state *s, |
| 3684 | int disks) |
| 3685 | { |
| 3686 | int i; |
| 3687 | |
| 3688 | /* look for blocks to read/compute, skip this if a compute |
| 3689 | * is already in flight, or if the stripe contents are in the |
| 3690 | * midst of changing due to a write |
| 3691 | */ |
| 3692 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && |
| 3693 | !sh->reconstruct_state) { |
| 3694 | |
| 3695 | /* |
| 3696 | * For degraded stripe with data in journal, do not handle |
| 3697 | * read requests yet, instead, flush the stripe to raid |
| 3698 | * disks first, this avoids handling complex rmw of write |
| 3699 | * back cache (prexor with orig_page, and then xor with |
| 3700 | * page) in the read path |
| 3701 | */ |
| 3702 | if (s->injournal && s->failed) { |
| 3703 | if (test_bit(STRIPE_R5C_CACHING, &sh->state)) |
| 3704 | r5c_make_stripe_write_out(sh); |
| 3705 | goto out; |
| 3706 | } |
| 3707 | |
| 3708 | for (i = disks; i--; ) |
| 3709 | if (fetch_block(sh, s, i, disks)) |
| 3710 | break; |
| 3711 | } |
| 3712 | out: |
| 3713 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3714 | } |
| 3715 | |
| 3716 | static void break_stripe_batch_list(struct stripe_head *head_sh, |
| 3717 | unsigned long handle_flags); |
| 3718 | /* handle_stripe_clean_event |
| 3719 | * any written block on an uptodate or failed drive can be returned. |
| 3720 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but |
| 3721 | * never LOCKED, so we don't need to test 'failed' directly. |
| 3722 | */ |
| 3723 | static void handle_stripe_clean_event(struct r5conf *conf, |
| 3724 | struct stripe_head *sh, int disks) |
| 3725 | { |
| 3726 | int i; |
| 3727 | struct r5dev *dev; |
| 3728 | int discard_pending = 0; |
| 3729 | struct stripe_head *head_sh = sh; |
| 3730 | bool do_endio = false; |
| 3731 | |
| 3732 | for (i = disks; i--; ) |
| 3733 | if (sh->dev[i].written) { |
| 3734 | dev = &sh->dev[i]; |
| 3735 | if (!test_bit(R5_LOCKED, &dev->flags) && |
| 3736 | (test_bit(R5_UPTODATE, &dev->flags) || |
| 3737 | test_bit(R5_Discard, &dev->flags) || |
| 3738 | test_bit(R5_SkipCopy, &dev->flags))) { |
| 3739 | /* We can return any write requests */ |
| 3740 | struct bio *wbi, *wbi2; |
| 3741 | pr_debug("Return write for disc %d\n", i); |
| 3742 | if (test_and_clear_bit(R5_Discard, &dev->flags)) |
| 3743 | clear_bit(R5_UPTODATE, &dev->flags); |
| 3744 | if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { |
| 3745 | WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); |
| 3746 | } |
| 3747 | do_endio = true; |
| 3748 | |
| 3749 | returnbi: |
| 3750 | dev->page = dev->orig_page; |
| 3751 | wbi = dev->written; |
| 3752 | dev->written = NULL; |
| 3753 | while (wbi && wbi->bi_iter.bi_sector < |
| 3754 | dev->sector + STRIPE_SECTORS) { |
| 3755 | wbi2 = r5_next_bio(wbi, dev->sector); |
| 3756 | md_write_end(conf->mddev); |
| 3757 | bio_endio(wbi); |
| 3758 | wbi = wbi2; |
| 3759 | } |
| 3760 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, |
| 3761 | STRIPE_SECTORS, |
| 3762 | !test_bit(STRIPE_DEGRADED, &sh->state), |
| 3763 | 0); |
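| | /* for a batched stripe, also return the written bios for this
| | * device index on every other member of the batch
| | */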
| 3764 | if (head_sh->batch_head) { |
| 3765 | sh = list_first_entry(&sh->batch_list, |
| 3766 | struct stripe_head, |
| 3767 | batch_list); |
| 3768 | if (sh != head_sh) { |
| 3769 | dev = &sh->dev[i]; |
| 3770 | goto returnbi; |
| 3771 | } |
| 3772 | } |
| 3773 | sh = head_sh; |
| 3774 | dev = &sh->dev[i]; |
| 3775 | } else if (test_bit(R5_Discard, &dev->flags)) |
| 3776 | discard_pending = 1; |
| 3777 | } |
| 3778 | |
| 3779 | log_stripe_write_finished(sh); |
| 3780 | |
| 3781 | if (!discard_pending && |
| 3782 | test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { |
| 3783 | int hash; |
| 3784 | clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); |
| 3785 | clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); |
| 3786 | if (sh->qd_idx >= 0) { |
| 3787 | clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); |
| 3788 | clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); |
| 3789 | } |
| 3790 | /* now that discard is done we can proceed with any sync */ |
| 3791 | clear_bit(STRIPE_DISCARD, &sh->state); |
| 3792 | /* |
| 3793 | * SCSI discard will change some bio fields and the stripe has |
| 3794 | * no updated data, so remove it from the hash list and the stripe
| 3795 | * will be reinitialized |
| 3796 | */ |
| 3797 | unhash: |
| 3798 | hash = sh->hash_lock_index; |
| 3799 | spin_lock_irq(conf->hash_locks + hash); |
| 3800 | remove_hash(sh); |
| 3801 | spin_unlock_irq(conf->hash_locks + hash); |
| 3802 | if (head_sh->batch_head) { |
| 3803 | sh = list_first_entry(&sh->batch_list, |
| 3804 | struct stripe_head, batch_list); |
| 3805 | if (sh != head_sh) |
| 3806 | goto unhash; |
| 3807 | } |
| 3808 | sh = head_sh; |
| 3809 | |
| 3810 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) |
| 3811 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3812 | |
| 3813 | } |
| 3814 | |
| 3815 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 3816 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 3817 | md_wakeup_thread(conf->mddev->thread); |
| 3818 | |
| 3819 | if (head_sh->batch_head && do_endio) |
| 3820 | break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); |
| 3821 | } |
| 3822 | |
| 3823 | /* |
| 3824 | * For RMW in write back cache, we need extra page in prexor to store the |
| 3825 | * old data. This page is stored in dev->orig_page. |
| 3826 | * |
| 3827 | * This function checks whether we have data for prexor. The exact logic |
| 3828 | * is: |
| 3829 | * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) |
| 3830 | */ |
| 3831 | static inline bool uptodate_for_rmw(struct r5dev *dev) |
| 3832 | { |
| 3833 | return (test_bit(R5_UPTODATE, &dev->flags)) && |
| 3834 | (!test_bit(R5_InJournal, &dev->flags) || |
| 3835 | test_bit(R5_OrigPageUPTDODATE, &dev->flags)); |
| 3836 | } |
| 3837 | |
| 3838 | static int handle_stripe_dirtying(struct r5conf *conf, |
| 3839 | struct stripe_head *sh, |
| 3840 | struct stripe_head_state *s, |
| 3841 | int disks) |
| 3842 | { |
| 3843 | int rmw = 0, rcw = 0, i; |
| 3844 | sector_t recovery_cp = conf->mddev->recovery_cp; |
| 3845 | |
| 3846 | /* Check whether resync is now happening or should start. |
| 3847 | * If yes, then the array is dirty (after unclean shutdown or |
| 3848 | * initial creation), so parity in some stripes might be inconsistent. |
| 3849 | * In this case, we need to always do reconstruct-write, to ensure |
| 3850 | * that in case of drive failure or read-error correction, we |
| 3851 | * generate correct data from the parity. |
| 3852 | */ |
| 3853 | if (conf->rmw_level == PARITY_DISABLE_RMW || |
| 3854 | (recovery_cp < MaxSector && sh->sector >= recovery_cp && |
| 3855 | s->failed == 0)) { |
| 3856 | /* Calculate the real rcw later - for now make it |
| 3857 | * look like rcw is cheaper |
| 3858 | */ |
| 3859 | rcw = 1; rmw = 2; |
| 3860 | pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", |
| 3861 | conf->rmw_level, (unsigned long long)recovery_cp, |
| 3862 | (unsigned long long)sh->sector); |
| 3863 | } else for (i = disks; i--; ) { |
| 3864 | /* would I have to read this buffer for read_modify_write */ |
| 3865 | struct r5dev *dev = &sh->dev[i]; |
| 3866 | if (((dev->towrite && !delay_towrite(conf, dev, s)) || |
| 3867 | i == sh->pd_idx || i == sh->qd_idx || |
| 3868 | test_bit(R5_InJournal, &dev->flags)) && |
| 3869 | !test_bit(R5_LOCKED, &dev->flags) && |
| 3870 | !(uptodate_for_rmw(dev) || |
| 3871 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 3872 | if (test_bit(R5_Insync, &dev->flags)) |
| 3873 | rmw++; |
| 3874 | else |
| 3875 | rmw += 2*disks; /* cannot read it */ |
| 3876 | } |
| 3877 | /* Would I have to read this buffer for reconstruct_write */ |
| 3878 | if (!test_bit(R5_OVERWRITE, &dev->flags) && |
| 3879 | i != sh->pd_idx && i != sh->qd_idx && |
| 3880 | !test_bit(R5_LOCKED, &dev->flags) && |
| 3881 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 3882 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 3883 | if (test_bit(R5_Insync, &dev->flags)) |
| 3884 | rcw++; |
| 3885 | else |
| 3886 | rcw += 2*disks; |
| 3887 | } |
| 3888 | } |
| 3889 | |
| 3890 | pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n", |
| 3891 | (unsigned long long)sh->sector, sh->state, rmw, rcw); |
| 3892 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3893 | if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { |
| 3894 | /* prefer read-modify-write, but need to get some data */ |
| 3895 | if (conf->mddev->queue) |
| 3896 | blk_add_trace_msg(conf->mddev->queue, |
| 3897 | "raid5 rmw %llu %d", |
| 3898 | (unsigned long long)sh->sector, rmw); |
| 3899 | for (i = disks; i--; ) { |
| 3900 | struct r5dev *dev = &sh->dev[i]; |
| 3901 | if (test_bit(R5_InJournal, &dev->flags) && |
| 3902 | dev->page == dev->orig_page && |
| 3903 | !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { |
| 3904 | /* alloc page for prexor */ |
| 3905 | struct page *p = alloc_page(GFP_NOIO); |
| 3906 | |
| 3907 | if (p) { |
| 3908 | dev->orig_page = p; |
| 3909 | continue; |
| 3910 | } |
| 3911 | |
| 3912 | /* |
| 3913 | * alloc_page() failed, try to use
| 3914 | * disk_info->extra_page |
| 3915 | */ |
| 3916 | if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, |
| 3917 | &conf->cache_state)) { |
| 3918 | r5c_use_extra_page(sh); |
| 3919 | break; |
| 3920 | } |
| 3921 | |
| 3922 | /* extra_page in use, add to delayed_list */ |
| 3923 | set_bit(STRIPE_DELAYED, &sh->state); |
| 3924 | s->waiting_extra_page = 1; |
| 3925 | return -EAGAIN; |
| 3926 | } |
| 3927 | } |
| 3928 | |
| 3929 | for (i = disks; i--; ) { |
| 3930 | struct r5dev *dev = &sh->dev[i]; |
| 3931 | if (((dev->towrite && !delay_towrite(conf, dev, s)) || |
| 3932 | i == sh->pd_idx || i == sh->qd_idx || |
| 3933 | test_bit(R5_InJournal, &dev->flags)) && |
| 3934 | !test_bit(R5_LOCKED, &dev->flags) && |
| 3935 | !(uptodate_for_rmw(dev) || |
| 3936 | test_bit(R5_Wantcompute, &dev->flags)) && |
| 3937 | test_bit(R5_Insync, &dev->flags)) { |
| 3938 | if (test_bit(STRIPE_PREREAD_ACTIVE, |
| 3939 | &sh->state)) { |
| 3940 | pr_debug("Read_old block %d for r-m-w\n", |
| 3941 | i); |
| 3942 | set_bit(R5_LOCKED, &dev->flags); |
| 3943 | set_bit(R5_Wantread, &dev->flags); |
| 3944 | s->locked++; |
| 3945 | } else { |
| 3946 | set_bit(STRIPE_DELAYED, &sh->state); |
| 3947 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3948 | } |
| 3949 | } |
| 3950 | } |
| 3951 | } |
| 3952 | if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { |
| 3953 | /* want reconstruct write, but need to get some data */ |
| 3954 | int qread = 0;
| 3955 | rcw = 0; |
| 3956 | for (i = disks; i--; ) { |
| 3957 | struct r5dev *dev = &sh->dev[i]; |
| 3958 | if (!test_bit(R5_OVERWRITE, &dev->flags) && |
| 3959 | i != sh->pd_idx && i != sh->qd_idx && |
| 3960 | !test_bit(R5_LOCKED, &dev->flags) && |
| 3961 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 3962 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 3963 | rcw++; |
| 3964 | if (test_bit(R5_Insync, &dev->flags) && |
| 3965 | test_bit(STRIPE_PREREAD_ACTIVE, |
| 3966 | &sh->state)) { |
| 3967 | pr_debug("Read_old block " |
| 3968 | "%d for Reconstruct\n", i); |
| 3969 | set_bit(R5_LOCKED, &dev->flags); |
| 3970 | set_bit(R5_Wantread, &dev->flags); |
| 3971 | s->locked++; |
| 3972 | qread++; |
| 3973 | } else { |
| 3974 | set_bit(STRIPE_DELAYED, &sh->state); |
| 3975 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3976 | } |
| 3977 | } |
| 3978 | } |
| 3979 | if (rcw && conf->mddev->queue) |
| 3980 | blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", |
| 3981 | (unsigned long long)sh->sector, |
| 3982 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); |
| 3983 | } |
| 3984 | |
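| | /* if both rmw and rcw would require reading a device that is not
| | * in sync, delay the stripe unless pre-reads are already active
| | */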
| 3985 | if (rcw > disks && rmw > disks && |
| 3986 | !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 3987 | set_bit(STRIPE_DELAYED, &sh->state); |
| 3988 | |
| 3989 | /* now if nothing is locked, and if we have enough data, |
| 3990 | * we can start a write request |
| 3991 | */ |
| 3992 | /* since handle_stripe can be called at any time we need to handle the |
| 3993 | * case where a compute block operation has been submitted and then a |
| 3994 | * subsequent call wants to start a write request. raid_run_ops only |
| 3995 | * handles the case where compute block and reconstruct are requested |
| 3996 | * simultaneously. If this is not the case then new writes need to be |
| 3997 | * held off until the compute completes. |
| 3998 | */ |
| 3999 | if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && |
| 4000 | (s->locked == 0 && (rcw == 0 || rmw == 0) && |
| 4001 | !test_bit(STRIPE_BIT_DELAY, &sh->state))) |
| 4002 | schedule_reconstruction(sh, s, rcw == 0, 0); |
| 4003 | return 0; |
| 4004 | } |
| 4005 | |
| 4006 | static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, |
| 4007 | struct stripe_head_state *s, int disks) |
| 4008 | { |
| 4009 | struct r5dev *dev = NULL; |
| 4010 | |
| 4011 | BUG_ON(sh->batch_head); |
| 4012 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4013 | |
| 4014 | switch (sh->check_state) { |
| 4015 | case check_state_idle: |
| 4016 | /* start a new check operation if there are no failures */ |
| 4017 | if (s->failed == 0) { |
| 4018 | BUG_ON(s->uptodate != disks); |
| 4019 | sh->check_state = check_state_run; |
| 4020 | set_bit(STRIPE_OP_CHECK, &s->ops_request); |
| 4021 | clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); |
| 4022 | s->uptodate--; |
| 4023 | break; |
| 4024 | } |
| 4025 | dev = &sh->dev[s->failed_num[0]]; |
| 4026 | /* fall through */ |
| 4027 | case check_state_compute_result: |
| 4028 | sh->check_state = check_state_idle; |
| 4029 | if (!dev) |
| 4030 | dev = &sh->dev[sh->pd_idx]; |
| 4031 | |
| 4032 | /* check that a write has not made the stripe insync */ |
| 4033 | if (test_bit(STRIPE_INSYNC, &sh->state)) |
| 4034 | break; |
| 4035 | |
| 4036 | /* either failed parity check, or recovery is happening */ |
| 4037 | BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); |
| 4038 | BUG_ON(s->uptodate != disks); |
| 4039 | |
| 4040 | set_bit(R5_LOCKED, &dev->flags); |
| 4041 | s->locked++; |
| 4042 | set_bit(R5_Wantwrite, &dev->flags); |
| 4043 | |
| 4044 | clear_bit(STRIPE_DEGRADED, &sh->state); |
| 4045 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4046 | break; |
| 4047 | case check_state_run: |
| 4048 | break; /* we will be called again upon completion */ |
| 4049 | case check_state_check_result: |
| 4050 | sh->check_state = check_state_idle; |
| 4051 | |
| 4052 | /* if a failure occurred during the check operation, leave |
| 4053 | * STRIPE_INSYNC not set and let the stripe be handled again |
| 4054 | */ |
| 4055 | if (s->failed) |
| 4056 | break; |
| 4057 | |
| 4058 | /* handle a successful check operation, if parity is correct |
| 4059 | * we are done. Otherwise update the mismatch count and repair |
| 4060 | * parity if !MD_RECOVERY_CHECK |
| 4061 | */ |
| 4062 | if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) |
| 4063 | /* parity is correct (on disc, |
| 4064 | * not in buffer any more) |
| 4065 | */ |
| 4066 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4067 | else { |
| 4068 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
| 4069 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 4070 | /* don't try to repair!! */ |
| 4071 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4072 | else { |
| 4073 | sh->check_state = check_state_compute_run; |
| 4074 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 4075 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 4076 | set_bit(R5_Wantcompute, |
| 4077 | &sh->dev[sh->pd_idx].flags); |
| 4078 | sh->ops.target = sh->pd_idx; |
| 4079 | sh->ops.target2 = -1; |
| 4080 | s->uptodate++; |
| 4081 | } |
| 4082 | } |
| 4083 | break; |
| 4084 | case check_state_compute_run: |
| 4085 | break; |
| 4086 | default: |
| 4087 | pr_err("%s: unknown check_state: %d sector: %llu\n", |
| 4088 | __func__, sh->check_state, |
| 4089 | (unsigned long long) sh->sector); |
| 4090 | BUG(); |
| 4091 | } |
| 4092 | } |
| 4093 | |
| 4094 | static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, |
| 4095 | struct stripe_head_state *s, |
| 4096 | int disks) |
| 4097 | { |
| 4098 | int pd_idx = sh->pd_idx; |
| 4099 | int qd_idx = sh->qd_idx; |
| 4100 | struct r5dev *dev; |
| 4101 | |
| 4102 | BUG_ON(sh->batch_head); |
| 4103 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4104 | |
| 4105 | BUG_ON(s->failed > 2); |
| 4106 | |
| 4107 | /* Want to check and possibly repair P and Q. |
| 4108 | * However there could be one 'failed' device, in which |
| 4109 | * case we can only check one of them, possibly using the |
| 4110 | * other to generate missing data |
| 4111 | */ |
| 4112 | |
| 4113 | switch (sh->check_state) { |
| 4114 | case check_state_idle: |
| 4115 | /* start a new check operation if there are < 2 failures */ |
| 4116 | if (s->failed == s->q_failed) { |
| 4117 | /* The only possible failed device holds Q, so it |
| 4118 | * makes sense to check P (If anything else were failed, |
| 4119 | * we would have used P to recreate it). |
| 4120 | */ |
| 4121 | sh->check_state = check_state_run; |
| 4122 | } |
| 4123 | if (!s->q_failed && s->failed < 2) { |
| 4124 | /* Q is not failed, and we didn't use it to generate |
| 4125 | * anything, so it makes sense to check it |
| 4126 | */ |
| 4127 | if (sh->check_state == check_state_run) |
| 4128 | sh->check_state = check_state_run_pq; |
| 4129 | else |
| 4130 | sh->check_state = check_state_run_q; |
| 4131 | } |
| 4132 | |
| 4133 | /* discard potentially stale zero_sum_result */ |
| 4134 | sh->ops.zero_sum_result = 0; |
| 4135 | |
| 4136 | if (sh->check_state == check_state_run) { |
| 4137 | /* async_xor_zero_sum destroys the contents of P */ |
| 4138 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
| 4139 | s->uptodate--; |
| 4140 | } |
| 4141 | if (sh->check_state >= check_state_run && |
| 4142 | sh->check_state <= check_state_run_pq) { |
| 4143 | /* async_syndrome_zero_sum preserves P and Q, so |
| 4144 | * no need to mark them !uptodate here |
| 4145 | */ |
| 4146 | set_bit(STRIPE_OP_CHECK, &s->ops_request); |
| 4147 | break; |
| 4148 | } |
| 4149 | |
| 4150 | /* we have 2-disk failure */ |
| 4151 | BUG_ON(s->failed != 2); |
| 4152 | /* fall through */ |
| 4153 | case check_state_compute_result: |
| 4154 | sh->check_state = check_state_idle; |
| 4155 | |
| 4156 | /* check that a write has not made the stripe insync */ |
| 4157 | if (test_bit(STRIPE_INSYNC, &sh->state)) |
| 4158 | break; |
| 4159 | |
| 4160 | /* now write out any block on a failed drive, |
| 4161 | * or P or Q if they were recomputed |
| 4162 | */ |
| 4163 | BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ |
| 4164 | if (s->failed == 2) { |
| 4165 | dev = &sh->dev[s->failed_num[1]]; |
| 4166 | s->locked++; |
| 4167 | set_bit(R5_LOCKED, &dev->flags); |
| 4168 | set_bit(R5_Wantwrite, &dev->flags); |
| 4169 | } |
| 4170 | if (s->failed >= 1) { |
| 4171 | dev = &sh->dev[s->failed_num[0]]; |
| 4172 | s->locked++; |
| 4173 | set_bit(R5_LOCKED, &dev->flags); |
| 4174 | set_bit(R5_Wantwrite, &dev->flags); |
| 4175 | } |
| 4176 | if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { |
| 4177 | dev = &sh->dev[pd_idx]; |
| 4178 | s->locked++; |
| 4179 | set_bit(R5_LOCKED, &dev->flags); |
| 4180 | set_bit(R5_Wantwrite, &dev->flags); |
| 4181 | } |
| 4182 | if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { |
| 4183 | dev = &sh->dev[qd_idx]; |
| 4184 | s->locked++; |
| 4185 | set_bit(R5_LOCKED, &dev->flags); |
| 4186 | set_bit(R5_Wantwrite, &dev->flags); |
| 4187 | } |
| 4188 | clear_bit(STRIPE_DEGRADED, &sh->state); |
| 4189 | |
| 4190 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4191 | break; |
| 4192 | case check_state_run: |
| 4193 | case check_state_run_q: |
| 4194 | case check_state_run_pq: |
| 4195 | break; /* we will be called again upon completion */ |
| 4196 | case check_state_check_result: |
| 4197 | sh->check_state = check_state_idle; |
| 4198 | |
| 4199 | /* handle a successful check operation, if parity is correct |
| 4200 | * we are done. Otherwise update the mismatch count and repair |
| 4201 | * parity if !MD_RECOVERY_CHECK |
| 4202 | */ |
| 4203 | if (sh->ops.zero_sum_result == 0) { |
| 4204 | /* both parities are correct */ |
| 4205 | if (!s->failed) |
| 4206 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4207 | else { |
| 4208 | /* in contrast to the raid5 case we can validate |
| 4209 | * parity, but still have a failure to write |
| 4210 | * back |
| 4211 | */ |
| 4212 | sh->check_state = check_state_compute_result; |
| 4213 | /* Returning at this point means that we may go |
| 4214 | * off and bring p and/or q uptodate again, so
| 4215 | * we make sure to check zero_sum_result again
| 4216 | * to verify whether p or q need writeback
| 4217 | */ |
| 4218 | } |
| 4219 | } else { |
| 4220 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
| 4221 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 4222 | /* don't try to repair!! */ |
| 4223 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4224 | else { |
| 4225 | int *target = &sh->ops.target; |
| 4226 | |
| 4227 | sh->ops.target = -1; |
| 4228 | sh->ops.target2 = -1; |
| 4229 | sh->check_state = check_state_compute_run; |
| 4230 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 4231 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 4232 | if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { |
| 4233 | set_bit(R5_Wantcompute, |
| 4234 | &sh->dev[pd_idx].flags); |
| 4235 | *target = pd_idx; |
| 4236 | target = &sh->ops.target2; |
| 4237 | s->uptodate++; |
| 4238 | } |
| 4239 | if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { |
| 4240 | set_bit(R5_Wantcompute, |
| 4241 | &sh->dev[qd_idx].flags); |
| 4242 | *target = qd_idx; |
| 4243 | s->uptodate++; |
| 4244 | } |
| 4245 | } |
| 4246 | } |
| 4247 | break; |
| 4248 | case check_state_compute_run: |
| 4249 | break; |
| 4250 | default: |
| 4251 | pr_warn("%s: unknown check_state: %d sector: %llu\n", |
| 4252 | __func__, sh->check_state, |
| 4253 | (unsigned long long) sh->sector); |
| 4254 | BUG(); |
| 4255 | } |
| 4256 | } |
| 4257 | |
| 4258 | static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) |
| 4259 | { |
| 4260 | int i; |
| 4261 | |
| 4262 | /* We have read all the blocks in this stripe and now we need to |
| 4263 | * copy some of them into a target stripe for expand. |
| 4264 | */ |
| 4265 | struct dma_async_tx_descriptor *tx = NULL; |
| 4266 | BUG_ON(sh->batch_head); |
| 4267 | clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 4268 | for (i = 0; i < sh->disks; i++) |
| 4269 | if (i != sh->pd_idx && i != sh->qd_idx) { |
| 4270 | int dd_idx, j; |
| 4271 | struct stripe_head *sh2; |
| 4272 | struct async_submit_ctl submit; |
| 4273 | |
| 4274 | sector_t bn = raid5_compute_blocknr(sh, i, 1); |
| 4275 | sector_t s = raid5_compute_sector(conf, bn, 0, |
| 4276 | &dd_idx, NULL); |
| 4277 | sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); |
| 4278 | if (sh2 == NULL) |
| 4279 | /* so far only the early blocks of this stripe |
| 4280 | * have been requested. When later blocks |
| 4281 | * get requested, we will try again |
| 4282 | */ |
| 4283 | continue; |
| 4284 | if (!test_bit(STRIPE_EXPANDING, &sh2->state) || |
| 4285 | test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { |
| 4286 | /* must have already done this block */ |
| 4287 | raid5_release_stripe(sh2); |
| 4288 | continue; |
| 4289 | } |
| 4290 | |
| 4291 | /* place all the copies on one channel */ |
| 4292 | init_async_submit(&submit, 0, tx, NULL, NULL, NULL); |
| 4293 | tx = async_memcpy(sh2->dev[dd_idx].page, |
| 4294 | sh->dev[i].page, 0, 0, STRIPE_SIZE, |
| 4295 | &submit); |
| 4296 | |
| 4297 | set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); |
| 4298 | set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); |
| 4299 | for (j = 0; j < conf->raid_disks; j++) |
| 4300 | if (j != sh2->pd_idx && |
| 4301 | j != sh2->qd_idx && |
| 4302 | !test_bit(R5_Expanded, &sh2->dev[j].flags)) |
| 4303 | break; |
| 4304 | if (j == conf->raid_disks) { |
| 4305 | set_bit(STRIPE_EXPAND_READY, &sh2->state); |
| 4306 | set_bit(STRIPE_HANDLE, &sh2->state); |
| 4307 | } |
| 4308 | raid5_release_stripe(sh2); |
| 4309 | |
| 4310 | } |
| 4311 | /* done submitting copies, wait for them to complete */ |
| 4312 | async_tx_quiesce(&tx); |
| 4313 | } |
| 4314 | |
| 4315 | /* |
| 4316 | * handle_stripe - do things to a stripe. |
| 4317 | * |
| 4318 | * We lock the stripe by setting STRIPE_ACTIVE and then examine the |
| 4319 | * state of various bits to see what needs to be done. |
| 4320 | * Possible results: |
| 4321 | * return some read requests which now have data |
| 4322 | * return some write requests which are safely on storage |
| 4323 | * schedule a read on some buffers |
| 4324 | * schedule a write of some buffers |
| 4325 | * return confirmation of parity correctness |
| 4326 | * |
| 4327 | */ |
| 4328 | |
| 4329 | static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
| 4330 | { |
| 4331 | struct r5conf *conf = sh->raid_conf; |
| 4332 | int disks = sh->disks; |
| 4333 | struct r5dev *dev; |
| 4334 | int i; |
| 4335 | int do_recovery = 0; |
| 4336 | |
| 4337 | memset(s, 0, sizeof(*s)); |
| 4338 | |
| 4339 | s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; |
| 4340 | s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; |
| 4341 | s->failed_num[0] = -1; |
| 4342 | s->failed_num[1] = -1; |
| 4343 | s->log_failed = r5l_log_disk_error(conf); |
| 4344 | |
| 4345 | /* Now to look around and see what can be done */ |
| 4346 | rcu_read_lock(); |
| 4347 | for (i=disks; i--; ) { |
| 4348 | struct md_rdev *rdev; |
| 4349 | sector_t first_bad; |
| 4350 | int bad_sectors; |
| 4351 | int is_bad = 0; |
| 4352 | |
| 4353 | dev = &sh->dev[i]; |
| 4354 | |
| 4355 | pr_debug("check %d: state 0x%lx read %p write %p written %p\n", |
| 4356 | i, dev->flags, |
| 4357 | dev->toread, dev->towrite, dev->written); |
| 4358 | /* maybe we can reply to a read |
| 4359 | * |
| 4360 | * new wantfill requests are only permitted while |
| 4361 | * ops_complete_biofill is guaranteed to be inactive |
| 4362 | */ |
| 4363 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && |
| 4364 | !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) |
| 4365 | set_bit(R5_Wantfill, &dev->flags); |
| 4366 | |
| 4367 | /* now count some things */ |
| 4368 | if (test_bit(R5_LOCKED, &dev->flags)) |
| 4369 | s->locked++; |
| 4370 | if (test_bit(R5_UPTODATE, &dev->flags)) |
| 4371 | s->uptodate++; |
| 4372 | if (test_bit(R5_Wantcompute, &dev->flags)) { |
| 4373 | s->compute++; |
| 4374 | BUG_ON(s->compute > 2); |
| 4375 | } |
| 4376 | |
| 4377 | if (test_bit(R5_Wantfill, &dev->flags)) |
| 4378 | s->to_fill++; |
| 4379 | else if (dev->toread) |
| 4380 | s->to_read++; |
| 4381 | if (dev->towrite) { |
| 4382 | s->to_write++; |
| 4383 | if (!test_bit(R5_OVERWRITE, &dev->flags)) |
| 4384 | s->non_overwrite++; |
| 4385 | } |
| 4386 | if (dev->written) |
| 4387 | s->written++; |
| 4388 | /* Prefer to use the replacement for reads, but only |
| 4389 | * if it is recovered enough and has no bad blocks. |
| 4390 | */ |
| 4391 | rdev = rcu_dereference(conf->disks[i].replacement); |
| 4392 | if (rdev && !test_bit(Faulty, &rdev->flags) && |
| 4393 | rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && |
| 4394 | !is_badblock(rdev, sh->sector, STRIPE_SECTORS, |
| 4395 | &first_bad, &bad_sectors)) |
| 4396 | set_bit(R5_ReadRepl, &dev->flags); |
| 4397 | else { |
| 4398 | if (rdev && !test_bit(Faulty, &rdev->flags)) |
| 4399 | set_bit(R5_NeedReplace, &dev->flags); |
| 4400 | else |
| 4401 | clear_bit(R5_NeedReplace, &dev->flags); |
| 4402 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 4403 | clear_bit(R5_ReadRepl, &dev->flags); |
| 4404 | } |
| 4405 | if (rdev && test_bit(Faulty, &rdev->flags)) |
| 4406 | rdev = NULL; |
| 4407 | if (rdev) { |
| 4408 | is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, |
| 4409 | &first_bad, &bad_sectors); |
| 4410 | if (s->blocked_rdev == NULL |
| 4411 | && (test_bit(Blocked, &rdev->flags) |
| 4412 | || is_bad < 0)) { |
| 4413 | if (is_bad < 0) |
| 4414 | set_bit(BlockedBadBlocks, |
| 4415 | &rdev->flags); |
| 4416 | s->blocked_rdev = rdev; |
| 4417 | atomic_inc(&rdev->nr_pending); |
| 4418 | } |
| 4419 | } |
| 4420 | clear_bit(R5_Insync, &dev->flags); |
| 4421 | if (!rdev) |
| 4422 | /* Not in-sync */; |
| 4423 | else if (is_bad) { |
| 4424 | /* also not in-sync */ |
| 4425 | if (!test_bit(WriteErrorSeen, &rdev->flags) && |
| 4426 | test_bit(R5_UPTODATE, &dev->flags)) { |
| 4427 | /* treat as in-sync, but with a read error |
| 4428 | * which we can now try to correct |
| 4429 | */ |
| 4430 | set_bit(R5_Insync, &dev->flags); |
| 4431 | set_bit(R5_ReadError, &dev->flags); |
| 4432 | } |
| 4433 | } else if (test_bit(In_sync, &rdev->flags)) |
| 4434 | set_bit(R5_Insync, &dev->flags); |
| 4435 | else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) |
| 4436 | /* in sync if before recovery_offset */ |
| 4437 | set_bit(R5_Insync, &dev->flags); |
| 4438 | else if (test_bit(R5_UPTODATE, &dev->flags) && |
| 4439 | test_bit(R5_Expanded, &dev->flags)) |
| 4440 | /* If we've reshaped into here, we assume it is Insync. |
| 4441 | * We will shortly update recovery_offset to make |
| 4442 | * it official. |
| 4443 | */ |
| 4444 | set_bit(R5_Insync, &dev->flags); |
| 4445 | |
| 4446 | if (test_bit(R5_WriteError, &dev->flags)) { |
| 4447 | /* This flag does not apply to '.replacement',
| 4448 | * only to .rdev, so make sure to check that */
| 4449 | struct md_rdev *rdev2 = rcu_dereference( |
| 4450 | conf->disks[i].rdev); |
| 4451 | if (rdev2 == rdev) |
| 4452 | clear_bit(R5_Insync, &dev->flags); |
| 4453 | if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { |
| 4454 | s->handle_bad_blocks = 1; |
| 4455 | atomic_inc(&rdev2->nr_pending); |
| 4456 | } else |
| 4457 | clear_bit(R5_WriteError, &dev->flags); |
| 4458 | } |
| 4459 | if (test_bit(R5_MadeGood, &dev->flags)) { |
| 4460 | /* This flag does not apply to '.replacement',
| 4461 | * only to .rdev, so make sure to check that */
| 4462 | struct md_rdev *rdev2 = rcu_dereference( |
| 4463 | conf->disks[i].rdev); |
| 4464 | if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { |
| 4465 | s->handle_bad_blocks = 1; |
| 4466 | atomic_inc(&rdev2->nr_pending); |
| 4467 | } else |
| 4468 | clear_bit(R5_MadeGood, &dev->flags); |
| 4469 | } |
| 4470 | if (test_bit(R5_MadeGoodRepl, &dev->flags)) { |
| 4471 | struct md_rdev *rdev2 = rcu_dereference( |
| 4472 | conf->disks[i].replacement); |
| 4473 | if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { |
| 4474 | s->handle_bad_blocks = 1; |
| 4475 | atomic_inc(&rdev2->nr_pending); |
| 4476 | } else |
| 4477 | clear_bit(R5_MadeGoodRepl, &dev->flags); |
| 4478 | } |
| 4479 | if (!test_bit(R5_Insync, &dev->flags)) { |
| 4480 | /* The ReadError flag will just be confusing now */ |
| 4481 | clear_bit(R5_ReadError, &dev->flags); |
| 4482 | clear_bit(R5_ReWrite, &dev->flags); |
| 4483 | } |
| 4484 | if (test_bit(R5_ReadError, &dev->flags)) |
| 4485 | clear_bit(R5_Insync, &dev->flags); |
| 4486 | if (!test_bit(R5_Insync, &dev->flags)) { |
| 4487 | if (s->failed < 2) |
| 4488 | s->failed_num[s->failed] = i; |
| 4489 | s->failed++; |
| 4490 | if (rdev && !test_bit(Faulty, &rdev->flags)) |
| 4491 | do_recovery = 1; |
| 4492 | } |
| 4493 | |
| 4494 | if (test_bit(R5_InJournal, &dev->flags)) |
| 4495 | s->injournal++; |
| 4496 | if (test_bit(R5_InJournal, &dev->flags) && dev->written) |
| 4497 | s->just_cached++; |
| 4498 | } |
| 4499 | if (test_bit(STRIPE_SYNCING, &sh->state)) { |
| 4500 | /* If there is a failed device being replaced, |
| 4501 | * we must be recovering. |
| 4502 | * else if we are after recovery_cp, we must be syncing |
| 4503 | * else if MD_RECOVERY_REQUESTED is set, we are also syncing.
| 4504 | * else we can only be replacing.
| 4505 | * sync and recovery both need to read all devices, and so |
| 4506 | * use the same flag. |
| 4507 | */ |
| 4508 | if (do_recovery || |
| 4509 | sh->sector >= conf->mddev->recovery_cp || |
| 4510 | test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) |
| 4511 | s->syncing = 1; |
| 4512 | else |
| 4513 | s->replacing = 1; |
| 4514 | } |
| 4515 | rcu_read_unlock(); |
| 4516 | } |
| 4517 | |
| 4518 | static int clear_batch_ready(struct stripe_head *sh) |
| 4519 | { |
| 4520 | /* Return '1' if this is a member of a batch, or
| 4521 | * '0' if it is a lone stripe or a head which can now be |
| 4522 | * handled. |
| 4523 | */ |
| 4524 | struct stripe_head *tmp; |
| 4525 | if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) |
| 4526 | return (sh->batch_head && sh->batch_head != sh); |
| 4527 | spin_lock(&sh->stripe_lock); |
| 4528 | if (!sh->batch_head) { |
| 4529 | spin_unlock(&sh->stripe_lock); |
| 4530 | return 0; |
| 4531 | } |
| 4532 | |
| 4533 | /* |
| 4534 | * this stripe could be added to a batch list before we check |
| 4535 | * BATCH_READY; skip it
| 4536 | */ |
| 4537 | if (sh->batch_head != sh) { |
| 4538 | spin_unlock(&sh->stripe_lock); |
| 4539 | return 1; |
| 4540 | } |
| 4541 | spin_lock(&sh->batch_lock); |
| 4542 | list_for_each_entry(tmp, &sh->batch_list, batch_list) |
| 4543 | clear_bit(STRIPE_BATCH_READY, &tmp->state); |
| 4544 | spin_unlock(&sh->batch_lock); |
| 4545 | spin_unlock(&sh->stripe_lock); |
| 4546 | |
| 4547 | /* |
| 4548 | * BATCH_READY is cleared, no new stripes can be added. |
| 4549 | * batch_list can be accessed without lock |
| 4550 | */ |
| 4551 | return 0; |
| 4552 | } |
| 4553 | |
| 4554 | static void break_stripe_batch_list(struct stripe_head *head_sh, |
| 4555 | unsigned long handle_flags) |
| 4556 | { |
| 4557 | struct stripe_head *sh, *next; |
| 4558 | int i; |
| 4559 | int do_wakeup = 0; |
| 4560 | |
| 4561 | list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { |
| 4562 | |
| 4563 | list_del_init(&sh->batch_list); |
| 4564 | |
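| | /* a stripe sitting in a batch is not expected to have any of these
| | * transient state bits set; warn if it does
| | */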
| 4565 | WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | |
| 4566 | (1 << STRIPE_SYNCING) | |
| 4567 | (1 << STRIPE_REPLACED) | |
| 4568 | (1 << STRIPE_DELAYED) | |
| 4569 | (1 << STRIPE_BIT_DELAY) | |
| 4570 | (1 << STRIPE_FULL_WRITE) | |
| 4571 | (1 << STRIPE_BIOFILL_RUN) | |
| 4572 | (1 << STRIPE_COMPUTE_RUN) | |
| 4573 | (1 << STRIPE_OPS_REQ_PENDING) | |
| 4574 | (1 << STRIPE_DISCARD) | |
| 4575 | (1 << STRIPE_BATCH_READY) | |
| 4576 | (1 << STRIPE_BATCH_ERR) | |
| 4577 | (1 << STRIPE_BITMAP_PENDING)), |
| 4578 | "stripe state: %lx\n", sh->state); |
| 4579 | WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | |
| 4580 | (1 << STRIPE_REPLACED)), |
| 4581 | "head stripe state: %lx\n", head_sh->state); |
| 4582 | |
| 4583 | set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | |
| 4584 | (1 << STRIPE_PREREAD_ACTIVE) | |
| 4585 | (1 << STRIPE_DEGRADED)), |
| 4586 | head_sh->state & (1 << STRIPE_INSYNC)); |
| 4587 | |
| 4588 | sh->check_state = head_sh->check_state; |
| 4589 | sh->reconstruct_state = head_sh->reconstruct_state; |
| 4590 | for (i = 0; i < sh->disks; i++) { |
| 4591 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 4592 | do_wakeup = 1; |
| 4593 | sh->dev[i].flags = head_sh->dev[i].flags & |
| 4594 | (~((1 << R5_WriteError) | (1 << R5_Overlap))); |
| 4595 | } |
| 4596 | spin_lock_irq(&sh->stripe_lock); |
| 4597 | sh->batch_head = NULL; |
| 4598 | spin_unlock_irq(&sh->stripe_lock); |
| 4599 | if (handle_flags == 0 || |
| 4600 | sh->state & handle_flags) |
| 4601 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4602 | raid5_release_stripe(sh); |
| 4603 | } |
| 4604 | spin_lock_irq(&head_sh->stripe_lock); |
| 4605 | head_sh->batch_head = NULL; |
| 4606 | spin_unlock_irq(&head_sh->stripe_lock); |
| 4607 | for (i = 0; i < head_sh->disks; i++) |
| 4608 | if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) |
| 4609 | do_wakeup = 1; |
| 4610 | if (head_sh->state & handle_flags) |
| 4611 | set_bit(STRIPE_HANDLE, &head_sh->state); |
| 4612 | |
| 4613 | if (do_wakeup) |
| 4614 | wake_up(&head_sh->raid_conf->wait_for_overlap); |
| 4615 | } |
| 4616 | |
| 4617 | static void handle_stripe(struct stripe_head *sh) |
| 4618 | { |
| 4619 | struct stripe_head_state s; |
| 4620 | struct r5conf *conf = sh->raid_conf; |
| 4621 | int i; |
| 4622 | int prexor; |
| 4623 | int disks = sh->disks; |
| 4624 | struct r5dev *pdev, *qdev; |
| 4625 | |
| 4626 | clear_bit(STRIPE_HANDLE, &sh->state); |
| 4627 | if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { |
| 4628 | /* already being handled, ensure it gets handled |
| 4629 | * again when current action finishes */ |
| 4630 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4631 | return; |
| 4632 | } |
| 4633 | |
if (clear_batch_ready(sh)) {
| 4635 | clear_bit_unlock(STRIPE_ACTIVE, &sh->state); |
| 4636 | return; |
| 4637 | } |
| 4638 | |
| 4639 | if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) |
| 4640 | break_stripe_batch_list(sh, 0); |
| 4641 | |
| 4642 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { |
| 4643 | spin_lock(&sh->stripe_lock); |
| 4644 | /* Cannot process 'sync' concurrently with 'discard' */ |
| 4645 | if (!test_bit(STRIPE_DISCARD, &sh->state) && |
| 4646 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { |
| 4647 | set_bit(STRIPE_SYNCING, &sh->state); |
| 4648 | clear_bit(STRIPE_INSYNC, &sh->state); |
| 4649 | clear_bit(STRIPE_REPLACED, &sh->state); |
| 4650 | } |
| 4651 | spin_unlock(&sh->stripe_lock); |
| 4652 | } |
| 4653 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 4654 | |
| 4655 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " |
| 4656 | "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", |
| 4657 | (unsigned long long)sh->sector, sh->state, |
| 4658 | atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, |
| 4659 | sh->check_state, sh->reconstruct_state); |
| 4660 | |
| 4661 | analyse_stripe(sh, &s); |
| 4662 | |
| 4663 | if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) |
| 4664 | goto finish; |
| 4665 | |
| 4666 | if (s.handle_bad_blocks || |
| 4667 | test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { |
| 4668 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4669 | goto finish; |
| 4670 | } |
| 4671 | |
| 4672 | if (unlikely(s.blocked_rdev)) { |
| 4673 | if (s.syncing || s.expanding || s.expanded || |
| 4674 | s.replacing || s.to_write || s.written) { |
| 4675 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4676 | goto finish; |
| 4677 | } |
| 4678 | /* There is nothing for the blocked_rdev to block */ |
| 4679 | rdev_dec_pending(s.blocked_rdev, conf->mddev); |
| 4680 | s.blocked_rdev = NULL; |
| 4681 | } |
| 4682 | |
| 4683 | if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { |
| 4684 | set_bit(STRIPE_OP_BIOFILL, &s.ops_request); |
| 4685 | set_bit(STRIPE_BIOFILL_RUN, &sh->state); |
| 4686 | } |
| 4687 | |
| 4688 | pr_debug("locked=%d uptodate=%d to_read=%d" |
| 4689 | " to_write=%d failed=%d failed_num=%d,%d\n", |
| 4690 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
| 4691 | s.failed_num[0], s.failed_num[1]); |
| 4692 | /* check if the array has lost more than max_degraded devices and, |
| 4693 | * if so, some requests might need to be failed. |
| 4694 | */ |
| 4695 | if (s.failed > conf->max_degraded || s.log_failed) { |
| 4696 | sh->check_state = 0; |
| 4697 | sh->reconstruct_state = 0; |
| 4698 | break_stripe_batch_list(sh, 0); |
| 4699 | if (s.to_read+s.to_write+s.written) |
| 4700 | handle_failed_stripe(conf, sh, &s, disks); |
| 4701 | if (s.syncing + s.replacing) |
| 4702 | handle_failed_sync(conf, sh, &s); |
| 4703 | } |
| 4704 | |
| 4705 | /* Now we check to see if any write operations have recently |
| 4706 | * completed |
| 4707 | */ |
| 4708 | prexor = 0; |
| 4709 | if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) |
| 4710 | prexor = 1; |
| 4711 | if (sh->reconstruct_state == reconstruct_state_drain_result || |
| 4712 | sh->reconstruct_state == reconstruct_state_prexor_drain_result) { |
| 4713 | sh->reconstruct_state = reconstruct_state_idle; |
| 4714 | |
| 4715 | /* All the 'written' buffers and the parity block are ready to |
| 4716 | * be written back to disk |
| 4717 | */ |
| 4718 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && |
| 4719 | !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); |
| 4720 | BUG_ON(sh->qd_idx >= 0 && |
| 4721 | !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && |
| 4722 | !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); |
| 4723 | for (i = disks; i--; ) { |
| 4724 | struct r5dev *dev = &sh->dev[i]; |
| 4725 | if (test_bit(R5_LOCKED, &dev->flags) && |
| 4726 | (i == sh->pd_idx || i == sh->qd_idx || |
| 4727 | dev->written || test_bit(R5_InJournal, |
| 4728 | &dev->flags))) { |
| 4729 | pr_debug("Writing block %d\n", i); |
| 4730 | set_bit(R5_Wantwrite, &dev->flags); |
| 4731 | if (prexor) |
| 4732 | continue; |
| 4733 | if (s.failed > 1) |
| 4734 | continue; |
| 4735 | if (!test_bit(R5_Insync, &dev->flags) || |
| 4736 | ((i == sh->pd_idx || i == sh->qd_idx) && |
| 4737 | s.failed == 0)) |
| 4738 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4739 | } |
| 4740 | } |
| 4741 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 4742 | s.dec_preread_active = 1; |
| 4743 | } |
| 4744 | |
| 4745 | /* |
| 4746 | * might be able to return some write requests if the parity blocks |
| 4747 | * are safe, or on a failed drive |
| 4748 | */ |
| 4749 | pdev = &sh->dev[sh->pd_idx]; |
| 4750 | s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) |
| 4751 | || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); |
| 4752 | qdev = &sh->dev[sh->qd_idx]; |
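/* For RAID4/5 (conf->level < 6) there is no Q device, so q_failed is set
 * here and the P-device checks alone decide below whether written blocks
 * can be returned.
 */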
| 4753 | s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) |
| 4754 | || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) |
| 4755 | || conf->level < 6; |
| 4756 | |
| 4757 | if (s.written && |
| 4758 | (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) |
| 4759 | && !test_bit(R5_LOCKED, &pdev->flags) |
| 4760 | && (test_bit(R5_UPTODATE, &pdev->flags) || |
| 4761 | test_bit(R5_Discard, &pdev->flags))))) && |
| 4762 | (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) |
| 4763 | && !test_bit(R5_LOCKED, &qdev->flags) |
| 4764 | && (test_bit(R5_UPTODATE, &qdev->flags) || |
| 4765 | test_bit(R5_Discard, &qdev->flags)))))) |
| 4766 | handle_stripe_clean_event(conf, sh, disks); |
| 4767 | |
| 4768 | if (s.just_cached) |
| 4769 | r5c_handle_cached_data_endio(conf, sh, disks); |
| 4770 | log_stripe_write_finished(sh); |
| 4771 | |
| 4772 | /* Now we might consider reading some blocks, either to check/generate |
| 4773 | * parity, or to satisfy requests |
| 4774 | * or to load a block that is being partially written. |
| 4775 | */ |
| 4776 | if (s.to_read || s.non_overwrite |
| 4777 | || (conf->level == 6 && s.to_write && s.failed) |
| 4778 | || (s.syncing && (s.uptodate + s.compute < disks)) |
| 4779 | || s.replacing |
| 4780 | || s.expanding) |
| 4781 | handle_stripe_fill(sh, &s, disks); |
| 4782 | |
| 4783 | /* |
| 4784 | * When the stripe finishes full journal write cycle (write to journal |
| 4785 | * and raid disk), this is the clean up procedure so it is ready for |
| 4786 | * next operation. |
| 4787 | */ |
| 4788 | r5c_finish_stripe_write_out(conf, sh, &s); |
| 4789 | |
| 4790 | /* |
| 4791 | * Now to consider new write requests, cache write back and what else, |
| 4792 | * if anything should be read. We do not handle new writes when: |
| 4793 | * 1/ A 'write' operation (copy+xor) is already in flight. |
| 4794 | * 2/ A 'check' operation is in flight, as it may clobber the parity |
| 4795 | * block. |
| 4796 | * 3/ A r5c cache log write is in flight. |
| 4797 | */ |
| 4798 | |
| 4799 | if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { |
| 4800 | if (!r5c_is_writeback(conf->log)) { |
| 4801 | if (s.to_write) |
| 4802 | handle_stripe_dirtying(conf, sh, &s, disks); |
| 4803 | } else { /* write back cache */ |
| 4804 | int ret = 0; |
| 4805 | |
| 4806 | /* First, try handle writes in caching phase */ |
| 4807 | if (s.to_write) |
| 4808 | ret = r5c_try_caching_write(conf, sh, &s, |
| 4809 | disks); |
| 4810 | /* |
| 4811 | * If caching phase failed: ret == -EAGAIN |
| 4812 | * OR |
| 4813 | * stripe under reclaim: !caching && injournal |
| 4814 | * |
| 4815 | * fall back to handle_stripe_dirtying() |
| 4816 | */ |
| 4817 | if (ret == -EAGAIN || |
| 4818 | /* stripe under reclaim: !caching && injournal */ |
| 4819 | (!test_bit(STRIPE_R5C_CACHING, &sh->state) && |
| 4820 | s.injournal > 0)) { |
| 4821 | ret = handle_stripe_dirtying(conf, sh, &s, |
| 4822 | disks); |
| 4823 | if (ret == -EAGAIN) |
| 4824 | goto finish; |
| 4825 | } |
| 4826 | } |
| 4827 | } |
| 4828 | |
| 4829 | /* maybe we need to check and possibly fix the parity for this stripe |
| 4830 | * Any reads will already have been scheduled, so we just see if enough |
| 4831 | * data is available. The parity check is held off while parity |
| 4832 | * dependent operations are in flight. |
| 4833 | */ |
| 4834 | if (sh->check_state || |
| 4835 | (s.syncing && s.locked == 0 && |
| 4836 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && |
| 4837 | !test_bit(STRIPE_INSYNC, &sh->state))) { |
| 4838 | if (conf->level == 6) |
| 4839 | handle_parity_checks6(conf, sh, &s, disks); |
| 4840 | else |
| 4841 | handle_parity_checks5(conf, sh, &s, disks); |
| 4842 | } |
| 4843 | |
| 4844 | if ((s.replacing || s.syncing) && s.locked == 0 |
| 4845 | && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) |
| 4846 | && !test_bit(STRIPE_REPLACED, &sh->state)) { |
| 4847 | /* Write out to replacement devices where possible */ |
| 4848 | for (i = 0; i < conf->raid_disks; i++) |
| 4849 | if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { |
| 4850 | WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
| 4851 | set_bit(R5_WantReplace, &sh->dev[i].flags); |
| 4852 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 4853 | s.locked++; |
| 4854 | } |
| 4855 | if (s.replacing) |
| 4856 | set_bit(STRIPE_INSYNC, &sh->state); |
| 4857 | set_bit(STRIPE_REPLACED, &sh->state); |
| 4858 | } |
| 4859 | if ((s.syncing || s.replacing) && s.locked == 0 && |
| 4860 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && |
| 4861 | test_bit(STRIPE_INSYNC, &sh->state)) { |
| 4862 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 4863 | clear_bit(STRIPE_SYNCING, &sh->state); |
| 4864 | if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) |
| 4865 | wake_up(&conf->wait_for_overlap); |
| 4866 | } |
| 4867 | |
/* If the failed drives only have a ReadError, then we might need
| 4869 | * to progress the repair/check process |
| 4870 | */ |
| 4871 | if (s.failed <= conf->max_degraded && !conf->mddev->ro) |
| 4872 | for (i = 0; i < s.failed; i++) { |
| 4873 | struct r5dev *dev = &sh->dev[s.failed_num[i]]; |
| 4874 | if (test_bit(R5_ReadError, &dev->flags) |
| 4875 | && !test_bit(R5_LOCKED, &dev->flags) |
| 4876 | && test_bit(R5_UPTODATE, &dev->flags) |
| 4877 | ) { |
| 4878 | if (!test_bit(R5_ReWrite, &dev->flags)) { |
| 4879 | set_bit(R5_Wantwrite, &dev->flags); |
| 4880 | set_bit(R5_ReWrite, &dev->flags); |
| 4881 | set_bit(R5_LOCKED, &dev->flags); |
| 4882 | s.locked++; |
| 4883 | } else { |
| 4884 | /* let's read it back */ |
| 4885 | set_bit(R5_Wantread, &dev->flags); |
| 4886 | set_bit(R5_LOCKED, &dev->flags); |
| 4887 | s.locked++; |
| 4888 | } |
| 4889 | } |
| 4890 | } |
| 4891 | |
| 4892 | /* Finish reconstruct operations initiated by the expansion process */ |
| 4893 | if (sh->reconstruct_state == reconstruct_state_result) { |
| 4894 | struct stripe_head *sh_src |
| 4895 | = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); |
| 4896 | if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { |
| 4897 | /* sh cannot be written until sh_src has been read. |
| 4898 | * so arrange for sh to be delayed a little |
| 4899 | */ |
| 4900 | set_bit(STRIPE_DELAYED, &sh->state); |
| 4901 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4902 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, |
| 4903 | &sh_src->state)) |
| 4904 | atomic_inc(&conf->preread_active_stripes); |
| 4905 | raid5_release_stripe(sh_src); |
| 4906 | goto finish; |
| 4907 | } |
| 4908 | if (sh_src) |
| 4909 | raid5_release_stripe(sh_src); |
| 4910 | |
| 4911 | sh->reconstruct_state = reconstruct_state_idle; |
| 4912 | clear_bit(STRIPE_EXPANDING, &sh->state); |
| 4913 | for (i = conf->raid_disks; i--; ) { |
| 4914 | set_bit(R5_Wantwrite, &sh->dev[i].flags); |
| 4915 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 4916 | s.locked++; |
| 4917 | } |
| 4918 | } |
| 4919 | |
| 4920 | if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && |
| 4921 | !sh->reconstruct_state) { |
| 4922 | /* Need to write out all blocks after computing parity */ |
| 4923 | sh->disks = conf->raid_disks; |
| 4924 | stripe_set_idx(sh->sector, conf, 0, sh); |
| 4925 | schedule_reconstruction(sh, &s, 1, 1); |
| 4926 | } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { |
| 4927 | clear_bit(STRIPE_EXPAND_READY, &sh->state); |
| 4928 | atomic_dec(&conf->reshape_stripes); |
| 4929 | wake_up(&conf->wait_for_overlap); |
| 4930 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 4931 | } |
| 4932 | |
| 4933 | if (s.expanding && s.locked == 0 && |
| 4934 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) |
| 4935 | handle_stripe_expansion(conf, sh); |
| 4936 | |
| 4937 | finish: |
| 4938 | /* wait for this device to become unblocked */ |
| 4939 | if (unlikely(s.blocked_rdev)) { |
| 4940 | if (conf->mddev->external) |
| 4941 | md_wait_for_blocked_rdev(s.blocked_rdev, |
| 4942 | conf->mddev); |
| 4943 | else |
| 4944 | /* Internal metadata will immediately |
| 4945 | * be written by raid5d, so we don't |
| 4946 | * need to wait here. |
| 4947 | */ |
| 4948 | rdev_dec_pending(s.blocked_rdev, |
| 4949 | conf->mddev); |
| 4950 | } |
| 4951 | |
| 4952 | if (s.handle_bad_blocks) |
| 4953 | for (i = disks; i--; ) { |
| 4954 | struct md_rdev *rdev; |
| 4955 | struct r5dev *dev = &sh->dev[i]; |
| 4956 | if (test_and_clear_bit(R5_WriteError, &dev->flags)) { |
| 4957 | /* We own a safe reference to the rdev */ |
| 4958 | rdev = conf->disks[i].rdev; |
| 4959 | if (!rdev_set_badblocks(rdev, sh->sector, |
| 4960 | STRIPE_SECTORS, 0)) |
| 4961 | md_error(conf->mddev, rdev); |
| 4962 | rdev_dec_pending(rdev, conf->mddev); |
| 4963 | } |
| 4964 | if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { |
| 4965 | rdev = conf->disks[i].rdev; |
| 4966 | rdev_clear_badblocks(rdev, sh->sector, |
| 4967 | STRIPE_SECTORS, 0); |
| 4968 | rdev_dec_pending(rdev, conf->mddev); |
| 4969 | } |
| 4970 | if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { |
| 4971 | rdev = conf->disks[i].replacement; |
| 4972 | if (!rdev) |
/* rdev has been moved down */
| 4974 | rdev = conf->disks[i].rdev; |
| 4975 | rdev_clear_badblocks(rdev, sh->sector, |
| 4976 | STRIPE_SECTORS, 0); |
| 4977 | rdev_dec_pending(rdev, conf->mddev); |
| 4978 | } |
| 4979 | } |
| 4980 | |
| 4981 | if (s.ops_request) |
| 4982 | raid_run_ops(sh, s.ops_request); |
| 4983 | |
| 4984 | ops_run_io(sh, &s); |
| 4985 | |
| 4986 | if (s.dec_preread_active) { |
| 4987 | /* We delay this until after ops_run_io so that if make_request |
| 4988 | * is waiting on a flush, it won't continue until the writes |
| 4989 | * have actually been submitted. |
| 4990 | */ |
| 4991 | atomic_dec(&conf->preread_active_stripes); |
| 4992 | if (atomic_read(&conf->preread_active_stripes) < |
| 4993 | IO_THRESHOLD) |
| 4994 | md_wakeup_thread(conf->mddev->thread); |
| 4995 | } |
| 4996 | |
| 4997 | clear_bit_unlock(STRIPE_ACTIVE, &sh->state); |
| 4998 | } |
| 4999 | |
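/* Once preread activity has dropped below IO_THRESHOLD, move stripes that
 * were delayed (STRIPE_DELAYED) onto the hold list, marking each one
 * STRIPE_PREREAD_ACTIVE as it goes.
 */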
| 5000 | static void raid5_activate_delayed(struct r5conf *conf) |
| 5001 | { |
| 5002 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { |
| 5003 | while (!list_empty(&conf->delayed_list)) { |
| 5004 | struct list_head *l = conf->delayed_list.next; |
| 5005 | struct stripe_head *sh; |
| 5006 | sh = list_entry(l, struct stripe_head, lru); |
| 5007 | list_del_init(l); |
| 5008 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 5009 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 5010 | atomic_inc(&conf->preread_active_stripes); |
| 5011 | list_add_tail(&sh->lru, &conf->hold_list); |
| 5012 | raid5_wakeup_stripe_thread(sh); |
| 5013 | } |
| 5014 | } |
| 5015 | } |
| 5016 | |
| 5017 | static void activate_bit_delay(struct r5conf *conf, |
| 5018 | struct list_head *temp_inactive_list) |
| 5019 | { |
| 5020 | /* device_lock is held */ |
| 5021 | struct list_head head; |
| 5022 | list_add(&head, &conf->bitmap_list); |
| 5023 | list_del_init(&conf->bitmap_list); |
| 5024 | while (!list_empty(&head)) { |
| 5025 | struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); |
| 5026 | int hash; |
| 5027 | list_del_init(&sh->lru); |
| 5028 | atomic_inc(&sh->count); |
| 5029 | hash = sh->hash_lock_index; |
| 5030 | __release_stripe(conf, sh, &temp_inactive_list[hash]); |
| 5031 | } |
| 5032 | } |
| 5033 | |
| 5034 | static int raid5_congested(struct mddev *mddev, int bits) |
| 5035 | { |
| 5036 | struct r5conf *conf = mddev->private; |
| 5037 | |
| 5038 | /* No difference between reads and writes. Just check |
| 5039 | * how busy the stripe_cache is |
| 5040 | */ |
| 5041 | |
| 5042 | if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) |
| 5043 | return 1; |
| 5044 | |
| 5045 | /* Also checks whether there is pressure on r5cache log space */ |
| 5046 | if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) |
| 5047 | return 1; |
| 5048 | if (conf->quiesce) |
| 5049 | return 1; |
| 5050 | if (atomic_read(&conf->empty_inactive_list_nr)) |
| 5051 | return 1; |
| 5052 | |
| 5053 | return 0; |
| 5054 | } |
| 5055 | |
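/* Does this bio fit entirely within one chunk?  During a reshape the
 * smaller of the old and new chunk sizes is used, so a bio that passes
 * this test is aligned in both layouts.
 */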
| 5056 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) |
| 5057 | { |
| 5058 | struct r5conf *conf = mddev->private; |
| 5059 | sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); |
| 5060 | unsigned int chunk_sectors; |
| 5061 | unsigned int bio_sectors = bio_sectors(bio); |
| 5062 | |
| 5063 | chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); |
| 5064 | return chunk_sectors >= |
| 5065 | ((sector & (chunk_sectors - 1)) + bio_sectors); |
| 5066 | } |
| 5067 | |
| 5068 | /* |
* add bio to the retry LIFO (in O(1) ... we are in interrupt context)
| 5070 | * later sampled by raid5d. |
| 5071 | */ |
static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
| 5073 | { |
| 5074 | unsigned long flags; |
| 5075 | |
| 5076 | spin_lock_irqsave(&conf->device_lock, flags); |
| 5077 | |
| 5078 | bi->bi_next = conf->retry_read_aligned_list; |
| 5079 | conf->retry_read_aligned_list = bi; |
| 5080 | |
| 5081 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 5082 | md_wakeup_thread(conf->mddev->thread); |
| 5083 | } |
| 5084 | |
| 5085 | static struct bio *remove_bio_from_retry(struct r5conf *conf, |
| 5086 | unsigned int *offset) |
| 5087 | { |
| 5088 | struct bio *bi; |
| 5089 | |
| 5090 | bi = conf->retry_read_aligned; |
| 5091 | if (bi) { |
| 5092 | *offset = conf->retry_read_offset; |
| 5093 | conf->retry_read_aligned = NULL; |
| 5094 | return bi; |
| 5095 | } |
| 5096 | bi = conf->retry_read_aligned_list; |
if (bi) {
| 5098 | conf->retry_read_aligned_list = bi->bi_next; |
| 5099 | bi->bi_next = NULL; |
| 5100 | *offset = 0; |
| 5101 | } |
| 5102 | |
| 5103 | return bi; |
| 5104 | } |
| 5105 | |
| 5106 | /* |
| 5107 | * The "raid5_align_endio" should check if the read succeeded and if it |
| 5108 | * did, call bio_endio on the original bio (having bio_put the new bio |
| 5109 | * first). |
| 5110 | * If the read failed.. |
| 5111 | */ |
| 5112 | static void raid5_align_endio(struct bio *bi) |
| 5113 | { |
| 5114 | struct bio* raid_bi = bi->bi_private; |
| 5115 | struct mddev *mddev; |
| 5116 | struct r5conf *conf; |
| 5117 | struct md_rdev *rdev; |
| 5118 | int error = bi->bi_error; |
| 5119 | |
| 5120 | bio_put(bi); |
| 5121 | |
| 5122 | rdev = (void*)raid_bi->bi_next; |
| 5123 | raid_bi->bi_next = NULL; |
| 5124 | mddev = rdev->mddev; |
| 5125 | conf = mddev->private; |
| 5126 | |
| 5127 | rdev_dec_pending(rdev, conf->mddev); |
| 5128 | |
| 5129 | if (!error) { |
| 5130 | trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), |
| 5131 | raid_bi, 0); |
| 5132 | bio_endio(raid_bi); |
| 5133 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
| 5134 | wake_up(&conf->wait_for_quiescent); |
| 5135 | return; |
| 5136 | } |
| 5137 | |
| 5138 | pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); |
| 5139 | |
| 5140 | add_bio_to_retry(raid_bi, conf); |
| 5141 | } |
| 5142 | |
| 5143 | static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) |
| 5144 | { |
| 5145 | struct r5conf *conf = mddev->private; |
| 5146 | int dd_idx; |
| 5147 | struct bio* align_bi; |
| 5148 | struct md_rdev *rdev; |
| 5149 | sector_t end_sector; |
| 5150 | |
| 5151 | if (!in_chunk_boundary(mddev, raid_bio)) { |
| 5152 | pr_debug("%s: non aligned\n", __func__); |
| 5153 | return 0; |
| 5154 | } |
| 5155 | /* |
| 5156 | * use bio_clone_fast to make a copy of the bio |
| 5157 | */ |
| 5158 | align_bi = bio_clone_fast(raid_bio, GFP_NOIO, mddev->bio_set); |
| 5159 | if (!align_bi) |
| 5160 | return 0; |
| 5161 | /* |
| 5162 | * set bi_end_io to a new function, and set bi_private to the |
| 5163 | * original bio. |
| 5164 | */ |
| 5165 | align_bi->bi_end_io = raid5_align_endio; |
| 5166 | align_bi->bi_private = raid_bio; |
| 5167 | /* |
| 5168 | * compute position |
| 5169 | */ |
| 5170 | align_bi->bi_iter.bi_sector = |
| 5171 | raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, |
| 5172 | 0, &dd_idx, NULL); |
| 5173 | |
| 5174 | end_sector = bio_end_sector(align_bi); |
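/* Prefer reading from a fully recovered replacement device; otherwise
 * fall back to the original rdev, but only if it is not Faulty and is
 * either In_sync or recovered beyond the end of this read.
 */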
| 5175 | rcu_read_lock(); |
| 5176 | rdev = rcu_dereference(conf->disks[dd_idx].replacement); |
| 5177 | if (!rdev || test_bit(Faulty, &rdev->flags) || |
| 5178 | rdev->recovery_offset < end_sector) { |
| 5179 | rdev = rcu_dereference(conf->disks[dd_idx].rdev); |
| 5180 | if (rdev && |
| 5181 | (test_bit(Faulty, &rdev->flags) || |
| 5182 | !(test_bit(In_sync, &rdev->flags) || |
| 5183 | rdev->recovery_offset >= end_sector))) |
| 5184 | rdev = NULL; |
| 5185 | } |
| 5186 | |
| 5187 | if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { |
| 5188 | rcu_read_unlock(); |
| 5189 | bio_put(align_bi); |
| 5190 | return 0; |
| 5191 | } |
| 5192 | |
| 5193 | if (rdev) { |
| 5194 | sector_t first_bad; |
| 5195 | int bad_sectors; |
| 5196 | |
| 5197 | atomic_inc(&rdev->nr_pending); |
| 5198 | rcu_read_unlock(); |
| 5199 | raid_bio->bi_next = (void*)rdev; |
| 5200 | align_bi->bi_bdev = rdev->bdev; |
| 5201 | bio_clear_flag(align_bi, BIO_SEG_VALID); |
| 5202 | |
| 5203 | if (is_badblock(rdev, align_bi->bi_iter.bi_sector, |
| 5204 | bio_sectors(align_bi), |
| 5205 | &first_bad, &bad_sectors)) { |
| 5206 | bio_put(align_bi); |
| 5207 | rdev_dec_pending(rdev, mddev); |
| 5208 | return 0; |
| 5209 | } |
| 5210 | |
| 5211 | /* No reshape active, so we can trust rdev->data_offset */ |
| 5212 | align_bi->bi_iter.bi_sector += rdev->data_offset; |
| 5213 | |
| 5214 | spin_lock_irq(&conf->device_lock); |
| 5215 | wait_event_lock_irq(conf->wait_for_quiescent, |
| 5216 | conf->quiesce == 0, |
| 5217 | conf->device_lock); |
| 5218 | atomic_inc(&conf->active_aligned_reads); |
| 5219 | spin_unlock_irq(&conf->device_lock); |
| 5220 | |
| 5221 | if (mddev->gendisk) |
| 5222 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), |
| 5223 | align_bi, disk_devt(mddev->gendisk), |
| 5224 | raid_bio->bi_iter.bi_sector); |
| 5225 | generic_make_request(align_bi); |
| 5226 | return 1; |
| 5227 | } else { |
| 5228 | rcu_read_unlock(); |
| 5229 | bio_put(align_bi); |
| 5230 | return 0; |
| 5231 | } |
| 5232 | } |
| 5233 | |
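/* Split the read at chunk boundaries and try to submit each chunk-sized
 * piece as an aligned read.  Return the first piece that could not be
 * handled that way (the caller services it through the stripe cache), or
 * NULL once everything has been submitted.
 */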
| 5234 | static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) |
| 5235 | { |
| 5236 | struct bio *split; |
| 5237 | |
| 5238 | do { |
| 5239 | sector_t sector = raid_bio->bi_iter.bi_sector; |
| 5240 | unsigned chunk_sects = mddev->chunk_sectors; |
| 5241 | unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); |
| 5242 | |
| 5243 | if (sectors < bio_sectors(raid_bio)) { |
| 5244 | split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set); |
| 5245 | bio_chain(split, raid_bio); |
| 5246 | } else |
| 5247 | split = raid_bio; |
| 5248 | |
| 5249 | if (!raid5_read_one_chunk(mddev, split)) { |
| 5250 | if (split != raid_bio) |
| 5251 | generic_make_request(raid_bio); |
| 5252 | return split; |
| 5253 | } |
| 5254 | } while (split != raid_bio); |
| 5255 | |
| 5256 | return NULL; |
| 5257 | } |
| 5258 | |
| 5259 | /* __get_priority_stripe - get the next stripe to process |
| 5260 | * |
| 5261 | * Full stripe writes are allowed to pass preread active stripes up until |
| 5262 | * the bypass_threshold is exceeded. In general the bypass_count |
| 5263 | * increments when the handle_list is handled before the hold_list; however, it |
| 5264 | * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a |
| 5265 | * stripe with in flight i/o. The bypass_count will be reset when the |
| 5266 | * head of the hold_list has changed, i.e. the head was promoted to the |
| 5267 | * handle_list. |
| 5268 | */ |
| 5269 | static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) |
| 5270 | { |
| 5271 | struct stripe_head *sh, *tmp; |
| 5272 | struct list_head *handle_list = NULL; |
| 5273 | struct r5worker_group *wg; |
| 5274 | bool second_try = !r5c_is_writeback(conf->log); |
| 5275 | bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state); |
| 5276 | |
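/* With a write-back r5c journal we may scan both the loprio and the
 * normal handle lists, starting with loprio when log space is tight;
 * without a write-back journal a single pass is made.
 */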
| 5277 | again: |
| 5278 | wg = NULL; |
| 5279 | sh = NULL; |
| 5280 | if (conf->worker_cnt_per_group == 0) { |
| 5281 | handle_list = try_loprio ? &conf->loprio_list : |
| 5282 | &conf->handle_list; |
| 5283 | } else if (group != ANY_GROUP) { |
| 5284 | handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : |
| 5285 | &conf->worker_groups[group].handle_list; |
| 5286 | wg = &conf->worker_groups[group]; |
| 5287 | } else { |
| 5288 | int i; |
| 5289 | for (i = 0; i < conf->group_cnt; i++) { |
| 5290 | handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : |
| 5291 | &conf->worker_groups[i].handle_list; |
| 5292 | wg = &conf->worker_groups[i]; |
| 5293 | if (!list_empty(handle_list)) |
| 5294 | break; |
| 5295 | } |
| 5296 | } |
| 5297 | |
| 5298 | pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", |
| 5299 | __func__, |
| 5300 | list_empty(handle_list) ? "empty" : "busy", |
| 5301 | list_empty(&conf->hold_list) ? "empty" : "busy", |
| 5302 | atomic_read(&conf->pending_full_writes), conf->bypass_count); |
| 5303 | |
| 5304 | if (!list_empty(handle_list)) { |
| 5305 | sh = list_entry(handle_list->next, typeof(*sh), lru); |
| 5306 | |
| 5307 | if (list_empty(&conf->hold_list)) |
| 5308 | conf->bypass_count = 0; |
| 5309 | else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { |
| 5310 | if (conf->hold_list.next == conf->last_hold) |
| 5311 | conf->bypass_count++; |
| 5312 | else { |
| 5313 | conf->last_hold = conf->hold_list.next; |
| 5314 | conf->bypass_count -= conf->bypass_threshold; |
| 5315 | if (conf->bypass_count < 0) |
| 5316 | conf->bypass_count = 0; |
| 5317 | } |
| 5318 | } |
| 5319 | } else if (!list_empty(&conf->hold_list) && |
| 5320 | ((conf->bypass_threshold && |
| 5321 | conf->bypass_count > conf->bypass_threshold) || |
| 5322 | atomic_read(&conf->pending_full_writes) == 0)) { |
| 5323 | |
| 5324 | list_for_each_entry(tmp, &conf->hold_list, lru) { |
| 5325 | if (conf->worker_cnt_per_group == 0 || |
| 5326 | group == ANY_GROUP || |
| 5327 | !cpu_online(tmp->cpu) || |
| 5328 | cpu_to_group(tmp->cpu) == group) { |
| 5329 | sh = tmp; |
| 5330 | break; |
| 5331 | } |
| 5332 | } |
| 5333 | |
| 5334 | if (sh) { |
| 5335 | conf->bypass_count -= conf->bypass_threshold; |
| 5336 | if (conf->bypass_count < 0) |
| 5337 | conf->bypass_count = 0; |
| 5338 | } |
| 5339 | wg = NULL; |
| 5340 | } |
| 5341 | |
| 5342 | if (!sh) { |
| 5343 | if (second_try) |
| 5344 | return NULL; |
| 5345 | second_try = true; |
| 5346 | try_loprio = !try_loprio; |
| 5347 | goto again; |
| 5348 | } |
| 5349 | |
| 5350 | if (wg) { |
| 5351 | wg->stripes_cnt--; |
| 5352 | sh->group = NULL; |
| 5353 | } |
| 5354 | list_del_init(&sh->lru); |
| 5355 | BUG_ON(atomic_inc_return(&sh->count) != 1); |
| 5356 | return sh; |
| 5357 | } |
| 5358 | |
| 5359 | struct raid5_plug_cb { |
| 5360 | struct blk_plug_cb cb; |
| 5361 | struct list_head list; |
| 5362 | struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; |
| 5363 | }; |
| 5364 | |
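/* Callback run when the block-layer plug is released: drop every stripe
 * that release_stripe_plug() parked on this plug's list via
 * __release_stripe(), then flush the temporary per-hash inactive lists.
 */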
| 5365 | static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) |
| 5366 | { |
| 5367 | struct raid5_plug_cb *cb = container_of( |
| 5368 | blk_cb, struct raid5_plug_cb, cb); |
| 5369 | struct stripe_head *sh; |
| 5370 | struct mddev *mddev = cb->cb.data; |
| 5371 | struct r5conf *conf = mddev->private; |
| 5372 | int cnt = 0; |
| 5373 | int hash; |
| 5374 | |
| 5375 | if (cb->list.next && !list_empty(&cb->list)) { |
| 5376 | spin_lock_irq(&conf->device_lock); |
| 5377 | while (!list_empty(&cb->list)) { |
| 5378 | sh = list_first_entry(&cb->list, struct stripe_head, lru); |
| 5379 | list_del_init(&sh->lru); |
| 5380 | /* |
* avoid a race where release_stripe_plug() sees
* STRIPE_ON_UNPLUG_LIST clear but the stripe
* is still on our list
| 5384 | */ |
| 5385 | smp_mb__before_atomic(); |
| 5386 | clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); |
| 5387 | /* |
| 5388 | * STRIPE_ON_RELEASE_LIST could be set here. In that |
| 5389 | * case, the count is always > 1 here |
| 5390 | */ |
| 5391 | hash = sh->hash_lock_index; |
| 5392 | __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); |
| 5393 | cnt++; |
| 5394 | } |
| 5395 | spin_unlock_irq(&conf->device_lock); |
| 5396 | } |
| 5397 | release_inactive_stripe_list(conf, cb->temp_inactive_list, |
| 5398 | NR_STRIPE_HASH_LOCKS); |
| 5399 | if (mddev->queue) |
| 5400 | trace_block_unplug(mddev->queue, cnt, !from_schedule); |
| 5401 | kfree(cb); |
| 5402 | } |
| 5403 | |
| 5404 | static void release_stripe_plug(struct mddev *mddev, |
| 5405 | struct stripe_head *sh) |
| 5406 | { |
| 5407 | struct blk_plug_cb *blk_cb = blk_check_plugged( |
| 5408 | raid5_unplug, mddev, |
| 5409 | sizeof(struct raid5_plug_cb)); |
| 5410 | struct raid5_plug_cb *cb; |
| 5411 | |
| 5412 | if (!blk_cb) { |
| 5413 | raid5_release_stripe(sh); |
| 5414 | return; |
| 5415 | } |
| 5416 | |
| 5417 | cb = container_of(blk_cb, struct raid5_plug_cb, cb); |
| 5418 | |
| 5419 | if (cb->list.next == NULL) { |
| 5420 | int i; |
| 5421 | INIT_LIST_HEAD(&cb->list); |
| 5422 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) |
| 5423 | INIT_LIST_HEAD(cb->temp_inactive_list + i); |
| 5424 | } |
| 5425 | |
| 5426 | if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) |
| 5427 | list_add_tail(&sh->lru, &cb->list); |
| 5428 | else |
| 5429 | raid5_release_stripe(sh); |
| 5430 | } |
| 5431 | |
| 5432 | static void make_discard_request(struct mddev *mddev, struct bio *bi) |
| 5433 | { |
| 5434 | struct r5conf *conf = mddev->private; |
| 5435 | sector_t logical_sector, last_sector; |
| 5436 | struct stripe_head *sh; |
| 5437 | int stripe_sectors; |
| 5438 | |
| 5439 | if (mddev->reshape_position != MaxSector) |
| 5440 | /* Skip discard while reshape is happening */ |
| 5441 | return; |
| 5442 | |
| 5443 | logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
| 5444 | last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); |
| 5445 | |
| 5446 | bi->bi_next = NULL; |
| 5447 | md_write_start(mddev, bi); |
| 5448 | |
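/* Only whole stripes are discarded: round the start of the range up and
 * the end down to full-stripe boundaries, leaving any partial stripe at
 * either end untouched.
 */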
| 5449 | stripe_sectors = conf->chunk_sectors * |
| 5450 | (conf->raid_disks - conf->max_degraded); |
| 5451 | logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, |
| 5452 | stripe_sectors); |
| 5453 | sector_div(last_sector, stripe_sectors); |
| 5454 | |
| 5455 | logical_sector *= conf->chunk_sectors; |
| 5456 | last_sector *= conf->chunk_sectors; |
| 5457 | |
| 5458 | for (; logical_sector < last_sector; |
| 5459 | logical_sector += STRIPE_SECTORS) { |
| 5460 | DEFINE_WAIT(w); |
| 5461 | int d; |
| 5462 | again: |
| 5463 | sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); |
| 5464 | prepare_to_wait(&conf->wait_for_overlap, &w, |
| 5465 | TASK_UNINTERRUPTIBLE); |
| 5466 | set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); |
| 5467 | if (test_bit(STRIPE_SYNCING, &sh->state)) { |
| 5468 | raid5_release_stripe(sh); |
| 5469 | schedule(); |
| 5470 | goto again; |
| 5471 | } |
| 5472 | clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); |
| 5473 | spin_lock_irq(&sh->stripe_lock); |
| 5474 | for (d = 0; d < conf->raid_disks; d++) { |
| 5475 | if (d == sh->pd_idx || d == sh->qd_idx) |
| 5476 | continue; |
| 5477 | if (sh->dev[d].towrite || sh->dev[d].toread) { |
| 5478 | set_bit(R5_Overlap, &sh->dev[d].flags); |
| 5479 | spin_unlock_irq(&sh->stripe_lock); |
| 5480 | raid5_release_stripe(sh); |
| 5481 | schedule(); |
| 5482 | goto again; |
| 5483 | } |
| 5484 | } |
| 5485 | set_bit(STRIPE_DISCARD, &sh->state); |
| 5486 | finish_wait(&conf->wait_for_overlap, &w); |
| 5487 | sh->overwrite_disks = 0; |
| 5488 | for (d = 0; d < conf->raid_disks; d++) { |
| 5489 | if (d == sh->pd_idx || d == sh->qd_idx) |
| 5490 | continue; |
| 5491 | sh->dev[d].towrite = bi; |
| 5492 | set_bit(R5_OVERWRITE, &sh->dev[d].flags); |
| 5493 | bio_inc_remaining(bi); |
| 5494 | md_write_inc(mddev, bi); |
| 5495 | sh->overwrite_disks++; |
| 5496 | } |
| 5497 | spin_unlock_irq(&sh->stripe_lock); |
| 5498 | if (conf->mddev->bitmap) { |
| 5499 | for (d = 0; |
| 5500 | d < conf->raid_disks - conf->max_degraded; |
| 5501 | d++) |
| 5502 | bitmap_startwrite(mddev->bitmap, |
| 5503 | sh->sector, |
| 5504 | STRIPE_SECTORS, |
| 5505 | 0); |
| 5506 | sh->bm_seq = conf->seq_flush + 1; |
| 5507 | set_bit(STRIPE_BIT_DELAY, &sh->state); |
| 5508 | } |
| 5509 | |
| 5510 | set_bit(STRIPE_HANDLE, &sh->state); |
| 5511 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 5512 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 5513 | atomic_inc(&conf->preread_active_stripes); |
| 5514 | release_stripe_plug(mddev, sh); |
| 5515 | } |
| 5516 | |
| 5517 | md_write_end(mddev); |
| 5518 | bio_endio(bi); |
| 5519 | } |
| 5520 | |
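/* Main request entry point for the raid456 personality: flushes and
 * discards are handled specially, chunk-aligned reads take a fast path,
 * and everything else is attached, STRIPE_SECTORS at a time, to the
 * matching stripe_head for handling.
 */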
static void raid5_make_request(struct mddev *mddev, struct bio *bi)
| 5522 | { |
| 5523 | struct r5conf *conf = mddev->private; |
| 5524 | int dd_idx; |
| 5525 | sector_t new_sector; |
| 5526 | sector_t logical_sector, last_sector; |
| 5527 | struct stripe_head *sh; |
| 5528 | const int rw = bio_data_dir(bi); |
| 5529 | DEFINE_WAIT(w); |
| 5530 | bool do_prepare; |
| 5531 | bool do_flush = false; |
| 5532 | |
| 5533 | if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { |
| 5534 | int ret = r5l_handle_flush_request(conf->log, bi); |
| 5535 | |
| 5536 | if (ret == 0) |
| 5537 | return; |
| 5538 | if (ret == -ENODEV) { |
| 5539 | md_flush_request(mddev, bi); |
| 5540 | return; |
| 5541 | } |
| 5542 | /* ret == -EAGAIN, fallback */ |
| 5543 | /* |
| 5544 | * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH, |
* we need to flush the journal device
| 5546 | */ |
| 5547 | do_flush = bi->bi_opf & REQ_PREFLUSH; |
| 5548 | } |
| 5549 | |
| 5550 | /* |
| 5551 | * If array is degraded, better not do chunk aligned read because |
| 5552 | * later we might have to read it again in order to reconstruct |
| 5553 | * data on failed drives. |
| 5554 | */ |
| 5555 | if (rw == READ && mddev->degraded == 0 && |
| 5556 | mddev->reshape_position == MaxSector) { |
| 5557 | bi = chunk_aligned_read(mddev, bi); |
| 5558 | if (!bi) |
| 5559 | return; |
| 5560 | } |
| 5561 | |
| 5562 | if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { |
| 5563 | make_discard_request(mddev, bi); |
| 5564 | return; |
| 5565 | } |
| 5566 | |
| 5567 | logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
| 5568 | last_sector = bio_end_sector(bi); |
| 5569 | bi->bi_next = NULL; |
| 5570 | md_write_start(mddev, bi); |
| 5571 | |
| 5572 | prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); |
for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
| 5574 | int previous; |
| 5575 | int seq; |
| 5576 | |
| 5577 | do_prepare = false; |
| 5578 | retry: |
| 5579 | seq = read_seqcount_begin(&conf->gen_lock); |
| 5580 | previous = 0; |
| 5581 | if (do_prepare) |
| 5582 | prepare_to_wait(&conf->wait_for_overlap, &w, |
| 5583 | TASK_UNINTERRUPTIBLE); |
| 5584 | if (unlikely(conf->reshape_progress != MaxSector)) { |
| 5585 | /* spinlock is needed as reshape_progress may be |
| 5586 | * 64bit on a 32bit platform, and so it might be |
| 5587 | * possible to see a half-updated value |
| 5588 | * Of course reshape_progress could change after |
| 5589 | * the lock is dropped, so once we get a reference |
| 5590 | * to the stripe that we think it is, we will have |
| 5591 | * to check again. |
| 5592 | */ |
| 5593 | spin_lock_irq(&conf->device_lock); |
| 5594 | if (mddev->reshape_backwards |
| 5595 | ? logical_sector < conf->reshape_progress |
| 5596 | : logical_sector >= conf->reshape_progress) { |
| 5597 | previous = 1; |
| 5598 | } else { |
| 5599 | if (mddev->reshape_backwards |
| 5600 | ? logical_sector < conf->reshape_safe |
| 5601 | : logical_sector >= conf->reshape_safe) { |
| 5602 | spin_unlock_irq(&conf->device_lock); |
| 5603 | schedule(); |
| 5604 | do_prepare = true; |
| 5605 | goto retry; |
| 5606 | } |
| 5607 | } |
| 5608 | spin_unlock_irq(&conf->device_lock); |
| 5609 | } |
| 5610 | |
| 5611 | new_sector = raid5_compute_sector(conf, logical_sector, |
| 5612 | previous, |
| 5613 | &dd_idx, NULL); |
| 5614 | pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n", |
| 5615 | (unsigned long long)new_sector, |
| 5616 | (unsigned long long)logical_sector); |
| 5617 | |
| 5618 | sh = raid5_get_active_stripe(conf, new_sector, previous, |
| 5619 | (bi->bi_opf & REQ_RAHEAD), 0); |
| 5620 | if (sh) { |
| 5621 | if (unlikely(previous)) { |
| 5622 | /* expansion might have moved on while waiting for a |
| 5623 | * stripe, so we must do the range check again. |
| 5624 | * Expansion could still move past after this |
| 5625 | * test, but as we are holding a reference to |
| 5626 | * 'sh', we know that if that happens, |
| 5627 | * STRIPE_EXPANDING will get set and the expansion |
| 5628 | * won't proceed until we finish with the stripe. |
| 5629 | */ |
| 5630 | int must_retry = 0; |
| 5631 | spin_lock_irq(&conf->device_lock); |
| 5632 | if (mddev->reshape_backwards |
| 5633 | ? logical_sector >= conf->reshape_progress |
| 5634 | : logical_sector < conf->reshape_progress) |
| 5635 | /* mismatch, need to try again */ |
| 5636 | must_retry = 1; |
| 5637 | spin_unlock_irq(&conf->device_lock); |
| 5638 | if (must_retry) { |
| 5639 | raid5_release_stripe(sh); |
| 5640 | schedule(); |
| 5641 | do_prepare = true; |
| 5642 | goto retry; |
| 5643 | } |
| 5644 | } |
| 5645 | if (read_seqcount_retry(&conf->gen_lock, seq)) { |
| 5646 | /* Might have got the wrong stripe_head |
| 5647 | * by accident |
| 5648 | */ |
| 5649 | raid5_release_stripe(sh); |
| 5650 | goto retry; |
| 5651 | } |
| 5652 | |
| 5653 | if (rw == WRITE && |
| 5654 | logical_sector >= mddev->suspend_lo && |
| 5655 | logical_sector < mddev->suspend_hi) { |
| 5656 | raid5_release_stripe(sh); |
| 5657 | /* As the suspend_* range is controlled by |
| 5658 | * userspace, we want an interruptible |
| 5659 | * wait. |
| 5660 | */ |
| 5661 | flush_signals(current); |
| 5662 | prepare_to_wait(&conf->wait_for_overlap, |
| 5663 | &w, TASK_INTERRUPTIBLE); |
| 5664 | if (logical_sector >= mddev->suspend_lo && |
| 5665 | logical_sector < mddev->suspend_hi) { |
| 5666 | schedule(); |
| 5667 | do_prepare = true; |
| 5668 | } |
| 5669 | goto retry; |
| 5670 | } |
| 5671 | |
| 5672 | if (test_bit(STRIPE_EXPANDING, &sh->state) || |
| 5673 | !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { |
| 5674 | /* Stripe is busy expanding or |
| 5675 | * add failed due to overlap. Flush everything |
| 5676 | * and wait a while |
| 5677 | */ |
| 5678 | md_wakeup_thread(mddev->thread); |
| 5679 | raid5_release_stripe(sh); |
| 5680 | schedule(); |
| 5681 | do_prepare = true; |
| 5682 | goto retry; |
| 5683 | } |
| 5684 | if (do_flush) { |
| 5685 | set_bit(STRIPE_R5C_PREFLUSH, &sh->state); |
| 5686 | /* we only need flush for one stripe */ |
| 5687 | do_flush = false; |
| 5688 | } |
| 5689 | |
| 5690 | set_bit(STRIPE_HANDLE, &sh->state); |
| 5691 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 5692 | if ((!sh->batch_head || sh == sh->batch_head) && |
| 5693 | (bi->bi_opf & REQ_SYNC) && |
| 5694 | !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 5695 | atomic_inc(&conf->preread_active_stripes); |
| 5696 | release_stripe_plug(mddev, sh); |
| 5697 | } else { |
/* cannot get stripe for read-ahead, just give up */
| 5699 | bi->bi_error = -EIO; |
| 5700 | break; |
| 5701 | } |
| 5702 | } |
| 5703 | finish_wait(&conf->wait_for_overlap, &w); |
| 5704 | |
| 5705 | if (rw == WRITE) |
| 5706 | md_write_end(mddev); |
| 5707 | bio_endio(bi); |
| 5708 | } |
| 5709 | |
| 5710 | static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); |
| 5711 | |
| 5712 | static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) |
| 5713 | { |
| 5714 | /* reshaping is quite different to recovery/resync so it is |
| 5715 | * handled quite separately ... here. |
| 5716 | * |
| 5717 | * On each call to sync_request, we gather one chunk worth of |
| 5718 | * destination stripes and flag them as expanding. |
| 5719 | * Then we find all the source stripes and request reads. |
| 5720 | * As the reads complete, handle_stripe will copy the data |
| 5721 | * into the destination stripe and release that stripe. |
| 5722 | */ |
| 5723 | struct r5conf *conf = mddev->private; |
| 5724 | struct stripe_head *sh; |
| 5725 | sector_t first_sector, last_sector; |
| 5726 | int raid_disks = conf->previous_raid_disks; |
| 5727 | int data_disks = raid_disks - conf->max_degraded; |
| 5728 | int new_data_disks = conf->raid_disks - conf->max_degraded; |
| 5729 | int i; |
| 5730 | int dd_idx; |
| 5731 | sector_t writepos, readpos, safepos; |
| 5732 | sector_t stripe_addr; |
| 5733 | int reshape_sectors; |
| 5734 | struct list_head stripes; |
| 5735 | sector_t retn; |
| 5736 | |
| 5737 | if (sector_nr == 0) { |
| 5738 | /* If restarting in the middle, skip the initial sectors */ |
| 5739 | if (mddev->reshape_backwards && |
| 5740 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { |
| 5741 | sector_nr = raid5_size(mddev, 0, 0) |
| 5742 | - conf->reshape_progress; |
| 5743 | } else if (mddev->reshape_backwards && |
| 5744 | conf->reshape_progress == MaxSector) { |
| 5745 | /* shouldn't happen, but just in case, finish up.*/ |
| 5746 | sector_nr = MaxSector; |
| 5747 | } else if (!mddev->reshape_backwards && |
| 5748 | conf->reshape_progress > 0) |
| 5749 | sector_nr = conf->reshape_progress; |
| 5750 | sector_div(sector_nr, new_data_disks); |
| 5751 | if (sector_nr) { |
| 5752 | mddev->curr_resync_completed = sector_nr; |
| 5753 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 5754 | *skipped = 1; |
| 5755 | retn = sector_nr; |
| 5756 | goto finish; |
| 5757 | } |
| 5758 | } |
| 5759 | |
| 5760 | /* We need to process a full chunk at a time. |
| 5761 | * If old and new chunk sizes differ, we need to process the |
* larger of the two.
| 5763 | */ |
| 5764 | |
| 5765 | reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); |
| 5766 | |
| 5767 | /* We update the metadata at least every 10 seconds, or when |
| 5768 | * the data about to be copied would over-write the source of |
* the data at the front of the range, i.e. when the position one
* new_stripe along from reshape_progress, mapped with the new layout,
* lands after the position that reshape_safe maps to in the old layout.
| 5772 | */ |
| 5773 | writepos = conf->reshape_progress; |
| 5774 | sector_div(writepos, new_data_disks); |
| 5775 | readpos = conf->reshape_progress; |
| 5776 | sector_div(readpos, data_disks); |
| 5777 | safepos = conf->reshape_safe; |
| 5778 | sector_div(safepos, data_disks); |
| 5779 | if (mddev->reshape_backwards) { |
| 5780 | BUG_ON(writepos < reshape_sectors); |
| 5781 | writepos -= reshape_sectors; |
| 5782 | readpos += reshape_sectors; |
| 5783 | safepos += reshape_sectors; |
| 5784 | } else { |
| 5785 | writepos += reshape_sectors; |
| 5786 | /* readpos and safepos are worst-case calculations. |
| 5787 | * A negative number is overly pessimistic, and causes |
| 5788 | * obvious problems for unsigned storage. So clip to 0. |
| 5789 | */ |
| 5790 | readpos -= min_t(sector_t, reshape_sectors, readpos); |
| 5791 | safepos -= min_t(sector_t, reshape_sectors, safepos); |
| 5792 | } |
| 5793 | |
/* Having calculated 'writepos', possibly use it
* to set 'stripe_addr', which is where we will write to.
| 5796 | */ |
| 5797 | if (mddev->reshape_backwards) { |
| 5798 | BUG_ON(conf->reshape_progress == 0); |
| 5799 | stripe_addr = writepos; |
| 5800 | BUG_ON((mddev->dev_sectors & |
| 5801 | ~((sector_t)reshape_sectors - 1)) |
| 5802 | - reshape_sectors - stripe_addr |
| 5803 | != sector_nr); |
| 5804 | } else { |
| 5805 | BUG_ON(writepos != sector_nr + reshape_sectors); |
| 5806 | stripe_addr = sector_nr; |
| 5807 | } |
| 5808 | |
| 5809 | /* 'writepos' is the most advanced device address we might write. |
| 5810 | * 'readpos' is the least advanced device address we might read. |
| 5811 | * 'safepos' is the least address recorded in the metadata as having |
| 5812 | * been reshaped. |
| 5813 | * If there is a min_offset_diff, these are adjusted either by |
| 5814 | * increasing the safepos/readpos if diff is negative, or |
| 5815 | * increasing writepos if diff is positive. |
| 5816 | * If 'readpos' is then behind 'writepos', there is no way that we can |
| 5817 | * ensure safety in the face of a crash - that must be done by userspace |
| 5818 | * making a backup of the data. So in that case there is no particular |
| 5819 | * rush to update metadata. |
| 5820 | * Otherwise if 'safepos' is behind 'writepos', then we really need to |
| 5821 | * update the metadata to advance 'safepos' to match 'readpos' so that |
| 5822 | * we can be safe in the event of a crash. |
| 5823 | * So we insist on updating metadata if safepos is behind writepos and |
| 5824 | * readpos is beyond writepos. |
| 5825 | * In any case, update the metadata every 10 seconds. |
| 5826 | * Maybe that number should be configurable, but I'm not sure it is |
| 5827 | * worth it.... maybe it could be a multiple of safemode_delay??? |
| 5828 | */ |
| 5829 | if (conf->min_offset_diff < 0) { |
| 5830 | safepos += -conf->min_offset_diff; |
| 5831 | readpos += -conf->min_offset_diff; |
| 5832 | } else |
| 5833 | writepos += conf->min_offset_diff; |
| 5834 | |
| 5835 | if ((mddev->reshape_backwards |
| 5836 | ? (safepos > writepos && readpos < writepos) |
| 5837 | : (safepos < writepos && readpos > writepos)) || |
| 5838 | time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { |
| 5839 | /* Cannot proceed until we've updated the superblock... */ |
| 5840 | wait_event(conf->wait_for_overlap, |
| 5841 | atomic_read(&conf->reshape_stripes)==0 |
| 5842 | || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 5843 | if (atomic_read(&conf->reshape_stripes) != 0) |
| 5844 | return 0; |
| 5845 | mddev->reshape_position = conf->reshape_progress; |
| 5846 | mddev->curr_resync_completed = sector_nr; |
| 5847 | conf->reshape_checkpoint = jiffies; |
| 5848 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); |
| 5849 | md_wakeup_thread(mddev->thread); |
| 5850 | wait_event(mddev->sb_wait, mddev->sb_flags == 0 || |
| 5851 | test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 5852 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) |
| 5853 | return 0; |
| 5854 | spin_lock_irq(&conf->device_lock); |
| 5855 | conf->reshape_safe = mddev->reshape_position; |
| 5856 | spin_unlock_irq(&conf->device_lock); |
| 5857 | wake_up(&conf->wait_for_overlap); |
| 5858 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 5859 | } |
| 5860 | |
| 5861 | INIT_LIST_HEAD(&stripes); |
| 5862 | for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { |
| 5863 | int j; |
| 5864 | int skipped_disk = 0; |
| 5865 | sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); |
| 5866 | set_bit(STRIPE_EXPANDING, &sh->state); |
| 5867 | atomic_inc(&conf->reshape_stripes); |
| 5868 | /* If any of this stripe is beyond the end of the old |
| 5869 | * array, then we need to zero those blocks |
| 5870 | */ |
| 5871 | for (j=sh->disks; j--;) { |
| 5872 | sector_t s; |
| 5873 | if (j == sh->pd_idx) |
| 5874 | continue; |
| 5875 | if (conf->level == 6 && |
| 5876 | j == sh->qd_idx) |
| 5877 | continue; |
| 5878 | s = raid5_compute_blocknr(sh, j, 0); |
| 5879 | if (s < raid5_size(mddev, 0, 0)) { |
| 5880 | skipped_disk = 1; |
| 5881 | continue; |
| 5882 | } |
| 5883 | memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); |
| 5884 | set_bit(R5_Expanded, &sh->dev[j].flags); |
| 5885 | set_bit(R5_UPTODATE, &sh->dev[j].flags); |
| 5886 | } |
| 5887 | if (!skipped_disk) { |
| 5888 | set_bit(STRIPE_EXPAND_READY, &sh->state); |
| 5889 | set_bit(STRIPE_HANDLE, &sh->state); |
| 5890 | } |
| 5891 | list_add(&sh->lru, &stripes); |
| 5892 | } |
| 5893 | spin_lock_irq(&conf->device_lock); |
| 5894 | if (mddev->reshape_backwards) |
| 5895 | conf->reshape_progress -= reshape_sectors * new_data_disks; |
| 5896 | else |
| 5897 | conf->reshape_progress += reshape_sectors * new_data_disks; |
| 5898 | spin_unlock_irq(&conf->device_lock); |
/* Ok, those stripes are ready. We can start scheduling
| 5900 | * reads on the source stripes. |
| 5901 | * The source stripes are determined by mapping the first and last |
| 5902 | * block on the destination stripes. |
| 5903 | */ |
| 5904 | first_sector = |
| 5905 | raid5_compute_sector(conf, stripe_addr*(new_data_disks), |
| 5906 | 1, &dd_idx, NULL); |
| 5907 | last_sector = |
| 5908 | raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) |
| 5909 | * new_data_disks - 1), |
| 5910 | 1, &dd_idx, NULL); |
| 5911 | if (last_sector >= mddev->dev_sectors) |
| 5912 | last_sector = mddev->dev_sectors - 1; |
| 5913 | while (first_sector <= last_sector) { |
| 5914 | sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); |
| 5915 | set_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 5916 | set_bit(STRIPE_HANDLE, &sh->state); |
| 5917 | raid5_release_stripe(sh); |
| 5918 | first_sector += STRIPE_SECTORS; |
| 5919 | } |
| 5920 | /* Now that the sources are clearly marked, we can release |
| 5921 | * the destination stripes |
| 5922 | */ |
| 5923 | while (!list_empty(&stripes)) { |
| 5924 | sh = list_entry(stripes.next, struct stripe_head, lru); |
| 5925 | list_del_init(&sh->lru); |
| 5926 | raid5_release_stripe(sh); |
| 5927 | } |
| 5928 | /* If this takes us to the resync_max point where we have to pause, |
| 5929 | * then we need to write out the superblock. |
| 5930 | */ |
| 5931 | sector_nr += reshape_sectors; |
| 5932 | retn = reshape_sectors; |
| 5933 | finish: |
| 5934 | if (mddev->curr_resync_completed > mddev->resync_max || |
| 5935 | (sector_nr - mddev->curr_resync_completed) * 2 |
| 5936 | >= mddev->resync_max - mddev->curr_resync_completed) { |
| 5937 | /* Cannot proceed until we've updated the superblock... */ |
| 5938 | wait_event(conf->wait_for_overlap, |
| 5939 | atomic_read(&conf->reshape_stripes) == 0 |
| 5940 | || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 5941 | if (atomic_read(&conf->reshape_stripes) != 0) |
| 5942 | goto ret; |
| 5943 | mddev->reshape_position = conf->reshape_progress; |
| 5944 | mddev->curr_resync_completed = sector_nr; |
| 5945 | conf->reshape_checkpoint = jiffies; |
| 5946 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); |
| 5947 | md_wakeup_thread(mddev->thread); |
| 5948 | wait_event(mddev->sb_wait, |
| 5949 | !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) |
| 5950 | || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); |
| 5951 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) |
| 5952 | goto ret; |
| 5953 | spin_lock_irq(&conf->device_lock); |
| 5954 | conf->reshape_safe = mddev->reshape_position; |
| 5955 | spin_unlock_irq(&conf->device_lock); |
| 5956 | wake_up(&conf->wait_for_overlap); |
| 5957 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 5958 | } |
| 5959 | ret: |
| 5960 | return retn; |
| 5961 | } |
| 5962 | |
| 5963 | static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, |
| 5964 | int *skipped) |
| 5965 | { |
| 5966 | struct r5conf *conf = mddev->private; |
| 5967 | struct stripe_head *sh; |
| 5968 | sector_t max_sector = mddev->dev_sectors; |
| 5969 | sector_t sync_blocks; |
| 5970 | int still_degraded = 0; |
| 5971 | int i; |
| 5972 | |
| 5973 | if (sector_nr >= max_sector) { |
| 5974 | /* just being told to finish up .. nothing much to do */ |
| 5975 | |
| 5976 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { |
| 5977 | end_reshape(conf); |
| 5978 | return 0; |
| 5979 | } |
| 5980 | |
| 5981 | if (mddev->curr_resync < max_sector) /* aborted */ |
| 5982 | bitmap_end_sync(mddev->bitmap, mddev->curr_resync, |
| 5983 | &sync_blocks, 1); |
| 5984 | else /* completed sync */ |
| 5985 | conf->fullsync = 0; |
| 5986 | bitmap_close_sync(mddev->bitmap); |
| 5987 | |
| 5988 | return 0; |
| 5989 | } |
| 5990 | |
| 5991 | /* Allow raid5_quiesce to complete */ |
| 5992 | wait_event(conf->wait_for_overlap, conf->quiesce != 2); |
| 5993 | |
| 5994 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
| 5995 | return reshape_request(mddev, sector_nr, skipped); |
| 5996 | |
| 5997 | /* No need to check resync_max as we never do more than one |
| 5998 | * stripe, and as resync_max will always be on a chunk boundary, |
| 5999 | * if the check in md_do_sync didn't fire, there is no chance |
| 6000 | * of overstepping resync_max here |
| 6001 | */ |
| 6002 | |
/* if there are too many failed drives and we are trying
| 6004 | * to resync, then assert that we are finished, because there is |
| 6005 | * nothing we can do. |
| 6006 | */ |
| 6007 | if (mddev->degraded >= conf->max_degraded && |
| 6008 | test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
| 6009 | sector_t rv = mddev->dev_sectors - sector_nr; |
| 6010 | *skipped = 1; |
| 6011 | return rv; |
| 6012 | } |
| 6013 | if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && |
| 6014 | !conf->fullsync && |
| 6015 | !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && |
| 6016 | sync_blocks >= STRIPE_SECTORS) { |
| 6017 | /* we can skip this block, and probably more */ |
| 6018 | sync_blocks /= STRIPE_SECTORS; |
| 6019 | *skipped = 1; |
| 6020 | return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ |
| 6021 | } |
| 6022 | |
| 6023 | bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); |
| 6024 | |
| 6025 | sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); |
| 6026 | if (sh == NULL) { |
| 6027 | sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); |
| 6028 | /* make sure we don't swamp the stripe cache if someone else |
| 6029 | * is trying to get access |
| 6030 | */ |
| 6031 | schedule_timeout_uninterruptible(1); |
| 6032 | } |
| 6033 | /* Need to check if array will still be degraded after recovery/resync |
| 6034 | * Note in case of > 1 drive failures it's possible we're rebuilding |
* one drive while leaving another faulty drive in the array.
| 6036 | */ |
| 6037 | rcu_read_lock(); |
| 6038 | for (i = 0; i < conf->raid_disks; i++) { |
| 6039 | struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); |
| 6040 | |
| 6041 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) |
| 6042 | still_degraded = 1; |
| 6043 | } |
| 6044 | rcu_read_unlock(); |
| 6045 | |
| 6046 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); |
| 6047 | |
| 6048 | set_bit(STRIPE_SYNC_REQUESTED, &sh->state); |
| 6049 | set_bit(STRIPE_HANDLE, &sh->state); |
| 6050 | |
| 6051 | raid5_release_stripe(sh); |
| 6052 | |
| 6053 | return STRIPE_SECTORS; |
| 6054 | } |
| 6055 | |
| 6056 | static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, |
| 6057 | unsigned int offset) |
| 6058 | { |
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we ever allow large chunks).
	 * So we do one stripe head at a time and record in
	 * conf->retry_read_offset how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will have only one 'dd_idx' and only need one call to
	 * raid5_compute_sector.
	 */
| 6069 | struct stripe_head *sh; |
| 6070 | int dd_idx; |
| 6071 | sector_t sector, logical_sector, last_sector; |
| 6072 | int scnt = 0; |
| 6073 | int handled = 0; |
| 6074 | |
| 6075 | logical_sector = raid_bio->bi_iter.bi_sector & |
| 6076 | ~((sector_t)STRIPE_SECTORS-1); |
| 6077 | sector = raid5_compute_sector(conf, logical_sector, |
| 6078 | 0, &dd_idx, NULL); |
| 6079 | last_sector = bio_end_sector(raid_bio); |
| 6080 | |
| 6081 | for (; logical_sector < last_sector; |
| 6082 | logical_sector += STRIPE_SECTORS, |
| 6083 | sector += STRIPE_SECTORS, |
| 6084 | scnt++) { |
| 6085 | |
| 6086 | if (scnt < offset) |
| 6087 | /* already done this stripe */ |
| 6088 | continue; |
| 6089 | |
| 6090 | sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); |
| 6091 | |
| 6092 | if (!sh) { |
| 6093 | /* failed to get a stripe - must wait */ |
| 6094 | conf->retry_read_aligned = raid_bio; |
| 6095 | conf->retry_read_offset = scnt; |
| 6096 | return handled; |
| 6097 | } |
| 6098 | |
| 6099 | if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { |
| 6100 | raid5_release_stripe(sh); |
| 6101 | conf->retry_read_aligned = raid_bio; |
| 6102 | conf->retry_read_offset = scnt; |
| 6103 | return handled; |
| 6104 | } |
| 6105 | |
| 6106 | set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); |
| 6107 | handle_stripe(sh); |
| 6108 | raid5_release_stripe(sh); |
| 6109 | handled++; |
| 6110 | } |
| 6111 | |
| 6112 | bio_endio(raid_bio); |
| 6113 | |
| 6114 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
| 6115 | wake_up(&conf->wait_for_quiescent); |
| 6116 | return handled; |
| 6117 | } |
| 6118 | |
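/*
 * Pull up to MAX_STRIPE_BATCH stripes off the priority lists and handle
 * them as a batch. Called with conf->device_lock held; the lock is
 * dropped while the stripes are handled and re-acquired before returning.
 */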
| 6119 | static int handle_active_stripes(struct r5conf *conf, int group, |
| 6120 | struct r5worker *worker, |
| 6121 | struct list_head *temp_inactive_list) |
| 6122 | { |
| 6123 | struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; |
| 6124 | int i, batch_size = 0, hash; |
| 6125 | bool release_inactive = false; |
| 6126 | |
| 6127 | while (batch_size < MAX_STRIPE_BATCH && |
| 6128 | (sh = __get_priority_stripe(conf, group)) != NULL) |
| 6129 | batch[batch_size++] = sh; |
| 6130 | |
| 6131 | if (batch_size == 0) { |
| 6132 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) |
| 6133 | if (!list_empty(temp_inactive_list + i)) |
| 6134 | break; |
| 6135 | if (i == NR_STRIPE_HASH_LOCKS) { |
| 6136 | spin_unlock_irq(&conf->device_lock); |
| 6137 | r5l_flush_stripe_to_raid(conf->log); |
| 6138 | spin_lock_irq(&conf->device_lock); |
| 6139 | return batch_size; |
| 6140 | } |
| 6141 | release_inactive = true; |
| 6142 | } |
| 6143 | spin_unlock_irq(&conf->device_lock); |
| 6144 | |
| 6145 | release_inactive_stripe_list(conf, temp_inactive_list, |
| 6146 | NR_STRIPE_HASH_LOCKS); |
| 6147 | |
| 6148 | r5l_flush_stripe_to_raid(conf->log); |
| 6149 | if (release_inactive) { |
| 6150 | spin_lock_irq(&conf->device_lock); |
| 6151 | return 0; |
| 6152 | } |
| 6153 | |
| 6154 | for (i = 0; i < batch_size; i++) |
| 6155 | handle_stripe(batch[i]); |
| 6156 | log_write_stripe_run(conf); |
| 6157 | |
| 6158 | cond_resched(); |
| 6159 | |
| 6160 | spin_lock_irq(&conf->device_lock); |
| 6161 | for (i = 0; i < batch_size; i++) { |
| 6162 | hash = batch[i]->hash_lock_index; |
| 6163 | __release_stripe(conf, batch[i], &temp_inactive_list[hash]); |
| 6164 | } |
| 6165 | return batch_size; |
| 6166 | } |
| 6167 | |
| 6168 | static void raid5_do_work(struct work_struct *work) |
| 6169 | { |
| 6170 | struct r5worker *worker = container_of(work, struct r5worker, work); |
| 6171 | struct r5worker_group *group = worker->group; |
| 6172 | struct r5conf *conf = group->conf; |
| 6173 | struct mddev *mddev = conf->mddev; |
| 6174 | int group_id = group - conf->worker_groups; |
| 6175 | int handled; |
| 6176 | struct blk_plug plug; |
| 6177 | |
| 6178 | pr_debug("+++ raid5worker active\n"); |
| 6179 | |
| 6180 | blk_start_plug(&plug); |
| 6181 | handled = 0; |
| 6182 | spin_lock_irq(&conf->device_lock); |
| 6183 | while (1) { |
| 6184 | int batch_size, released; |
| 6185 | |
| 6186 | released = release_stripe_list(conf, worker->temp_inactive_list); |
| 6187 | |
| 6188 | batch_size = handle_active_stripes(conf, group_id, worker, |
| 6189 | worker->temp_inactive_list); |
| 6190 | worker->working = false; |
| 6191 | if (!batch_size && !released) |
| 6192 | break; |
| 6193 | handled += batch_size; |
| 6194 | wait_event_lock_irq(mddev->sb_wait, |
| 6195 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), |
| 6196 | conf->device_lock); |
| 6197 | } |
| 6198 | pr_debug("%d stripes handled\n", handled); |
| 6199 | |
| 6200 | spin_unlock_irq(&conf->device_lock); |
| 6201 | blk_finish_plug(&plug); |
| 6202 | |
| 6203 | pr_debug("--- raid5worker inactive\n"); |
| 6204 | } |
| 6205 | |
| 6206 | /* |
| 6207 | * This is our raid5 kernel thread. |
| 6208 | * |
| 6209 | * We scan the hash table for stripes which can be handled now. |
| 6210 | * During the scan, completed stripes are saved for us by the interrupt |
| 6211 | * handler, so that they will not have to wait for our next wakeup. |
| 6212 | */ |
| 6213 | static void raid5d(struct md_thread *thread) |
| 6214 | { |
| 6215 | struct mddev *mddev = thread->mddev; |
| 6216 | struct r5conf *conf = mddev->private; |
| 6217 | int handled; |
| 6218 | struct blk_plug plug; |
| 6219 | |
| 6220 | pr_debug("+++ raid5d active\n"); |
| 6221 | |
| 6222 | md_check_recovery(mddev); |
| 6223 | |
| 6224 | blk_start_plug(&plug); |
| 6225 | handled = 0; |
| 6226 | spin_lock_irq(&conf->device_lock); |
| 6227 | while (1) { |
| 6228 | struct bio *bio; |
| 6229 | int batch_size, released; |
| 6230 | unsigned int offset; |
| 6231 | |
| 6232 | released = release_stripe_list(conf, conf->temp_inactive_list); |
| 6233 | if (released) |
| 6234 | clear_bit(R5_DID_ALLOC, &conf->cache_state); |
| 6235 | |
		if (!list_empty(&conf->bitmap_list)) {
| 6238 | /* Now is a good time to flush some bitmap updates */ |
| 6239 | conf->seq_flush++; |
| 6240 | spin_unlock_irq(&conf->device_lock); |
| 6241 | bitmap_unplug(mddev->bitmap); |
| 6242 | spin_lock_irq(&conf->device_lock); |
| 6243 | conf->seq_write = conf->seq_flush; |
| 6244 | activate_bit_delay(conf, conf->temp_inactive_list); |
| 6245 | } |
| 6246 | raid5_activate_delayed(conf); |
| 6247 | |
| 6248 | while ((bio = remove_bio_from_retry(conf, &offset))) { |
| 6249 | int ok; |
| 6250 | spin_unlock_irq(&conf->device_lock); |
| 6251 | ok = retry_aligned_read(conf, bio, offset); |
| 6252 | spin_lock_irq(&conf->device_lock); |
| 6253 | if (!ok) |
| 6254 | break; |
| 6255 | handled++; |
| 6256 | } |
| 6257 | |
| 6258 | batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, |
| 6259 | conf->temp_inactive_list); |
| 6260 | if (!batch_size && !released) |
| 6261 | break; |
| 6262 | handled += batch_size; |
| 6263 | |
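		/* If any superblock flag other than CHANGE_PENDING is set,
		 * let md_check_recovery() service it outside device_lock.
		 */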
| 6264 | if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { |
| 6265 | spin_unlock_irq(&conf->device_lock); |
| 6266 | md_check_recovery(mddev); |
| 6267 | spin_lock_irq(&conf->device_lock); |
| 6268 | } |
| 6269 | } |
| 6270 | pr_debug("%d stripes handled\n", handled); |
| 6271 | |
| 6272 | spin_unlock_irq(&conf->device_lock); |
| 6273 | if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && |
| 6274 | mutex_trylock(&conf->cache_size_mutex)) { |
| 6275 | grow_one_stripe(conf, __GFP_NOWARN); |
		/* Set flag even if allocation failed. This helps
		 * slow down allocation requests when memory is short.
		 */
| 6279 | set_bit(R5_DID_ALLOC, &conf->cache_state); |
| 6280 | mutex_unlock(&conf->cache_size_mutex); |
| 6281 | } |
| 6282 | |
| 6283 | flush_deferred_bios(conf); |
| 6284 | |
| 6285 | r5l_flush_stripe_to_raid(conf->log); |
| 6286 | |
| 6287 | async_tx_issue_pending_all(); |
| 6288 | blk_finish_plug(&plug); |
| 6289 | |
| 6290 | pr_debug("--- raid5d inactive\n"); |
| 6291 | } |
| 6292 | |
| 6293 | static ssize_t |
| 6294 | raid5_show_stripe_cache_size(struct mddev *mddev, char *page) |
| 6295 | { |
| 6296 | struct r5conf *conf; |
| 6297 | int ret = 0; |
| 6298 | spin_lock(&mddev->lock); |
| 6299 | conf = mddev->private; |
| 6300 | if (conf) |
| 6301 | ret = sprintf(page, "%d\n", conf->min_nr_stripes); |
| 6302 | spin_unlock(&mddev->lock); |
| 6303 | return ret; |
| 6304 | } |
| 6305 | |
| 6306 | int |
| 6307 | raid5_set_cache_size(struct mddev *mddev, int size) |
| 6308 | { |
| 6309 | struct r5conf *conf = mddev->private; |
| 6310 | int err; |
| 6311 | |
| 6312 | if (size <= 16 || size > 32768) |
| 6313 | return -EINVAL; |
| 6314 | |
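	/*
	 * Drop stripes while the cache is larger than the new size; the
	 * grow loop below handles the opposite case after md_allow_write().
	 */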
| 6315 | conf->min_nr_stripes = size; |
| 6316 | mutex_lock(&conf->cache_size_mutex); |
| 6317 | while (size < conf->max_nr_stripes && |
| 6318 | drop_one_stripe(conf)) |
| 6319 | ; |
	mutex_unlock(&conf->cache_size_mutex);

| 6323 | err = md_allow_write(mddev); |
| 6324 | if (err) |
| 6325 | return err; |
| 6326 | |
| 6327 | mutex_lock(&conf->cache_size_mutex); |
| 6328 | while (size > conf->max_nr_stripes) |
| 6329 | if (!grow_one_stripe(conf, GFP_KERNEL)) |
| 6330 | break; |
| 6331 | mutex_unlock(&conf->cache_size_mutex); |
| 6332 | |
| 6333 | return 0; |
| 6334 | } |
| 6335 | EXPORT_SYMBOL(raid5_set_cache_size); |
| 6336 | |
| 6337 | static ssize_t |
| 6338 | raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) |
| 6339 | { |
| 6340 | struct r5conf *conf; |
| 6341 | unsigned long new; |
| 6342 | int err; |
| 6343 | |
| 6344 | if (len >= PAGE_SIZE) |
| 6345 | return -EINVAL; |
| 6346 | if (kstrtoul(page, 10, &new)) |
| 6347 | return -EINVAL; |
| 6348 | err = mddev_lock(mddev); |
| 6349 | if (err) |
| 6350 | return err; |
| 6351 | conf = mddev->private; |
| 6352 | if (!conf) |
| 6353 | err = -ENODEV; |
| 6354 | else |
| 6355 | err = raid5_set_cache_size(mddev, new); |
| 6356 | mddev_unlock(mddev); |
| 6357 | |
| 6358 | return err ?: len; |
| 6359 | } |
| 6360 | |
| 6361 | static struct md_sysfs_entry |
| 6362 | raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, |
| 6363 | raid5_show_stripe_cache_size, |
| 6364 | raid5_store_stripe_cache_size); |
| 6365 | |
| 6366 | static ssize_t |
| 6367 | raid5_show_rmw_level(struct mddev *mddev, char *page) |
| 6368 | { |
| 6369 | struct r5conf *conf = mddev->private; |
| 6370 | if (conf) |
| 6371 | return sprintf(page, "%d\n", conf->rmw_level); |
| 6372 | else |
| 6373 | return 0; |
| 6374 | } |
| 6375 | |
| 6376 | static ssize_t |
| 6377 | raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) |
| 6378 | { |
| 6379 | struct r5conf *conf = mddev->private; |
| 6380 | unsigned long new; |
| 6381 | |
| 6382 | if (!conf) |
| 6383 | return -ENODEV; |
| 6384 | |
| 6385 | if (len >= PAGE_SIZE) |
| 6386 | return -EINVAL; |
| 6387 | |
| 6388 | if (kstrtoul(page, 10, &new)) |
| 6389 | return -EINVAL; |
| 6390 | |
| 6391 | if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) |
| 6392 | return -EINVAL; |
| 6393 | |
| 6394 | if (new != PARITY_DISABLE_RMW && |
| 6395 | new != PARITY_ENABLE_RMW && |
| 6396 | new != PARITY_PREFER_RMW) |
| 6397 | return -EINVAL; |
| 6398 | |
| 6399 | conf->rmw_level = new; |
| 6400 | return len; |
| 6401 | } |
| 6402 | |
| 6403 | static struct md_sysfs_entry |
| 6404 | raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, |
| 6405 | raid5_show_rmw_level, |
| 6406 | raid5_store_rmw_level); |
| 6407 | |
| 6408 | |
| 6409 | static ssize_t |
| 6410 | raid5_show_preread_threshold(struct mddev *mddev, char *page) |
| 6411 | { |
| 6412 | struct r5conf *conf; |
| 6413 | int ret = 0; |
| 6414 | spin_lock(&mddev->lock); |
| 6415 | conf = mddev->private; |
| 6416 | if (conf) |
| 6417 | ret = sprintf(page, "%d\n", conf->bypass_threshold); |
| 6418 | spin_unlock(&mddev->lock); |
| 6419 | return ret; |
| 6420 | } |
| 6421 | |
| 6422 | static ssize_t |
| 6423 | raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) |
| 6424 | { |
| 6425 | struct r5conf *conf; |
| 6426 | unsigned long new; |
| 6427 | int err; |
| 6428 | |
| 6429 | if (len >= PAGE_SIZE) |
| 6430 | return -EINVAL; |
| 6431 | if (kstrtoul(page, 10, &new)) |
| 6432 | return -EINVAL; |
| 6433 | |
| 6434 | err = mddev_lock(mddev); |
| 6435 | if (err) |
| 6436 | return err; |
| 6437 | conf = mddev->private; |
| 6438 | if (!conf) |
| 6439 | err = -ENODEV; |
| 6440 | else if (new > conf->min_nr_stripes) |
| 6441 | err = -EINVAL; |
| 6442 | else |
| 6443 | conf->bypass_threshold = new; |
| 6444 | mddev_unlock(mddev); |
| 6445 | return err ?: len; |
| 6446 | } |
| 6447 | |
| 6448 | static struct md_sysfs_entry |
| 6449 | raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, |
| 6450 | S_IRUGO | S_IWUSR, |
| 6451 | raid5_show_preread_threshold, |
| 6452 | raid5_store_preread_threshold); |
| 6453 | |
| 6454 | static ssize_t |
| 6455 | raid5_show_skip_copy(struct mddev *mddev, char *page) |
| 6456 | { |
| 6457 | struct r5conf *conf; |
| 6458 | int ret = 0; |
| 6459 | spin_lock(&mddev->lock); |
| 6460 | conf = mddev->private; |
| 6461 | if (conf) |
| 6462 | ret = sprintf(page, "%d\n", conf->skip_copy); |
| 6463 | spin_unlock(&mddev->lock); |
| 6464 | return ret; |
| 6465 | } |
| 6466 | |
| 6467 | static ssize_t |
| 6468 | raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) |
| 6469 | { |
| 6470 | struct r5conf *conf; |
| 6471 | unsigned long new; |
| 6472 | int err; |
| 6473 | |
| 6474 | if (len >= PAGE_SIZE) |
| 6475 | return -EINVAL; |
| 6476 | if (kstrtoul(page, 10, &new)) |
| 6477 | return -EINVAL; |
| 6478 | new = !!new; |
| 6479 | |
| 6480 | err = mddev_lock(mddev); |
| 6481 | if (err) |
| 6482 | return err; |
| 6483 | conf = mddev->private; |
| 6484 | if (!conf) |
| 6485 | err = -ENODEV; |
| 6486 | else if (new != conf->skip_copy) { |
| 6487 | mddev_suspend(mddev); |
| 6488 | conf->skip_copy = new; |
| 6489 | if (new) |
| 6490 | mddev->queue->backing_dev_info->capabilities |= |
| 6491 | BDI_CAP_STABLE_WRITES; |
| 6492 | else |
| 6493 | mddev->queue->backing_dev_info->capabilities &= |
| 6494 | ~BDI_CAP_STABLE_WRITES; |
| 6495 | mddev_resume(mddev); |
| 6496 | } |
| 6497 | mddev_unlock(mddev); |
| 6498 | return err ?: len; |
| 6499 | } |
| 6500 | |
| 6501 | static struct md_sysfs_entry |
| 6502 | raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, |
| 6503 | raid5_show_skip_copy, |
| 6504 | raid5_store_skip_copy); |
| 6505 | |
| 6506 | static ssize_t |
| 6507 | stripe_cache_active_show(struct mddev *mddev, char *page) |
| 6508 | { |
| 6509 | struct r5conf *conf = mddev->private; |
| 6510 | if (conf) |
| 6511 | return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); |
| 6512 | else |
| 6513 | return 0; |
| 6514 | } |
| 6515 | |
| 6516 | static struct md_sysfs_entry |
| 6517 | raid5_stripecache_active = __ATTR_RO(stripe_cache_active); |
| 6518 | |
| 6519 | static ssize_t |
| 6520 | raid5_show_group_thread_cnt(struct mddev *mddev, char *page) |
| 6521 | { |
| 6522 | struct r5conf *conf; |
| 6523 | int ret = 0; |
| 6524 | spin_lock(&mddev->lock); |
| 6525 | conf = mddev->private; |
| 6526 | if (conf) |
| 6527 | ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); |
| 6528 | spin_unlock(&mddev->lock); |
| 6529 | return ret; |
| 6530 | } |
| 6531 | |
| 6532 | static int alloc_thread_groups(struct r5conf *conf, int cnt, |
| 6533 | int *group_cnt, |
| 6534 | int *worker_cnt_per_group, |
| 6535 | struct r5worker_group **worker_groups); |
| 6536 | static ssize_t |
| 6537 | raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) |
| 6538 | { |
| 6539 | struct r5conf *conf; |
| 6540 | unsigned long new; |
| 6541 | int err; |
| 6542 | struct r5worker_group *new_groups, *old_groups; |
| 6543 | int group_cnt, worker_cnt_per_group; |
| 6544 | |
| 6545 | if (len >= PAGE_SIZE) |
| 6546 | return -EINVAL; |
| 6547 | if (kstrtoul(page, 10, &new)) |
| 6548 | return -EINVAL; |
| 6549 | |
| 6550 | err = mddev_lock(mddev); |
| 6551 | if (err) |
| 6552 | return err; |
| 6553 | conf = mddev->private; |
| 6554 | if (!conf) |
| 6555 | err = -ENODEV; |
| 6556 | else if (new != conf->worker_cnt_per_group) { |
| 6557 | mddev_suspend(mddev); |
| 6558 | |
| 6559 | old_groups = conf->worker_groups; |
| 6560 | if (old_groups) |
| 6561 | flush_workqueue(raid5_wq); |
| 6562 | |
| 6563 | err = alloc_thread_groups(conf, new, |
| 6564 | &group_cnt, &worker_cnt_per_group, |
| 6565 | &new_groups); |
| 6566 | if (!err) { |
| 6567 | spin_lock_irq(&conf->device_lock); |
| 6568 | conf->group_cnt = group_cnt; |
| 6569 | conf->worker_cnt_per_group = worker_cnt_per_group; |
| 6570 | conf->worker_groups = new_groups; |
| 6571 | spin_unlock_irq(&conf->device_lock); |
| 6572 | |
| 6573 | if (old_groups) |
| 6574 | kfree(old_groups[0].workers); |
| 6575 | kfree(old_groups); |
| 6576 | } |
| 6577 | mddev_resume(mddev); |
| 6578 | } |
| 6579 | mddev_unlock(mddev); |
| 6580 | |
| 6581 | return err ?: len; |
| 6582 | } |
| 6583 | |
| 6584 | static struct md_sysfs_entry |
| 6585 | raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, |
| 6586 | raid5_show_group_thread_cnt, |
| 6587 | raid5_store_group_thread_cnt); |
| 6588 | |
| 6589 | static struct attribute *raid5_attrs[] = { |
| 6590 | &raid5_stripecache_size.attr, |
| 6591 | &raid5_stripecache_active.attr, |
| 6592 | &raid5_preread_bypass_threshold.attr, |
| 6593 | &raid5_group_thread_cnt.attr, |
| 6594 | &raid5_skip_copy.attr, |
| 6595 | &raid5_rmw_level.attr, |
| 6596 | &r5c_journal_mode.attr, |
| 6597 | NULL, |
| 6598 | }; |
| 6599 | static struct attribute_group raid5_attrs_group = { |
| 6600 | .name = NULL, |
| 6601 | .attrs = raid5_attrs, |
| 6602 | }; |
| 6603 | |
| 6604 | static int alloc_thread_groups(struct r5conf *conf, int cnt, |
| 6605 | int *group_cnt, |
| 6606 | int *worker_cnt_per_group, |
| 6607 | struct r5worker_group **worker_groups) |
| 6608 | { |
| 6609 | int i, j, k; |
| 6610 | ssize_t size; |
| 6611 | struct r5worker *workers; |
| 6612 | |
| 6613 | *worker_cnt_per_group = cnt; |
| 6614 | if (cnt == 0) { |
| 6615 | *group_cnt = 0; |
| 6616 | *worker_groups = NULL; |
| 6617 | return 0; |
| 6618 | } |
| 6619 | *group_cnt = num_possible_nodes(); |
| 6620 | size = sizeof(struct r5worker) * cnt; |
| 6621 | workers = kzalloc(size * *group_cnt, GFP_NOIO); |
| 6622 | *worker_groups = kzalloc(sizeof(struct r5worker_group) * |
| 6623 | *group_cnt, GFP_NOIO); |
| 6624 | if (!*worker_groups || !workers) { |
| 6625 | kfree(workers); |
| 6626 | kfree(*worker_groups); |
| 6627 | return -ENOMEM; |
| 6628 | } |
| 6629 | |
| 6630 | for (i = 0; i < *group_cnt; i++) { |
| 6631 | struct r5worker_group *group; |
| 6632 | |
| 6633 | group = &(*worker_groups)[i]; |
| 6634 | INIT_LIST_HEAD(&group->handle_list); |
| 6635 | INIT_LIST_HEAD(&group->loprio_list); |
| 6636 | group->conf = conf; |
| 6637 | group->workers = workers + i * cnt; |
| 6638 | |
| 6639 | for (j = 0; j < cnt; j++) { |
| 6640 | struct r5worker *worker = group->workers + j; |
| 6641 | worker->group = group; |
| 6642 | INIT_WORK(&worker->work, raid5_do_work); |
| 6643 | |
| 6644 | for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) |
| 6645 | INIT_LIST_HEAD(worker->temp_inactive_list + k); |
| 6646 | } |
| 6647 | } |
| 6648 | |
| 6649 | return 0; |
| 6650 | } |
| 6651 | |
| 6652 | static void free_thread_groups(struct r5conf *conf) |
| 6653 | { |
| 6654 | if (conf->worker_groups) |
| 6655 | kfree(conf->worker_groups[0].workers); |
| 6656 | kfree(conf->worker_groups); |
| 6657 | conf->worker_groups = NULL; |
| 6658 | } |
| 6659 | |
| 6660 | static sector_t |
| 6661 | raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
| 6662 | { |
| 6663 | struct r5conf *conf = mddev->private; |
| 6664 | |
| 6665 | if (!sectors) |
| 6666 | sectors = mddev->dev_sectors; |
| 6667 | if (!raid_disks) |
| 6668 | /* size is defined by the smallest of previous and new size */ |
| 6669 | raid_disks = min(conf->raid_disks, conf->previous_raid_disks); |
| 6670 | |
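	/*
	 * Round down to a multiple of both the old and the new chunk size,
	 * so the result is valid in either geometry during a reshape.
	 */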
| 6671 | sectors &= ~((sector_t)conf->chunk_sectors - 1); |
| 6672 | sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); |
| 6673 | return sectors * (raid_disks - conf->max_degraded); |
| 6674 | } |
| 6675 | |
| 6676 | static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) |
| 6677 | { |
| 6678 | safe_put_page(percpu->spare_page); |
| 6679 | if (percpu->scribble) |
| 6680 | flex_array_free(percpu->scribble); |
| 6681 | percpu->spare_page = NULL; |
| 6682 | percpu->scribble = NULL; |
| 6683 | } |
| 6684 | |
| 6685 | static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) |
| 6686 | { |
| 6687 | if (conf->level == 6 && !percpu->spare_page) |
| 6688 | percpu->spare_page = alloc_page(GFP_KERNEL); |
| 6689 | if (!percpu->scribble) |
| 6690 | percpu->scribble = scribble_alloc(max(conf->raid_disks, |
| 6691 | conf->previous_raid_disks), |
| 6692 | max(conf->chunk_sectors, |
| 6693 | conf->prev_chunk_sectors) |
| 6694 | / STRIPE_SECTORS, |
| 6695 | GFP_KERNEL); |
| 6696 | |
| 6697 | if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { |
| 6698 | free_scratch_buffer(conf, percpu); |
| 6699 | return -ENOMEM; |
| 6700 | } |
| 6701 | |
| 6702 | return 0; |
| 6703 | } |
| 6704 | |
| 6705 | static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node) |
| 6706 | { |
| 6707 | struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); |
| 6708 | |
| 6709 | free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); |
| 6710 | return 0; |
| 6711 | } |
| 6712 | |
| 6713 | static void raid5_free_percpu(struct r5conf *conf) |
| 6714 | { |
| 6715 | if (!conf->percpu) |
| 6716 | return; |
| 6717 | |
| 6718 | cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); |
| 6719 | free_percpu(conf->percpu); |
| 6720 | } |
| 6721 | |
| 6722 | static void free_conf(struct r5conf *conf) |
| 6723 | { |
| 6724 | int i; |
| 6725 | |
| 6726 | log_exit(conf); |
| 6727 | |
| 6728 | if (conf->shrinker.nr_deferred) |
| 6729 | unregister_shrinker(&conf->shrinker); |
| 6730 | |
| 6731 | free_thread_groups(conf); |
| 6732 | shrink_stripes(conf); |
| 6733 | raid5_free_percpu(conf); |
| 6734 | for (i = 0; i < conf->pool_size; i++) |
| 6735 | if (conf->disks[i].extra_page) |
| 6736 | put_page(conf->disks[i].extra_page); |
| 6737 | kfree(conf->disks); |
| 6738 | kfree(conf->stripe_hashtbl); |
| 6739 | kfree(conf->pending_data); |
| 6740 | kfree(conf); |
| 6741 | } |
| 6742 | |
| 6743 | static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) |
| 6744 | { |
| 6745 | struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); |
| 6746 | struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); |
| 6747 | |
| 6748 | if (alloc_scratch_buffer(conf, percpu)) { |
| 6749 | pr_warn("%s: failed memory allocation for cpu%u\n", |
| 6750 | __func__, cpu); |
| 6751 | return -ENOMEM; |
| 6752 | } |
| 6753 | return 0; |
| 6754 | } |
| 6755 | |
| 6756 | static int raid5_alloc_percpu(struct r5conf *conf) |
| 6757 | { |
| 6758 | int err = 0; |
| 6759 | |
| 6760 | conf->percpu = alloc_percpu(struct raid5_percpu); |
| 6761 | if (!conf->percpu) |
| 6762 | return -ENOMEM; |
| 6763 | |
| 6764 | err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); |
| 6765 | if (!err) { |
| 6766 | conf->scribble_disks = max(conf->raid_disks, |
| 6767 | conf->previous_raid_disks); |
| 6768 | conf->scribble_sectors = max(conf->chunk_sectors, |
| 6769 | conf->prev_chunk_sectors); |
| 6770 | } |
| 6771 | return err; |
| 6772 | } |
| 6773 | |
| 6774 | static unsigned long raid5_cache_scan(struct shrinker *shrink, |
| 6775 | struct shrink_control *sc) |
| 6776 | { |
| 6777 | struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); |
| 6778 | unsigned long ret = SHRINK_STOP; |
| 6779 | |
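	/*
	 * Return SHRINK_STOP if we cannot take the mutex, or if a stripe
	 * cannot be dropped because all of them are in use.
	 */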
| 6780 | if (mutex_trylock(&conf->cache_size_mutex)) { |
		ret = 0;
| 6782 | while (ret < sc->nr_to_scan && |
| 6783 | conf->max_nr_stripes > conf->min_nr_stripes) { |
| 6784 | if (drop_one_stripe(conf) == 0) { |
| 6785 | ret = SHRINK_STOP; |
| 6786 | break; |
| 6787 | } |
| 6788 | ret++; |
| 6789 | } |
| 6790 | mutex_unlock(&conf->cache_size_mutex); |
| 6791 | } |
| 6792 | return ret; |
| 6793 | } |
| 6794 | |
| 6795 | static unsigned long raid5_cache_count(struct shrinker *shrink, |
| 6796 | struct shrink_control *sc) |
| 6797 | { |
| 6798 | struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); |
| 6799 | |
| 6800 | if (conf->max_nr_stripes < conf->min_nr_stripes) |
| 6801 | /* unlikely, but not impossible */ |
| 6802 | return 0; |
| 6803 | return conf->max_nr_stripes - conf->min_nr_stripes; |
| 6804 | } |
| 6805 | |
| 6806 | static struct r5conf *setup_conf(struct mddev *mddev) |
| 6807 | { |
| 6808 | struct r5conf *conf; |
| 6809 | int raid_disk, memory, max_disks; |
| 6810 | struct md_rdev *rdev; |
| 6811 | struct disk_info *disk; |
| 6812 | char pers_name[6]; |
| 6813 | int i; |
| 6814 | int group_cnt, worker_cnt_per_group; |
| 6815 | struct r5worker_group *new_group; |
| 6816 | |
| 6817 | if (mddev->new_level != 5 |
| 6818 | && mddev->new_level != 4 |
| 6819 | && mddev->new_level != 6) { |
| 6820 | pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n", |
| 6821 | mdname(mddev), mddev->new_level); |
| 6822 | return ERR_PTR(-EIO); |
| 6823 | } |
| 6824 | if ((mddev->new_level == 5 |
| 6825 | && !algorithm_valid_raid5(mddev->new_layout)) || |
| 6826 | (mddev->new_level == 6 |
| 6827 | && !algorithm_valid_raid6(mddev->new_layout))) { |
| 6828 | pr_warn("md/raid:%s: layout %d not supported\n", |
| 6829 | mdname(mddev), mddev->new_layout); |
| 6830 | return ERR_PTR(-EIO); |
| 6831 | } |
| 6832 | if (mddev->new_level == 6 && mddev->raid_disks < 4) { |
| 6833 | pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n", |
| 6834 | mdname(mddev), mddev->raid_disks); |
| 6835 | return ERR_PTR(-EINVAL); |
| 6836 | } |
| 6837 | |
| 6838 | if (!mddev->new_chunk_sectors || |
| 6839 | (mddev->new_chunk_sectors << 9) % PAGE_SIZE || |
| 6840 | !is_power_of_2(mddev->new_chunk_sectors)) { |
| 6841 | pr_warn("md/raid:%s: invalid chunk size %d\n", |
| 6842 | mdname(mddev), mddev->new_chunk_sectors << 9); |
| 6843 | return ERR_PTR(-EINVAL); |
| 6844 | } |
| 6845 | |
| 6846 | conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); |
| 6847 | if (conf == NULL) |
| 6848 | goto abort; |
| 6849 | INIT_LIST_HEAD(&conf->free_list); |
| 6850 | INIT_LIST_HEAD(&conf->pending_list); |
| 6851 | conf->pending_data = kzalloc(sizeof(struct r5pending_data) * |
| 6852 | PENDING_IO_MAX, GFP_KERNEL); |
| 6853 | if (!conf->pending_data) |
| 6854 | goto abort; |
| 6855 | for (i = 0; i < PENDING_IO_MAX; i++) |
| 6856 | list_add(&conf->pending_data[i].sibling, &conf->free_list); |
	/* Don't enable multi-threading by default */
| 6858 | if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, |
| 6859 | &new_group)) { |
| 6860 | conf->group_cnt = group_cnt; |
| 6861 | conf->worker_cnt_per_group = worker_cnt_per_group; |
| 6862 | conf->worker_groups = new_group; |
| 6863 | } else |
| 6864 | goto abort; |
| 6865 | spin_lock_init(&conf->device_lock); |
| 6866 | seqcount_init(&conf->gen_lock); |
| 6867 | mutex_init(&conf->cache_size_mutex); |
| 6868 | init_waitqueue_head(&conf->wait_for_quiescent); |
| 6869 | init_waitqueue_head(&conf->wait_for_stripe); |
| 6870 | init_waitqueue_head(&conf->wait_for_overlap); |
| 6871 | INIT_LIST_HEAD(&conf->handle_list); |
| 6872 | INIT_LIST_HEAD(&conf->loprio_list); |
| 6873 | INIT_LIST_HEAD(&conf->hold_list); |
| 6874 | INIT_LIST_HEAD(&conf->delayed_list); |
| 6875 | INIT_LIST_HEAD(&conf->bitmap_list); |
| 6876 | init_llist_head(&conf->released_stripes); |
| 6877 | atomic_set(&conf->active_stripes, 0); |
| 6878 | atomic_set(&conf->preread_active_stripes, 0); |
| 6879 | atomic_set(&conf->active_aligned_reads, 0); |
| 6880 | spin_lock_init(&conf->pending_bios_lock); |
| 6881 | conf->batch_bio_dispatch = true; |
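	/* Batching bio dispatch mainly helps rotational devices, so turn
	 * it off as soon as one non-rotational member is found.
	 */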
| 6882 | rdev_for_each(rdev, mddev) { |
| 6883 | if (test_bit(Journal, &rdev->flags)) |
| 6884 | continue; |
| 6885 | if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { |
| 6886 | conf->batch_bio_dispatch = false; |
| 6887 | break; |
| 6888 | } |
| 6889 | } |
| 6890 | |
| 6891 | conf->bypass_threshold = BYPASS_THRESHOLD; |
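	/* Start out different from mddev->recovery_disabled so that
	 * recovery is not initially considered disabled.
	 */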
| 6892 | conf->recovery_disabled = mddev->recovery_disabled - 1; |
| 6893 | |
| 6894 | conf->raid_disks = mddev->raid_disks; |
| 6895 | if (mddev->reshape_position == MaxSector) |
| 6896 | conf->previous_raid_disks = mddev->raid_disks; |
| 6897 | else |
| 6898 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; |
| 6899 | max_disks = max(conf->raid_disks, conf->previous_raid_disks); |
| 6900 | |
| 6901 | conf->disks = kzalloc(max_disks * sizeof(struct disk_info), |
| 6902 | GFP_KERNEL); |
| 6903 | |
| 6904 | if (!conf->disks) |
| 6905 | goto abort; |
| 6906 | |
| 6907 | for (i = 0; i < max_disks; i++) { |
| 6908 | conf->disks[i].extra_page = alloc_page(GFP_KERNEL); |
| 6909 | if (!conf->disks[i].extra_page) |
| 6910 | goto abort; |
| 6911 | } |
| 6912 | |
| 6913 | conf->mddev = mddev; |
| 6914 | |
| 6915 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
| 6916 | goto abort; |
| 6917 | |
	/* We init hash_locks[0] separately so that it can be used
	 * as the reference lock in the spin_lock_nest_lock() call
	 * in lock_all_device_hash_locks_irq in order to convince
	 * lockdep that we know what we are doing.
	 */
| 6923 | spin_lock_init(conf->hash_locks); |
| 6924 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) |
| 6925 | spin_lock_init(conf->hash_locks + i); |
| 6926 | |
| 6927 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) |
| 6928 | INIT_LIST_HEAD(conf->inactive_list + i); |
| 6929 | |
| 6930 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) |
| 6931 | INIT_LIST_HEAD(conf->temp_inactive_list + i); |
| 6932 | |
| 6933 | atomic_set(&conf->r5c_cached_full_stripes, 0); |
| 6934 | INIT_LIST_HEAD(&conf->r5c_full_stripe_list); |
| 6935 | atomic_set(&conf->r5c_cached_partial_stripes, 0); |
| 6936 | INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); |
| 6937 | atomic_set(&conf->r5c_flushing_full_stripes, 0); |
| 6938 | atomic_set(&conf->r5c_flushing_partial_stripes, 0); |
| 6939 | |
| 6940 | conf->level = mddev->new_level; |
| 6941 | conf->chunk_sectors = mddev->new_chunk_sectors; |
| 6942 | if (raid5_alloc_percpu(conf) != 0) |
| 6943 | goto abort; |
| 6944 | |
| 6945 | pr_debug("raid456: run(%s) called.\n", mdname(mddev)); |
| 6946 | |
| 6947 | rdev_for_each(rdev, mddev) { |
| 6948 | raid_disk = rdev->raid_disk; |
| 6949 | if (raid_disk >= max_disks |
| 6950 | || raid_disk < 0 || test_bit(Journal, &rdev->flags)) |
| 6951 | continue; |
| 6952 | disk = conf->disks + raid_disk; |
| 6953 | |
| 6954 | if (test_bit(Replacement, &rdev->flags)) { |
| 6955 | if (disk->replacement) |
| 6956 | goto abort; |
| 6957 | disk->replacement = rdev; |
| 6958 | } else { |
| 6959 | if (disk->rdev) |
| 6960 | goto abort; |
| 6961 | disk->rdev = rdev; |
| 6962 | } |
| 6963 | |
| 6964 | if (test_bit(In_sync, &rdev->flags)) { |
| 6965 | char b[BDEVNAME_SIZE]; |
| 6966 | pr_info("md/raid:%s: device %s operational as raid disk %d\n", |
| 6967 | mdname(mddev), bdevname(rdev->bdev, b), raid_disk); |
| 6968 | } else if (rdev->saved_raid_disk != raid_disk) |
| 6969 | /* Cannot rely on bitmap to complete recovery */ |
| 6970 | conf->fullsync = 1; |
| 6971 | } |
| 6972 | |
| 6973 | conf->level = mddev->new_level; |
| 6974 | if (conf->level == 6) { |
| 6975 | conf->max_degraded = 2; |
| 6976 | if (raid6_call.xor_syndrome) |
| 6977 | conf->rmw_level = PARITY_ENABLE_RMW; |
| 6978 | else |
| 6979 | conf->rmw_level = PARITY_DISABLE_RMW; |
| 6980 | } else { |
| 6981 | conf->max_degraded = 1; |
| 6982 | conf->rmw_level = PARITY_ENABLE_RMW; |
| 6983 | } |
| 6984 | conf->algorithm = mddev->new_layout; |
| 6985 | conf->reshape_progress = mddev->reshape_position; |
| 6986 | if (conf->reshape_progress != MaxSector) { |
| 6987 | conf->prev_chunk_sectors = mddev->chunk_sectors; |
| 6988 | conf->prev_algo = mddev->layout; |
| 6989 | } else { |
| 6990 | conf->prev_chunk_sectors = conf->chunk_sectors; |
| 6991 | conf->prev_algo = conf->algorithm; |
| 6992 | } |
| 6993 | |
| 6994 | conf->min_nr_stripes = NR_STRIPES; |
| 6995 | if (mddev->reshape_position != MaxSector) { |
| 6996 | int stripes = max_t(int, |
| 6997 | ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4, |
| 6998 | ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); |
| 6999 | conf->min_nr_stripes = max(NR_STRIPES, stripes); |
| 7000 | if (conf->min_nr_stripes != NR_STRIPES) |
| 7001 | pr_info("md/raid:%s: force stripe size %d for reshape\n", |
| 7002 | mdname(mddev), conf->min_nr_stripes); |
| 7003 | } |
| 7004 | memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + |
| 7005 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
| 7006 | atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); |
| 7007 | if (grow_stripes(conf, conf->min_nr_stripes)) { |
| 7008 | pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", |
| 7009 | mdname(mddev), memory); |
| 7010 | goto abort; |
| 7011 | } else |
| 7012 | pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); |
| 7013 | /* |
| 7014 | * Losing a stripe head costs more than the time to refill it, |
| 7015 | * it reduces the queue depth and so can hurt throughput. |
| 7016 | * So set it rather large, scaled by number of devices. |
| 7017 | */ |
| 7018 | conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; |
| 7019 | conf->shrinker.scan_objects = raid5_cache_scan; |
| 7020 | conf->shrinker.count_objects = raid5_cache_count; |
| 7021 | conf->shrinker.batch = 128; |
| 7022 | conf->shrinker.flags = 0; |
| 7023 | if (register_shrinker(&conf->shrinker)) { |
| 7024 | pr_warn("md/raid:%s: couldn't register shrinker.\n", |
| 7025 | mdname(mddev)); |
| 7026 | goto abort; |
| 7027 | } |
| 7028 | |
| 7029 | sprintf(pers_name, "raid%d", mddev->new_level); |
| 7030 | conf->thread = md_register_thread(raid5d, mddev, pers_name); |
| 7031 | if (!conf->thread) { |
| 7032 | pr_warn("md/raid:%s: couldn't allocate thread.\n", |
| 7033 | mdname(mddev)); |
| 7034 | goto abort; |
| 7035 | } |
| 7036 | |
| 7037 | return conf; |
| 7038 | |
| 7039 | abort: |
| 7040 | if (conf) { |
| 7041 | free_conf(conf); |
| 7042 | return ERR_PTR(-EIO); |
| 7043 | } else |
| 7044 | return ERR_PTR(-ENOMEM); |
| 7045 | } |
| 7046 | |
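/*
 * Return 1 if the given device holds only parity blocks under the given
 * layout, so stale data on it cannot make a dirty array any worse.
 */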
| 7047 | static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) |
| 7048 | { |
| 7049 | switch (algo) { |
| 7050 | case ALGORITHM_PARITY_0: |
| 7051 | if (raid_disk < max_degraded) |
| 7052 | return 1; |
| 7053 | break; |
| 7054 | case ALGORITHM_PARITY_N: |
| 7055 | if (raid_disk >= raid_disks - max_degraded) |
| 7056 | return 1; |
| 7057 | break; |
| 7058 | case ALGORITHM_PARITY_0_6: |
| 7059 | if (raid_disk == 0 || |
| 7060 | raid_disk == raid_disks - 1) |
| 7061 | return 1; |
| 7062 | break; |
| 7063 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 7064 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 7065 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 7066 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 7067 | if (raid_disk == raid_disks - 1) |
| 7068 | return 1; |
| 7069 | } |
| 7070 | return 0; |
| 7071 | } |
| 7072 | |
| 7073 | static int raid5_run(struct mddev *mddev) |
| 7074 | { |
| 7075 | struct r5conf *conf; |
| 7076 | int working_disks = 0; |
| 7077 | int dirty_parity_disks = 0; |
| 7078 | struct md_rdev *rdev; |
| 7079 | struct md_rdev *journal_dev = NULL; |
| 7080 | sector_t reshape_offset = 0; |
| 7081 | int i; |
| 7082 | long long min_offset_diff = 0; |
| 7083 | int first = 1; |
| 7084 | |
| 7085 | if (mddev->recovery_cp != MaxSector) |
| 7086 | pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", |
| 7087 | mdname(mddev)); |
| 7088 | |
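	/*
	 * Track the most conservative difference between new and old data
	 * offsets across all member devices; it is used below to decide
	 * whether a reshape can safely continue.
	 */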
| 7089 | rdev_for_each(rdev, mddev) { |
| 7090 | long long diff; |
| 7091 | |
| 7092 | if (test_bit(Journal, &rdev->flags)) { |
| 7093 | journal_dev = rdev; |
| 7094 | continue; |
| 7095 | } |
| 7096 | if (rdev->raid_disk < 0) |
| 7097 | continue; |
| 7098 | diff = (rdev->new_data_offset - rdev->data_offset); |
| 7099 | if (first) { |
| 7100 | min_offset_diff = diff; |
| 7101 | first = 0; |
| 7102 | } else if (mddev->reshape_backwards && |
| 7103 | diff < min_offset_diff) |
| 7104 | min_offset_diff = diff; |
| 7105 | else if (!mddev->reshape_backwards && |
| 7106 | diff > min_offset_diff) |
| 7107 | min_offset_diff = diff; |
| 7108 | } |
| 7109 | |
| 7110 | if (mddev->reshape_position != MaxSector) { |
		/* Check that we can continue the reshape.
		 * Difficulties arise if the stripe we would write to
		 * next is at or after the stripe we would read from next.
		 * For a reshape that changes the number of devices, this
		 * is only possible for a very short time, and mdadm makes
		 * sure that time appears to have passed before assembling
		 * the array. So we fail if that time hasn't passed.
		 * For a reshape that keeps the number of devices the same,
		 * mdadm must be monitoring the reshape and keeping the
		 * critical areas read-only and backed up. It will start
		 * the array in read-only mode, so we check for that.
		 */
| 7123 | sector_t here_new, here_old; |
| 7124 | int old_disks; |
| 7125 | int max_degraded = (mddev->level == 6 ? 2 : 1); |
| 7126 | int chunk_sectors; |
| 7127 | int new_data_disks; |
| 7128 | |
| 7129 | if (journal_dev) { |
| 7130 | pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", |
| 7131 | mdname(mddev)); |
| 7132 | return -EINVAL; |
| 7133 | } |
| 7134 | |
| 7135 | if (mddev->new_level != mddev->level) { |
| 7136 | pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", |
| 7137 | mdname(mddev)); |
| 7138 | return -EINVAL; |
| 7139 | } |
| 7140 | old_disks = mddev->raid_disks - mddev->delta_disks; |
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 * If the chunk sizes are different, then as we perform reshape
		 * in units of the largest of the two, reshape_position needs
		 * to be a multiple of the largest chunk size times new data
		 * disks.
		 */
| 7148 | here_new = mddev->reshape_position; |
| 7149 | chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); |
| 7150 | new_data_disks = mddev->raid_disks - max_degraded; |
| 7151 | if (sector_div(here_new, chunk_sectors * new_data_disks)) { |
| 7152 | pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", |
| 7153 | mdname(mddev)); |
| 7154 | return -EINVAL; |
| 7155 | } |
| 7156 | reshape_offset = here_new * chunk_sectors; |
| 7157 | /* here_new is the stripe we will write to */ |
| 7158 | here_old = mddev->reshape_position; |
| 7159 | sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); |
| 7160 | /* here_old is the first stripe that we might need to read |
| 7161 | * from */ |
| 7162 | if (mddev->delta_disks == 0) { |
| 7163 | /* We cannot be sure it is safe to start an in-place |
| 7164 | * reshape. It is only safe if user-space is monitoring |
| 7165 | * and taking constant backups. |
| 7166 | * mdadm always starts a situation like this in |
| 7167 | * readonly mode so it can take control before |
| 7168 | * allowing any writes. So just check for that. |
| 7169 | */ |
| 7170 | if (abs(min_offset_diff) >= mddev->chunk_sectors && |
| 7171 | abs(min_offset_diff) >= mddev->new_chunk_sectors) |
| 7172 | /* not really in-place - so OK */; |
| 7173 | else if (mddev->ro == 0) { |
| 7174 | pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", |
| 7175 | mdname(mddev)); |
| 7176 | return -EINVAL; |
| 7177 | } |
| 7178 | } else if (mddev->reshape_backwards |
| 7179 | ? (here_new * chunk_sectors + min_offset_diff <= |
| 7180 | here_old * chunk_sectors) |
| 7181 | : (here_new * chunk_sectors >= |
| 7182 | here_old * chunk_sectors + (-min_offset_diff))) { |
| 7183 | /* Reading from the same stripe as writing to - bad */ |
| 7184 | pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n", |
| 7185 | mdname(mddev)); |
| 7186 | return -EINVAL; |
| 7187 | } |
| 7188 | pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); |
| 7189 | /* OK, we should be able to continue; */ |
| 7190 | } else { |
| 7191 | BUG_ON(mddev->level != mddev->new_level); |
| 7192 | BUG_ON(mddev->layout != mddev->new_layout); |
| 7193 | BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); |
| 7194 | BUG_ON(mddev->delta_disks != 0); |
| 7195 | } |
| 7196 | |
| 7197 | if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && |
| 7198 | test_bit(MD_HAS_PPL, &mddev->flags)) { |
| 7199 | pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n", |
| 7200 | mdname(mddev)); |
| 7201 | clear_bit(MD_HAS_PPL, &mddev->flags); |
| 7202 | } |
| 7203 | |
| 7204 | if (mddev->private == NULL) |
| 7205 | conf = setup_conf(mddev); |
| 7206 | else |
| 7207 | conf = mddev->private; |
| 7208 | |
| 7209 | if (IS_ERR(conf)) |
| 7210 | return PTR_ERR(conf); |
| 7211 | |
| 7212 | if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { |
| 7213 | if (!journal_dev) { |
| 7214 | pr_warn("md/raid:%s: journal disk is missing, force array readonly\n", |
| 7215 | mdname(mddev)); |
| 7216 | mddev->ro = 1; |
| 7217 | set_disk_ro(mddev->gendisk, 1); |
| 7218 | } else if (mddev->recovery_cp == MaxSector) |
| 7219 | set_bit(MD_JOURNAL_CLEAN, &mddev->flags); |
| 7220 | } |
| 7221 | |
| 7222 | conf->min_offset_diff = min_offset_diff; |
| 7223 | mddev->thread = conf->thread; |
| 7224 | conf->thread = NULL; |
| 7225 | mddev->private = conf; |
| 7226 | |
| 7227 | for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; |
| 7228 | i++) { |
| 7229 | rdev = conf->disks[i].rdev; |
| 7230 | if (!rdev && conf->disks[i].replacement) { |
| 7231 | /* The replacement is all we have yet */ |
| 7232 | rdev = conf->disks[i].replacement; |
| 7233 | conf->disks[i].replacement = NULL; |
| 7234 | clear_bit(Replacement, &rdev->flags); |
| 7235 | conf->disks[i].rdev = rdev; |
| 7236 | } |
| 7237 | if (!rdev) |
| 7238 | continue; |
| 7239 | if (conf->disks[i].replacement && |
| 7240 | conf->reshape_progress != MaxSector) { |
| 7241 | /* replacements and reshape simply do not mix. */ |
| 7242 | pr_warn("md: cannot handle concurrent replacement and reshape.\n"); |
| 7243 | goto abort; |
| 7244 | } |
| 7245 | if (test_bit(In_sync, &rdev->flags)) { |
| 7246 | working_disks++; |
| 7247 | continue; |
| 7248 | } |
		/* This disc is not fully in-sync. However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
| 7257 | /* Hack because v0.91 doesn't store recovery_offset properly. */ |
| 7258 | if (mddev->major_version == 0 && |
| 7259 | mddev->minor_version > 90) |
| 7260 | rdev->recovery_offset = reshape_offset; |
| 7261 | |
| 7262 | if (rdev->recovery_offset < reshape_offset) { |
| 7263 | /* We need to check old and new layout */ |
| 7264 | if (!only_parity(rdev->raid_disk, |
| 7265 | conf->algorithm, |
| 7266 | conf->raid_disks, |
| 7267 | conf->max_degraded)) |
| 7268 | continue; |
| 7269 | } |
| 7270 | if (!only_parity(rdev->raid_disk, |
| 7271 | conf->prev_algo, |
| 7272 | conf->previous_raid_disks, |
| 7273 | conf->max_degraded)) |
| 7274 | continue; |
| 7275 | dirty_parity_disks++; |
| 7276 | } |
| 7277 | |
| 7278 | /* |
| 7279 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
| 7280 | */ |
| 7281 | mddev->degraded = raid5_calc_degraded(conf); |
| 7282 | |
| 7283 | if (has_failed(conf)) { |
| 7284 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", |
| 7285 | mdname(mddev), mddev->degraded, conf->raid_disks); |
| 7286 | goto abort; |
| 7287 | } |
| 7288 | |
| 7289 | /* device size must be a multiple of chunk size */ |
| 7290 | mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); |
| 7291 | mddev->resync_max_sectors = mddev->dev_sectors; |
| 7292 | |
| 7293 | if (mddev->degraded > dirty_parity_disks && |
| 7294 | mddev->recovery_cp != MaxSector) { |
| 7295 | if (test_bit(MD_HAS_PPL, &mddev->flags)) |
| 7296 | pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n", |
| 7297 | mdname(mddev)); |
| 7298 | else if (mddev->ok_start_degraded) |
| 7299 | pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n", |
| 7300 | mdname(mddev)); |
| 7301 | else { |
| 7302 | pr_crit("md/raid:%s: cannot start dirty degraded array.\n", |
| 7303 | mdname(mddev)); |
| 7304 | goto abort; |
| 7305 | } |
| 7306 | } |
| 7307 | |
| 7308 | pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n", |
| 7309 | mdname(mddev), conf->level, |
| 7310 | mddev->raid_disks-mddev->degraded, mddev->raid_disks, |
| 7311 | mddev->new_layout); |
| 7312 | |
| 7313 | print_raid5_conf(conf); |
| 7314 | |
| 7315 | if (conf->reshape_progress != MaxSector) { |
| 7316 | conf->reshape_safe = conf->reshape_progress; |
| 7317 | atomic_set(&conf->reshape_stripes, 0); |
| 7318 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 7319 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 7320 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
| 7321 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 7322 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
| 7323 | "reshape"); |
| 7324 | } |
| 7325 | |
| 7326 | /* Ok, everything is just fine now */ |
| 7327 | if (mddev->to_remove == &raid5_attrs_group) |
| 7328 | mddev->to_remove = NULL; |
| 7329 | else if (mddev->kobj.sd && |
| 7330 | sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) |
| 7331 | pr_warn("raid5: failed to create sysfs attributes for %s\n", |
| 7332 | mdname(mddev)); |
| 7333 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
| 7334 | |
| 7335 | if (mddev->queue) { |
| 7336 | int chunk_size; |
| 7337 | bool discard_supported = true; |
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (datadisks) * chunksize, where datadisks is the
		 * number of raid devices not used for parity.
		 */
| 7342 | int data_disks = conf->previous_raid_disks - conf->max_degraded; |
| 7343 | int stripe = data_disks * |
| 7344 | ((mddev->chunk_sectors << 9) / PAGE_SIZE); |
| 7345 | if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) |
| 7346 | mddev->queue->backing_dev_info->ra_pages = 2 * stripe; |
| 7347 | |
| 7348 | chunk_size = mddev->chunk_sectors << 9; |
| 7349 | blk_queue_io_min(mddev->queue, chunk_size); |
| 7350 | blk_queue_io_opt(mddev->queue, chunk_size * |
| 7351 | (conf->raid_disks - conf->max_degraded)); |
| 7352 | mddev->queue->limits.raid_partial_stripes_expensive = 1; |
| 7353 | /* |
| 7354 | * We can only discard a whole stripe. It doesn't make sense to |
| 7355 | * discard data disk but write parity disk |
| 7356 | */ |
| 7357 | stripe = stripe * PAGE_SIZE; |
| 7358 | /* Round up to power of 2, as discard handling |
| 7359 | * currently assumes that */ |
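		/* e.g. stripe = 0x6000: 0x6000 | 0x5fff = 0x7fff, +1 = 0x8000 */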
| 7360 | while ((stripe-1) & stripe) |
| 7361 | stripe = (stripe | (stripe-1)) + 1; |
| 7362 | mddev->queue->limits.discard_alignment = stripe; |
| 7363 | mddev->queue->limits.discard_granularity = stripe; |
| 7364 | /* |
| 7365 | * unaligned part of discard request will be ignored, so can't |
| 7366 | * guarantee discard_zeroes_data |
| 7367 | */ |
| 7368 | mddev->queue->limits.discard_zeroes_data = 0; |
| 7369 | |
| 7370 | blk_queue_max_write_same_sectors(mddev->queue, 0); |
| 7371 | |
| 7372 | rdev_for_each(rdev, mddev) { |
| 7373 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
| 7374 | rdev->data_offset << 9); |
| 7375 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
| 7376 | rdev->new_data_offset << 9); |
| 7377 | /* |
| 7378 | * discard_zeroes_data is required, otherwise data |
| 7379 | * could be lost. Consider a scenario: discard a stripe |
| 7380 | * (the stripe could be inconsistent if |
| 7381 | * discard_zeroes_data is 0); write one disk of the |
| 7382 | * stripe (the stripe could be inconsistent again |
| 7383 | * depending on which disks are used to calculate |
| 7384 | * parity); the disk is broken; The stripe data of this |
| 7385 | * disk is lost. |
| 7386 | */ |
| 7387 | if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || |
| 7388 | !bdev_get_queue(rdev->bdev)-> |
| 7389 | limits.discard_zeroes_data) |
| 7390 | discard_supported = false; |
| 7391 | /* Unfortunately, discard_zeroes_data is not currently |
| 7392 | * a guarantee - just a hint. So we only allow DISCARD |
| 7393 | * if the sysadmin has confirmed that only safe devices |
| 7394 | * are in use by setting a module parameter. |
| 7395 | */ |
| 7396 | if (!devices_handle_discard_safely) { |
| 7397 | if (discard_supported) { |
| 7398 | pr_info("md/raid456: discard support disabled due to uncertainty.\n"); |
| 7399 | pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); |
| 7400 | } |
| 7401 | discard_supported = false; |
| 7402 | } |
| 7403 | } |
| 7404 | |
| 7405 | if (discard_supported && |
| 7406 | mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && |
| 7407 | mddev->queue->limits.discard_granularity >= stripe) |
| 7408 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, |
| 7409 | mddev->queue); |
| 7410 | else |
| 7411 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, |
| 7412 | mddev->queue); |
| 7413 | |
| 7414 | blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); |
| 7415 | } |
| 7416 | |
| 7417 | if (log_init(conf, journal_dev)) |
| 7418 | goto abort; |
| 7419 | |
| 7420 | return 0; |
| 7421 | abort: |
| 7422 | md_unregister_thread(&mddev->thread); |
| 7423 | print_raid5_conf(conf); |
| 7424 | free_conf(conf); |
| 7425 | mddev->private = NULL; |
| 7426 | pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); |
| 7427 | return -EIO; |
| 7428 | } |
| 7429 | |
| 7430 | static void raid5_free(struct mddev *mddev, void *priv) |
| 7431 | { |
| 7432 | struct r5conf *conf = priv; |
| 7433 | |
| 7434 | free_conf(conf); |
| 7435 | mddev->to_remove = &raid5_attrs_group; |
| 7436 | } |
| 7437 | |
| 7438 | static void raid5_status(struct seq_file *seq, struct mddev *mddev) |
| 7439 | { |
| 7440 | struct r5conf *conf = mddev->private; |
| 7441 | int i; |
| 7442 | |
| 7443 | seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, |
| 7444 | conf->chunk_sectors / 2, mddev->layout); |
| 7445 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); |
| 7446 | rcu_read_lock(); |
| 7447 | for (i = 0; i < conf->raid_disks; i++) { |
| 7448 | struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); |
| 7449 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); |
| 7450 | } |
| 7451 | rcu_read_unlock(); |
| 7452 | seq_printf (seq, "]"); |
| 7453 | } |
| 7454 | |
| 7455 | static void print_raid5_conf (struct r5conf *conf) |
| 7456 | { |
| 7457 | int i; |
| 7458 | struct disk_info *tmp; |
| 7459 | |
| 7460 | pr_debug("RAID conf printout:\n"); |
| 7461 | if (!conf) { |
| 7462 | pr_debug("(conf==NULL)\n"); |
| 7463 | return; |
| 7464 | } |
| 7465 | pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, |
| 7466 | conf->raid_disks, |
| 7467 | conf->raid_disks - conf->mddev->degraded); |
| 7468 | |
| 7469 | for (i = 0; i < conf->raid_disks; i++) { |
| 7470 | char b[BDEVNAME_SIZE]; |
| 7471 | tmp = conf->disks + i; |
| 7472 | if (tmp->rdev) |
| 7473 | pr_debug(" disk %d, o:%d, dev:%s\n", |
| 7474 | i, !test_bit(Faulty, &tmp->rdev->flags), |
| 7475 | bdevname(tmp->rdev->bdev, b)); |
| 7476 | } |
| 7477 | } |
| 7478 | |
| 7479 | static int raid5_spare_active(struct mddev *mddev) |
| 7480 | { |
| 7481 | int i; |
| 7482 | struct r5conf *conf = mddev->private; |
| 7483 | struct disk_info *tmp; |
| 7484 | int count = 0; |
| 7485 | unsigned long flags; |
| 7486 | |
| 7487 | for (i = 0; i < conf->raid_disks; i++) { |
| 7488 | tmp = conf->disks + i; |
| 7489 | if (tmp->replacement |
| 7490 | && tmp->replacement->recovery_offset == MaxSector |
| 7491 | && !test_bit(Faulty, &tmp->replacement->flags) |
| 7492 | && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { |
| 7493 | /* Replacement has just become active. */ |
| 7494 | if (!tmp->rdev |
| 7495 | || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) |
| 7496 | count++; |
| 7497 | if (tmp->rdev) { |
| 7498 | /* Replaced device not technically faulty, |
| 7499 | * but we need to be sure it gets removed |
| 7500 | * and never re-added. |
| 7501 | */ |
| 7502 | set_bit(Faulty, &tmp->rdev->flags); |
| 7503 | sysfs_notify_dirent_safe( |
| 7504 | tmp->rdev->sysfs_state); |
| 7505 | } |
| 7506 | sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); |
| 7507 | } else if (tmp->rdev |
| 7508 | && tmp->rdev->recovery_offset == MaxSector |
| 7509 | && !test_bit(Faulty, &tmp->rdev->flags) |
| 7510 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { |
| 7511 | count++; |
| 7512 | sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); |
| 7513 | } |
| 7514 | } |
| 7515 | spin_lock_irqsave(&conf->device_lock, flags); |
| 7516 | mddev->degraded = raid5_calc_degraded(conf); |
| 7517 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 7518 | print_raid5_conf(conf); |
| 7519 | return count; |
| 7520 | } |
| 7521 | |
| 7522 | static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) |
| 7523 | { |
| 7524 | struct r5conf *conf = mddev->private; |
| 7525 | int err = 0; |
| 7526 | int number = rdev->raid_disk; |
| 7527 | struct md_rdev **rdevp; |
| 7528 | struct disk_info *p = conf->disks + number; |
| 7529 | |
| 7530 | print_raid5_conf(conf); |
| 7531 | if (test_bit(Journal, &rdev->flags) && conf->log) { |
| 7532 | /* |
| 7533 | * we can't wait pending write here, as this is called in |
| 7534 | * raid5d, wait will deadlock. |
| 7535 | * neilb: there is no locking about new writes here, |
| 7536 | * so this cannot be safe. |
| 7537 | */ |
| 7538 | if (atomic_read(&conf->active_stripes)) { |
| 7539 | return -EBUSY; |
| 7540 | } |
| 7541 | log_exit(conf); |
| 7542 | return 0; |
| 7543 | } |
| 7544 | if (rdev == p->rdev) |
| 7545 | rdevp = &p->rdev; |
| 7546 | else if (rdev == p->replacement) |
| 7547 | rdevp = &p->replacement; |
| 7548 | else |
| 7549 | return 0; |
| 7550 | |
| 7551 | if (number >= conf->raid_disks && |
| 7552 | conf->reshape_progress == MaxSector) |
| 7553 | clear_bit(In_sync, &rdev->flags); |
| 7554 | |
| 7555 | if (test_bit(In_sync, &rdev->flags) || |
| 7556 | atomic_read(&rdev->nr_pending)) { |
| 7557 | err = -EBUSY; |
| 7558 | goto abort; |
| 7559 | } |
| 7560 | /* Only remove non-faulty devices if recovery |
| 7561 | * isn't possible. |
| 7562 | */ |
| 7563 | if (!test_bit(Faulty, &rdev->flags) && |
| 7564 | mddev->recovery_disabled != conf->recovery_disabled && |
| 7565 | !has_failed(conf) && |
| 7566 | (!p->replacement || p->replacement == rdev) && |
| 7567 | number < conf->raid_disks) { |
| 7568 | err = -EBUSY; |
| 7569 | goto abort; |
| 7570 | } |
| 7571 | *rdevp = NULL; |
| 7572 | if (!test_bit(RemoveSynchronized, &rdev->flags)) { |
| 7573 | synchronize_rcu(); |
| 7574 | if (atomic_read(&rdev->nr_pending)) { |
| 7575 | /* lost the race, try later */ |
| 7576 | err = -EBUSY; |
| 7577 | *rdevp = rdev; |
| 7578 | } |
| 7579 | } |
| 7580 | if (!err) { |
| 7581 | err = log_modify(conf, rdev, false); |
| 7582 | if (err) |
| 7583 | goto abort; |
| 7584 | } |
| 7585 | if (p->replacement) { |
| 7586 | /* We must have just cleared 'rdev' */ |
| 7587 | p->rdev = p->replacement; |
| 7588 | clear_bit(Replacement, &p->replacement->flags); |
| 7589 | smp_mb(); /* Make sure other CPUs may see both as identical |
| 7590 | * but will never see neither - if they are careful |
| 7591 | */ |
| 7592 | p->replacement = NULL; |
| 7593 | clear_bit(WantReplacement, &rdev->flags); |
| 7594 | |
| 7595 | if (!err) |
| 7596 | err = log_modify(conf, p->rdev, true); |
| 7597 | } else |
		/* We might have just removed the Replacement as faulty -
		 * clear the bit just in case.
		 */
| 7601 | clear_bit(WantReplacement, &rdev->flags); |
| 7602 | abort: |
| 7603 | |
| 7604 | print_raid5_conf(conf); |
| 7605 | return err; |
| 7606 | } |
| 7607 | |
| 7608 | static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
| 7609 | { |
| 7610 | struct r5conf *conf = mddev->private; |
| 7611 | int err = -EEXIST; |
| 7612 | int disk; |
| 7613 | struct disk_info *p; |
| 7614 | int first = 0; |
| 7615 | int last = conf->raid_disks - 1; |
| 7616 | |
| 7617 | if (test_bit(Journal, &rdev->flags)) { |
| 7618 | if (conf->log) |
| 7619 | return -EBUSY; |
| 7620 | |
| 7621 | rdev->raid_disk = 0; |
| 7622 | /*
| 7623 | * The array is in read-only mode if the journal is missing, so no
| 7624 | * write requests are running. We should be safe.
| 7625 | */
| 7626 | log_init(conf, rdev); |
| 7627 | return 0; |
| 7628 | } |
| 7629 | if (mddev->recovery_disabled == conf->recovery_disabled) |
| 7630 | return -EBUSY; |
| 7631 | |
| 7632 | if (rdev->saved_raid_disk < 0 && has_failed(conf)) |
| 7633 | /* no point adding a device */ |
| 7634 | return -EINVAL; |
| 7635 | |
| 7636 | if (rdev->raid_disk >= 0) |
| 7637 | first = last = rdev->raid_disk; |
| 7638 | |
| 7639 | /* |
| 7640 | * find the disk ... but prefer rdev->saved_raid_disk |
| 7641 | * if possible. |
| 7642 | */ |
| 7643 | if (rdev->saved_raid_disk >= 0 && |
| 7644 | rdev->saved_raid_disk >= first && |
| 7645 | conf->disks[rdev->saved_raid_disk].rdev == NULL) |
| 7646 | first = rdev->saved_raid_disk; |
| 7647 | |
| 7648 | for (disk = first; disk <= last; disk++) { |
| 7649 | p = conf->disks + disk; |
| 7650 | if (p->rdev == NULL) { |
| 7651 | clear_bit(In_sync, &rdev->flags); |
| 7652 | rdev->raid_disk = disk; |
| 7653 | if (rdev->saved_raid_disk != disk) |
| 7654 | conf->fullsync = 1; |
| 7655 | rcu_assign_pointer(p->rdev, rdev); |
| 7656 | |
| 7657 | err = log_modify(conf, rdev, true); |
| 7658 | |
| 7659 | goto out; |
| 7660 | } |
| 7661 | } |
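/*
 * No empty slot was found above: check whether any active device has
 * requested a replacement and, if so, install the new device as that
 * replacement instead.
 */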
| 7662 | for (disk = first; disk <= last; disk++) { |
| 7663 | p = conf->disks + disk; |
| 7664 | if (test_bit(WantReplacement, &p->rdev->flags) && |
| 7665 | p->replacement == NULL) { |
| 7666 | clear_bit(In_sync, &rdev->flags); |
| 7667 | set_bit(Replacement, &rdev->flags); |
| 7668 | rdev->raid_disk = disk; |
| 7669 | err = 0; |
| 7670 | conf->fullsync = 1; |
| 7671 | rcu_assign_pointer(p->replacement, rdev); |
| 7672 | break; |
| 7673 | } |
| 7674 | } |
| 7675 | out: |
| 7676 | print_raid5_conf(conf); |
| 7677 | return err; |
| 7678 | } |
| 7679 | |
| 7680 | static int raid5_resize(struct mddev *mddev, sector_t sectors) |
| 7681 | { |
| 7682 | /* no resync is happening, and there is enough space |
| 7683 | * on all devices, so we can resize. |
| 7684 | * We need to make sure resync covers any new space. |
| 7685 | * If the array is shrinking we should possibly wait until |
| 7686 | * any I/O in the removed space completes, but it hardly seems
| 7687 | * worth it. |
| 7688 | */ |
| 7689 | sector_t newsize; |
| 7690 | struct r5conf *conf = mddev->private; |
| 7691 | |
| 7692 | if (conf->log || raid5_has_ppl(conf)) |
| 7693 | return -EINVAL; |
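/*
 * Round the requested size down to a whole number of chunks; the chunk
 * size is a power of two for raid4/5/6, so the mask below simply clears
 * any partial-chunk remainder.
 */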
| 7694 | sectors &= ~((sector_t)conf->chunk_sectors - 1); |
| 7695 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); |
| 7696 | if (mddev->external_size && |
| 7697 | mddev->array_sectors > newsize) |
| 7698 | return -EINVAL; |
| 7699 | if (mddev->bitmap) { |
| 7700 | int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); |
| 7701 | if (ret) |
| 7702 | return ret; |
| 7703 | } |
| 7704 | md_set_array_sectors(mddev, newsize); |
| 7705 | if (sectors > mddev->dev_sectors && |
| 7706 | mddev->recovery_cp > mddev->dev_sectors) { |
| 7707 | mddev->recovery_cp = mddev->dev_sectors; |
| 7708 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
| 7709 | } |
| 7710 | mddev->dev_sectors = sectors; |
| 7711 | mddev->resync_max_sectors = sectors; |
| 7712 | return 0; |
| 7713 | } |
| 7714 | |
| 7715 | static int check_stripe_cache(struct mddev *mddev) |
| 7716 | { |
| 7717 | /* Can only proceed if there are plenty of stripe_heads. |
| 7718 | * We need a minimum of one full stripe, and for sensible progress
| 7719 | * it is best to have about 4 times that. |
| 7720 | * If we require 4 times, then the default 256 4K stripe_heads will |
| 7721 | * allow for chunk sizes up to 256K, which is probably OK. |
| 7722 | * If the chunk size is greater, user-space should request more |
| 7723 | * stripe_heads first. |
| 7724 | */ |
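/*
 * Illustrative arithmetic, assuming the usual 4K STRIPE_SIZE: a 512K
 * chunk spans 128 stripe-sized pages, so 4x coverage needs 512
 * stripe_heads - more than the default 256 - and the reshape is refused
 * until stripe_cache_size is raised.  A 128K chunk needs only
 * 32 * 4 = 128 stripe_heads and passes with the default cache.
 */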
| 7725 | struct r5conf *conf = mddev->private; |
| 7726 | if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 |
| 7727 | > conf->min_nr_stripes || |
| 7728 | ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 |
| 7729 | > conf->min_nr_stripes) { |
| 7730 | pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n", |
| 7731 | mdname(mddev), |
| 7732 | ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) |
| 7733 | / STRIPE_SIZE)*4); |
| 7734 | return 0; |
| 7735 | } |
| 7736 | return 1; |
| 7737 | } |
| 7738 | |
| 7739 | static int check_reshape(struct mddev *mddev) |
| 7740 | { |
| 7741 | struct r5conf *conf = mddev->private; |
| 7742 | |
| 7743 | if (conf->log || raid5_has_ppl(conf)) |
| 7744 | return -EINVAL; |
| 7745 | if (mddev->delta_disks == 0 && |
| 7746 | mddev->new_layout == mddev->layout && |
| 7747 | mddev->new_chunk_sectors == mddev->chunk_sectors) |
| 7748 | return 0; /* nothing to do */ |
| 7749 | if (has_failed(conf)) |
| 7750 | return -EINVAL; |
| 7751 | if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { |
| 7752 | /* We might be able to shrink, but the devices must |
| 7753 | * be made bigger first. |
| 7754 | * For raid6, 4 is the minimum number of devices.
| 7755 | * Otherwise 2 is the minimum.
| 7756 | */ |
| 7757 | int min = 2; |
| 7758 | if (mddev->level == 6) |
| 7759 | min = 4; |
| 7760 | if (mddev->raid_disks + mddev->delta_disks < min) |
| 7761 | return -EINVAL; |
| 7762 | } |
| 7763 | |
| 7764 | if (!check_stripe_cache(mddev)) |
| 7765 | return -ENOSPC; |
| 7766 | |
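/*
 * Growing the chunk size or the number of devices means each per-CPU
 * scratch/scribble buffer must cover the larger geometry, so resize
 * those first, then the stripe_heads themselves.
 */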
| 7767 | if (mddev->new_chunk_sectors > mddev->chunk_sectors || |
| 7768 | mddev->delta_disks > 0) |
| 7769 | if (resize_chunks(conf, |
| 7770 | conf->previous_raid_disks |
| 7771 | + max(0, mddev->delta_disks), |
| 7772 | max(mddev->new_chunk_sectors, |
| 7773 | mddev->chunk_sectors) |
| 7774 | ) < 0) |
| 7775 | return -ENOMEM; |
| 7776 | return resize_stripes(conf, (conf->previous_raid_disks |
| 7777 | + mddev->delta_disks)); |
| 7778 | } |
| 7779 | |
| 7780 | static int raid5_start_reshape(struct mddev *mddev) |
| 7781 | { |
| 7782 | struct r5conf *conf = mddev->private; |
| 7783 | struct md_rdev *rdev; |
| 7784 | int spares = 0; |
| 7785 | unsigned long flags; |
| 7786 | |
| 7787 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
| 7788 | return -EBUSY; |
| 7789 | |
| 7790 | if (!check_stripe_cache(mddev)) |
| 7791 | return -ENOSPC; |
| 7792 | |
| 7793 | if (has_failed(conf)) |
| 7794 | return -EINVAL; |
| 7795 | |
| 7796 | rdev_for_each(rdev, mddev) { |
| 7797 | if (!test_bit(In_sync, &rdev->flags) |
| 7798 | && !test_bit(Faulty, &rdev->flags)) |
| 7799 | spares++; |
| 7800 | } |
| 7801 | |
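/*
 * Worked example of the check below: growing a healthy RAID5
 * (max_degraded = 1, degraded = 0) by two disks needs at least one
 * usable spare, since the reshape may start with one of the new slots
 * still missing; growing a RAID6 (max_degraded = 2) by two disks can
 * start with no spares at all.
 */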
| 7802 | if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) |
| 7803 | /* Not enough devices even to make a degraded array |
| 7804 | * of that size |
| 7805 | */ |
| 7806 | return -EINVAL; |
| 7807 | |
| 7808 | /* Refuse to reduce size of the array. Any reductions in |
| 7809 | * array size must be through explicit setting of array_size |
| 7810 | * attribute. |
| 7811 | */ |
| 7812 | if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) |
| 7813 | < mddev->array_sectors) { |
| 7814 | pr_warn("md/raid:%s: array size must be reduced before number of disks\n", |
| 7815 | mdname(mddev)); |
| 7816 | return -EINVAL; |
| 7817 | } |
| 7818 | |
| 7819 | atomic_set(&conf->reshape_stripes, 0); |
| 7820 | spin_lock_irq(&conf->device_lock); |
| 7821 | write_seqcount_begin(&conf->gen_lock); |
| 7822 | conf->previous_raid_disks = conf->raid_disks; |
| 7823 | conf->raid_disks += mddev->delta_disks; |
| 7824 | conf->prev_chunk_sectors = conf->chunk_sectors; |
| 7825 | conf->chunk_sectors = mddev->new_chunk_sectors; |
| 7826 | conf->prev_algo = conf->algorithm; |
| 7827 | conf->algorithm = mddev->new_layout; |
| 7828 | conf->generation++; |
| 7829 | /* Code that selects data_offset needs to see the generation update
| 7830 | * if reshape_progress has been set - so a memory barrier is needed.
| 7831 | */
| 7832 | smp_mb(); |
| 7833 | if (mddev->reshape_backwards) |
| 7834 | conf->reshape_progress = raid5_size(mddev, 0, 0); |
| 7835 | else |
| 7836 | conf->reshape_progress = 0; |
| 7837 | conf->reshape_safe = conf->reshape_progress; |
| 7838 | write_seqcount_end(&conf->gen_lock); |
| 7839 | spin_unlock_irq(&conf->device_lock); |
| 7840 | |
| 7841 | /* Now make sure any requests that proceeded on the assumption |
| 7842 | * the reshape wasn't running - like Discard or Read - have |
| 7843 | * completed. |
| 7844 | */ |
| 7845 | mddev_suspend(mddev); |
| 7846 | mddev_resume(mddev); |
| 7847 | |
| 7848 | /* Add some new drives, as many as will fit. |
| 7849 | * We know there are enough to make the newly sized array work. |
| 7850 | * Don't add devices if we are reducing the number of |
| 7851 | * devices in the array. This is because it is not possible |
| 7852 | * to correctly record the "partially reconstructed" state of |
| 7853 | * such devices during the reshape and confusion could result. |
| 7854 | */ |
| 7855 | if (mddev->delta_disks >= 0) { |
| 7856 | rdev_for_each(rdev, mddev) |
| 7857 | if (rdev->raid_disk < 0 && |
| 7858 | !test_bit(Faulty, &rdev->flags)) { |
| 7859 | if (raid5_add_disk(mddev, rdev) == 0) { |
| 7860 | if (rdev->raid_disk |
| 7861 | >= conf->previous_raid_disks) |
| 7862 | set_bit(In_sync, &rdev->flags); |
| 7863 | else |
| 7864 | rdev->recovery_offset = 0; |
| 7865 | |
| 7866 | if (sysfs_link_rdev(mddev, rdev)) |
| 7867 | /* Failure here is OK */; |
| 7868 | } |
| 7869 | } else if (rdev->raid_disk >= conf->previous_raid_disks |
| 7870 | && !test_bit(Faulty, &rdev->flags)) { |
| 7871 | /* This is a spare that was manually added */ |
| 7872 | set_bit(In_sync, &rdev->flags); |
| 7873 | } |
| 7874 | |
| 7875 | /* When a reshape changes the number of devices, |
| 7876 | * ->degraded is measured against the larger of the |
| 7877 | * pre and post number of devices. |
| 7878 | */ |
| 7879 | spin_lock_irqsave(&conf->device_lock, flags); |
| 7880 | mddev->degraded = raid5_calc_degraded(conf); |
| 7881 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 7882 | } |
| 7883 | mddev->raid_disks = conf->raid_disks; |
| 7884 | mddev->reshape_position = conf->reshape_progress; |
| 7885 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); |
| 7886 | |
| 7887 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 7888 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 7889 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); |
| 7890 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
| 7891 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 7892 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
| 7893 | "reshape"); |
| 7894 | if (!mddev->sync_thread) { |
| 7895 | mddev->recovery = 0; |
| 7896 | spin_lock_irq(&conf->device_lock); |
| 7897 | write_seqcount_begin(&conf->gen_lock); |
| 7898 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; |
| 7899 | mddev->new_chunk_sectors = |
| 7900 | conf->chunk_sectors = conf->prev_chunk_sectors; |
| 7901 | mddev->new_layout = conf->algorithm = conf->prev_algo; |
| 7902 | rdev_for_each(rdev, mddev) |
| 7903 | rdev->new_data_offset = rdev->data_offset; |
| 7904 | smp_wmb(); |
| 7905 | conf->generation--;
| 7906 | conf->reshape_progress = MaxSector; |
| 7907 | mddev->reshape_position = MaxSector; |
| 7908 | write_seqcount_end(&conf->gen_lock); |
| 7909 | spin_unlock_irq(&conf->device_lock); |
| 7910 | return -EAGAIN; |
| 7911 | } |
| 7912 | conf->reshape_checkpoint = jiffies; |
| 7913 | md_wakeup_thread(mddev->sync_thread); |
| 7914 | md_new_event(mddev); |
| 7915 | return 0; |
| 7916 | } |
| 7917 | |
| 7918 | /* This is called from the reshape thread and should make any |
| 7919 | * changes needed in 'conf' |
| 7920 | */ |
| 7921 | static void end_reshape(struct r5conf *conf) |
| 7922 | { |
| 7923 | |
| 7924 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { |
| 7925 | struct md_rdev *rdev; |
| 7926 | |
| 7927 | spin_lock_irq(&conf->device_lock); |
| 7928 | conf->previous_raid_disks = conf->raid_disks; |
| 7929 | rdev_for_each(rdev, conf->mddev) |
| 7930 | rdev->data_offset = rdev->new_data_offset; |
| 7931 | smp_wmb(); |
| 7932 | conf->reshape_progress = MaxSector; |
| 7933 | conf->mddev->reshape_position = MaxSector; |
| 7934 | spin_unlock_irq(&conf->device_lock); |
| 7935 | wake_up(&conf->wait_for_overlap); |
| 7936 | |
| 7937 | /* read-ahead size must cover two whole stripes, which is
| 7938 | * 2 * (data disks) * chunksize, where data disks = raid_disks - max_degraded
| 7939 | */
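/*
 * For example, assuming 4K pages: with 6 data disks and 512K chunks,
 * one stripe is 6 * 128 = 768 pages, so ra_pages is raised to at least
 * 1536 pages (6 MiB) if it is currently smaller.
 */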
| 7940 | if (conf->mddev->queue) { |
| 7941 | int data_disks = conf->raid_disks - conf->max_degraded; |
| 7942 | int stripe = data_disks * ((conf->chunk_sectors << 9) |
| 7943 | / PAGE_SIZE); |
| 7944 | if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) |
| 7945 | conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; |
| 7946 | } |
| 7947 | } |
| 7948 | } |
| 7949 | |
| 7950 | /* This is called from the raid5d thread with mddev_lock held. |
| 7951 | * It makes config changes to the device. |
| 7952 | */ |
| 7953 | static void raid5_finish_reshape(struct mddev *mddev) |
| 7954 | { |
| 7955 | struct r5conf *conf = mddev->private; |
| 7956 | |
| 7957 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
| 7958 | |
| 7959 | if (mddev->delta_disks > 0) { |
| 7960 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
| 7961 | if (mddev->queue) { |
| 7962 | set_capacity(mddev->gendisk, mddev->array_sectors); |
| 7963 | revalidate_disk(mddev->gendisk); |
| 7964 | } |
| 7965 | } else { |
| 7966 | int d; |
| 7967 | spin_lock_irq(&conf->device_lock); |
| 7968 | mddev->degraded = raid5_calc_degraded(conf); |
| 7969 | spin_unlock_irq(&conf->device_lock); |
| 7970 | for (d = conf->raid_disks ; |
| 7971 | d < conf->raid_disks - mddev->delta_disks; |
| 7972 | d++) { |
| 7973 | struct md_rdev *rdev = conf->disks[d].rdev; |
| 7974 | if (rdev) |
| 7975 | clear_bit(In_sync, &rdev->flags); |
| 7976 | rdev = conf->disks[d].replacement; |
| 7977 | if (rdev) |
| 7978 | clear_bit(In_sync, &rdev->flags); |
| 7979 | } |
| 7980 | } |
| 7981 | mddev->layout = conf->algorithm; |
| 7982 | mddev->chunk_sectors = conf->chunk_sectors; |
| 7983 | mddev->reshape_position = MaxSector; |
| 7984 | mddev->delta_disks = 0; |
| 7985 | mddev->reshape_backwards = 0; |
| 7986 | } |
| 7987 | } |
| 7988 | |
| 7989 | static void raid5_quiesce(struct mddev *mddev, int state) |
| 7990 | { |
| 7991 | struct r5conf *conf = mddev->private; |
| 7992 | |
| 7993 | switch(state) { |
| 7994 | case 2: /* resume for a suspend */ |
| 7995 | wake_up(&conf->wait_for_overlap); |
| 7996 | break; |
| 7997 | |
| 7998 | case 1: /* stop all writes */ |
| 7999 | lock_all_device_hash_locks_irq(conf); |
| 8000 | /* '2' tells resync/reshape to pause so that all |
| 8001 | * active stripes can drain |
| 8002 | */ |
| 8003 | r5c_flush_cache(conf, INT_MAX); |
| 8004 | conf->quiesce = 2; |
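/*
 * wait_event_cmd() runs the first cmd (unlock) before sleeping and the
 * second (lock) after waking, so the hash locks are dropped while we
 * wait and stripe handling can keep draining active_stripes.
 */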
| 8005 | wait_event_cmd(conf->wait_for_quiescent, |
| 8006 | atomic_read(&conf->active_stripes) == 0 && |
| 8007 | atomic_read(&conf->active_aligned_reads) == 0, |
| 8008 | unlock_all_device_hash_locks_irq(conf), |
| 8009 | lock_all_device_hash_locks_irq(conf)); |
| 8010 | conf->quiesce = 1; |
| 8011 | unlock_all_device_hash_locks_irq(conf); |
| 8012 | /* allow reshape to continue */ |
| 8013 | wake_up(&conf->wait_for_overlap); |
| 8014 | break; |
| 8015 | |
| 8016 | case 0: /* re-enable writes */ |
| 8017 | lock_all_device_hash_locks_irq(conf); |
| 8018 | conf->quiesce = 0; |
| 8019 | wake_up(&conf->wait_for_quiescent); |
| 8020 | wake_up(&conf->wait_for_overlap); |
| 8021 | unlock_all_device_hash_locks_irq(conf); |
| 8022 | break; |
| 8023 | } |
| 8024 | r5l_quiesce(conf->log, state); |
| 8025 | } |
| 8026 | |
| 8027 | static void *raid45_takeover_raid0(struct mddev *mddev, int level) |
| 8028 | { |
| 8029 | struct r0conf *raid0_conf = mddev->private; |
| 8030 | sector_t sectors; |
| 8031 | |
| 8032 | /* for raid0 takeover only one zone is supported */ |
| 8033 | if (raid0_conf->nr_strip_zones > 1) { |
| 8034 | pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n", |
| 8035 | mdname(mddev)); |
| 8036 | return ERR_PTR(-EINVAL); |
| 8037 | } |
| 8038 | |
| 8039 | sectors = raid0_conf->strip_zone[0].zone_end; |
| 8040 | sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); |
| 8041 | mddev->dev_sectors = sectors; |
| 8042 | mddev->new_level = level; |
| 8043 | mddev->new_layout = ALGORITHM_PARITY_N; |
| 8044 | mddev->new_chunk_sectors = mddev->chunk_sectors; |
| 8045 | mddev->raid_disks += 1; |
| 8046 | mddev->delta_disks = 1; |
| 8047 | /* make sure it will not be marked as dirty */
| 8048 | mddev->recovery_cp = MaxSector; |
| 8049 | |
| 8050 | return setup_conf(mddev); |
| 8051 | } |
| 8052 | |
| 8053 | static void *raid5_takeover_raid1(struct mddev *mddev) |
| 8054 | { |
| 8055 | int chunksect; |
| 8056 | void *ret; |
| 8057 | |
| 8058 | if (mddev->raid_disks != 2 || |
| 8059 | mddev->degraded > 1) |
| 8060 | return ERR_PTR(-EINVAL); |
| 8061 | |
| 8062 | /* Should check if there are write-behind devices? */ |
| 8063 | |
| 8064 | chunksect = 64*2; /* 64K by default */ |
| 8065 | |
| 8066 | /* The array must be an exact multiple of chunksize */ |
| 8067 | while (chunksect && (mddev->array_sectors & (chunksect-1))) |
| 8068 | chunksect >>= 1; |
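/*
 * For example (assuming a 4K STRIPE_SIZE): a 1,000,000-sector array is
 * not a multiple of 128 sectors (64K) but is a multiple of 64 sectors,
 * so the loop above settles on a 32K chunk, which still satisfies the
 * one-page minimum checked next.
 */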
| 8069 | |
| 8070 | if ((chunksect<<9) < STRIPE_SIZE) |
| 8071 | /* array size does not allow a suitable chunk size */ |
| 8072 | return ERR_PTR(-EINVAL); |
| 8073 | |
| 8074 | mddev->new_level = 5; |
| 8075 | mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; |
| 8076 | mddev->new_chunk_sectors = chunksect; |
| 8077 | |
| 8078 | ret = setup_conf(mddev); |
| 8079 | if (!IS_ERR(ret)) |
| 8080 | mddev_clear_unsupported_flags(mddev, |
| 8081 | UNSUPPORTED_MDDEV_FLAGS); |
| 8082 | return ret; |
| 8083 | } |
| 8084 | |
| 8085 | static void *raid5_takeover_raid6(struct mddev *mddev) |
| 8086 | { |
| 8087 | int new_layout; |
| 8088 | |
| 8089 | switch (mddev->layout) { |
| 8090 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 8091 | new_layout = ALGORITHM_LEFT_ASYMMETRIC; |
| 8092 | break; |
| 8093 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 8094 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC; |
| 8095 | break; |
| 8096 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 8097 | new_layout = ALGORITHM_LEFT_SYMMETRIC; |
| 8098 | break; |
| 8099 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 8100 | new_layout = ALGORITHM_RIGHT_SYMMETRIC; |
| 8101 | break; |
| 8102 | case ALGORITHM_PARITY_0_6: |
| 8103 | new_layout = ALGORITHM_PARITY_0; |
| 8104 | break; |
| 8105 | case ALGORITHM_PARITY_N: |
| 8106 | new_layout = ALGORITHM_PARITY_N; |
| 8107 | break; |
| 8108 | default: |
| 8109 | return ERR_PTR(-EINVAL); |
| 8110 | } |
| 8111 | mddev->new_level = 5; |
| 8112 | mddev->new_layout = new_layout; |
| 8113 | mddev->delta_disks = -1; |
| 8114 | mddev->raid_disks -= 1; |
| 8115 | return setup_conf(mddev); |
| 8116 | } |
| 8117 | |
| 8118 | static int raid5_check_reshape(struct mddev *mddev) |
| 8119 | { |
| 8120 | /* For a 2-drive array, the layout and chunk size can be changed |
| 8121 | * immediately, as no restriping is needed.
| 8122 | * For larger arrays we record the new value - after validation |
| 8123 | * to be used by a reshape pass. |
| 8124 | */ |
| 8125 | struct r5conf *conf = mddev->private; |
| 8126 | int new_chunk = mddev->new_chunk_sectors; |
| 8127 | |
| 8128 | if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) |
| 8129 | return -EINVAL; |
| 8130 | if (new_chunk > 0) { |
| 8131 | if (!is_power_of_2(new_chunk)) |
| 8132 | return -EINVAL; |
| 8133 | if (new_chunk < (PAGE_SIZE>>9)) |
| 8134 | return -EINVAL; |
| 8135 | if (mddev->array_sectors & (new_chunk-1)) |
| 8136 | /* not a factor of array size */
| 8137 | return -EINVAL; |
| 8138 | } |
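/*
 * For example, with 4K pages a requested new_chunk of 1024 sectors
 * (512K) passes the checks above: it is a power of two and at least
 * PAGE_SIZE >> 9 = 8 sectors; it is then only accepted if it also
 * divides the array size exactly.
 */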
| 8139 | |
| 8140 | /* They look valid */ |
| 8141 | |
| 8142 | if (mddev->raid_disks == 2) { |
| 8143 | /* can make the change immediately */ |
| 8144 | if (mddev->new_layout >= 0) { |
| 8145 | conf->algorithm = mddev->new_layout; |
| 8146 | mddev->layout = mddev->new_layout; |
| 8147 | } |
| 8148 | if (new_chunk > 0) { |
| 8149 | conf->chunk_sectors = new_chunk;
| 8150 | mddev->chunk_sectors = new_chunk; |
| 8151 | } |
| 8152 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); |
| 8153 | md_wakeup_thread(mddev->thread); |
| 8154 | } |
| 8155 | return check_reshape(mddev); |
| 8156 | } |
| 8157 | |
| 8158 | static int raid6_check_reshape(struct mddev *mddev) |
| 8159 | { |
| 8160 | int new_chunk = mddev->new_chunk_sectors; |
| 8161 | |
| 8162 | if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) |
| 8163 | return -EINVAL; |
| 8164 | if (new_chunk > 0) { |
| 8165 | if (!is_power_of_2(new_chunk)) |
| 8166 | return -EINVAL; |
| 8167 | if (new_chunk < (PAGE_SIZE >> 9)) |
| 8168 | return -EINVAL; |
| 8169 | if (mddev->array_sectors & (new_chunk-1)) |
| 8170 | /* not a factor of array size */
| 8171 | return -EINVAL; |
| 8172 | } |
| 8173 | |
| 8174 | /* They look valid */ |
| 8175 | return check_reshape(mddev); |
| 8176 | } |
| 8177 | |
| 8178 | static void *raid5_takeover(struct mddev *mddev) |
| 8179 | { |
| 8180 | /* raid5 can take over: |
| 8181 | * raid0 - if there is only one strip zone - make it a raid4 layout |
| 8182 | * raid1 - if there are two drives. We need to know the chunk size |
| 8183 | * raid4 - trivial - just use a raid4 layout. |
| 8184 | * raid6 - providing it is a *_6 layout
| 8185 | */ |
| 8186 | if (mddev->level == 0) |
| 8187 | return raid45_takeover_raid0(mddev, 5); |
| 8188 | if (mddev->level == 1) |
| 8189 | return raid5_takeover_raid1(mddev); |
| 8190 | if (mddev->level == 4) { |
| 8191 | mddev->new_layout = ALGORITHM_PARITY_N; |
| 8192 | mddev->new_level = 5; |
| 8193 | return setup_conf(mddev); |
| 8194 | } |
| 8195 | if (mddev->level == 6) |
| 8196 | return raid5_takeover_raid6(mddev); |
| 8197 | |
| 8198 | return ERR_PTR(-EINVAL); |
| 8199 | } |
| 8200 | |
| 8201 | static void *raid4_takeover(struct mddev *mddev) |
| 8202 | { |
| 8203 | /* raid4 can take over: |
| 8204 | * raid0 - if there is only one strip zone |
| 8205 | * raid5 - if layout is right |
| 8206 | */ |
| 8207 | if (mddev->level == 0) |
| 8208 | return raid45_takeover_raid0(mddev, 4); |
| 8209 | if (mddev->level == 5 && |
| 8210 | mddev->layout == ALGORITHM_PARITY_N) { |
| 8211 | mddev->new_layout = 0; |
| 8212 | mddev->new_level = 4; |
| 8213 | return setup_conf(mddev); |
| 8214 | } |
| 8215 | return ERR_PTR(-EINVAL); |
| 8216 | } |
| 8217 | |
| 8218 | static struct md_personality raid5_personality; |
| 8219 | |
| 8220 | static void *raid6_takeover(struct mddev *mddev) |
| 8221 | { |
| 8222 | /* Currently can only take over a raid5. We map the |
| 8223 | * personality to an equivalent raid6 personality |
| 8224 | * with the Q block at the end. |
| 8225 | */ |
| 8226 | int new_layout; |
| 8227 | |
| 8228 | if (mddev->pers != &raid5_personality) |
| 8229 | return ERR_PTR(-EINVAL); |
| 8230 | if (mddev->degraded > 1) |
| 8231 | return ERR_PTR(-EINVAL); |
| 8232 | if (mddev->raid_disks > 253) |
| 8233 | return ERR_PTR(-EINVAL); |
| 8234 | if (mddev->raid_disks < 3) |
| 8235 | return ERR_PTR(-EINVAL); |
| 8236 | |
| 8237 | switch (mddev->layout) { |
| 8238 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 8239 | new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; |
| 8240 | break; |
| 8241 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 8242 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; |
| 8243 | break; |
| 8244 | case ALGORITHM_LEFT_SYMMETRIC: |
| 8245 | new_layout = ALGORITHM_LEFT_SYMMETRIC_6; |
| 8246 | break; |
| 8247 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 8248 | new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; |
| 8249 | break; |
| 8250 | case ALGORITHM_PARITY_0: |
| 8251 | new_layout = ALGORITHM_PARITY_0_6; |
| 8252 | break; |
| 8253 | case ALGORITHM_PARITY_N: |
| 8254 | new_layout = ALGORITHM_PARITY_N; |
| 8255 | break; |
| 8256 | default: |
| 8257 | return ERR_PTR(-EINVAL); |
| 8258 | } |
| 8259 | mddev->new_level = 6; |
| 8260 | mddev->new_layout = new_layout; |
| 8261 | mddev->delta_disks = 1; |
| 8262 | mddev->raid_disks += 1; |
| 8263 | return setup_conf(mddev); |
| 8264 | } |
| 8265 | |
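/*
 * Rebuild the stripe cache from scratch: shrink it to nothing and then
 * grow it back to min_nr_stripes, so freshly allocated stripe_heads
 * pick up the current consistency policy (for instance, PPL attaches a
 * per-stripe page).
 */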
| 8266 | static void raid5_reset_stripe_cache(struct mddev *mddev) |
| 8267 | { |
| 8268 | struct r5conf *conf = mddev->private; |
| 8269 | |
| 8270 | mutex_lock(&conf->cache_size_mutex); |
| 8271 | while (conf->max_nr_stripes && |
| 8272 | drop_one_stripe(conf)) |
| 8273 | ; |
| 8274 | while (conf->min_nr_stripes > conf->max_nr_stripes && |
| 8275 | grow_one_stripe(conf, GFP_KERNEL)) |
| 8276 | ; |
| 8277 | mutex_unlock(&conf->cache_size_mutex); |
| 8278 | } |
| 8279 | |
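/*
 * Reached from the md core when user-space writes a new policy
 * ("ppl" or "resync") to the array's consistency_policy sysfs
 * attribute.
 */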
| 8280 | static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) |
| 8281 | { |
| 8282 | struct r5conf *conf; |
| 8283 | int err; |
| 8284 | |
| 8285 | err = mddev_lock(mddev); |
| 8286 | if (err) |
| 8287 | return err; |
| 8288 | conf = mddev->private; |
| 8289 | if (!conf) { |
| 8290 | mddev_unlock(mddev); |
| 8291 | return -ENODEV; |
| 8292 | } |
| 8293 | |
| 8294 | if (strncmp(buf, "ppl", 3) == 0 && !raid5_has_ppl(conf)) { |
| 8295 | /* ppl only works with RAID 5 */ |
| 8296 | if (conf->level == 5) { |
| 8297 | mddev_suspend(mddev); |
| 8298 | set_bit(MD_HAS_PPL, &mddev->flags); |
| 8299 | err = log_init(conf, NULL); |
| 8300 | if (!err) |
| 8301 | raid5_reset_stripe_cache(mddev); |
| 8302 | mddev_resume(mddev); |
| 8303 | } else |
| 8304 | err = -EINVAL; |
| 8305 | } else if (strncmp(buf, "resync", 6) == 0) { |
| 8306 | if (raid5_has_ppl(conf)) { |
| 8307 | mddev_suspend(mddev); |
| 8308 | log_exit(conf); |
| 8309 | raid5_reset_stripe_cache(mddev); |
| 8310 | mddev_resume(mddev); |
| 8311 | } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && |
| 8312 | r5l_log_disk_error(conf)) { |
| 8313 | bool journal_dev_exists = false; |
| 8314 | struct md_rdev *rdev; |
| 8315 | |
| 8316 | rdev_for_each(rdev, mddev) |
| 8317 | if (test_bit(Journal, &rdev->flags)) { |
| 8318 | journal_dev_exists = true; |
| 8319 | break; |
| 8320 | } |
| 8321 | |
| 8322 | if (!journal_dev_exists) { |
| 8323 | mddev_suspend(mddev); |
| 8324 | clear_bit(MD_HAS_JOURNAL, &mddev->flags); |
| 8325 | mddev_resume(mddev); |
| 8326 | } else /* need to remove the journal device first */
| 8327 | err = -EBUSY; |
| 8328 | } else |
| 8329 | err = -EINVAL; |
| 8330 | } else { |
| 8331 | err = -EINVAL; |
| 8332 | } |
| 8333 | |
| 8334 | if (!err) |
| 8335 | md_update_sb(mddev, 1); |
| 8336 | |
| 8337 | mddev_unlock(mddev); |
| 8338 | |
| 8339 | return err; |
| 8340 | } |
| 8341 | |
| 8342 | static struct md_personality raid6_personality = |
| 8343 | { |
| 8344 | .name = "raid6", |
| 8345 | .level = 6, |
| 8346 | .owner = THIS_MODULE, |
| 8347 | .make_request = raid5_make_request, |
| 8348 | .run = raid5_run, |
| 8349 | .free = raid5_free, |
| 8350 | .status = raid5_status, |
| 8351 | .error_handler = raid5_error, |
| 8352 | .hot_add_disk = raid5_add_disk, |
| 8353 | .hot_remove_disk= raid5_remove_disk, |
| 8354 | .spare_active = raid5_spare_active, |
| 8355 | .sync_request = raid5_sync_request, |
| 8356 | .resize = raid5_resize, |
| 8357 | .size = raid5_size, |
| 8358 | .check_reshape = raid6_check_reshape, |
| 8359 | .start_reshape = raid5_start_reshape, |
| 8360 | .finish_reshape = raid5_finish_reshape, |
| 8361 | .quiesce = raid5_quiesce, |
| 8362 | .takeover = raid6_takeover, |
| 8363 | .congested = raid5_congested, |
| 8364 | .change_consistency_policy = raid5_change_consistency_policy, |
| 8365 | }; |
| 8366 | static struct md_personality raid5_personality = |
| 8367 | { |
| 8368 | .name = "raid5", |
| 8369 | .level = 5, |
| 8370 | .owner = THIS_MODULE, |
| 8371 | .make_request = raid5_make_request, |
| 8372 | .run = raid5_run, |
| 8373 | .free = raid5_free, |
| 8374 | .status = raid5_status, |
| 8375 | .error_handler = raid5_error, |
| 8376 | .hot_add_disk = raid5_add_disk, |
| 8377 | .hot_remove_disk= raid5_remove_disk, |
| 8378 | .spare_active = raid5_spare_active, |
| 8379 | .sync_request = raid5_sync_request, |
| 8380 | .resize = raid5_resize, |
| 8381 | .size = raid5_size, |
| 8382 | .check_reshape = raid5_check_reshape, |
| 8383 | .start_reshape = raid5_start_reshape, |
| 8384 | .finish_reshape = raid5_finish_reshape, |
| 8385 | .quiesce = raid5_quiesce, |
| 8386 | .takeover = raid5_takeover, |
| 8387 | .congested = raid5_congested, |
| 8388 | .change_consistency_policy = raid5_change_consistency_policy, |
| 8389 | }; |
| 8390 | |
| 8391 | static struct md_personality raid4_personality = |
| 8392 | { |
| 8393 | .name = "raid4", |
| 8394 | .level = 4, |
| 8395 | .owner = THIS_MODULE, |
| 8396 | .make_request = raid5_make_request, |
| 8397 | .run = raid5_run, |
| 8398 | .free = raid5_free, |
| 8399 | .status = raid5_status, |
| 8400 | .error_handler = raid5_error, |
| 8401 | .hot_add_disk = raid5_add_disk, |
| 8402 | .hot_remove_disk= raid5_remove_disk, |
| 8403 | .spare_active = raid5_spare_active, |
| 8404 | .sync_request = raid5_sync_request, |
| 8405 | .resize = raid5_resize, |
| 8406 | .size = raid5_size, |
| 8407 | .check_reshape = raid5_check_reshape, |
| 8408 | .start_reshape = raid5_start_reshape, |
| 8409 | .finish_reshape = raid5_finish_reshape, |
| 8410 | .quiesce = raid5_quiesce, |
| 8411 | .takeover = raid4_takeover, |
| 8412 | .congested = raid5_congested, |
| 8413 | .change_consistency_policy = raid5_change_consistency_policy, |
| 8414 | }; |
| 8415 | |
| 8416 | static int __init raid5_init(void) |
| 8417 | { |
| 8418 | int ret; |
| 8419 | |
| 8420 | raid5_wq = alloc_workqueue("raid5wq", |
| 8421 | WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); |
| 8422 | if (!raid5_wq) |
| 8423 | return -ENOMEM; |
| 8424 | |
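/*
 * Register multi-instance CPU hotplug callbacks: each array's r5conf
 * adds itself as an instance so its per-CPU scratch buffers are
 * allocated when a CPU is prepared and freed when the CPU dies.
 */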
| 8425 | ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE, |
| 8426 | "md/raid5:prepare", |
| 8427 | raid456_cpu_up_prepare, |
| 8428 | raid456_cpu_dead); |
| 8429 | if (ret) { |
| 8430 | destroy_workqueue(raid5_wq); |
| 8431 | return ret; |
| 8432 | } |
| 8433 | register_md_personality(&raid6_personality); |
| 8434 | register_md_personality(&raid5_personality); |
| 8435 | register_md_personality(&raid4_personality); |
| 8436 | return 0; |
| 8437 | } |
| 8438 | |
| 8439 | static void raid5_exit(void) |
| 8440 | { |
| 8441 | unregister_md_personality(&raid6_personality); |
| 8442 | unregister_md_personality(&raid5_personality); |
| 8443 | unregister_md_personality(&raid4_personality); |
| 8444 | cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE); |
| 8445 | destroy_workqueue(raid5_wq); |
| 8446 | } |
| 8447 | |
| 8448 | module_init(raid5_init); |
| 8449 | module_exit(raid5_exit); |
| 8450 | MODULE_LICENSE("GPL"); |
| 8451 | MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); |
| 8452 | MODULE_ALIAS("md-personality-4"); /* RAID5 */ |
| 8453 | MODULE_ALIAS("md-raid5"); |
| 8454 | MODULE_ALIAS("md-raid4"); |
| 8455 | MODULE_ALIAS("md-level-5"); |
| 8456 | MODULE_ALIAS("md-level-4"); |
| 8457 | MODULE_ALIAS("md-personality-8"); /* RAID6 */ |
| 8458 | MODULE_ALIAS("md-raid6"); |
| 8459 | MODULE_ALIAS("md-level-6"); |
| 8460 | |
| 8461 | /* This used to be two separate modules; they were: */
| 8462 | MODULE_ALIAS("raid5"); |
| 8463 | MODULE_ALIAS("raid6"); |