md: add ->takeover method for raid5 to be able to take over raid1
linux-2.6-block.git: drivers/md/raid5.c
1/*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
6 *
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21/*
22 * BITMAP UNPLUGGING:
23 *
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
26 * explanation.
27 *
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->seq_write is the number of the last batch successfully written.
31 * conf->seq_flush is the number of the last batch that was closed to
32 * new additions.
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is seq_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment seq_flush, thus closing the current
39 * batch.
40 * When we notice that seq_flush > seq_write, we write out all pending updates
41 * to the bitmap, and advance seq_write to where seq_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
43 * miss any bits.
44 */
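/*
 * A sketch of the sequencing above (hypothetical restatement, not driver
 * code; the live logic is spread across add_stripe_bio(),
 * raid5_unplug_device() and raid5d(), using conf->seq_flush,
 * conf->seq_write and sh->bm_seq):
 */
#if 0
	sh->bm_seq = conf->seq_flush + 1;	/* add_stripe_bio(): join the open batch */
	conf->seq_flush++;			/* unplug: close the batch */
	if (conf->seq_flush != conf->seq_write) {	/* raid5d */
		bitmap_unplug(conf->mddev->bitmap);	/* write pending bits */
		conf->seq_write = conf->seq_flush;
	}
#endif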
45
46#include <linux/blkdev.h>
47#include <linux/kthread.h>
48#include <linux/async_tx.h>
49#include <linux/seq_file.h>
50#include "md.h"
51#include "raid5.h"
52#include "raid6.h"
53#include "bitmap.h"
54
55/*
56 * Stripe cache
57 */
58
59#define NR_STRIPES 256
60#define STRIPE_SIZE PAGE_SIZE
61#define STRIPE_SHIFT (PAGE_SHIFT - 9)
62#define STRIPE_SECTORS (STRIPE_SIZE>>9)
63#define IO_THRESHOLD 1
64#define BYPASS_THRESHOLD 1
65#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
66#define HASH_MASK (NR_HASH - 1)
67
68#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
69
70/* bio's attached to a stripe+device for I/O are linked together in bi_sector
71 * order without overlap. There may be several bio's per stripe+device, and
72 * a bio could span several devices.
73 * When walking this list for a particular stripe+device, we must never proceed
74 * beyond a bio that extends past this device, as the next bio might no longer
75 * be valid.
76 * This macro is used to determine the 'next' bio in the list, given the sector
77 * of the current stripe+device
78 */
79#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
80/*
81 * The following can be used to debug the driver
82 */
83#define RAID5_PARANOIA 1
84#if RAID5_PARANOIA && defined(CONFIG_SMP)
85# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
86#else
87# define CHECK_DEVLOCK()
88#endif
89
90#ifdef DEBUG
91#define inline
92#define __inline__
93#endif
94
95#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
96
97#if !RAID6_USE_EMPTY_ZERO_PAGE
98/* In .bss so it's zeroed */
99const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
100#endif
101
102/*
103 * We maintain a biased count of active stripes in the bottom 16 bits of
104 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
105 */
106static inline int raid5_bi_phys_segments(struct bio *bio)
107{
108 return bio->bi_phys_segments & 0xffff;
109}
110
111static inline int raid5_bi_hw_segments(struct bio *bio)
112{
113 return (bio->bi_phys_segments >> 16) & 0xffff;
114}
115
116static inline int raid5_dec_bi_phys_segments(struct bio *bio)
117{
118 --bio->bi_phys_segments;
119 return raid5_bi_phys_segments(bio);
120}
121
122static inline int raid5_dec_bi_hw_segments(struct bio *bio)
123{
124 unsigned short val = raid5_bi_hw_segments(bio);
125
126 --val;
127 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
128 return val;
129}
130
131static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
132{
133 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
134}
135
136/* Find first data disk in a raid6 stripe */
137static inline int raid6_d0(struct stripe_head *sh)
138{
139 if (sh->ddf_layout)
140 /* ddf always start from first device */
141 return 0;
142 /* md starts just after Q block */
143 if (sh->qd_idx == sh->disks - 1)
144 return 0;
145 else
146 return sh->qd_idx + 1;
147}
148static inline int raid6_next_disk(int disk, int raid_disks)
149{
150 disk++;
151 return (disk < raid_disks) ? disk : 0;
152}
153
154/* When walking through the disks in a raid6, starting at raid6_d0,
155 * we need to map each disk to a 'slot', where the data disks are slot
156 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
157 * is raid_disks-1. This helper does that mapping.
158 */
159static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
160 int *count, int syndrome_disks)
161{
162 int slot;
163
164 if (idx == sh->pd_idx)
165 return syndrome_disks;
166 if (idx == sh->qd_idx)
167 return syndrome_disks + 1;
168 slot = (*count)++;
169 return slot;
170}
171
172static void return_io(struct bio *return_bi)
173{
174 struct bio *bi = return_bi;
175 while (bi) {
176
177 return_bi = bi->bi_next;
178 bi->bi_next = NULL;
179 bi->bi_size = 0;
180 bio_endio(bi, 0);
181 bi = return_bi;
182 }
183}
184
185static void print_raid5_conf (raid5_conf_t *conf);
186
187static int stripe_operations_active(struct stripe_head *sh)
188{
189 return sh->check_state || sh->reconstruct_state ||
190 test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
191 test_bit(STRIPE_COMPUTE_RUN, &sh->state);
192}
193
194static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
195{
196 if (atomic_dec_and_test(&sh->count)) {
197 BUG_ON(!list_empty(&sh->lru));
198 BUG_ON(atomic_read(&conf->active_stripes)==0);
199 if (test_bit(STRIPE_HANDLE, &sh->state)) {
200 if (test_bit(STRIPE_DELAYED, &sh->state)) {
201 list_add_tail(&sh->lru, &conf->delayed_list);
202 blk_plug_device(conf->mddev->queue);
203 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
204 sh->bm_seq - conf->seq_write > 0) {
205 list_add_tail(&sh->lru, &conf->bitmap_list);
206 blk_plug_device(conf->mddev->queue);
207 } else {
208 clear_bit(STRIPE_BIT_DELAY, &sh->state);
209 list_add_tail(&sh->lru, &conf->handle_list);
210 }
211 md_wakeup_thread(conf->mddev->thread);
212 } else {
213 BUG_ON(stripe_operations_active(sh));
214 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
215 atomic_dec(&conf->preread_active_stripes);
216 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
217 md_wakeup_thread(conf->mddev->thread);
218 }
219 atomic_dec(&conf->active_stripes);
220 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
221 list_add_tail(&sh->lru, &conf->inactive_list);
222 wake_up(&conf->wait_for_stripe);
223 if (conf->retry_read_aligned)
224 md_wakeup_thread(conf->mddev->thread);
225 }
226 }
227 }
228}
229
230static void release_stripe(struct stripe_head *sh)
231{
232 raid5_conf_t *conf = sh->raid_conf;
233 unsigned long flags;
234
235 spin_lock_irqsave(&conf->device_lock, flags);
236 __release_stripe(conf, sh);
237 spin_unlock_irqrestore(&conf->device_lock, flags);
238}
239
240static inline void remove_hash(struct stripe_head *sh)
241{
242 pr_debug("remove_hash(), stripe %llu\n",
243 (unsigned long long)sh->sector);
244
245 hlist_del_init(&sh->hash);
246}
247
248static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
249{
250 struct hlist_head *hp = stripe_hash(conf, sh->sector);
251
252 pr_debug("insert_hash(), stripe %llu\n",
253 (unsigned long long)sh->sector);
254
255 CHECK_DEVLOCK();
256 hlist_add_head(&sh->hash, hp);
257}
258
259
260/* find an idle stripe, make sure it is unhashed, and return it. */
261static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
262{
263 struct stripe_head *sh = NULL;
264 struct list_head *first;
265
266 CHECK_DEVLOCK();
267 if (list_empty(&conf->inactive_list))
268 goto out;
269 first = conf->inactive_list.next;
270 sh = list_entry(first, struct stripe_head, lru);
271 list_del_init(first);
272 remove_hash(sh);
273 atomic_inc(&conf->active_stripes);
274out:
275 return sh;
276}
277
278static void shrink_buffers(struct stripe_head *sh, int num)
279{
280 struct page *p;
281 int i;
282
283 for (i=0; i<num ; i++) {
284 p = sh->dev[i].page;
285 if (!p)
286 continue;
287 sh->dev[i].page = NULL;
288 put_page(p);
289 }
290}
291
292static int grow_buffers(struct stripe_head *sh, int num)
293{
294 int i;
295
296 for (i=0; i<num; i++) {
297 struct page *page;
298
299 if (!(page = alloc_page(GFP_KERNEL))) {
300 return 1;
301 }
302 sh->dev[i].page = page;
303 }
304 return 0;
305}
306
307static void raid5_build_block(struct stripe_head *sh, int i);
308static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
309 struct stripe_head *sh);
310
311static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
312{
313 raid5_conf_t *conf = sh->raid_conf;
314 int i;
315
316 BUG_ON(atomic_read(&sh->count) != 0);
317 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
318 BUG_ON(stripe_operations_active(sh));
319
320 CHECK_DEVLOCK();
321 pr_debug("init_stripe called, stripe %llu\n",
322 (unsigned long long)sh->sector);
323
324 remove_hash(sh);
325
326 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
327 sh->sector = sector;
328 stripe_set_idx(sector, conf, previous, sh);
329 sh->state = 0;
330
331
332 for (i = sh->disks; i--; ) {
333 struct r5dev *dev = &sh->dev[i];
334
335 if (dev->toread || dev->read || dev->towrite || dev->written ||
336 test_bit(R5_LOCKED, &dev->flags)) {
337 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
338 (unsigned long long)sh->sector, i, dev->toread,
339 dev->read, dev->towrite, dev->written,
340 test_bit(R5_LOCKED, &dev->flags));
341 BUG();
342 }
343 dev->flags = 0;
344 raid5_build_block(sh, i);
345 }
346 insert_hash(conf, sh);
347}
348
349static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
350{
351 struct stripe_head *sh;
352 struct hlist_node *hn;
353
354 CHECK_DEVLOCK();
355 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
356 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
357 if (sh->sector == sector && sh->disks == disks)
358 return sh;
359 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
360 return NULL;
361}
362
363static void unplug_slaves(mddev_t *mddev);
364static void raid5_unplug_device(struct request_queue *q);
365
366static struct stripe_head *
367get_active_stripe(raid5_conf_t *conf, sector_t sector,
368 int previous, int noblock)
369{
370 struct stripe_head *sh;
371 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
372
373 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
374
375 spin_lock_irq(&conf->device_lock);
376
377 do {
378 wait_event_lock_irq(conf->wait_for_stripe,
379 conf->quiesce == 0,
380 conf->device_lock, /* nothing */);
381 sh = __find_stripe(conf, sector, disks);
382 if (!sh) {
383 if (!conf->inactive_blocked)
384 sh = get_free_stripe(conf);
385 if (noblock && sh == NULL)
386 break;
387 if (!sh) {
388 conf->inactive_blocked = 1;
389 wait_event_lock_irq(conf->wait_for_stripe,
390 !list_empty(&conf->inactive_list) &&
391 (atomic_read(&conf->active_stripes)
392 < (conf->max_nr_stripes *3/4)
393 || !conf->inactive_blocked),
394 conf->device_lock,
395 raid5_unplug_device(conf->mddev->queue)
396 );
397 conf->inactive_blocked = 0;
398 } else
399 init_stripe(sh, sector, previous);
400 } else {
401 if (atomic_read(&sh->count)) {
402 BUG_ON(!list_empty(&sh->lru));
403 } else {
404 if (!test_bit(STRIPE_HANDLE, &sh->state))
405 atomic_inc(&conf->active_stripes);
406 if (list_empty(&sh->lru) &&
407 !test_bit(STRIPE_EXPANDING, &sh->state))
408 BUG();
409 list_del_init(&sh->lru);
410 }
411 }
412 } while (sh == NULL);
413
414 if (sh)
415 atomic_inc(&sh->count);
416
417 spin_unlock_irq(&conf->device_lock);
418 return sh;
419}
420
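/*
 * Typical caller pattern for the lookup above, modeled on make_request()
 * (hypothetical fragment): find or allocate the stripe, mark it for
 * handling, then drop the reference so raid5d can pick it up.
 */
#if 0
static void get_active_stripe_example(raid5_conf_t *conf, sector_t sector)
{
	struct stripe_head *sh = get_active_stripe(conf, sector, 0, 0);

	if (sh) {
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);	/* queues it for raid5d */
	}
}
#endif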
421static void
422raid5_end_read_request(struct bio *bi, int error);
423static void
424raid5_end_write_request(struct bio *bi, int error);
425
426static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
427{
428 raid5_conf_t *conf = sh->raid_conf;
429 int i, disks = sh->disks;
430
431 might_sleep();
432
433 for (i = disks; i--; ) {
434 int rw;
435 struct bio *bi;
436 mdk_rdev_t *rdev;
437 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
438 rw = WRITE;
439 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
440 rw = READ;
441 else
442 continue;
443
444 bi = &sh->dev[i].req;
445
446 bi->bi_rw = rw;
447 if (rw == WRITE)
448 bi->bi_end_io = raid5_end_write_request;
449 else
450 bi->bi_end_io = raid5_end_read_request;
451
452 rcu_read_lock();
453 rdev = rcu_dereference(conf->disks[i].rdev);
454 if (rdev && test_bit(Faulty, &rdev->flags))
455 rdev = NULL;
456 if (rdev)
457 atomic_inc(&rdev->nr_pending);
458 rcu_read_unlock();
459
460 if (rdev) {
461 if (s->syncing || s->expanding || s->expanded)
462 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
463
464 set_bit(STRIPE_IO_STARTED, &sh->state);
465
466 bi->bi_bdev = rdev->bdev;
467 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
468 __func__, (unsigned long long)sh->sector,
469 bi->bi_rw, i);
470 atomic_inc(&sh->count);
471 bi->bi_sector = sh->sector + rdev->data_offset;
472 bi->bi_flags = 1 << BIO_UPTODATE;
473 bi->bi_vcnt = 1;
474 bi->bi_max_vecs = 1;
475 bi->bi_idx = 0;
476 bi->bi_io_vec = &sh->dev[i].vec;
477 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
478 bi->bi_io_vec[0].bv_offset = 0;
479 bi->bi_size = STRIPE_SIZE;
480 bi->bi_next = NULL;
481 if (rw == WRITE &&
482 test_bit(R5_ReWrite, &sh->dev[i].flags))
483 atomic_add(STRIPE_SECTORS,
484 &rdev->corrected_errors);
485 generic_make_request(bi);
486 } else {
487 if (rw == WRITE)
488 set_bit(STRIPE_DEGRADED, &sh->state);
489 pr_debug("skip op %ld on disc %d for sector %llu\n",
490 bi->bi_rw, i, (unsigned long long)sh->sector);
491 clear_bit(R5_LOCKED, &sh->dev[i].flags);
492 set_bit(STRIPE_HANDLE, &sh->state);
493 }
494 }
495}
496
497static struct dma_async_tx_descriptor *
498async_copy_data(int frombio, struct bio *bio, struct page *page,
499 sector_t sector, struct dma_async_tx_descriptor *tx)
500{
501 struct bio_vec *bvl;
502 struct page *bio_page;
503 int i;
504 int page_offset;
505
506 if (bio->bi_sector >= sector)
507 page_offset = (signed)(bio->bi_sector - sector) * 512;
508 else
509 page_offset = (signed)(sector - bio->bi_sector) * -512;
510 bio_for_each_segment(bvl, bio, i) {
511 int len = bio_iovec_idx(bio, i)->bv_len;
512 int clen;
513 int b_offset = 0;
514
515 if (page_offset < 0) {
516 b_offset = -page_offset;
517 page_offset += b_offset;
518 len -= b_offset;
519 }
520
521 if (len > 0 && page_offset + len > STRIPE_SIZE)
522 clen = STRIPE_SIZE - page_offset;
523 else
524 clen = len;
525
526 if (clen > 0) {
527 b_offset += bio_iovec_idx(bio, i)->bv_offset;
528 bio_page = bio_iovec_idx(bio, i)->bv_page;
529 if (frombio)
530 tx = async_memcpy(page, bio_page, page_offset,
531 b_offset, clen,
532 ASYNC_TX_DEP_ACK,
533 tx, NULL, NULL);
534 else
535 tx = async_memcpy(bio_page, page, b_offset,
536 page_offset, clen,
537 ASYNC_TX_DEP_ACK,
538 tx, NULL, NULL);
539 }
540 if (clen < len) /* hit end of page */
541 break;
542 page_offset += len;
543 }
544
545 return tx;
546}
547
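/*
 * Offset bookkeeping in async_copy_data(), by example (illustrative
 * numbers): if sector == 1000 and bio->bi_sector == 998, then
 * page_offset starts at (998 - 1000) * 512 == -1024, so the first
 * 1024 bytes of the bio are consumed as b_offset before copying begins
 * at byte 0 of the stripe page; any segment running past STRIPE_SIZE
 * is clipped to clen bytes and ends the walk.
 */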
548static void ops_complete_biofill(void *stripe_head_ref)
549{
550 struct stripe_head *sh = stripe_head_ref;
551 struct bio *return_bi = NULL;
552 raid5_conf_t *conf = sh->raid_conf;
553 int i;
554
555 pr_debug("%s: stripe %llu\n", __func__,
556 (unsigned long long)sh->sector);
557
558 /* clear completed biofills */
559 spin_lock_irq(&conf->device_lock);
560 for (i = sh->disks; i--; ) {
561 struct r5dev *dev = &sh->dev[i];
562
563 /* acknowledge completion of a biofill operation */
564 /* and check if we need to reply to a read request,
565 * new R5_Wantfill requests are held off until
566 * !STRIPE_BIOFILL_RUN
567 */
568 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
569 struct bio *rbi, *rbi2;
570
571 BUG_ON(!dev->read);
572 rbi = dev->read;
573 dev->read = NULL;
574 while (rbi && rbi->bi_sector <
575 dev->sector + STRIPE_SECTORS) {
576 rbi2 = r5_next_bio(rbi, dev->sector);
577 if (!raid5_dec_bi_phys_segments(rbi)) {
578 rbi->bi_next = return_bi;
579 return_bi = rbi;
580 }
581 rbi = rbi2;
582 }
583 }
584 }
585 spin_unlock_irq(&conf->device_lock);
586 clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
587
588 return_io(return_bi);
589
590 set_bit(STRIPE_HANDLE, &sh->state);
591 release_stripe(sh);
592}
593
594static void ops_run_biofill(struct stripe_head *sh)
595{
596 struct dma_async_tx_descriptor *tx = NULL;
597 raid5_conf_t *conf = sh->raid_conf;
598 int i;
599
600 pr_debug("%s: stripe %llu\n", __func__,
601 (unsigned long long)sh->sector);
602
603 for (i = sh->disks; i--; ) {
604 struct r5dev *dev = &sh->dev[i];
605 if (test_bit(R5_Wantfill, &dev->flags)) {
606 struct bio *rbi;
607 spin_lock_irq(&conf->device_lock);
608 dev->read = rbi = dev->toread;
609 dev->toread = NULL;
610 spin_unlock_irq(&conf->device_lock);
611 while (rbi && rbi->bi_sector <
612 dev->sector + STRIPE_SECTORS) {
613 tx = async_copy_data(0, rbi, dev->page,
614 dev->sector, tx);
615 rbi = r5_next_bio(rbi, dev->sector);
616 }
617 }
618 }
619
620 atomic_inc(&sh->count);
621 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
622 ops_complete_biofill, sh);
623}
624
625static void ops_complete_compute5(void *stripe_head_ref)
626{
627 struct stripe_head *sh = stripe_head_ref;
628 int target = sh->ops.target;
629 struct r5dev *tgt = &sh->dev[target];
630
631 pr_debug("%s: stripe %llu\n", __func__,
632 (unsigned long long)sh->sector);
633
634 set_bit(R5_UPTODATE, &tgt->flags);
635 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
636 clear_bit(R5_Wantcompute, &tgt->flags);
637 clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
638 if (sh->check_state == check_state_compute_run)
639 sh->check_state = check_state_compute_result;
640 set_bit(STRIPE_HANDLE, &sh->state);
641 release_stripe(sh);
642}
643
644static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
645{
646 /* kernel stack size limits the total number of disks */
647 int disks = sh->disks;
648 struct page *xor_srcs[disks];
649 int target = sh->ops.target;
650 struct r5dev *tgt = &sh->dev[target];
651 struct page *xor_dest = tgt->page;
652 int count = 0;
653 struct dma_async_tx_descriptor *tx;
654 int i;
655
656 pr_debug("%s: stripe %llu block: %d\n",
657 __func__, (unsigned long long)sh->sector, target);
658 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
659
660 for (i = disks; i--; )
661 if (i != target)
662 xor_srcs[count++] = sh->dev[i].page;
663
664 atomic_inc(&sh->count);
665
666 if (unlikely(count == 1))
667 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
668 0, NULL, ops_complete_compute5, sh);
669 else
670 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
671 ASYNC_TX_XOR_ZERO_DST, NULL,
672 ops_complete_compute5, sh);
673
674 return tx;
675}
676
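/*
 * The identity ops_run_compute5() relies on: in RAID-5 any single block
 * equals the XOR of all the other blocks in the stripe.  Synchronous
 * byte-wise illustration (hypothetical, 4-disk stripe):
 */
#if 0
static void xor_recover_example(u8 *d0, u8 *d1, u8 *d2, u8 *p)
{
	int off;

	/* P = D0 ^ D1 ^ D2, hence D1 = P ^ D0 ^ D2 */
	for (off = 0; off < STRIPE_SIZE; off++)
		d1[off] = p[off] ^ d0[off] ^ d2[off];
}
#endif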
677static void ops_complete_prexor(void *stripe_head_ref)
678{
679 struct stripe_head *sh = stripe_head_ref;
680
681 pr_debug("%s: stripe %llu\n", __func__,
682 (unsigned long long)sh->sector);
683}
684
685static struct dma_async_tx_descriptor *
686ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
687{
688 /* kernel stack size limits the total number of disks */
689 int disks = sh->disks;
690 struct page *xor_srcs[disks];
691 int count = 0, pd_idx = sh->pd_idx, i;
692
693 /* existing parity data subtracted */
694 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
695
696 pr_debug("%s: stripe %llu\n", __func__,
697 (unsigned long long)sh->sector);
698
699 for (i = disks; i--; ) {
700 struct r5dev *dev = &sh->dev[i];
701 /* Only process blocks that are known to be uptodate */
702 if (test_bit(R5_Wantdrain, &dev->flags))
703 xor_srcs[count++] = dev->page;
704 }
705
706 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
707 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
708 ops_complete_prexor, sh);
709
710 return tx;
711}
712
713static struct dma_async_tx_descriptor *
714ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
715{
716 int disks = sh->disks;
717 int i;
718
719 pr_debug("%s: stripe %llu\n", __func__,
720 (unsigned long long)sh->sector);
721
722 for (i = disks; i--; ) {
723 struct r5dev *dev = &sh->dev[i];
724 struct bio *chosen;
725
726 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
727 struct bio *wbi;
728
729 spin_lock(&sh->lock);
730 chosen = dev->towrite;
731 dev->towrite = NULL;
732 BUG_ON(dev->written);
733 wbi = dev->written = chosen;
734 spin_unlock(&sh->lock);
735
736 while (wbi && wbi->bi_sector <
737 dev->sector + STRIPE_SECTORS) {
738 tx = async_copy_data(1, wbi, dev->page,
739 dev->sector, tx);
740 wbi = r5_next_bio(wbi, dev->sector);
741 }
742 }
743 }
744
745 return tx;
746}
747
748static void ops_complete_postxor(void *stripe_head_ref)
749{
750 struct stripe_head *sh = stripe_head_ref;
751 int disks = sh->disks, i, pd_idx = sh->pd_idx;
752
753 pr_debug("%s: stripe %llu\n", __func__,
754 (unsigned long long)sh->sector);
755
756 for (i = disks; i--; ) {
757 struct r5dev *dev = &sh->dev[i];
758 if (dev->written || i == pd_idx)
759 set_bit(R5_UPTODATE, &dev->flags);
760 }
761
762 if (sh->reconstruct_state == reconstruct_state_drain_run)
763 sh->reconstruct_state = reconstruct_state_drain_result;
764 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
765 sh->reconstruct_state = reconstruct_state_prexor_drain_result;
766 else {
767 BUG_ON(sh->reconstruct_state != reconstruct_state_run);
768 sh->reconstruct_state = reconstruct_state_result;
769 }
770
771 set_bit(STRIPE_HANDLE, &sh->state);
772 release_stripe(sh);
773}
774
775static void
776ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
777{
778 /* kernel stack size limits the total number of disks */
779 int disks = sh->disks;
780 struct page *xor_srcs[disks];
781
782 int count = 0, pd_idx = sh->pd_idx, i;
783 struct page *xor_dest;
784 int prexor = 0;
785 unsigned long flags;
786
787 pr_debug("%s: stripe %llu\n", __func__,
788 (unsigned long long)sh->sector);
789
790 /* check if prexor is active which means only process blocks
791 * that are part of a read-modify-write (written)
792 */
793 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
794 prexor = 1;
795 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
796 for (i = disks; i--; ) {
797 struct r5dev *dev = &sh->dev[i];
798 if (dev->written)
799 xor_srcs[count++] = dev->page;
800 }
801 } else {
802 xor_dest = sh->dev[pd_idx].page;
803 for (i = disks; i--; ) {
804 struct r5dev *dev = &sh->dev[i];
805 if (i != pd_idx)
806 xor_srcs[count++] = dev->page;
807 }
808 }
809
810 /* 1/ if we prexor'd then the dest is reused as a source
811 * 2/ if we did not prexor then we are redoing the parity
812 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
813 * for the synchronous xor case
814 */
815 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
816 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
817
818 atomic_inc(&sh->count);
819
820 if (unlikely(count == 1)) {
821 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
822 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
823 flags, tx, ops_complete_postxor, sh);
824 } else
825 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
826 flags, tx, ops_complete_postxor, sh);
827}
828
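/*
 * The prexor/biodrain/postxor trio implements read-modify-write parity:
 * P_new = P_old ^ D_old ^ D_new.  prexor subtracts the old data from P,
 * biodrain copies the new data in, postxor adds it back.  Byte-wise
 * model (hypothetical):
 */
#if 0
static u8 rmw_parity_example(u8 p_old, u8 d_old, u8 d_new)
{
	u8 p = p_old ^ d_old;	/* prexor: old contribution removed */
	return p ^ d_new;	/* postxor: new contribution added */
}
#endif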
829static void ops_complete_check(void *stripe_head_ref)
830{
831 struct stripe_head *sh = stripe_head_ref;
832
833 pr_debug("%s: stripe %llu\n", __func__,
834 (unsigned long long)sh->sector);
835
836 sh->check_state = check_state_check_result;
837 set_bit(STRIPE_HANDLE, &sh->state);
838 release_stripe(sh);
839}
840
841static void ops_run_check(struct stripe_head *sh)
842{
843 /* kernel stack size limits the total number of disks */
844 int disks = sh->disks;
845 struct page *xor_srcs[disks];
846 struct dma_async_tx_descriptor *tx;
847
848 int count = 0, pd_idx = sh->pd_idx, i;
849 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
850
851 pr_debug("%s: stripe %llu\n", __func__,
852 (unsigned long long)sh->sector);
853
854 for (i = disks; i--; ) {
855 struct r5dev *dev = &sh->dev[i];
856 if (i != pd_idx)
857 xor_srcs[count++] = dev->page;
858 }
859
860 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
861 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
862
863 atomic_inc(&sh->count);
864 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
865 ops_complete_check, sh);
866}
867
868static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
869{
870 int overlap_clear = 0, i, disks = sh->disks;
871 struct dma_async_tx_descriptor *tx = NULL;
872
873 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
874 ops_run_biofill(sh);
875 overlap_clear++;
876 }
877
878 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
879 tx = ops_run_compute5(sh);
880 /* terminate the chain if postxor is not set to be run */
881 if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
882 async_tx_ack(tx);
883 }
884
885 if (test_bit(STRIPE_OP_PREXOR, &ops_request))
886 tx = ops_run_prexor(sh, tx);
887
888 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
889 tx = ops_run_biodrain(sh, tx);
890 overlap_clear++;
891 }
892
893 if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
894 ops_run_postxor(sh, tx);
895
896 if (test_bit(STRIPE_OP_CHECK, &ops_request))
897 ops_run_check(sh);
898
899 if (overlap_clear)
900 for (i = disks; i--; ) {
901 struct r5dev *dev = &sh->dev[i];
902 if (test_and_clear_bit(R5_Overlap, &dev->flags))
903 wake_up(&sh->raid_conf->wait_for_overlap);
904 }
905}
906
907static int grow_one_stripe(raid5_conf_t *conf)
908{
909 struct stripe_head *sh;
910 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
911 if (!sh)
912 return 0;
913 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
914 sh->raid_conf = conf;
915 spin_lock_init(&sh->lock);
916
917 if (grow_buffers(sh, conf->raid_disks)) {
918 shrink_buffers(sh, conf->raid_disks);
919 kmem_cache_free(conf->slab_cache, sh);
920 return 0;
921 }
922 sh->disks = conf->raid_disks;
923 /* we just created an active stripe so... */
924 atomic_set(&sh->count, 1);
925 atomic_inc(&conf->active_stripes);
926 INIT_LIST_HEAD(&sh->lru);
927 release_stripe(sh);
928 return 1;
929}
930
931static int grow_stripes(raid5_conf_t *conf, int num)
932{
933 struct kmem_cache *sc;
934 int devs = conf->raid_disks;
935
936 sprintf(conf->cache_name[0],
937 "raid%d-%s", conf->level, mdname(conf->mddev));
938 sprintf(conf->cache_name[1],
939 "raid%d-%s-alt", conf->level, mdname(conf->mddev));
940 conf->active_name = 0;
941 sc = kmem_cache_create(conf->cache_name[conf->active_name],
942 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
943 0, 0, NULL);
944 if (!sc)
945 return 1;
946 conf->slab_cache = sc;
947 conf->pool_size = devs;
948 while (num--)
949 if (!grow_one_stripe(conf))
950 return 1;
951 return 0;
952}
953
954#ifdef CONFIG_MD_RAID5_RESHAPE
955static int resize_stripes(raid5_conf_t *conf, int newsize)
956{
957 /* Make all the stripes able to hold 'newsize' devices.
958 * New slots in each stripe get 'page' set to a new page.
959 *
960 * This happens in stages:
961 * 1/ create a new kmem_cache and allocate the required number of
962 * stripe_heads.
963 * 2/ gather all the old stripe_heads and transfer the pages across
964 * to the new stripe_heads. This will have the side effect of
965 * freezing the array as once all stripe_heads have been collected,
966 * no IO will be possible. Old stripe heads are freed once their
967 * pages have been transferred over, and the old kmem_cache is
968 * freed when all stripes are done.
969 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
970 * we simply return a failure status - no need to clean anything up.
971 * 4/ allocate new pages for the new slots in the new stripe_heads.
972 * If this fails, we don't bother trying to shrink the
973 * stripe_heads down again, we just leave them as they are.
974 * As each stripe_head is processed the new one is released into
975 * active service.
976 *
977 * Once step2 is started, we cannot afford to wait for a write,
978 * so we use GFP_NOIO allocations.
979 */
980 struct stripe_head *osh, *nsh;
981 LIST_HEAD(newstripes);
982 struct disk_info *ndisks;
983 int err;
984 struct kmem_cache *sc;
985 int i;
986
987 if (newsize <= conf->pool_size)
988 return 0; /* never bother to shrink */
989
990 err = md_allow_write(conf->mddev);
991 if (err)
992 return err;
993
994 /* Step 1 */
995 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
996 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
997 0, 0, NULL);
998 if (!sc)
999 return -ENOMEM;
1000
1001 for (i = conf->max_nr_stripes; i; i--) {
1002 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
1003 if (!nsh)
1004 break;
1005
1006 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
1007
1008 nsh->raid_conf = conf;
1009 spin_lock_init(&nsh->lock);
1010
1011 list_add(&nsh->lru, &newstripes);
1012 }
1013 if (i) {
1014 /* didn't get enough, give up */
1015 while (!list_empty(&newstripes)) {
1016 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1017 list_del(&nsh->lru);
1018 kmem_cache_free(sc, nsh);
1019 }
1020 kmem_cache_destroy(sc);
1021 return -ENOMEM;
1022 }
1023 /* Step 2 - Must use GFP_NOIO now.
1024 * OK, we have enough stripes, start collecting inactive
1025 * stripes and copying them over
1026 */
1027 list_for_each_entry(nsh, &newstripes, lru) {
1028 spin_lock_irq(&conf->device_lock);
1029 wait_event_lock_irq(conf->wait_for_stripe,
1030 !list_empty(&conf->inactive_list),
1031 conf->device_lock,
1032 unplug_slaves(conf->mddev)
1033 );
1034 osh = get_free_stripe(conf);
1035 spin_unlock_irq(&conf->device_lock);
1036 atomic_set(&nsh->count, 1);
1037 for(i=0; i<conf->pool_size; i++)
1038 nsh->dev[i].page = osh->dev[i].page;
1039 for( ; i<newsize; i++)
1040 nsh->dev[i].page = NULL;
1041 kmem_cache_free(conf->slab_cache, osh);
1042 }
1043 kmem_cache_destroy(conf->slab_cache);
1044
1045 /* Step 3.
1046 * At this point, we are holding all the stripes so the array
1047 * is completely stalled, so now is a good time to resize
1048 * conf->disks.
1049 */
1050 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1051 if (ndisks) {
1052 for (i=0; i<conf->raid_disks; i++)
1053 ndisks[i] = conf->disks[i];
1054 kfree(conf->disks);
1055 conf->disks = ndisks;
1056 } else
1057 err = -ENOMEM;
1058
1059 /* Step 4, return new stripes to service */
1060 while(!list_empty(&newstripes)) {
1061 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1062 list_del_init(&nsh->lru);
1063 for (i=conf->raid_disks; i < newsize; i++)
1064 if (nsh->dev[i].page == NULL) {
1065 struct page *p = alloc_page(GFP_NOIO);
1066 nsh->dev[i].page = p;
1067 if (!p)
1068 err = -ENOMEM;
1069 }
1070 release_stripe(nsh);
1071 }
1072 /* critical section passed, GFP_NOIO no longer needed */
1073
1074 conf->slab_cache = sc;
1075 conf->active_name = 1-conf->active_name;
1076 conf->pool_size = newsize;
1077 return err;
1078}
1079#endif
1080
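/*
 * Note on the two cache names built in grow_stripes(): resize_stripes()
 * creates its replacement cache under cache_name[1 - active_name] (the
 * "-alt" name) while the old cache still exists, then flips active_name
 * once the old cache has been destroyed, so alternating resizes reuse
 * the two names in turn.
 */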
1081static int drop_one_stripe(raid5_conf_t *conf)
1082{
1083 struct stripe_head *sh;
1084
1085 spin_lock_irq(&conf->device_lock);
1086 sh = get_free_stripe(conf);
1087 spin_unlock_irq(&conf->device_lock);
1088 if (!sh)
1089 return 0;
1090 BUG_ON(atomic_read(&sh->count));
1091 shrink_buffers(sh, conf->pool_size);
1092 kmem_cache_free(conf->slab_cache, sh);
1093 atomic_dec(&conf->active_stripes);
1094 return 1;
1095}
1096
1097static void shrink_stripes(raid5_conf_t *conf)
1098{
1099 while (drop_one_stripe(conf))
1100 ;
1101
1102 if (conf->slab_cache)
1103 kmem_cache_destroy(conf->slab_cache);
1104 conf->slab_cache = NULL;
1105}
1106
1107static void raid5_end_read_request(struct bio * bi, int error)
1108{
1109 struct stripe_head *sh = bi->bi_private;
1110 raid5_conf_t *conf = sh->raid_conf;
1111 int disks = sh->disks, i;
1112 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1113 char b[BDEVNAME_SIZE];
1114 mdk_rdev_t *rdev;
1115
1116
1117 for (i=0 ; i<disks; i++)
1118 if (bi == &sh->dev[i].req)
1119 break;
1120
1121 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1122 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1123 uptodate);
1124 if (i == disks) {
1125 BUG();
1126 return;
1127 }
1128
1129 if (uptodate) {
1130 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1131 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1132 rdev = conf->disks[i].rdev;
1133 printk_rl(KERN_INFO "raid5:%s: read error corrected"
1134 " (%lu sectors at %llu on %s)\n",
1135 mdname(conf->mddev), STRIPE_SECTORS,
1136 (unsigned long long)(sh->sector
1137 + rdev->data_offset),
1138 bdevname(rdev->bdev, b));
1139 clear_bit(R5_ReadError, &sh->dev[i].flags);
1140 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1141 }
1142 if (atomic_read(&conf->disks[i].rdev->read_errors))
1143 atomic_set(&conf->disks[i].rdev->read_errors, 0);
1144 } else {
1145 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1146 int retry = 0;
1147 rdev = conf->disks[i].rdev;
1148
1149 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1150 atomic_inc(&rdev->read_errors);
1151 if (conf->mddev->degraded)
1152 printk_rl(KERN_WARNING
1153 "raid5:%s: read error not correctable "
1154 "(sector %llu on %s).\n",
1155 mdname(conf->mddev),
1156 (unsigned long long)(sh->sector
1157 + rdev->data_offset),
1158 bdn);
1159 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1160 /* Oh, no!!! */
1161 printk_rl(KERN_WARNING
1162 "raid5:%s: read error NOT corrected!! "
1163 "(sector %llu on %s).\n",
1164 mdname(conf->mddev),
1165 (unsigned long long)(sh->sector
1166 + rdev->data_offset),
1167 bdn);
1168 else if (atomic_read(&rdev->read_errors)
1169 > conf->max_nr_stripes)
1170 printk(KERN_WARNING
1171 "raid5:%s: Too many read errors, failing device %s.\n",
1172 mdname(conf->mddev), bdn);
1173 else
1174 retry = 1;
1175 if (retry)
1176 set_bit(R5_ReadError, &sh->dev[i].flags);
1177 else {
1178 clear_bit(R5_ReadError, &sh->dev[i].flags);
1179 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1180 md_error(conf->mddev, rdev);
1181 }
1182 }
1183 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1184 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1185 set_bit(STRIPE_HANDLE, &sh->state);
1186 release_stripe(sh);
1187}
1188
1189static void raid5_end_write_request(struct bio *bi, int error)
1190{
1191 struct stripe_head *sh = bi->bi_private;
1192 raid5_conf_t *conf = sh->raid_conf;
1193 int disks = sh->disks, i;
1194 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1195
1196 for (i=0 ; i<disks; i++)
1197 if (bi == &sh->dev[i].req)
1198 break;
1199
1200 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1201 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1202 uptodate);
1203 if (i == disks) {
1204 BUG();
1205 return;
1206 }
1207
1208 if (!uptodate)
1209 md_error(conf->mddev, conf->disks[i].rdev);
1210
1211 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1212
1213 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1214 set_bit(STRIPE_HANDLE, &sh->state);
1215 release_stripe(sh);
1216}
1217
1218
1219static sector_t compute_blocknr(struct stripe_head *sh, int i);
1220
1221static void raid5_build_block(struct stripe_head *sh, int i)
1222{
1223 struct r5dev *dev = &sh->dev[i];
1224
1225 bio_init(&dev->req);
1226 dev->req.bi_io_vec = &dev->vec;
1227 dev->req.bi_vcnt++;
1228 dev->req.bi_max_vecs++;
1229 dev->vec.bv_page = dev->page;
1230 dev->vec.bv_len = STRIPE_SIZE;
1231 dev->vec.bv_offset = 0;
1232
1233 dev->req.bi_sector = sh->sector;
1234 dev->req.bi_private = sh;
1235
1236 dev->flags = 0;
1237 dev->sector = compute_blocknr(sh, i);
1238}
1239
1240static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1241{
1242 char b[BDEVNAME_SIZE];
1243 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1244 pr_debug("raid5: error called\n");
1245
1246 if (!test_bit(Faulty, &rdev->flags)) {
1247 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1248 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1249 unsigned long flags;
1250 spin_lock_irqsave(&conf->device_lock, flags);
1251 mddev->degraded++;
1252 spin_unlock_irqrestore(&conf->device_lock, flags);
1253 /*
1254 * if recovery was running, make sure it aborts.
1255 */
1256 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1257 }
1258 set_bit(Faulty, &rdev->flags);
1259 printk(KERN_ALERT
1260 "raid5: Disk failure on %s, disabling device.\n"
1261 "raid5: Operation continuing on %d devices.\n",
1262 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1263 }
1264}
1265
1266/*
1267 * Input: a 'big' sector number,
1268 * Output: index of the data and parity disk, and the sector # in them.
1269 */
1270static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1271 int previous, int *dd_idx,
1272 struct stripe_head *sh)
1273{
1274 long stripe;
1275 unsigned long chunk_number;
1276 unsigned int chunk_offset;
1277 int pd_idx, qd_idx;
1278 int ddf_layout = 0;
1279 sector_t new_sector;
1280 int sectors_per_chunk = conf->chunk_size >> 9;
1281 int raid_disks = previous ? conf->previous_raid_disks
1282 : conf->raid_disks;
1283 int data_disks = raid_disks - conf->max_degraded;
1284
1285 /* First compute the information on this sector */
1286
1287 /*
1288 * Compute the chunk number and the sector offset inside the chunk
1289 */
1290 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1291 chunk_number = r_sector;
1292 BUG_ON(r_sector != chunk_number);
1293
1294 /*
1295 * Compute the stripe number
1296 */
1297 stripe = chunk_number / data_disks;
1298
1299 /*
1300 * Compute the data disk and parity disk indexes inside the stripe
1301 */
1302 *dd_idx = chunk_number % data_disks;
1303
1304 /*
1305 * Select the parity disk based on the user selected algorithm.
1306 */
1307 pd_idx = qd_idx = ~0;
1308 switch(conf->level) {
1309 case 4:
1310 pd_idx = data_disks;
1311 break;
1312 case 5:
1313 switch (conf->algorithm) {
1314 case ALGORITHM_LEFT_ASYMMETRIC:
1315 pd_idx = data_disks - stripe % raid_disks;
1316 if (*dd_idx >= pd_idx)
1317 (*dd_idx)++;
1318 break;
1319 case ALGORITHM_RIGHT_ASYMMETRIC:
1320 pd_idx = stripe % raid_disks;
1321 if (*dd_idx >= pd_idx)
1322 (*dd_idx)++;
1323 break;
1324 case ALGORITHM_LEFT_SYMMETRIC:
1325 pd_idx = data_disks - stripe % raid_disks;
1326 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1327 break;
1328 case ALGORITHM_RIGHT_SYMMETRIC:
1329 pd_idx = stripe % raid_disks;
1330 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1331 break;
1332 case ALGORITHM_PARITY_0:
1333 pd_idx = 0;
1334 (*dd_idx)++;
1335 break;
1336 case ALGORITHM_PARITY_N:
1337 pd_idx = data_disks;
1338 break;
1339 default:
1340 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1341 conf->algorithm);
1342 BUG();
1343 }
1344 break;
1345 case 6:
1346
1347 switch (conf->algorithm) {
1348 case ALGORITHM_LEFT_ASYMMETRIC:
1349 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1350 qd_idx = pd_idx + 1;
1351 if (pd_idx == raid_disks-1) {
1352 (*dd_idx)++; /* Q D D D P */
1353 qd_idx = 0;
1354 } else if (*dd_idx >= pd_idx)
1355 (*dd_idx) += 2; /* D D P Q D */
1356 break;
1357 case ALGORITHM_RIGHT_ASYMMETRIC:
1358 pd_idx = stripe % raid_disks;
1359 qd_idx = pd_idx + 1;
1360 if (pd_idx == raid_disks-1) {
1361 (*dd_idx)++; /* Q D D D P */
1362 qd_idx = 0;
1363 } else if (*dd_idx >= pd_idx)
1364 (*dd_idx) += 2; /* D D P Q D */
1365 break;
1366 case ALGORITHM_LEFT_SYMMETRIC:
1367 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1368 qd_idx = (pd_idx + 1) % raid_disks;
1369 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1370 break;
1371 case ALGORITHM_RIGHT_SYMMETRIC:
1372 pd_idx = stripe % raid_disks;
1373 qd_idx = (pd_idx + 1) % raid_disks;
1374 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1375 break;
1376
1377 case ALGORITHM_PARITY_0:
1378 pd_idx = 0;
1379 qd_idx = 1;
1380 (*dd_idx) += 2;
1381 break;
1382 case ALGORITHM_PARITY_N:
1383 pd_idx = data_disks;
1384 qd_idx = data_disks + 1;
1385 break;
1386
1387 case ALGORITHM_ROTATING_ZERO_RESTART:
1388 /* Exactly the same as RIGHT_ASYMMETRIC, but the order
1389 * of blocks for computing Q is different.
1390 */
1391 pd_idx = stripe % raid_disks;
1392 qd_idx = pd_idx + 1;
1393 if (pd_idx == raid_disks-1) {
1394 (*dd_idx)++; /* Q D D D P */
1395 qd_idx = 0;
1396 } else if (*dd_idx >= pd_idx)
1397 (*dd_idx) += 2; /* D D P Q D */
1398 ddf_layout = 1;
1399 break;
1400
1401 case ALGORITHM_ROTATING_N_RESTART:
1402 /* Same as left_asymmetric, but first stripe is
1403 * D D D P Q rather than
1404 * Q D D D P
1405 */
1406 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1407 qd_idx = pd_idx + 1;
1408 if (pd_idx == raid_disks-1) {
1409 (*dd_idx)++; /* Q D D D P */
1410 qd_idx = 0;
1411 } else if (*dd_idx >= pd_idx)
1412 (*dd_idx) += 2; /* D D P Q D */
1413 ddf_layout = 1;
1414 break;
1415
1416 case ALGORITHM_ROTATING_N_CONTINUE:
1417 /* Same as left_symmetric but Q is before P */
1418 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1419 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1420 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1421 ddf_layout = 1;
1422 break;
1423
1424 case ALGORITHM_LEFT_ASYMMETRIC_6:
1425 /* RAID5 left_asymmetric, with Q on last device */
1426 pd_idx = data_disks - stripe % (raid_disks-1);
1427 if (*dd_idx >= pd_idx)
1428 (*dd_idx)++;
1429 qd_idx = raid_disks - 1;
1430 break;
1431
1432 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1433 pd_idx = stripe % (raid_disks-1);
1434 if (*dd_idx >= pd_idx)
1435 (*dd_idx)++;
1436 qd_idx = raid_disks - 1;
1437 break;
1438
1439 case ALGORITHM_LEFT_SYMMETRIC_6:
1440 pd_idx = data_disks - stripe % (raid_disks-1);
1441 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1442 qd_idx = raid_disks - 1;
1443 break;
1444
1445 case ALGORITHM_RIGHT_SYMMETRIC_6:
1446 pd_idx = stripe % (raid_disks-1);
1447 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1448 qd_idx = raid_disks - 1;
1449 break;
1450
1451 case ALGORITHM_PARITY_0_6:
1452 pd_idx = 0;
1453 (*dd_idx)++;
1454 qd_idx = raid_disks - 1;
1455 break;
1456
1457
1458 default:
1459 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1460 conf->algorithm);
1461 BUG();
1462 }
1463 break;
1464 }
1465
1466 if (sh) {
1467 sh->pd_idx = pd_idx;
1468 sh->qd_idx = qd_idx;
1469 sh->ddf_layout = ddf_layout;
1470 }
1471 /*
1472 * Finally, compute the new sector number
1473 */
1474 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1475 return new_sector;
1476}
1477
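/*
 * Worked example for raid5_compute_sector() (illustrative): 4 disks,
 * RAID-5, 64KiB chunks (sectors_per_chunk == 128),
 * ALGORITHM_LEFT_SYMMETRIC, r_sector == 300.  Then chunk_offset == 44,
 * chunk_number == 2, stripe == 0, *dd_idx == 2 % 3 == 2;
 * pd_idx == 3 - 0 % 4 == 3 and *dd_idx == (3 + 1 + 2) % 4 == 2, so the
 * block lands on disk 2 at new_sector == 0 * 128 + 44 == 44.
 */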
1478
1479static sector_t compute_blocknr(struct stripe_head *sh, int i)
1480{
1481 raid5_conf_t *conf = sh->raid_conf;
1482 int raid_disks = sh->disks;
1483 int data_disks = raid_disks - conf->max_degraded;
1484 sector_t new_sector = sh->sector, check;
1485 int sectors_per_chunk = conf->chunk_size >> 9;
1486 sector_t stripe;
1487 int chunk_offset;
1488 int chunk_number, dummy1, dd_idx = i;
1489 sector_t r_sector;
1490 struct stripe_head sh2;
1491
1492
1493 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1494 stripe = new_sector;
1495 BUG_ON(new_sector != stripe);
1496
1497 if (i == sh->pd_idx)
1498 return 0;
1499 switch(conf->level) {
1500 case 4: break;
1501 case 5:
1502 switch (conf->algorithm) {
1503 case ALGORITHM_LEFT_ASYMMETRIC:
1504 case ALGORITHM_RIGHT_ASYMMETRIC:
1505 if (i > sh->pd_idx)
1506 i--;
1507 break;
1508 case ALGORITHM_LEFT_SYMMETRIC:
1509 case ALGORITHM_RIGHT_SYMMETRIC:
1510 if (i < sh->pd_idx)
1511 i += raid_disks;
1512 i -= (sh->pd_idx + 1);
1513 break;
1514 case ALGORITHM_PARITY_0:
1515 i -= 1;
1516 break;
1517 case ALGORITHM_PARITY_N:
1518 break;
1519 default:
1520 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1521 conf->algorithm);
1522 BUG();
1523 }
1524 break;
1525 case 6:
1526 if (i == sh->qd_idx)
1527 return 0; /* It is the Q disk */
1528 switch (conf->algorithm) {
1529 case ALGORITHM_LEFT_ASYMMETRIC:
1530 case ALGORITHM_RIGHT_ASYMMETRIC:
1531 case ALGORITHM_ROTATING_ZERO_RESTART:
1532 case ALGORITHM_ROTATING_N_RESTART:
1533 if (sh->pd_idx == raid_disks-1)
1534 i--; /* Q D D D P */
1535 else if (i > sh->pd_idx)
1536 i -= 2; /* D D P Q D */
1537 break;
1538 case ALGORITHM_LEFT_SYMMETRIC:
1539 case ALGORITHM_RIGHT_SYMMETRIC:
1540 if (sh->pd_idx == raid_disks-1)
1541 i--; /* Q D D D P */
1542 else {
1543 /* D D P Q D */
1544 if (i < sh->pd_idx)
1545 i += raid_disks;
1546 i -= (sh->pd_idx + 2);
1547 }
1548 break;
1549 case ALGORITHM_PARITY_0:
1550 i -= 2;
1551 break;
1552 case ALGORITHM_PARITY_N:
1553 break;
1554 case ALGORITHM_ROTATING_N_CONTINUE:
1555 if (sh->pd_idx == 0)
1556 i--; /* P D D D Q */
1557 else if (i > sh->pd_idx)
1558 i -= 2; /* D D Q P D */
1559 break;
1560 case ALGORITHM_LEFT_ASYMMETRIC_6:
1561 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1562 if (i > sh->pd_idx)
1563 i--;
1564 break;
1565 case ALGORITHM_LEFT_SYMMETRIC_6:
1566 case ALGORITHM_RIGHT_SYMMETRIC_6:
1567 if (i < sh->pd_idx)
1568 i += data_disks + 1;
1569 i -= (sh->pd_idx + 1);
1570 break;
1571 case ALGORITHM_PARITY_0_6:
1572 i -= 1;
1573 break;
1574 default:
1575 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1576 conf->algorithm);
1577 BUG();
1578 }
1579 break;
1580 }
1581
1582 chunk_number = stripe * data_disks + i;
1583 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1584
1585 check = raid5_compute_sector(conf, r_sector,
1586 (raid_disks != conf->raid_disks),
1587 &dummy1, &sh2);
1588 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
1589 || sh2.qd_idx != sh->qd_idx) {
1590 printk(KERN_ERR "compute_blocknr: map not correct\n");
1591 return 0;
1592 }
1593 return r_sector;
1594}
1595
1596
1597
1598/*
1599 * Copy data between a page in the stripe cache, and one or more bion
1600 * The page could align with the middle of the bio, or there could be
1601 * several bion, each with several bio_vecs, which cover part of the page
1602 * Multiple bion are linked together on bi_next. There may be extras
1603 * at the end of this list. We ignore them.
1604 */
1605static void copy_data(int frombio, struct bio *bio,
1606 struct page *page,
1607 sector_t sector)
1608{
1609 char *pa = page_address(page);
1610 struct bio_vec *bvl;
1611 int i;
1612 int page_offset;
1613
1614 if (bio->bi_sector >= sector)
1615 page_offset = (signed)(bio->bi_sector - sector) * 512;
1616 else
1617 page_offset = (signed)(sector - bio->bi_sector) * -512;
1618 bio_for_each_segment(bvl, bio, i) {
1619 int len = bio_iovec_idx(bio,i)->bv_len;
1620 int clen;
1621 int b_offset = 0;
1622
1623 if (page_offset < 0) {
1624 b_offset = -page_offset;
1625 page_offset += b_offset;
1626 len -= b_offset;
1627 }
1628
1629 if (len > 0 && page_offset + len > STRIPE_SIZE)
1630 clen = STRIPE_SIZE - page_offset;
1631 else clen = len;
1632
1633 if (clen > 0) {
1634 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
1635 if (frombio)
1636 memcpy(pa+page_offset, ba+b_offset, clen);
1637 else
1638 memcpy(ba+b_offset, pa+page_offset, clen);
1639 __bio_kunmap_atomic(ba, KM_USER0);
1640 }
1641 if (clen < len) /* hit end of page */
1642 break;
1643 page_offset += len;
1644 }
1645}
1646
1647#define check_xor() do { \
1648 if (count == MAX_XOR_BLOCKS) { \
1649 xor_blocks(count, STRIPE_SIZE, dest, ptr);\
1650 count = 0; \
1651 } \
1652 } while(0)
1653
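/*
 * check_xor() flushes the accumulated sources whenever the batch reaches
 * MAX_XOR_BLOCKS, so callers can keep adding pages in a loop without
 * tracking the xor_blocks() fan-in limit.  Hypothetical caller, shaped
 * like compute_block_1() below:
 */
#if 0
static void check_xor_example(struct stripe_head *sh, void *dest)
{
	void *ptr[MAX_XOR_BLOCKS];
	int i, count = 0;

	for (i = sh->disks; i--; ) {
		ptr[count++] = page_address(sh->dev[i].page);
		check_xor();	/* flushes into dest when the batch fills */
	}
	if (count)
		xor_blocks(count, STRIPE_SIZE, dest, ptr);
}
#endif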
1654static void compute_parity6(struct stripe_head *sh, int method)
1655{
1656 raid5_conf_t *conf = sh->raid_conf;
1657 int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
1658 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1659 struct bio *chosen;
1660 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1661 void *ptrs[syndrome_disks+2];
1662
1663 pd_idx = sh->pd_idx;
1664 qd_idx = sh->qd_idx;
1665 d0_idx = raid6_d0(sh);
1666
1667 pr_debug("compute_parity, stripe %llu, method %d\n",
1668 (unsigned long long)sh->sector, method);
1669
1670 switch(method) {
1671 case READ_MODIFY_WRITE:
1672 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
1673 case RECONSTRUCT_WRITE:
1674 for (i= disks; i-- ;)
1675 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
1676 chosen = sh->dev[i].towrite;
1677 sh->dev[i].towrite = NULL;
1678
1679 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1680 wake_up(&conf->wait_for_overlap);
1681
1682 BUG_ON(sh->dev[i].written);
1683 sh->dev[i].written = chosen;
1684 }
1685 break;
1686 case CHECK_PARITY:
1687 BUG(); /* Not implemented yet */
1688 }
1689
1690 for (i = disks; i--;)
1691 if (sh->dev[i].written) {
1692 sector_t sector = sh->dev[i].sector;
1693 struct bio *wbi = sh->dev[i].written;
1694 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
1695 copy_data(1, wbi, sh->dev[i].page, sector);
1696 wbi = r5_next_bio(wbi, sector);
1697 }
1698
1699 set_bit(R5_LOCKED, &sh->dev[i].flags);
1700 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1701 }
1702
1703 /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/
1704
1705 for (i = 0; i < disks; i++)
1706 ptrs[i] = (void *)raid6_empty_zero_page;
1707
1708 count = 0;
1709 i = d0_idx;
1710 do {
1711 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1712
1713 ptrs[slot] = page_address(sh->dev[i].page);
1714 if (slot < syndrome_disks &&
1715 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
1716 printk(KERN_ERR "block %d/%d not uptodate "
1717 "on parity calc\n", i, count);
1718 BUG();
1719 }
1720
1721 i = raid6_next_disk(i, disks);
1722 } while (i != d0_idx);
1723 BUG_ON(count != syndrome_disks);
1724
1725 raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);
1726
1727 switch(method) {
1728 case RECONSTRUCT_WRITE:
1729 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1730 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1731 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1732 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
1733 break;
1734 case UPDATE_PARITY:
1735 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1736 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1737 break;
1738 }
1739}
1740
1741
1742/* Compute one missing block */
1743static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1744{
1745 int i, count, disks = sh->disks;
1746 void *ptr[MAX_XOR_BLOCKS], *dest, *p;
1747 int qd_idx = sh->qd_idx;
1748
1749 pr_debug("compute_block_1, stripe %llu, idx %d\n",
1750 (unsigned long long)sh->sector, dd_idx);
1751
1752 if ( dd_idx == qd_idx ) {
1753 /* We're actually computing the Q drive */
1754 compute_parity6(sh, UPDATE_PARITY);
1755 } else {
1756 dest = page_address(sh->dev[dd_idx].page);
1757 if (!nozero) memset(dest, 0, STRIPE_SIZE);
1758 count = 0;
1759 for (i = disks ; i--; ) {
1760 if (i == dd_idx || i == qd_idx)
1761 continue;
1762 p = page_address(sh->dev[i].page);
1763 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
1764 ptr[count++] = p;
1765 else
1766 printk("compute_block() %d, stripe %llu, %d"
1767 " not present\n", dd_idx,
1768 (unsigned long long)sh->sector, i);
1769
1770 check_xor();
1771 }
1772 if (count)
1773 xor_blocks(count, STRIPE_SIZE, dest, ptr);
16a53ecc
N
1774 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1775 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1776 }
1777}
1778
1779/* Compute two missing blocks */
1780static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
1781{
f416885e 1782 int i, count, disks = sh->disks;
67cc2b81 1783 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
d0dabf7e
N
1784 int d0_idx = raid6_d0(sh);
1785 int faila = -1, failb = -1;
1786 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
67cc2b81 1787 void *ptrs[syndrome_disks+2];
16a53ecc 1788
67cc2b81
N
1789 for (i = 0; i < disks ; i++)
1790 ptrs[i] = (void *)raid6_empty_zero_page;
d0dabf7e
N
1791 count = 0;
1792 i = d0_idx;
1793 do {
67cc2b81
N
1794 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1795
d0dabf7e 1796 ptrs[slot] = page_address(sh->dev[i].page);
67cc2b81 1797
d0dabf7e
N
1798 if (i == dd_idx1)
1799 faila = slot;
1800 if (i == dd_idx2)
1801 failb = slot;
1802 i = raid6_next_disk(i, disks);
1803 } while (i != d0_idx);
67cc2b81 1804 BUG_ON(count != syndrome_disks);
16a53ecc
N
1805
1806 BUG_ON(faila == failb);
1807 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
1808
45b4233c 1809 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
d0dabf7e
N
1810 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
1811 faila, failb);
16a53ecc 1812
67cc2b81 1813 if (failb == syndrome_disks+1) {
16a53ecc 1814 /* Q disk is one of the missing disks */
67cc2b81 1815 if (faila == syndrome_disks) {
16a53ecc
N
1816 /* Missing P+Q, just recompute */
1817 compute_parity6(sh, UPDATE_PARITY);
1818 return;
1819 } else {
1820 /* We're missing D+Q; recompute D from P */
d0dabf7e
N
1821 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
1822 dd_idx2 : dd_idx1),
1823 0);
16a53ecc
N
1824 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
1825 return;
1826 }
1827 }
1828
d0dabf7e 1829 /* We're missing D+P or D+D; */
67cc2b81 1830 if (failb == syndrome_disks) {
d0dabf7e 1831 /* We're missing D+P. */
67cc2b81 1832 raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
d0dabf7e
N
1833 } else {
1834 /* We're missing D+D. */
67cc2b81
N
1835 raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
1836 ptrs);
16a53ecc 1837 }
d0dabf7e
N
1838
 1839 	/* Both of the above recovery calls update both missing blocks */
1840 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
1841 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
16a53ecc
N
1842}
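/* Editorial summary, not in the original source, of the cases above:
 * losing Q alone, or P and Q together, is handled by recomputing
 * parity; losing one data block plus Q falls back to the RAID-5 style
 * XOR rebuild in compute_block_1(); only the D+P and D+D cases need
 * the Galois-field helpers raid6_datap_recov() and raid6_2data_recov().
 */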
1843
600aa109 1844static void
1fe797e6 1845schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
600aa109 1846 int rcw, int expand)
e33129d8
DW
1847{
1848 int i, pd_idx = sh->pd_idx, disks = sh->disks;
e33129d8
DW
1849
1850 if (rcw) {
1851 /* if we are not expanding this is a proper write request, and
1852 * there will be bios with new data to be drained into the
1853 * stripe cache
1854 */
1855 if (!expand) {
600aa109
DW
1856 sh->reconstruct_state = reconstruct_state_drain_run;
1857 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1858 } else
1859 sh->reconstruct_state = reconstruct_state_run;
16a53ecc 1860
600aa109 1861 set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
e33129d8
DW
1862
1863 for (i = disks; i--; ) {
1864 struct r5dev *dev = &sh->dev[i];
1865
1866 if (dev->towrite) {
1867 set_bit(R5_LOCKED, &dev->flags);
d8ee0728 1868 set_bit(R5_Wantdrain, &dev->flags);
e33129d8
DW
1869 if (!expand)
1870 clear_bit(R5_UPTODATE, &dev->flags);
600aa109 1871 s->locked++;
e33129d8
DW
1872 }
1873 }
600aa109 1874 if (s->locked + 1 == disks)
8b3e6cdc
DW
1875 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
1876 atomic_inc(&sh->raid_conf->pending_full_writes);
e33129d8
DW
1877 } else {
1878 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
1879 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
1880
d8ee0728 1881 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
600aa109
DW
1882 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
1883 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1884 set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
e33129d8
DW
1885
1886 for (i = disks; i--; ) {
1887 struct r5dev *dev = &sh->dev[i];
1888 if (i == pd_idx)
1889 continue;
1890
e33129d8
DW
1891 if (dev->towrite &&
1892 (test_bit(R5_UPTODATE, &dev->flags) ||
d8ee0728
DW
1893 test_bit(R5_Wantcompute, &dev->flags))) {
1894 set_bit(R5_Wantdrain, &dev->flags);
e33129d8
DW
1895 set_bit(R5_LOCKED, &dev->flags);
1896 clear_bit(R5_UPTODATE, &dev->flags);
600aa109 1897 s->locked++;
e33129d8
DW
1898 }
1899 }
1900 }
1901
1902 /* keep the parity disk locked while asynchronous operations
1903 * are in flight
1904 */
1905 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1906 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
600aa109 1907 s->locked++;
e33129d8 1908
600aa109 1909 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
e46b272b 1910 __func__, (unsigned long long)sh->sector,
600aa109 1911 s->locked, s->ops_request);
e33129d8 1912}
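/* Editorial note, not in the original source: the two branches above
 * mirror the strategies chosen in handle_stripe_dirtying5().  The rcw
 * path drains new data into the cache and recomputes parity from all
 * data blocks (BIODRAIN + POSTXOR); the rmw path first subtracts the
 * old data out of the parity (PREXOR), drains, then xors the new data
 * back in.
 */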
16a53ecc 1913
1da177e4
LT
1914/*
1915 * Each stripe/dev can have one or more bion attached.
16a53ecc 1916 * toread/towrite point to the first in a chain.
1da177e4
LT
1917 * The bi_next chain must be in order.
1918 */
1919static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
1920{
1921 struct bio **bip;
1922 raid5_conf_t *conf = sh->raid_conf;
72626685 1923 int firstwrite=0;
1da177e4 1924
45b4233c 1925 pr_debug("adding bh b#%llu to stripe s#%llu\n",
1da177e4
LT
1926 (unsigned long long)bi->bi_sector,
1927 (unsigned long long)sh->sector);
1928
1929
1930 spin_lock(&sh->lock);
1931 spin_lock_irq(&conf->device_lock);
72626685 1932 if (forwrite) {
1da177e4 1933 bip = &sh->dev[dd_idx].towrite;
72626685
N
1934 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
1935 firstwrite = 1;
1936 } else
1da177e4
LT
1937 bip = &sh->dev[dd_idx].toread;
1938 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
1939 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
1940 goto overlap;
1941 bip = & (*bip)->bi_next;
1942 }
1943 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
1944 goto overlap;
1945
78bafebd 1946 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
1da177e4
LT
1947 if (*bip)
1948 bi->bi_next = *bip;
1949 *bip = bi;
960e739d 1950 bi->bi_phys_segments++;
1da177e4
LT
1951 spin_unlock_irq(&conf->device_lock);
1952 spin_unlock(&sh->lock);
1953
45b4233c 1954 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
1da177e4
LT
1955 (unsigned long long)bi->bi_sector,
1956 (unsigned long long)sh->sector, dd_idx);
1957
72626685 1958 if (conf->mddev->bitmap && firstwrite) {
72626685
N
1959 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1960 STRIPE_SECTORS, 0);
ae3c20cc 1961 sh->bm_seq = conf->seq_flush+1;
72626685
N
1962 set_bit(STRIPE_BIT_DELAY, &sh->state);
1963 }
1964
1da177e4
LT
1965 if (forwrite) {
1966 /* check if page is covered */
1967 sector_t sector = sh->dev[dd_idx].sector;
1968 for (bi=sh->dev[dd_idx].towrite;
1969 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
1970 bi && bi->bi_sector <= sector;
1971 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
1972 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
1973 sector = bi->bi_sector + (bi->bi_size>>9);
1974 }
1975 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
1976 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
1977 }
1978 return 1;
1979
1980 overlap:
1981 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1982 spin_unlock_irq(&conf->device_lock);
1983 spin_unlock(&sh->lock);
1984 return 0;
1985}
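/* Editorial example, not in the original source: the towrite/toread
 * chains above stay sorted by bi_sector and overlap-free.  Bios
 * covering sectors 0-3 and 4-7 of one stripe device can both be
 * queued, but a later bio touching sectors 2-5 takes the "overlap"
 * exit, leaving R5_Overlap set so the caller retries once the
 * conflicting bio completes and wait_for_overlap is woken.
 */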
1986
29269553
N
1987static void end_reshape(raid5_conf_t *conf);
1988
16a53ecc
N
1989static int page_is_zero(struct page *p)
1990{
1991 char *a = page_address(p);
1992 return ((*(u32*)a) == 0 &&
1993 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1994}
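/* Editorial note, not in the original source: page_is_zero() relies on
 * an overlapping compare -- if the first four bytes are zero and the
 * page compares equal to itself shifted by four bytes, every word must
 * be zero, so the whole page is known to be zero without keeping a
 * scratch page of zeroes to compare against.
 */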
1995
911d4ee8
N
1996static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
1997 struct stripe_head *sh)
ccfcc3c1
N
1998{
1999 int sectors_per_chunk = conf->chunk_size >> 9;
911d4ee8 2000 int dd_idx;
2d2063ce 2001 int chunk_offset = sector_div(stripe, sectors_per_chunk);
112bf897 2002 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2d2063ce 2003
112bf897
N
2004 raid5_compute_sector(conf,
2005 stripe * (disks - conf->max_degraded)
b875e531 2006 *sectors_per_chunk + chunk_offset,
112bf897 2007 previous,
911d4ee8 2008 &dd_idx, sh);
ccfcc3c1
N
2009}
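/* Editorial note, not in the original source: stripe_set_idx() maps a
 * logical stripe number to the array sector of its first data block
 * and lets raid5_compute_sector() fill in sh->pd_idx/sh->qd_idx for
 * that geometry; "previous" selects the pre-reshape disk count while
 * an expansion is in progress.
 */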
2010
a4456856 2011static void
1fe797e6 2012handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
a4456856
DW
2013 struct stripe_head_state *s, int disks,
2014 struct bio **return_bi)
2015{
2016 int i;
2017 for (i = disks; i--; ) {
2018 struct bio *bi;
2019 int bitmap_end = 0;
2020
2021 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2022 mdk_rdev_t *rdev;
2023 rcu_read_lock();
2024 rdev = rcu_dereference(conf->disks[i].rdev);
2025 if (rdev && test_bit(In_sync, &rdev->flags))
2026 /* multiple read failures in one stripe */
2027 md_error(conf->mddev, rdev);
2028 rcu_read_unlock();
2029 }
2030 spin_lock_irq(&conf->device_lock);
2031 /* fail all writes first */
2032 bi = sh->dev[i].towrite;
2033 sh->dev[i].towrite = NULL;
2034 if (bi) {
2035 s->to_write--;
2036 bitmap_end = 1;
2037 }
2038
2039 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2040 wake_up(&conf->wait_for_overlap);
2041
2042 while (bi && bi->bi_sector <
2043 sh->dev[i].sector + STRIPE_SECTORS) {
2044 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2045 clear_bit(BIO_UPTODATE, &bi->bi_flags);
960e739d 2046 if (!raid5_dec_bi_phys_segments(bi)) {
a4456856
DW
2047 md_write_end(conf->mddev);
2048 bi->bi_next = *return_bi;
2049 *return_bi = bi;
2050 }
2051 bi = nextbi;
2052 }
2053 /* and fail all 'written' */
2054 bi = sh->dev[i].written;
2055 sh->dev[i].written = NULL;
2056 if (bi) bitmap_end = 1;
2057 while (bi && bi->bi_sector <
2058 sh->dev[i].sector + STRIPE_SECTORS) {
2059 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2060 clear_bit(BIO_UPTODATE, &bi->bi_flags);
960e739d 2061 if (!raid5_dec_bi_phys_segments(bi)) {
a4456856
DW
2062 md_write_end(conf->mddev);
2063 bi->bi_next = *return_bi;
2064 *return_bi = bi;
2065 }
2066 bi = bi2;
2067 }
2068
b5e98d65
DW
2069 /* fail any reads if this device is non-operational and
2070 * the data has not reached the cache yet.
2071 */
2072 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2073 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2074 test_bit(R5_ReadError, &sh->dev[i].flags))) {
a4456856
DW
2075 bi = sh->dev[i].toread;
2076 sh->dev[i].toread = NULL;
2077 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2078 wake_up(&conf->wait_for_overlap);
2079 if (bi) s->to_read--;
2080 while (bi && bi->bi_sector <
2081 sh->dev[i].sector + STRIPE_SECTORS) {
2082 struct bio *nextbi =
2083 r5_next_bio(bi, sh->dev[i].sector);
2084 clear_bit(BIO_UPTODATE, &bi->bi_flags);
960e739d 2085 if (!raid5_dec_bi_phys_segments(bi)) {
a4456856
DW
2086 bi->bi_next = *return_bi;
2087 *return_bi = bi;
2088 }
2089 bi = nextbi;
2090 }
2091 }
2092 spin_unlock_irq(&conf->device_lock);
2093 if (bitmap_end)
2094 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2095 STRIPE_SECTORS, 0, 0);
2096 }
2097
8b3e6cdc
DW
2098 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2099 if (atomic_dec_and_test(&conf->pending_full_writes))
2100 md_wakeup_thread(conf->mddev->thread);
a4456856
DW
2101}
2102
1fe797e6
DW
2103/* fetch_block5 - checks the given member device to see if its data needs
2104 * to be read or computed to satisfy a request.
2105 *
2106 * Returns 1 when no more member devices need to be checked, otherwise returns
2107 * 0 to tell the loop in handle_stripe_fill5 to continue
f38e1219 2108 */
1fe797e6
DW
2109static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
2110 int disk_idx, int disks)
f38e1219
DW
2111{
2112 struct r5dev *dev = &sh->dev[disk_idx];
2113 struct r5dev *failed_dev = &sh->dev[s->failed_num];
2114
f38e1219
DW
2115 /* is the data in this block needed, and can we get it? */
2116 if (!test_bit(R5_LOCKED, &dev->flags) &&
1fe797e6
DW
2117 !test_bit(R5_UPTODATE, &dev->flags) &&
2118 (dev->toread ||
2119 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2120 s->syncing || s->expanding ||
2121 (s->failed &&
2122 (failed_dev->toread ||
2123 (failed_dev->towrite &&
2124 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
976ea8d4
DW
2125 /* We would like to get this block, possibly by computing it,
2126 * otherwise read it if the backing disk is insync
f38e1219
DW
2127 */
2128 if ((s->uptodate == disks - 1) &&
ecc65c9b 2129 (s->failed && disk_idx == s->failed_num)) {
976ea8d4
DW
2130 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2131 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
f38e1219
DW
2132 set_bit(R5_Wantcompute, &dev->flags);
2133 sh->ops.target = disk_idx;
2134 s->req_compute = 1;
f38e1219
DW
2135 /* Careful: from this point on 'uptodate' is in the eye
2136 * of raid5_run_ops which services 'compute' operations
2137 * before writes. R5_Wantcompute flags a block that will
2138 * be R5_UPTODATE by the time it is needed for a
2139 * subsequent operation.
2140 */
2141 s->uptodate++;
1fe797e6 2142 return 1; /* uptodate + compute == disks */
7a1fc53c 2143 } else if (test_bit(R5_Insync, &dev->flags)) {
f38e1219
DW
2144 set_bit(R5_LOCKED, &dev->flags);
2145 set_bit(R5_Wantread, &dev->flags);
f38e1219
DW
2146 s->locked++;
2147 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2148 s->syncing);
2149 }
2150 }
2151
1fe797e6 2152 return 0;
f38e1219
DW
2153}
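/* Editorial note, not in the original source: the (uptodate == disks - 1)
 * test above is what makes the compute path safe.  With every other
 * block of the stripe valid, the one missing block is exactly the XOR
 * of the rest, so it can be scheduled as a STRIPE_OP_COMPUTE_BLK
 * instead of a read from a failed device.
 */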
2154
1fe797e6
DW
2155/**
2156 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2157 */
2158static void handle_stripe_fill5(struct stripe_head *sh,
a4456856
DW
2159 struct stripe_head_state *s, int disks)
2160{
2161 int i;
f38e1219 2162
f38e1219
DW
2163 /* look for blocks to read/compute, skip this if a compute
2164 * is already in flight, or if the stripe contents are in the
2165 * midst of changing due to a write
2166 */
976ea8d4 2167 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
1fe797e6 2168 !sh->reconstruct_state)
f38e1219 2169 for (i = disks; i--; )
1fe797e6 2170 if (fetch_block5(sh, s, i, disks))
f38e1219 2171 break;
a4456856
DW
2172 set_bit(STRIPE_HANDLE, &sh->state);
2173}
2174
1fe797e6 2175static void handle_stripe_fill6(struct stripe_head *sh,
a4456856
DW
2176 struct stripe_head_state *s, struct r6_state *r6s,
2177 int disks)
2178{
2179 int i;
2180 for (i = disks; i--; ) {
2181 struct r5dev *dev = &sh->dev[i];
2182 if (!test_bit(R5_LOCKED, &dev->flags) &&
2183 !test_bit(R5_UPTODATE, &dev->flags) &&
2184 (dev->toread || (dev->towrite &&
2185 !test_bit(R5_OVERWRITE, &dev->flags)) ||
2186 s->syncing || s->expanding ||
2187 (s->failed >= 1 &&
2188 (sh->dev[r6s->failed_num[0]].toread ||
2189 s->to_write)) ||
2190 (s->failed >= 2 &&
2191 (sh->dev[r6s->failed_num[1]].toread ||
2192 s->to_write)))) {
2193 /* we would like to get this block, possibly
2194 * by computing it, but we might not be able to
2195 */
c337869d
DW
2196 if ((s->uptodate == disks - 1) &&
2197 (s->failed && (i == r6s->failed_num[0] ||
2198 i == r6s->failed_num[1]))) {
45b4233c 2199 pr_debug("Computing stripe %llu block %d\n",
a4456856
DW
2200 (unsigned long long)sh->sector, i);
2201 compute_block_1(sh, i, 0);
2202 s->uptodate++;
2203 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
2204 /* Computing 2-failure is *very* expensive; only
2205 * do it if failed >= 2
2206 */
2207 int other;
2208 for (other = disks; other--; ) {
2209 if (other == i)
2210 continue;
2211 if (!test_bit(R5_UPTODATE,
2212 &sh->dev[other].flags))
2213 break;
2214 }
2215 BUG_ON(other < 0);
45b4233c 2216 pr_debug("Computing stripe %llu blocks %d,%d\n",
a4456856
DW
2217 (unsigned long long)sh->sector,
2218 i, other);
2219 compute_block_2(sh, i, other);
2220 s->uptodate += 2;
2221 } else if (test_bit(R5_Insync, &dev->flags)) {
2222 set_bit(R5_LOCKED, &dev->flags);
2223 set_bit(R5_Wantread, &dev->flags);
2224 s->locked++;
45b4233c 2225 pr_debug("Reading block %d (sync=%d)\n",
a4456856
DW
2226 i, s->syncing);
2227 }
2228 }
2229 }
2230 set_bit(STRIPE_HANDLE, &sh->state);
2231}
2232
2233
1fe797e6 2234/* handle_stripe_clean_event
a4456856
DW
2235 * any written block on an uptodate or failed drive can be returned.
2236 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2237 * never LOCKED, so we don't need to test 'failed' directly.
2238 */
1fe797e6 2239static void handle_stripe_clean_event(raid5_conf_t *conf,
a4456856
DW
2240 struct stripe_head *sh, int disks, struct bio **return_bi)
2241{
2242 int i;
2243 struct r5dev *dev;
2244
2245 for (i = disks; i--; )
2246 if (sh->dev[i].written) {
2247 dev = &sh->dev[i];
2248 if (!test_bit(R5_LOCKED, &dev->flags) &&
2249 test_bit(R5_UPTODATE, &dev->flags)) {
2250 /* We can return any write requests */
2251 struct bio *wbi, *wbi2;
2252 int bitmap_end = 0;
45b4233c 2253 pr_debug("Return write for disc %d\n", i);
a4456856
DW
2254 spin_lock_irq(&conf->device_lock);
2255 wbi = dev->written;
2256 dev->written = NULL;
2257 while (wbi && wbi->bi_sector <
2258 dev->sector + STRIPE_SECTORS) {
2259 wbi2 = r5_next_bio(wbi, dev->sector);
960e739d 2260 if (!raid5_dec_bi_phys_segments(wbi)) {
a4456856
DW
2261 md_write_end(conf->mddev);
2262 wbi->bi_next = *return_bi;
2263 *return_bi = wbi;
2264 }
2265 wbi = wbi2;
2266 }
2267 if (dev->towrite == NULL)
2268 bitmap_end = 1;
2269 spin_unlock_irq(&conf->device_lock);
2270 if (bitmap_end)
2271 bitmap_endwrite(conf->mddev->bitmap,
2272 sh->sector,
2273 STRIPE_SECTORS,
2274 !test_bit(STRIPE_DEGRADED, &sh->state),
2275 0);
2276 }
2277 }
8b3e6cdc
DW
2278
2279 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2280 if (atomic_dec_and_test(&conf->pending_full_writes))
2281 md_wakeup_thread(conf->mddev->thread);
a4456856
DW
2282}
2283
1fe797e6 2284static void handle_stripe_dirtying5(raid5_conf_t *conf,
a4456856
DW
2285 struct stripe_head *sh, struct stripe_head_state *s, int disks)
2286{
2287 int rmw = 0, rcw = 0, i;
2288 for (i = disks; i--; ) {
2289 /* would I have to read this buffer for read_modify_write */
2290 struct r5dev *dev = &sh->dev[i];
2291 if ((dev->towrite || i == sh->pd_idx) &&
2292 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2293 !(test_bit(R5_UPTODATE, &dev->flags) ||
2294 test_bit(R5_Wantcompute, &dev->flags))) {
a4456856
DW
2295 if (test_bit(R5_Insync, &dev->flags))
2296 rmw++;
2297 else
2298 rmw += 2*disks; /* cannot read it */
2299 }
2300 /* Would I have to read this buffer for reconstruct_write */
2301 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2302 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2303 !(test_bit(R5_UPTODATE, &dev->flags) ||
2304 test_bit(R5_Wantcompute, &dev->flags))) {
2305 if (test_bit(R5_Insync, &dev->flags)) rcw++;
a4456856
DW
2306 else
2307 rcw += 2*disks;
2308 }
2309 }
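	/* Editorial example, not in the original source: with five
	 * in-sync disks (four data plus parity) and a full write to one
	 * data block, rmw counts that block plus parity (rmw == 2) while
	 * rcw counts the three untouched data blocks (rcw == 3), so
	 * read-modify-write wins below.  An unreadable block is charged
	 * 2*disks, steering the choice away from any strategy that would
	 * need a failed disk.
	 */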
45b4233c 2310 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
a4456856
DW
2311 (unsigned long long)sh->sector, rmw, rcw);
2312 set_bit(STRIPE_HANDLE, &sh->state);
2313 if (rmw < rcw && rmw > 0)
2314 /* prefer read-modify-write, but need to get some data */
2315 for (i = disks; i--; ) {
2316 struct r5dev *dev = &sh->dev[i];
2317 if ((dev->towrite || i == sh->pd_idx) &&
2318 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2319 !(test_bit(R5_UPTODATE, &dev->flags) ||
2320 test_bit(R5_Wantcompute, &dev->flags)) &&
a4456856
DW
2321 test_bit(R5_Insync, &dev->flags)) {
2322 if (
2323 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
45b4233c 2324 pr_debug("Read_old block "
a4456856
DW
2325 "%d for r-m-w\n", i);
2326 set_bit(R5_LOCKED, &dev->flags);
2327 set_bit(R5_Wantread, &dev->flags);
2328 s->locked++;
2329 } else {
2330 set_bit(STRIPE_DELAYED, &sh->state);
2331 set_bit(STRIPE_HANDLE, &sh->state);
2332 }
2333 }
2334 }
2335 if (rcw <= rmw && rcw > 0)
2336 /* want reconstruct write, but need to get some data */
2337 for (i = disks; i--; ) {
2338 struct r5dev *dev = &sh->dev[i];
2339 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2340 i != sh->pd_idx &&
2341 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2342 !(test_bit(R5_UPTODATE, &dev->flags) ||
2343 test_bit(R5_Wantcompute, &dev->flags)) &&
a4456856
DW
2344 test_bit(R5_Insync, &dev->flags)) {
2345 if (
2346 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
45b4233c 2347 pr_debug("Read_old block "
a4456856
DW
2348 "%d for Reconstruct\n", i);
2349 set_bit(R5_LOCKED, &dev->flags);
2350 set_bit(R5_Wantread, &dev->flags);
2351 s->locked++;
2352 } else {
2353 set_bit(STRIPE_DELAYED, &sh->state);
2354 set_bit(STRIPE_HANDLE, &sh->state);
2355 }
2356 }
2357 }
2358 /* now if nothing is locked, and if we have enough data,
2359 * we can start a write request
2360 */
f38e1219
DW
2361 /* since handle_stripe can be called at any time we need to handle the
2362 * case where a compute block operation has been submitted and then a
2363 * subsequent call wants to start a write request. raid5_run_ops only
2364 * handles the case where compute block and postxor are requested
2365 * simultaneously. If this is not the case then new writes need to be
2366 * held off until the compute completes.
2367 */
976ea8d4
DW
2368 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2369 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2370 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
1fe797e6 2371 schedule_reconstruction5(sh, s, rcw == 0, 0);
a4456856
DW
2372}
2373
1fe797e6 2374static void handle_stripe_dirtying6(raid5_conf_t *conf,
a4456856
DW
2375 struct stripe_head *sh, struct stripe_head_state *s,
2376 struct r6_state *r6s, int disks)
2377{
2378 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
2379 int qd_idx = r6s->qd_idx;
2380 for (i = disks; i--; ) {
2381 struct r5dev *dev = &sh->dev[i];
2382 /* Would I have to read this buffer for reconstruct_write */
2383 if (!test_bit(R5_OVERWRITE, &dev->flags)
2384 && i != pd_idx && i != qd_idx
2385 && (!test_bit(R5_LOCKED, &dev->flags)
2386 ) &&
2387 !test_bit(R5_UPTODATE, &dev->flags)) {
2388 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2389 else {
45b4233c 2390 pr_debug("raid6: must_compute: "
a4456856
DW
2391 "disk %d flags=%#lx\n", i, dev->flags);
2392 must_compute++;
2393 }
2394 }
2395 }
45b4233c 2396 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
a4456856
DW
2397 (unsigned long long)sh->sector, rcw, must_compute);
2398 set_bit(STRIPE_HANDLE, &sh->state);
2399
2400 if (rcw > 0)
2401 /* want reconstruct write, but need to get some data */
2402 for (i = disks; i--; ) {
2403 struct r5dev *dev = &sh->dev[i];
2404 if (!test_bit(R5_OVERWRITE, &dev->flags)
2405 && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
2406 && !test_bit(R5_LOCKED, &dev->flags) &&
2407 !test_bit(R5_UPTODATE, &dev->flags) &&
2408 test_bit(R5_Insync, &dev->flags)) {
2409 if (
2410 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
45b4233c 2411 pr_debug("Read_old stripe %llu "
a4456856
DW
2412 "block %d for Reconstruct\n",
2413 (unsigned long long)sh->sector, i);
2414 set_bit(R5_LOCKED, &dev->flags);
2415 set_bit(R5_Wantread, &dev->flags);
2416 s->locked++;
2417 } else {
45b4233c 2418 pr_debug("Request delayed stripe %llu "
a4456856
DW
2419 "block %d for Reconstruct\n",
2420 (unsigned long long)sh->sector, i);
2421 set_bit(STRIPE_DELAYED, &sh->state);
2422 set_bit(STRIPE_HANDLE, &sh->state);
2423 }
2424 }
2425 }
2426 /* now if nothing is locked, and if we have enough data, we can start a
2427 * write request
2428 */
2429 if (s->locked == 0 && rcw == 0 &&
2430 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2431 if (must_compute > 0) {
2432 /* We have failed blocks and need to compute them */
2433 switch (s->failed) {
2434 case 0:
2435 BUG();
2436 case 1:
2437 compute_block_1(sh, r6s->failed_num[0], 0);
2438 break;
2439 case 2:
2440 compute_block_2(sh, r6s->failed_num[0],
2441 r6s->failed_num[1]);
2442 break;
2443 default: /* This request should have been failed? */
2444 BUG();
2445 }
2446 }
2447
45b4233c 2448 pr_debug("Computing parity for stripe %llu\n",
a4456856
DW
2449 (unsigned long long)sh->sector);
2450 compute_parity6(sh, RECONSTRUCT_WRITE);
2451 /* now every locked buffer is ready to be written */
2452 for (i = disks; i--; )
2453 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
45b4233c 2454 pr_debug("Writing stripe %llu block %d\n",
a4456856
DW
2455 (unsigned long long)sh->sector, i);
2456 s->locked++;
2457 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2458 }
8b3e6cdc
DW
2459 if (s->locked == disks)
2460 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2461 atomic_inc(&conf->pending_full_writes);
a4456856
DW
2462 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
2463 set_bit(STRIPE_INSYNC, &sh->state);
2464
2465 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2466 atomic_dec(&conf->preread_active_stripes);
2467 if (atomic_read(&conf->preread_active_stripes) <
2468 IO_THRESHOLD)
2469 md_wakeup_thread(conf->mddev->thread);
2470 }
2471 }
2472}
2473
2474static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2475 struct stripe_head_state *s, int disks)
2476{
ecc65c9b 2477 struct r5dev *dev = NULL;
bd2ab670 2478
a4456856 2479 set_bit(STRIPE_HANDLE, &sh->state);
e89f8962 2480
ecc65c9b
DW
2481 switch (sh->check_state) {
2482 case check_state_idle:
2483 /* start a new check operation if there are no failures */
bd2ab670 2484 if (s->failed == 0) {
bd2ab670 2485 BUG_ON(s->uptodate != disks);
ecc65c9b
DW
2486 sh->check_state = check_state_run;
2487 set_bit(STRIPE_OP_CHECK, &s->ops_request);
bd2ab670 2488 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
bd2ab670 2489 s->uptodate--;
ecc65c9b 2490 break;
bd2ab670 2491 }
ecc65c9b
DW
2492 dev = &sh->dev[s->failed_num];
2493 /* fall through */
2494 case check_state_compute_result:
2495 sh->check_state = check_state_idle;
2496 if (!dev)
2497 dev = &sh->dev[sh->pd_idx];
2498
2499 /* check that a write has not made the stripe insync */
2500 if (test_bit(STRIPE_INSYNC, &sh->state))
2501 break;
c8894419 2502
a4456856 2503 /* either failed parity check, or recovery is happening */
a4456856
DW
2504 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2505 BUG_ON(s->uptodate != disks);
2506
2507 set_bit(R5_LOCKED, &dev->flags);
ecc65c9b 2508 s->locked++;
a4456856 2509 set_bit(R5_Wantwrite, &dev->flags);
830ea016 2510
a4456856 2511 clear_bit(STRIPE_DEGRADED, &sh->state);
a4456856 2512 set_bit(STRIPE_INSYNC, &sh->state);
ecc65c9b
DW
2513 break;
2514 case check_state_run:
2515 break; /* we will be called again upon completion */
2516 case check_state_check_result:
2517 sh->check_state = check_state_idle;
2518
2519 /* if a failure occurred during the check operation, leave
2520 * STRIPE_INSYNC not set and let the stripe be handled again
2521 */
2522 if (s->failed)
2523 break;
2524
2525 /* handle a successful check operation, if parity is correct
2526 * we are done. Otherwise update the mismatch count and repair
2527 * parity if !MD_RECOVERY_CHECK
2528 */
2529 if (sh->ops.zero_sum_result == 0)
2530 /* parity is correct (on disc,
2531 * not in buffer any more)
2532 */
2533 set_bit(STRIPE_INSYNC, &sh->state);
2534 else {
2535 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2536 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2537 /* don't try to repair!! */
2538 set_bit(STRIPE_INSYNC, &sh->state);
2539 else {
2540 sh->check_state = check_state_compute_run;
976ea8d4 2541 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
ecc65c9b
DW
2542 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2543 set_bit(R5_Wantcompute,
2544 &sh->dev[sh->pd_idx].flags);
2545 sh->ops.target = sh->pd_idx;
2546 s->uptodate++;
2547 }
2548 }
2549 break;
2550 case check_state_compute_run:
2551 break;
2552 default:
2553 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2554 __func__, sh->check_state,
2555 (unsigned long long) sh->sector);
2556 BUG();
a4456856
DW
2557 }
2558}
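/* Editorial note, not in the original source: in the state machine
 * above, check_state_run is only armed while the stripe is fully
 * uptodate and unfailed.  A zero xor result marks the stripe in-sync;
 * a non-zero result bumps resync_mismatches and, unless this is a
 * read-only "check" pass (MD_RECOVERY_CHECK), schedules a parity
 * rebuild through STRIPE_OP_COMPUTE_BLK.
 */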
2559
2560
2561static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2562 struct stripe_head_state *s,
2563 struct r6_state *r6s, struct page *tmp_page,
2564 int disks)
2565{
2566 int update_p = 0, update_q = 0;
2567 struct r5dev *dev;
2568 int pd_idx = sh->pd_idx;
2569 int qd_idx = r6s->qd_idx;
2570
2571 set_bit(STRIPE_HANDLE, &sh->state);
2572
2573 BUG_ON(s->failed > 2);
2574 BUG_ON(s->uptodate < disks);
2575 /* Want to check and possibly repair P and Q.
2576 * However there could be one 'failed' device, in which
2577 * case we can only check one of them, possibly using the
2578 * other to generate missing data
2579 */
2580
2581 /* If !tmp_page, we cannot do the calculations,
2582 * but as we have set STRIPE_HANDLE, we will soon be called
 2583 	 * by handle_stripe() with a tmp_page - just wait until then.
2584 */
2585 if (tmp_page) {
2586 if (s->failed == r6s->q_failed) {
2587 /* The only possible failed device holds 'Q', so it
2588 * makes sense to check P (If anything else were failed,
2589 * we would have used P to recreate it).
2590 */
2591 compute_block_1(sh, pd_idx, 1);
2592 if (!page_is_zero(sh->dev[pd_idx].page)) {
2593 compute_block_1(sh, pd_idx, 0);
2594 update_p = 1;
2595 }
2596 }
2597 if (!r6s->q_failed && s->failed < 2) {
2598 /* q is not failed, and we didn't use it to generate
2599 * anything, so it makes sense to check it
2600 */
2601 memcpy(page_address(tmp_page),
2602 page_address(sh->dev[qd_idx].page),
2603 STRIPE_SIZE);
2604 compute_parity6(sh, UPDATE_PARITY);
2605 if (memcmp(page_address(tmp_page),
2606 page_address(sh->dev[qd_idx].page),
2607 STRIPE_SIZE) != 0) {
2608 clear_bit(STRIPE_INSYNC, &sh->state);
2609 update_q = 1;
2610 }
2611 }
2612 if (update_p || update_q) {
2613 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2614 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2615 /* don't try to repair!! */
2616 update_p = update_q = 0;
2617 }
2618
2619 /* now write out any block on a failed drive,
2620 * or P or Q if they need it
2621 */
2622
2623 if (s->failed == 2) {
2624 dev = &sh->dev[r6s->failed_num[1]];
2625 s->locked++;
2626 set_bit(R5_LOCKED, &dev->flags);
2627 set_bit(R5_Wantwrite, &dev->flags);
2628 }
2629 if (s->failed >= 1) {
2630 dev = &sh->dev[r6s->failed_num[0]];
2631 s->locked++;
2632 set_bit(R5_LOCKED, &dev->flags);
2633 set_bit(R5_Wantwrite, &dev->flags);
2634 }
2635
2636 if (update_p) {
2637 dev = &sh->dev[pd_idx];
2638 s->locked++;
2639 set_bit(R5_LOCKED, &dev->flags);
2640 set_bit(R5_Wantwrite, &dev->flags);
2641 }
2642 if (update_q) {
2643 dev = &sh->dev[qd_idx];
2644 s->locked++;
2645 set_bit(R5_LOCKED, &dev->flags);
2646 set_bit(R5_Wantwrite, &dev->flags);
2647 }
2648 clear_bit(STRIPE_DEGRADED, &sh->state);
2649
2650 set_bit(STRIPE_INSYNC, &sh->state);
2651 }
2652}
2653
2654static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2655 struct r6_state *r6s)
2656{
2657 int i;
2658
2659 /* We have read all the blocks in this stripe and now we need to
 2660 	 * copy some of them into a target stripe for the expansion.
2661 */
f0a50d37 2662 struct dma_async_tx_descriptor *tx = NULL;
a4456856
DW
2663 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2664 for (i = 0; i < sh->disks; i++)
a2e08551 2665 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
911d4ee8 2666 int dd_idx, j;
a4456856
DW
2667 struct stripe_head *sh2;
2668
2669 sector_t bn = compute_blocknr(sh, i);
911d4ee8
N
2670 sector_t s = raid5_compute_sector(conf, bn, 0,
2671 &dd_idx, NULL);
b5663ba4 2672 sh2 = get_active_stripe(conf, s, 0, 1);
a4456856
DW
2673 if (sh2 == NULL)
2674 /* so far only the early blocks of this stripe
2675 * have been requested. When later blocks
2676 * get requested, we will try again
2677 */
2678 continue;
2679 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2680 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2681 /* must have already done this block */
2682 release_stripe(sh2);
2683 continue;
2684 }
f0a50d37
DW
2685
2686 /* place all the copies on one channel */
2687 tx = async_memcpy(sh2->dev[dd_idx].page,
2688 sh->dev[i].page, 0, 0, STRIPE_SIZE,
2689 ASYNC_TX_DEP_ACK, tx, NULL, NULL);
2690
a4456856
DW
2691 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2692 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2693 for (j = 0; j < conf->raid_disks; j++)
2694 if (j != sh2->pd_idx &&
d0dabf7e 2695 (!r6s || j != sh2->qd_idx) &&
a4456856
DW
2696 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2697 break;
2698 if (j == conf->raid_disks) {
2699 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2700 set_bit(STRIPE_HANDLE, &sh2->state);
2701 }
2702 release_stripe(sh2);
f0a50d37 2703
a4456856 2704 }
a2e08551
N
2705 /* done submitting copies, wait for them to complete */
2706 if (tx) {
2707 async_tx_ack(tx);
2708 dma_wait_for_async_tx(tx);
2709 }
a4456856 2710}
1da177e4 2711
6bfe0b49 2712
1da177e4
LT
2713/*
2714 * handle_stripe - do things to a stripe.
2715 *
2716 * We lock the stripe and then examine the state of various bits
2717 * to see what needs to be done.
2718 * Possible results:
2719 * return some read request which now have data
2720 * return some write requests which are safely on disc
2721 * schedule a read on some buffers
2722 * schedule a write of some buffers
2723 * return confirmation of parity correctness
2724 *
1da177e4
LT
2725 * buffers are taken off read_list or write_list, and bh_cache buffers
2726 * get BH_Lock set before the stripe lock is released.
2727 *
2728 */
a4456856 2729
df10cfbc 2730static bool handle_stripe5(struct stripe_head *sh)
1da177e4
LT
2731{
2732 raid5_conf_t *conf = sh->raid_conf;
a4456856
DW
2733 int disks = sh->disks, i;
2734 struct bio *return_bi = NULL;
2735 struct stripe_head_state s;
1da177e4 2736 struct r5dev *dev;
6bfe0b49 2737 mdk_rdev_t *blocked_rdev = NULL;
e0a115e5 2738 int prexor;
1da177e4 2739
a4456856 2740 memset(&s, 0, sizeof(s));
600aa109
DW
2741 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2742 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
2743 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
2744 sh->reconstruct_state);
1da177e4
LT
2745
2746 spin_lock(&sh->lock);
2747 clear_bit(STRIPE_HANDLE, &sh->state);
2748 clear_bit(STRIPE_DELAYED, &sh->state);
2749
a4456856
DW
2750 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2751 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2752 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
def6ae26 2753
83de75cc 2754 /* Now to look around and see what can be done */
9910f16a 2755 rcu_read_lock();
1da177e4
LT
2756 for (i=disks; i--; ) {
2757 mdk_rdev_t *rdev;
a4456856 2758 struct r5dev *dev = &sh->dev[i];
1da177e4 2759 clear_bit(R5_Insync, &dev->flags);
1da177e4 2760
b5e98d65
DW
2761 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2762 "written %p\n", i, dev->flags, dev->toread, dev->read,
2763 dev->towrite, dev->written);
2764
2765 /* maybe we can request a biofill operation
2766 *
2767 * new wantfill requests are only permitted while
83de75cc 2768 * ops_complete_biofill is guaranteed to be inactive
b5e98d65
DW
2769 */
2770 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
83de75cc 2771 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
b5e98d65 2772 set_bit(R5_Wantfill, &dev->flags);
1da177e4
LT
2773
2774 /* now count some things */
a4456856
DW
2775 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2776 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
f38e1219 2777 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
1da177e4 2778
b5e98d65
DW
2779 if (test_bit(R5_Wantfill, &dev->flags))
2780 s.to_fill++;
2781 else if (dev->toread)
a4456856 2782 s.to_read++;
1da177e4 2783 if (dev->towrite) {
a4456856 2784 s.to_write++;
1da177e4 2785 if (!test_bit(R5_OVERWRITE, &dev->flags))
a4456856 2786 s.non_overwrite++;
1da177e4 2787 }
a4456856
DW
2788 if (dev->written)
2789 s.written++;
9910f16a 2790 rdev = rcu_dereference(conf->disks[i].rdev);
ac4090d2
N
2791 if (blocked_rdev == NULL &&
2792 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
6bfe0b49
DW
2793 blocked_rdev = rdev;
2794 atomic_inc(&rdev->nr_pending);
6bfe0b49 2795 }
b2d444d7 2796 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
14f8d26b 2797 /* The ReadError flag will just be confusing now */
4e5314b5
N
2798 clear_bit(R5_ReadError, &dev->flags);
2799 clear_bit(R5_ReWrite, &dev->flags);
2800 }
b2d444d7 2801 if (!rdev || !test_bit(In_sync, &rdev->flags)
4e5314b5 2802 || test_bit(R5_ReadError, &dev->flags)) {
a4456856
DW
2803 s.failed++;
2804 s.failed_num = i;
1da177e4
LT
2805 } else
2806 set_bit(R5_Insync, &dev->flags);
2807 }
9910f16a 2808 rcu_read_unlock();
b5e98d65 2809
6bfe0b49 2810 if (unlikely(blocked_rdev)) {
ac4090d2
N
2811 if (s.syncing || s.expanding || s.expanded ||
2812 s.to_write || s.written) {
2813 set_bit(STRIPE_HANDLE, &sh->state);
2814 goto unlock;
2815 }
2816 /* There is nothing for the blocked_rdev to block */
2817 rdev_dec_pending(blocked_rdev, conf->mddev);
2818 blocked_rdev = NULL;
6bfe0b49
DW
2819 }
2820
83de75cc
DW
2821 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
2822 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
2823 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
2824 }
b5e98d65 2825
45b4233c 2826 pr_debug("locked=%d uptodate=%d to_read=%d"
1da177e4 2827 " to_write=%d failed=%d failed_num=%d\n",
a4456856
DW
2828 s.locked, s.uptodate, s.to_read, s.to_write,
2829 s.failed, s.failed_num);
1da177e4
LT
2830 /* check if the array has lost two devices and, if so, some requests might
2831 * need to be failed
2832 */
a4456856 2833 if (s.failed > 1 && s.to_read+s.to_write+s.written)
1fe797e6 2834 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
a4456856 2835 if (s.failed > 1 && s.syncing) {
1da177e4
LT
2836 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
2837 clear_bit(STRIPE_SYNCING, &sh->state);
a4456856 2838 s.syncing = 0;
1da177e4
LT
2839 }
2840
2841 /* might be able to return some write requests if the parity block
2842 * is safe, or on a failed drive
2843 */
2844 dev = &sh->dev[sh->pd_idx];
a4456856
DW
2845 if ( s.written &&
2846 ((test_bit(R5_Insync, &dev->flags) &&
2847 !test_bit(R5_LOCKED, &dev->flags) &&
2848 test_bit(R5_UPTODATE, &dev->flags)) ||
2849 (s.failed == 1 && s.failed_num == sh->pd_idx)))
1fe797e6 2850 handle_stripe_clean_event(conf, sh, disks, &return_bi);
1da177e4
LT
2851
2852 /* Now we might consider reading some blocks, either to check/generate
2853 * parity, or to satisfy requests
2854 * or to load a block that is being partially written.
2855 */
a4456856 2856 if (s.to_read || s.non_overwrite ||
976ea8d4 2857 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
1fe797e6 2858 handle_stripe_fill5(sh, &s, disks);
1da177e4 2859
e33129d8
DW
2860 /* Now we check to see if any write operations have recently
2861 * completed
2862 */
e0a115e5 2863 prexor = 0;
d8ee0728 2864 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
e0a115e5 2865 prexor = 1;
d8ee0728
DW
2866 if (sh->reconstruct_state == reconstruct_state_drain_result ||
2867 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
600aa109 2868 sh->reconstruct_state = reconstruct_state_idle;
e33129d8
DW
2869
2870 /* All the 'written' buffers and the parity block are ready to
2871 * be written back to disk
2872 */
2873 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
2874 for (i = disks; i--; ) {
2875 dev = &sh->dev[i];
2876 if (test_bit(R5_LOCKED, &dev->flags) &&
2877 (i == sh->pd_idx || dev->written)) {
2878 pr_debug("Writing block %d\n", i);
2879 set_bit(R5_Wantwrite, &dev->flags);
e0a115e5
DW
2880 if (prexor)
2881 continue;
e33129d8
DW
2882 if (!test_bit(R5_Insync, &dev->flags) ||
2883 (i == sh->pd_idx && s.failed == 0))
2884 set_bit(STRIPE_INSYNC, &sh->state);
2885 }
2886 }
2887 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2888 atomic_dec(&conf->preread_active_stripes);
2889 if (atomic_read(&conf->preread_active_stripes) <
2890 IO_THRESHOLD)
2891 md_wakeup_thread(conf->mddev->thread);
2892 }
2893 }
2894
2895 /* Now to consider new write requests and what else, if anything
2896 * should be read. We do not handle new writes when:
2897 * 1/ A 'write' operation (copy+xor) is already in flight.
2898 * 2/ A 'check' operation is in flight, as it may clobber the parity
2899 * block.
2900 */
600aa109 2901 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
1fe797e6 2902 handle_stripe_dirtying5(conf, sh, &s, disks);
1da177e4
LT
2903
2904 /* maybe we need to check and possibly fix the parity for this stripe
e89f8962
DW
2905 * Any reads will already have been scheduled, so we just see if enough
2906 * data is available. The parity check is held off while parity
2907 * dependent operations are in flight.
1da177e4 2908 */
ecc65c9b
DW
2909 if (sh->check_state ||
2910 (s.syncing && s.locked == 0 &&
976ea8d4 2911 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
ecc65c9b 2912 !test_bit(STRIPE_INSYNC, &sh->state)))
a4456856 2913 handle_parity_checks5(conf, sh, &s, disks);
e89f8962 2914
a4456856 2915 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1da177e4
LT
2916 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
2917 clear_bit(STRIPE_SYNCING, &sh->state);
2918 }
4e5314b5
N
2919
2920 /* If the failed drive is just a ReadError, then we might need to progress
2921 * the repair/check process
2922 */
a4456856
DW
2923 if (s.failed == 1 && !conf->mddev->ro &&
2924 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
2925 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
2926 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
4e5314b5 2927 ) {
a4456856 2928 dev = &sh->dev[s.failed_num];
4e5314b5
N
2929 if (!test_bit(R5_ReWrite, &dev->flags)) {
2930 set_bit(R5_Wantwrite, &dev->flags);
2931 set_bit(R5_ReWrite, &dev->flags);
2932 set_bit(R5_LOCKED, &dev->flags);
a4456856 2933 s.locked++;
4e5314b5
N
2934 } else {
2935 /* let's read it back */
2936 set_bit(R5_Wantread, &dev->flags);
2937 set_bit(R5_LOCKED, &dev->flags);
a4456856 2938 s.locked++;
4e5314b5
N
2939 }
2940 }
2941
600aa109
DW
2942 /* Finish reconstruct operations initiated by the expansion process */
2943 if (sh->reconstruct_state == reconstruct_state_result) {
2944 sh->reconstruct_state = reconstruct_state_idle;
f0a50d37 2945 clear_bit(STRIPE_EXPANDING, &sh->state);
23397883 2946 for (i = conf->raid_disks; i--; ) {
ccfcc3c1 2947 set_bit(R5_Wantwrite, &sh->dev[i].flags);
23397883 2948 set_bit(R5_LOCKED, &sh->dev[i].flags);
efe31143 2949 s.locked++;
23397883 2950 }
f0a50d37
DW
2951 }
2952
2953 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
600aa109 2954 !sh->reconstruct_state) {
f0a50d37
DW
2955 /* Need to write out all blocks after computing parity */
2956 sh->disks = conf->raid_disks;
911d4ee8 2957 stripe_set_idx(sh->sector, conf, 0, sh);
1fe797e6 2958 schedule_reconstruction5(sh, &s, 1, 1);
600aa109 2959 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
ccfcc3c1 2960 clear_bit(STRIPE_EXPAND_READY, &sh->state);
f6705578 2961 atomic_dec(&conf->reshape_stripes);
ccfcc3c1
N
2962 wake_up(&conf->wait_for_overlap);
2963 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
2964 }
2965
0f94e87c 2966 if (s.expanding && s.locked == 0 &&
976ea8d4 2967 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
a4456856 2968 handle_stripe_expansion(conf, sh, NULL);
ccfcc3c1 2969
6bfe0b49 2970 unlock:
1da177e4
LT
2971 spin_unlock(&sh->lock);
2972
6bfe0b49
DW
2973 /* wait for this device to become unblocked */
2974 if (unlikely(blocked_rdev))
2975 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
2976
600aa109
DW
2977 if (s.ops_request)
2978 raid5_run_ops(sh, s.ops_request);
d84e0f10 2979
c4e5ac0a 2980 ops_run_io(sh, &s);
1da177e4 2981
a4456856 2982 return_io(return_bi);
df10cfbc
DW
2983
2984 return blocked_rdev == NULL;
1da177e4
LT
2985}
2986
df10cfbc 2987static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
1da177e4 2988{
bff61975 2989 raid5_conf_t *conf = sh->raid_conf;
f416885e 2990 int disks = sh->disks;
a4456856
DW
2991 struct bio *return_bi = NULL;
2992 int i, pd_idx = sh->pd_idx;
2993 struct stripe_head_state s;
2994 struct r6_state r6s;
16a53ecc 2995 struct r5dev *dev, *pdev, *qdev;
6bfe0b49 2996 mdk_rdev_t *blocked_rdev = NULL;
1da177e4 2997
d0dabf7e 2998 r6s.qd_idx = sh->qd_idx;
45b4233c 2999 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
a4456856
DW
3000 "pd_idx=%d, qd_idx=%d\n",
3001 (unsigned long long)sh->sector, sh->state,
3002 atomic_read(&sh->count), pd_idx, r6s.qd_idx);
3003 memset(&s, 0, sizeof(s));
72626685 3004
16a53ecc
N
3005 spin_lock(&sh->lock);
3006 clear_bit(STRIPE_HANDLE, &sh->state);
3007 clear_bit(STRIPE_DELAYED, &sh->state);
3008
a4456856
DW
3009 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3010 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3011 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
16a53ecc 3012 /* Now to look around and see what can be done */
1da177e4
LT
3013
3014 rcu_read_lock();
16a53ecc
N
3015 for (i=disks; i--; ) {
3016 mdk_rdev_t *rdev;
3017 dev = &sh->dev[i];
3018 clear_bit(R5_Insync, &dev->flags);
1da177e4 3019
45b4233c 3020 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
16a53ecc
N
3021 i, dev->flags, dev->toread, dev->towrite, dev->written);
3022 /* maybe we can reply to a read */
3023 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
3024 struct bio *rbi, *rbi2;
45b4233c 3025 pr_debug("Return read for disc %d\n", i);
16a53ecc
N
3026 spin_lock_irq(&conf->device_lock);
3027 rbi = dev->toread;
3028 dev->toread = NULL;
3029 if (test_and_clear_bit(R5_Overlap, &dev->flags))
3030 wake_up(&conf->wait_for_overlap);
3031 spin_unlock_irq(&conf->device_lock);
3032 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
3033 copy_data(0, rbi, dev->page, dev->sector);
3034 rbi2 = r5_next_bio(rbi, dev->sector);
3035 spin_lock_irq(&conf->device_lock);
960e739d 3036 if (!raid5_dec_bi_phys_segments(rbi)) {
16a53ecc
N
3037 rbi->bi_next = return_bi;
3038 return_bi = rbi;
3039 }
3040 spin_unlock_irq(&conf->device_lock);
3041 rbi = rbi2;
3042 }
3043 }
1da177e4 3044
16a53ecc 3045 /* now count some things */
a4456856
DW
3046 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3047 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
1da177e4 3048
16a53ecc 3049
a4456856
DW
3050 if (dev->toread)
3051 s.to_read++;
16a53ecc 3052 if (dev->towrite) {
a4456856 3053 s.to_write++;
16a53ecc 3054 if (!test_bit(R5_OVERWRITE, &dev->flags))
a4456856 3055 s.non_overwrite++;
16a53ecc 3056 }
a4456856
DW
3057 if (dev->written)
3058 s.written++;
16a53ecc 3059 rdev = rcu_dereference(conf->disks[i].rdev);
ac4090d2
N
3060 if (blocked_rdev == NULL &&
3061 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
6bfe0b49
DW
3062 blocked_rdev = rdev;
3063 atomic_inc(&rdev->nr_pending);
6bfe0b49 3064 }
16a53ecc
N
3065 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3066 /* The ReadError flag will just be confusing now */
3067 clear_bit(R5_ReadError, &dev->flags);
3068 clear_bit(R5_ReWrite, &dev->flags);
1da177e4 3069 }
16a53ecc
N
3070 if (!rdev || !test_bit(In_sync, &rdev->flags)
3071 || test_bit(R5_ReadError, &dev->flags)) {
a4456856
DW
3072 if (s.failed < 2)
3073 r6s.failed_num[s.failed] = i;
3074 s.failed++;
16a53ecc
N
3075 } else
3076 set_bit(R5_Insync, &dev->flags);
1da177e4
LT
3077 }
3078 rcu_read_unlock();
6bfe0b49
DW
3079
3080 if (unlikely(blocked_rdev)) {
ac4090d2
N
3081 if (s.syncing || s.expanding || s.expanded ||
3082 s.to_write || s.written) {
3083 set_bit(STRIPE_HANDLE, &sh->state);
3084 goto unlock;
3085 }
3086 /* There is nothing for the blocked_rdev to block */
3087 rdev_dec_pending(blocked_rdev, conf->mddev);
3088 blocked_rdev = NULL;
6bfe0b49 3089 }
ac4090d2 3090
45b4233c 3091 pr_debug("locked=%d uptodate=%d to_read=%d"
16a53ecc 3092 " to_write=%d failed=%d failed_num=%d,%d\n",
a4456856
DW
3093 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3094 r6s.failed_num[0], r6s.failed_num[1]);
3095 /* check if the array has lost >2 devices and, if so, some requests
3096 * might need to be failed
16a53ecc 3097 */
a4456856 3098 if (s.failed > 2 && s.to_read+s.to_write+s.written)
1fe797e6 3099 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
a4456856 3100 if (s.failed > 2 && s.syncing) {
16a53ecc
N
3101 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3102 clear_bit(STRIPE_SYNCING, &sh->state);
a4456856 3103 s.syncing = 0;
16a53ecc
N
3104 }
3105
3106 /*
3107 * might be able to return some write requests if the parity blocks
3108 * are safe, or on a failed drive
3109 */
3110 pdev = &sh->dev[pd_idx];
a4456856
DW
3111 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
3112 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
3113 qdev = &sh->dev[r6s.qd_idx];
3114 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx)
3115 || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx);
3116
3117 if ( s.written &&
3118 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
16a53ecc 3119 && !test_bit(R5_LOCKED, &pdev->flags)
a4456856
DW
3120 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3121 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
16a53ecc 3122 && !test_bit(R5_LOCKED, &qdev->flags)
a4456856 3123 && test_bit(R5_UPTODATE, &qdev->flags)))))
1fe797e6 3124 handle_stripe_clean_event(conf, sh, disks, &return_bi);
16a53ecc
N
3125
3126 /* Now we might consider reading some blocks, either to check/generate
3127 * parity, or to satisfy requests
3128 * or to load a block that is being partially written.
3129 */
a4456856
DW
3130 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
3131 (s.syncing && (s.uptodate < disks)) || s.expanding)
1fe797e6 3132 handle_stripe_fill6(sh, &s, &r6s, disks);
16a53ecc
N
3133
3134 /* now to consider writing and what else, if anything should be read */
a4456856 3135 if (s.to_write)
1fe797e6 3136 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
16a53ecc
N
3137
3138 /* maybe we need to check and possibly fix the parity for this stripe
a4456856
DW
3139 * Any reads will already have been scheduled, so we just see if enough
3140 * data is available
16a53ecc 3141 */
a4456856
DW
3142 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state))
3143 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks);
16a53ecc 3144
a4456856 3145 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
16a53ecc
N
3146 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3147 clear_bit(STRIPE_SYNCING, &sh->state);
3148 }
3149
3150 /* If the failed drives are just a ReadError, then we might need
3151 * to progress the repair/check process
3152 */
a4456856
DW
3153 if (s.failed <= 2 && !conf->mddev->ro)
3154 for (i = 0; i < s.failed; i++) {
3155 dev = &sh->dev[r6s.failed_num[i]];
16a53ecc
N
3156 if (test_bit(R5_ReadError, &dev->flags)
3157 && !test_bit(R5_LOCKED, &dev->flags)
3158 && test_bit(R5_UPTODATE, &dev->flags)
3159 ) {
3160 if (!test_bit(R5_ReWrite, &dev->flags)) {
3161 set_bit(R5_Wantwrite, &dev->flags);
3162 set_bit(R5_ReWrite, &dev->flags);
3163 set_bit(R5_LOCKED, &dev->flags);
3164 } else {
3165 /* let's read it back */
3166 set_bit(R5_Wantread, &dev->flags);
3167 set_bit(R5_LOCKED, &dev->flags);
3168 }
3169 }
3170 }
f416885e 3171
a4456856 3172 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
f416885e
N
3173 /* Need to write out all blocks after computing P&Q */
3174 sh->disks = conf->raid_disks;
911d4ee8 3175 stripe_set_idx(sh->sector, conf, 0, sh);
f416885e
N
3176 compute_parity6(sh, RECONSTRUCT_WRITE);
3177 for (i = conf->raid_disks ; i-- ; ) {
3178 set_bit(R5_LOCKED, &sh->dev[i].flags);
a4456856 3179 s.locked++;
f416885e
N
3180 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3181 }
3182 clear_bit(STRIPE_EXPANDING, &sh->state);
a4456856 3183 } else if (s.expanded) {
f416885e
N
3184 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3185 atomic_dec(&conf->reshape_stripes);
3186 wake_up(&conf->wait_for_overlap);
3187 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3188 }
3189
0f94e87c 3190 if (s.expanding && s.locked == 0 &&
976ea8d4 3191 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
a4456856 3192 handle_stripe_expansion(conf, sh, &r6s);
f416885e 3193
6bfe0b49 3194 unlock:
16a53ecc
N
3195 spin_unlock(&sh->lock);
3196
6bfe0b49
DW
3197 /* wait for this device to become unblocked */
3198 if (unlikely(blocked_rdev))
3199 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3200
f0e43bcd 3201 ops_run_io(sh, &s);
16a53ecc 3202
f0e43bcd 3203 return_io(return_bi);
df10cfbc
DW
3204
3205 return blocked_rdev == NULL;
16a53ecc
N
3206}
3207
df10cfbc
DW
3208/* returns true if the stripe was handled */
3209static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
16a53ecc
N
3210{
3211 if (sh->raid_conf->level == 6)
df10cfbc 3212 return handle_stripe6(sh, tmp_page);
16a53ecc 3213 else
df10cfbc 3214 return handle_stripe5(sh);
16a53ecc
N
3215}
3216
3217
3218
3219static void raid5_activate_delayed(raid5_conf_t *conf)
3220{
3221 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3222 while (!list_empty(&conf->delayed_list)) {
3223 struct list_head *l = conf->delayed_list.next;
3224 struct stripe_head *sh;
3225 sh = list_entry(l, struct stripe_head, lru);
3226 list_del_init(l);
3227 clear_bit(STRIPE_DELAYED, &sh->state);
3228 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3229 atomic_inc(&conf->preread_active_stripes);
8b3e6cdc 3230 list_add_tail(&sh->lru, &conf->hold_list);
16a53ecc 3231 }
6ed3003c
N
3232 } else
3233 blk_plug_device(conf->mddev->queue);
16a53ecc
N
3234}
3235
3236static void activate_bit_delay(raid5_conf_t *conf)
3237{
3238 /* device_lock is held */
3239 struct list_head head;
3240 list_add(&head, &conf->bitmap_list);
3241 list_del_init(&conf->bitmap_list);
3242 while (!list_empty(&head)) {
3243 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3244 list_del_init(&sh->lru);
3245 atomic_inc(&sh->count);
3246 __release_stripe(conf, sh);
3247 }
3248}
3249
3250static void unplug_slaves(mddev_t *mddev)
3251{
3252 raid5_conf_t *conf = mddev_to_conf(mddev);
3253 int i;
3254
3255 rcu_read_lock();
3256 for (i=0; i<mddev->raid_disks; i++) {
3257 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3258 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
165125e1 3259 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
16a53ecc
N
3260
3261 atomic_inc(&rdev->nr_pending);
3262 rcu_read_unlock();
3263
2ad8b1ef 3264 blk_unplug(r_queue);
16a53ecc
N
3265
3266 rdev_dec_pending(rdev, mddev);
3267 rcu_read_lock();
3268 }
3269 }
3270 rcu_read_unlock();
3271}
3272
165125e1 3273static void raid5_unplug_device(struct request_queue *q)
16a53ecc
N
3274{
3275 mddev_t *mddev = q->queuedata;
3276 raid5_conf_t *conf = mddev_to_conf(mddev);
3277 unsigned long flags;
3278
3279 spin_lock_irqsave(&conf->device_lock, flags);
3280
3281 if (blk_remove_plug(q)) {
3282 conf->seq_flush++;
3283 raid5_activate_delayed(conf);
72626685 3284 }
1da177e4
LT
3285 md_wakeup_thread(mddev->thread);
3286
3287 spin_unlock_irqrestore(&conf->device_lock, flags);
3288
3289 unplug_slaves(mddev);
3290}
3291
f022b2fd
N
3292static int raid5_congested(void *data, int bits)
3293{
3294 mddev_t *mddev = data;
3295 raid5_conf_t *conf = mddev_to_conf(mddev);
3296
3297 /* No difference between reads and writes. Just check
3298 * how busy the stripe_cache is
3299 */
3300 if (conf->inactive_blocked)
3301 return 1;
3302 if (conf->quiesce)
3303 return 1;
3304 if (list_empty_careful(&conf->inactive_list))
3305 return 1;
3306
3307 return 0;
3308}
3309
23032a0e
RBJ
3310/* We want read requests to align with chunks where possible,
3311 * but write requests don't need to.
3312 */
cc371e66
AK
3313static int raid5_mergeable_bvec(struct request_queue *q,
3314 struct bvec_merge_data *bvm,
3315 struct bio_vec *biovec)
23032a0e
RBJ
3316{
3317 mddev_t *mddev = q->queuedata;
cc371e66 3318 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
23032a0e
RBJ
3319 int max;
3320 unsigned int chunk_sectors = mddev->chunk_size >> 9;
cc371e66 3321 unsigned int bio_sectors = bvm->bi_size >> 9;
23032a0e 3322
cc371e66 3323 if ((bvm->bi_rw & 1) == WRITE)
23032a0e
RBJ
3324 return biovec->bv_len; /* always allow writes to be mergeable */
3325
3326 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3327 if (max < 0) max = 0;
3328 if (max <= biovec->bv_len && bio_sectors == 0)
3329 return biovec->bv_len;
3330 else
3331 return max;
3332}
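/* Editorial example, not in the original source: with 64KiB chunks
 * (chunk_sectors == 128), a read bio already holding 4 sectors that
 * starts 120 sectors into a chunk may grow by at most
 * (128 - (120 + 4)) << 9 = 2KiB, keeping the whole bio inside one
 * chunk and therefore on one member disk.
 */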
3333
f679623f
RBJ
3334
3335static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3336{
3337 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3338 unsigned int chunk_sectors = mddev->chunk_size >> 9;
3339 unsigned int bio_sectors = bio->bi_size >> 9;
3340
3341 return chunk_sectors >=
3342 ((sector & (chunk_sectors - 1)) + bio_sectors);
3343}
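/* Editorial example, not in the original source: with
 * chunk_sectors == 128, a bio starting 120 sectors into a chunk can
 * cover at most 8 more sectors; a 16-sector bio there crosses into the
 * next chunk, fails this test, and cannot use the aligned-read fast
 * path.
 */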
3344
46031f9a
RBJ
3345/*
3346 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3347 * later sampled by raid5d.
3348 */
3349static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3350{
3351 unsigned long flags;
3352
3353 spin_lock_irqsave(&conf->device_lock, flags);
3354
3355 bi->bi_next = conf->retry_read_aligned_list;
3356 conf->retry_read_aligned_list = bi;
3357
3358 spin_unlock_irqrestore(&conf->device_lock, flags);
3359 md_wakeup_thread(conf->mddev->thread);
3360}
3361
3362
3363static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3364{
3365 struct bio *bi;
3366
3367 bi = conf->retry_read_aligned;
3368 if (bi) {
3369 conf->retry_read_aligned = NULL;
3370 return bi;
3371 }
3372 bi = conf->retry_read_aligned_list;
3373	if (bi) {
387bb173 3374 conf->retry_read_aligned_list = bi->bi_next;
46031f9a 3375 bi->bi_next = NULL;
960e739d
JA
3376 /*
3377	 * this sets the active stripe count to 1 and the processed
3378	 * stripe count to zero (upper 8 bits)
3379 */
46031f9a 3380 bi->bi_phys_segments = 1; /* biased count of active stripes */
46031f9a
RBJ
3381 }
3382
3383 return bi;
3384}
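The single bi_phys_segments word is doing double duty here: active stripes in the low bits, processed stripes in the high bits. The user-space sketch below models that packing; the 16/16 split is an assumption (the authoritative raid5_bi_*_segments() helpers live in raid5.h, and the comment above speaks of 8 bits), so treat the masks as illustrative only:

#include <stdio.h>

static unsigned int segs;	/* stands in for bio->bi_phys_segments */

static int active(void)    { return segs & 0xffff; }         /* assumed width */
static int processed(void) { return (segs >> 16) & 0xffff; } /* assumed width */

static void set_processed(int n)
{
	segs = (segs & 0xffff) | ((unsigned int)n << 16);
}

static int dec_active(void)
{
	int a = active() - 1;

	segs = (segs & 0xffff0000u) | ((unsigned int)a & 0xffff);
	return a;
}

int main(void)
{
	segs = 1;          /* the biased count set when a bio is (re)queued */
	set_processed(3);  /* pretend three stripes were finished earlier */
	printf("active=%d processed=%d\n", active(), processed());
	printf("after dec_active: %d still active\n", dec_active());
	return 0;
}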
3385
3386
f679623f
RBJ
3387/*
3388 * The "raid5_align_endio" should check if the read succeeded and if it
3389 * did, call bio_endio on the original bio (having bio_put the new bio
3390 * first).
3391	 * If the read failed, the bio is queued for retry via add_bio_to_retry().
3392 */
6712ecf8 3393static void raid5_align_endio(struct bio *bi, int error)
f679623f
RBJ
3394{
3395 struct bio* raid_bi = bi->bi_private;
46031f9a
RBJ
3396 mddev_t *mddev;
3397 raid5_conf_t *conf;
3398 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3399 mdk_rdev_t *rdev;
3400
f679623f 3401 bio_put(bi);
46031f9a
RBJ
3402
3403 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3404 conf = mddev_to_conf(mddev);
3405 rdev = (void*)raid_bi->bi_next;
3406 raid_bi->bi_next = NULL;
3407
3408 rdev_dec_pending(rdev, conf->mddev);
3409
3410 if (!error && uptodate) {
6712ecf8 3411 bio_endio(raid_bi, 0);
46031f9a
RBJ
3412 if (atomic_dec_and_test(&conf->active_aligned_reads))
3413 wake_up(&conf->wait_for_stripe);
6712ecf8 3414 return;
46031f9a
RBJ
3415 }
3416
3417
45b4233c 3418 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
46031f9a
RBJ
3419
3420 add_bio_to_retry(raid_bi, conf);
f679623f
RBJ
3421}
3422
387bb173
NB
3423static int bio_fits_rdev(struct bio *bi)
3424{
165125e1 3425 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
387bb173
NB
3426
3427 if ((bi->bi_size>>9) > q->max_sectors)
3428 return 0;
3429 blk_recount_segments(q, bi);
960e739d 3430 if (bi->bi_phys_segments > q->max_phys_segments)
387bb173
NB
3431 return 0;
3432
3433 if (q->merge_bvec_fn)
3434 /* it's too hard to apply the merge_bvec_fn at this stage,
3435	 * so just give up
3436 */
3437 return 0;
3438
3439 return 1;
3440}
3441
3442
165125e1 3443static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
f679623f
RBJ
3444{
3445 mddev_t *mddev = q->queuedata;
3446 raid5_conf_t *conf = mddev_to_conf(mddev);
911d4ee8 3447 unsigned int dd_idx;
f679623f
RBJ
3448 struct bio* align_bi;
3449 mdk_rdev_t *rdev;
3450
3451 if (!in_chunk_boundary(mddev, raid_bio)) {
45b4233c 3452 pr_debug("chunk_aligned_read : non aligned\n");
f679623f
RBJ
3453 return 0;
3454 }
3455 /*
99c0fb5f 3456 * use bio_clone to make a copy of the bio
f679623f
RBJ
3457 */
3458 align_bi = bio_clone(raid_bio, GFP_NOIO);
3459 if (!align_bi)
3460 return 0;
3461 /*
3462 * set bi_end_io to a new function, and set bi_private to the
3463 * original bio.
3464 */
3465 align_bi->bi_end_io = raid5_align_endio;
3466 align_bi->bi_private = raid_bio;
3467 /*
3468 * compute position
3469 */
112bf897
N
3470 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3471 0,
911d4ee8 3472 &dd_idx, NULL);
f679623f
RBJ
3473
3474 rcu_read_lock();
3475 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3476 if (rdev && test_bit(In_sync, &rdev->flags)) {
f679623f
RBJ
3477 atomic_inc(&rdev->nr_pending);
3478 rcu_read_unlock();
46031f9a
RBJ
3479 raid_bio->bi_next = (void*)rdev;
3480 align_bi->bi_bdev = rdev->bdev;
3481 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3482 align_bi->bi_sector += rdev->data_offset;
3483
387bb173
NB
3484 if (!bio_fits_rdev(align_bi)) {
3485 /* too big in some way */
3486 bio_put(align_bi);
3487 rdev_dec_pending(rdev, mddev);
3488 return 0;
3489 }
3490
46031f9a
RBJ
3491 spin_lock_irq(&conf->device_lock);
3492 wait_event_lock_irq(conf->wait_for_stripe,
3493 conf->quiesce == 0,
3494 conf->device_lock, /* nothing */);
3495 atomic_inc(&conf->active_aligned_reads);
3496 spin_unlock_irq(&conf->device_lock);
3497
f679623f
RBJ
3498 generic_make_request(align_bi);
3499 return 1;
3500 } else {
3501 rcu_read_unlock();
46031f9a 3502 bio_put(align_bi);
f679623f
RBJ
3503 return 0;
3504 }
3505}
3506
8b3e6cdc
DW
3507/* __get_priority_stripe - get the next stripe to process
3508 *
3509 * Full stripe writes are allowed to pass preread active stripes up until
3510 * the bypass_threshold is exceeded. In general the bypass_count
3511 * increments when the handle_list is handled before the hold_list; however, it
3512	 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3513	 * stripe with in-flight i/o. The bypass_count will be reset when the
3514 * head of the hold_list has changed, i.e. the head was promoted to the
3515 * handle_list.
3516 */
3517static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3518{
3519 struct stripe_head *sh;
3520
3521 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3522 __func__,
3523 list_empty(&conf->handle_list) ? "empty" : "busy",
3524 list_empty(&conf->hold_list) ? "empty" : "busy",
3525 atomic_read(&conf->pending_full_writes), conf->bypass_count);
3526
3527 if (!list_empty(&conf->handle_list)) {
3528 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3529
3530 if (list_empty(&conf->hold_list))
3531 conf->bypass_count = 0;
3532 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3533 if (conf->hold_list.next == conf->last_hold)
3534 conf->bypass_count++;
3535 else {
3536 conf->last_hold = conf->hold_list.next;
3537 conf->bypass_count -= conf->bypass_threshold;
3538 if (conf->bypass_count < 0)
3539 conf->bypass_count = 0;
3540 }
3541 }
3542 } else if (!list_empty(&conf->hold_list) &&
3543 ((conf->bypass_threshold &&
3544 conf->bypass_count > conf->bypass_threshold) ||
3545 atomic_read(&conf->pending_full_writes) == 0)) {
3546 sh = list_entry(conf->hold_list.next,
3547 typeof(*sh), lru);
3548 conf->bypass_count -= conf->bypass_threshold;
3549 if (conf->bypass_count < 0)
3550 conf->bypass_count = 0;
3551 } else
3552 return NULL;
3553
3554 list_del_init(&sh->lru);
3555 atomic_inc(&sh->count);
3556 BUG_ON(atomic_read(&sh->count) != 1);
3557 return sh;
3558}
f679623f 3559
165125e1 3560static int make_request(struct request_queue *q, struct bio * bi)
1da177e4
LT
3561{
3562 mddev_t *mddev = q->queuedata;
3563 raid5_conf_t *conf = mddev_to_conf(mddev);
911d4ee8 3564 int dd_idx;
1da177e4
LT
3565 sector_t new_sector;
3566 sector_t logical_sector, last_sector;
3567 struct stripe_head *sh;
a362357b 3568 const int rw = bio_data_dir(bi);
c9959059 3569 int cpu, remaining;
1da177e4 3570
e5dcdd80 3571 if (unlikely(bio_barrier(bi))) {
6712ecf8 3572 bio_endio(bi, -EOPNOTSUPP);
e5dcdd80
N
3573 return 0;
3574 }
3575
3d310eb7 3576 md_write_start(mddev, bi);
06d91a5f 3577
074a7aca
TH
3578 cpu = part_stat_lock();
3579 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3580 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3581 bio_sectors(bi));
3582 part_stat_unlock();
1da177e4 3583
802ba064 3584 if (rw == READ &&
52488615
RBJ
3585 mddev->reshape_position == MaxSector &&
3586 chunk_aligned_read(q,bi))
99c0fb5f 3587 return 0;
52488615 3588
1da177e4
LT
3589 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3590 last_sector = bi->bi_sector + (bi->bi_size>>9);
3591 bi->bi_next = NULL;
3592 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
06d91a5f 3593
1da177e4
LT
3594 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3595 DEFINE_WAIT(w);
16a53ecc 3596 int disks, data_disks;
b5663ba4 3597 int previous;
b578d55f 3598
7ecaa1e6 3599 retry:
b5663ba4 3600 previous = 0;
b578d55f 3601 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
7ecaa1e6
N
3602 if (likely(conf->expand_progress == MaxSector))
3603 disks = conf->raid_disks;
3604 else {
df8e7f76
N
3605 /* spinlock is needed as expand_progress may be
3606 * 64bit on a 32bit platform, and so it might be
3607	 * possible to see a half-updated value.
3608	 * Of course expand_progress could change after
3609 * the lock is dropped, so once we get a reference
3610 * to the stripe that we think it is, we will have
3611 * to check again.
3612 */
7ecaa1e6
N
3613 spin_lock_irq(&conf->device_lock);
3614 disks = conf->raid_disks;
b5663ba4 3615 if (logical_sector >= conf->expand_progress) {
7ecaa1e6 3616 disks = conf->previous_raid_disks;
b5663ba4
N
3617 previous = 1;
3618 } else {
b578d55f
N
3619 if (logical_sector >= conf->expand_lo) {
3620 spin_unlock_irq(&conf->device_lock);
3621 schedule();
3622 goto retry;
3623 }
3624 }
7ecaa1e6
N
3625 spin_unlock_irq(&conf->device_lock);
3626 }
16a53ecc
N
3627 data_disks = disks - conf->max_degraded;
3628
112bf897
N
3629 new_sector = raid5_compute_sector(conf, logical_sector,
3630 previous,
911d4ee8 3631 &dd_idx, NULL);
45b4233c 3632 pr_debug("raid5: make_request, sector %llu logical %llu\n",
1da177e4
LT
3633 (unsigned long long)new_sector,
3634 (unsigned long long)logical_sector);
3635
b5663ba4
N
3636 sh = get_active_stripe(conf, new_sector, previous,
3637 (bi->bi_rw&RWA_MASK));
1da177e4 3638 if (sh) {
7ecaa1e6
N
3639 if (unlikely(conf->expand_progress != MaxSector)) {
3640 /* expansion might have moved on while waiting for a
df8e7f76
N
3641 * stripe, so we must do the range check again.
3642 * Expansion could still move past after this
3643 * test, but as we are holding a reference to
3644 * 'sh', we know that if that happens,
3645 * STRIPE_EXPANDING will get set and the expansion
3646 * won't proceed until we finish with the stripe.
7ecaa1e6
N
3647 */
3648 int must_retry = 0;
3649 spin_lock_irq(&conf->device_lock);
3650 if (logical_sector < conf->expand_progress &&
3651 disks == conf->previous_raid_disks)
3652 /* mismatch, need to try again */
3653 must_retry = 1;
3654 spin_unlock_irq(&conf->device_lock);
3655 if (must_retry) {
3656 release_stripe(sh);
3657 goto retry;
3658 }
3659 }
e464eafd
N
3660 /* FIXME what if we get a false positive because these
3661 * are being updated.
3662 */
3663 if (logical_sector >= mddev->suspend_lo &&
3664 logical_sector < mddev->suspend_hi) {
3665 release_stripe(sh);
3666 schedule();
3667 goto retry;
3668 }
7ecaa1e6
N
3669
3670 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3671 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
3672 /* Stripe is busy expanding or
3673 * add failed due to overlap. Flush everything
1da177e4
LT
3674 * and wait a while
3675 */
3676 raid5_unplug_device(mddev->queue);
3677 release_stripe(sh);
3678 schedule();
3679 goto retry;
3680 }
3681 finish_wait(&conf->wait_for_overlap, &w);
6ed3003c
N
3682 set_bit(STRIPE_HANDLE, &sh->state);
3683 clear_bit(STRIPE_DELAYED, &sh->state);
1da177e4 3684 release_stripe(sh);
1da177e4
LT
3685 } else {
3686	/* cannot get stripe for read-ahead, just give up */
3687 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3688 finish_wait(&conf->wait_for_overlap, &w);
3689 break;
3690 }
3691
3692 }
3693 spin_lock_irq(&conf->device_lock);
960e739d 3694 remaining = raid5_dec_bi_phys_segments(bi);
f6344757
N
3695 spin_unlock_irq(&conf->device_lock);
3696 if (remaining == 0) {
1da177e4 3697
16a53ecc 3698	if (rw == WRITE)
1da177e4 3699 md_write_end(mddev);
6712ecf8 3700
0e13fe23 3701 bio_endio(bi, 0);
1da177e4 3702 }
1da177e4
LT
3703 return 0;
3704}
3705
52c03291 3706static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
1da177e4 3707{
52c03291
N
3708 /* reshaping is quite different to recovery/resync so it is
3709 * handled quite separately ... here.
3710 *
3711 * On each call to sync_request, we gather one chunk worth of
3712 * destination stripes and flag them as expanding.
3713 * Then we find all the source stripes and request reads.
3714 * As the reads complete, handle_stripe will copy the data
3715 * into the destination stripe and release that stripe.
3716 */
1da177e4
LT
3717 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3718 struct stripe_head *sh;
ccfcc3c1 3719 sector_t first_sector, last_sector;
f416885e
N
3720 int raid_disks = conf->previous_raid_disks;
3721 int data_disks = raid_disks - conf->max_degraded;
3722 int new_data_disks = conf->raid_disks - conf->max_degraded;
52c03291
N
3723 int i;
3724 int dd_idx;
3725 sector_t writepos, safepos, gap;
3726
3727 if (sector_nr == 0 &&
3728 conf->expand_progress != 0) {
3729 /* restarting in the middle, skip the initial sectors */
3730 sector_nr = conf->expand_progress;
f416885e 3731 sector_div(sector_nr, new_data_disks);
52c03291
N
3732 *skipped = 1;
3733 return sector_nr;
3734 }
3735
3736 /* we update the metadata when there is more than 3Meg
3737 * in the block range (that is rather arbitrary, should
3738 * probably be time based) or when the data about to be
3739 * copied would over-write the source of the data at
3740 * the front of the range.
3741	 * i.e. when the stripe one new_stripe beyond expand_progress maps (in the
3742	 * new layout) to a point after where expand_lo maps in the old layout.
3743 */
3744 writepos = conf->expand_progress +
f416885e
N
3745 conf->chunk_size/512*(new_data_disks);
3746 sector_div(writepos, new_data_disks);
52c03291 3747 safepos = conf->expand_lo;
f416885e 3748 sector_div(safepos, data_disks);
52c03291
N
3749 gap = conf->expand_progress - conf->expand_lo;
3750
3751 if (writepos >= safepos ||
f416885e 3752 gap > (new_data_disks)*3000*2 /*3Meg*/) {
52c03291
N
3753 /* Cannot proceed until we've updated the superblock... */
3754 wait_event(conf->wait_for_overlap,
3755 atomic_read(&conf->reshape_stripes)==0);
3756 mddev->reshape_position = conf->expand_progress;
850b2b42 3757 set_bit(MD_CHANGE_DEVS, &mddev->flags);
52c03291 3758 md_wakeup_thread(mddev->thread);
850b2b42 3759 wait_event(mddev->sb_wait, mddev->flags == 0 ||
52c03291
N
3760 kthread_should_stop());
3761 spin_lock_irq(&conf->device_lock);
3762 conf->expand_lo = mddev->reshape_position;
3763 spin_unlock_irq(&conf->device_lock);
3764 wake_up(&conf->wait_for_overlap);
3765 }
3766
3767 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
3768 int j;
3769 int skipped = 0;
b5663ba4 3770 sh = get_active_stripe(conf, sector_nr+i, 0, 0);
52c03291
N
3771 set_bit(STRIPE_EXPANDING, &sh->state);
3772 atomic_inc(&conf->reshape_stripes);
3773 /* If any of this stripe is beyond the end of the old
3774 * array, then we need to zero those blocks
3775 */
3776 for (j=sh->disks; j--;) {
3777 sector_t s;
3778 if (j == sh->pd_idx)
3779 continue;
f416885e 3780 if (conf->level == 6 &&
d0dabf7e 3781 j == sh->qd_idx)
f416885e 3782 continue;
52c03291 3783 s = compute_blocknr(sh, j);
f233ea5c 3784 if (s < mddev->array_sectors) {
52c03291
N
3785 skipped = 1;
3786 continue;
3787 }
3788 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
3789 set_bit(R5_Expanded, &sh->dev[j].flags);
3790 set_bit(R5_UPTODATE, &sh->dev[j].flags);
3791 }
3792 if (!skipped) {
3793 set_bit(STRIPE_EXPAND_READY, &sh->state);
3794 set_bit(STRIPE_HANDLE, &sh->state);
3795 }
3796 release_stripe(sh);
3797 }
3798 spin_lock_irq(&conf->device_lock);
6d3baf2e 3799 conf->expand_progress = (sector_nr + i) * new_data_disks;
52c03291
N
3800 spin_unlock_irq(&conf->device_lock);
3801	/* Ok, those stripes are ready. We can start scheduling
3802 * reads on the source stripes.
3803 * The source stripes are determined by mapping the first and last
3804 * block on the destination stripes.
3805 */
52c03291 3806 first_sector =
112bf897 3807 raid5_compute_sector(conf, sector_nr*(new_data_disks),
911d4ee8 3808 1, &dd_idx, NULL);
52c03291 3809 last_sector =
112bf897
N
3810 raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
3811 *(new_data_disks) - 1),
911d4ee8 3812 1, &dd_idx, NULL);
58c0fed4
AN
3813 if (last_sector >= mddev->dev_sectors)
3814 last_sector = mddev->dev_sectors - 1;
52c03291 3815 while (first_sector <= last_sector) {
b5663ba4 3816 sh = get_active_stripe(conf, first_sector, 1, 0);
52c03291
N
3817 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3818 set_bit(STRIPE_HANDLE, &sh->state);
3819 release_stripe(sh);
3820 first_sector += STRIPE_SECTORS;
3821 }
c6207277
N
3822 /* If this takes us to the resync_max point where we have to pause,
3823 * then we need to write out the superblock.
3824 */
3825 sector_nr += conf->chunk_size>>9;
3826 if (sector_nr >= mddev->resync_max) {
3827 /* Cannot proceed until we've updated the superblock... */
3828 wait_event(conf->wait_for_overlap,
3829 atomic_read(&conf->reshape_stripes) == 0);
3830 mddev->reshape_position = conf->expand_progress;
3831 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3832 md_wakeup_thread(mddev->thread);
3833 wait_event(mddev->sb_wait,
3834 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
3835 || kthread_should_stop());
3836 spin_lock_irq(&conf->device_lock);
3837 conf->expand_lo = mddev->reshape_position;
3838 spin_unlock_irq(&conf->device_lock);
3839 wake_up(&conf->wait_for_overlap);
3840 }
52c03291
N
3841 return conf->chunk_size>>9;
3842}
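The writepos/safepos test in the function above is easier to follow with numbers. A stand-alone sketch, assuming an invented 4-disk raid5 growing to 5 disks with 64K chunks:

#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 128;	/* 64K chunk */
	int old_data_disks = 3;			/* 4 disks minus 1 parity */
	int new_data_disks = 4;			/* 5 disks minus 1 parity */
	unsigned long long expand_progress = 51200;	/* array sectors, made up */
	unsigned long long expand_lo = 46080;

	/* one chunk ahead of progress, as a per-device offset (new geometry) */
	unsigned long long writepos =
		(expand_progress + chunk_sectors * new_data_disks) / new_data_disks;
	/* last safely-recorded point, per-device in the old geometry */
	unsigned long long safepos = expand_lo / old_data_disks;
	unsigned long long gap = expand_progress - expand_lo;

	if (writepos >= safepos || gap > new_data_disks * 3000ULL * 2)
		printf("must update the superblock first\n");
	else
		printf("safe to keep copying (gap=%llu sectors)\n", gap);
	return 0;
}

Here writepos is 12928 while safepos is 15360, and the gap of 5120 sectors is well under the 3Meg limit, so copying continues without a metadata write.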
3843
3844/* FIXME go_faster isn't used */
3845static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
3846{
3847 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3848 struct stripe_head *sh;
58c0fed4 3849 sector_t max_sector = mddev->dev_sectors;
72626685 3850 int sync_blocks;
16a53ecc
N
3851 int still_degraded = 0;
3852 int i;
1da177e4 3853
72626685 3854 if (sector_nr >= max_sector) {
1da177e4
LT
3855 /* just being told to finish up .. nothing much to do */
3856 unplug_slaves(mddev);
29269553
N
3857 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3858 end_reshape(conf);
3859 return 0;
3860 }
72626685
N
3861
3862 if (mddev->curr_resync < max_sector) /* aborted */
3863 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3864 &sync_blocks, 1);
16a53ecc 3865 else /* completed sync */
72626685
N
3866 conf->fullsync = 0;
3867 bitmap_close_sync(mddev->bitmap);
3868
1da177e4
LT
3869 return 0;
3870 }
ccfcc3c1 3871
52c03291
N
3872 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3873 return reshape_request(mddev, sector_nr, skipped);
f6705578 3874
c6207277
N
3875 /* No need to check resync_max as we never do more than one
3876 * stripe, and as resync_max will always be on a chunk boundary,
3877 * if the check in md_do_sync didn't fire, there is no chance
3878 * of overstepping resync_max here
3879 */
3880
16a53ecc 3881	/* if there are too many failed drives and we are trying
1da177e4
LT
3882 * to resync, then assert that we are finished, because there is
3883 * nothing we can do.
3884 */
3285edf1 3885 if (mddev->degraded >= conf->max_degraded &&
16a53ecc 3886 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
58c0fed4 3887 sector_t rv = mddev->dev_sectors - sector_nr;
57afd89f 3888 *skipped = 1;
1da177e4
LT
3889 return rv;
3890 }
72626685 3891 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
3855ad9f 3892 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
72626685
N
3893 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
3894 /* we can skip this block, and probably more */
3895 sync_blocks /= STRIPE_SECTORS;
3896 *skipped = 1;
3897 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
3898 }
1da177e4 3899
b47490c9
N
3900
3901 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3902
b5663ba4 3903 sh = get_active_stripe(conf, sector_nr, 0, 1);
1da177e4 3904 if (sh == NULL) {
b5663ba4 3905 sh = get_active_stripe(conf, sector_nr, 0, 0);
1da177e4 3906 /* make sure we don't swamp the stripe cache if someone else
16a53ecc 3907 * is trying to get access
1da177e4 3908 */
66c006a5 3909 schedule_timeout_uninterruptible(1);
1da177e4 3910 }
16a53ecc
N
3911 /* Need to check if array will still be degraded after recovery/resync
3912 * We don't need to check the 'failed' flag as when that gets set,
3913 * recovery aborts.
3914 */
3915 for (i=0; i<mddev->raid_disks; i++)
3916 if (conf->disks[i].rdev == NULL)
3917 still_degraded = 1;
3918
3919 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
3920
3921 spin_lock(&sh->lock);
1da177e4
LT
3922 set_bit(STRIPE_SYNCING, &sh->state);
3923 clear_bit(STRIPE_INSYNC, &sh->state);
3924 spin_unlock(&sh->lock);
3925
df10cfbc
DW
3926 /* wait for any blocked device to be handled */
3927 while(unlikely(!handle_stripe(sh, NULL)))
3928 ;
1da177e4
LT
3929 release_stripe(sh);
3930
3931 return STRIPE_SECTORS;
3932}
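The whole-stripe rounding used a few lines above is worth isolating: the bitmap may report any number of clean sectors, but resync only ever skips in STRIPE_SECTORS units. A trivial sketch with an invented count:

#include <stdio.h>

#define STRIPE_SECTORS 8	/* 4K stripes on 4K pages, as in this file */

int main(void)
{
	int sync_blocks = 1234;	/* sectors the bitmap says we may skip */
	int skipped = (sync_blocks / STRIPE_SECTORS) * STRIPE_SECTORS;

	printf("skipping %d of %d sectors (whole stripes only)\n",
	       skipped, sync_blocks);
	return 0;
}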
3933
46031f9a
RBJ
3934static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3935{
3936 /* We may not be able to submit a whole bio at once as there
3937 * may not be enough stripe_heads available.
3938 * We cannot pre-allocate enough stripe_heads as we may need
3939	 * more than exist in the cache (if we allow ever larger chunks).
3940 * So we do one stripe head at a time and record in
3941 * ->bi_hw_segments how many have been done.
3942 *
3943 * We *know* that this entire raid_bio is in one chunk, so
3944	 * there will be only one 'dd_idx' and only one call to raid5_compute_sector is needed.
3945 */
3946 struct stripe_head *sh;
911d4ee8 3947 int dd_idx;
46031f9a
RBJ
3948 sector_t sector, logical_sector, last_sector;
3949 int scnt = 0;
3950 int remaining;
3951 int handled = 0;
3952
3953 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
112bf897 3954 sector = raid5_compute_sector(conf, logical_sector,
911d4ee8 3955 0, &dd_idx, NULL);
46031f9a
RBJ
3956 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
3957
3958 for (; logical_sector < last_sector;
387bb173
NB
3959 logical_sector += STRIPE_SECTORS,
3960 sector += STRIPE_SECTORS,
3961 scnt++) {
46031f9a 3962
960e739d 3963 if (scnt < raid5_bi_hw_segments(raid_bio))
46031f9a
RBJ
3964 /* already done this stripe */
3965 continue;
3966
b5663ba4 3967 sh = get_active_stripe(conf, sector, 0, 1);
46031f9a
RBJ
3968
3969 if (!sh) {
3970 /* failed to get a stripe - must wait */
960e739d 3971 raid5_set_bi_hw_segments(raid_bio, scnt);
46031f9a
RBJ
3972 conf->retry_read_aligned = raid_bio;
3973 return handled;
3974 }
3975
3976 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
387bb173
NB
3977 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
3978 release_stripe(sh);
960e739d 3979 raid5_set_bi_hw_segments(raid_bio, scnt);
387bb173
NB
3980 conf->retry_read_aligned = raid_bio;
3981 return handled;
3982 }
3983
46031f9a
RBJ
3984 handle_stripe(sh, NULL);
3985 release_stripe(sh);
3986 handled++;
3987 }
3988 spin_lock_irq(&conf->device_lock);
960e739d 3989 remaining = raid5_dec_bi_phys_segments(raid_bio);
46031f9a 3990 spin_unlock_irq(&conf->device_lock);
0e13fe23
NB
3991 if (remaining == 0)
3992 bio_endio(raid_bio, 0);
46031f9a
RBJ
3993 if (atomic_dec_and_test(&conf->active_aligned_reads))
3994 wake_up(&conf->wait_for_stripe);
3995 return handled;
3996}
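The resume logic above reduces to a stripe-strided loop with a skip counter. A stand-alone sketch (all sector numbers invented):

#include <stdio.h>

#define STRIPE_SECTORS 8

int main(void)
{
	unsigned long long bi_sector = 100, bi_sectors = 40;
	unsigned long long logical =
		bi_sector & ~(unsigned long long)(STRIPE_SECTORS - 1);
	unsigned long long last = bi_sector + bi_sectors;
	int scnt, resume_at = 2;  /* pretend two stripes were handled before */

	for (scnt = 0; logical < last; logical += STRIPE_SECTORS, scnt++) {
		if (scnt < resume_at)
			continue;	/* already done this stripe */
		printf("handling stripe %d at sector %llu\n", scnt, logical);
	}
	return 0;
}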
3997
3998
3999
1da177e4
LT
4000/*
4001 * This is our raid5 kernel thread.
4002 *
4003 * We scan the hash table for stripes which can be handled now.
4004 * During the scan, completed stripes are saved for us by the interrupt
4005 * handler, so that they will not have to wait for our next wakeup.
4006 */
6ed3003c 4007static void raid5d(mddev_t *mddev)
1da177e4
LT
4008{
4009 struct stripe_head *sh;
4010 raid5_conf_t *conf = mddev_to_conf(mddev);
4011 int handled;
4012
45b4233c 4013 pr_debug("+++ raid5d active\n");
1da177e4
LT
4014
4015 md_check_recovery(mddev);
1da177e4
LT
4016
4017 handled = 0;
4018 spin_lock_irq(&conf->device_lock);
4019 while (1) {
46031f9a 4020 struct bio *bio;
1da177e4 4021
ae3c20cc 4022 if (conf->seq_flush != conf->seq_write) {
72626685 4023 int seq = conf->seq_flush;
700e432d 4024 spin_unlock_irq(&conf->device_lock);
72626685 4025 bitmap_unplug(mddev->bitmap);
700e432d 4026 spin_lock_irq(&conf->device_lock);
72626685
N
4027 conf->seq_write = seq;
4028 activate_bit_delay(conf);
4029 }
4030
46031f9a
RBJ
4031 while ((bio = remove_bio_from_retry(conf))) {
4032 int ok;
4033 spin_unlock_irq(&conf->device_lock);
4034 ok = retry_aligned_read(conf, bio);
4035 spin_lock_irq(&conf->device_lock);
4036 if (!ok)
4037 break;
4038 handled++;
4039 }
4040
8b3e6cdc
DW
4041 sh = __get_priority_stripe(conf);
4042
c9f21aaf 4043 if (!sh)
1da177e4 4044 break;
1da177e4
LT
4045 spin_unlock_irq(&conf->device_lock);
4046
4047 handled++;
16a53ecc 4048 handle_stripe(sh, conf->spare_page);
1da177e4
LT
4049 release_stripe(sh);
4050
4051 spin_lock_irq(&conf->device_lock);
4052 }
45b4233c 4053 pr_debug("%d stripes handled\n", handled);
1da177e4
LT
4054
4055 spin_unlock_irq(&conf->device_lock);
4056
c9f21aaf 4057 async_tx_issue_pending_all();
1da177e4
LT
4058 unplug_slaves(mddev);
4059
45b4233c 4060 pr_debug("--- raid5d inactive\n");
1da177e4
LT
4061}
4062
3f294f4f 4063static ssize_t
007583c9 4064raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
3f294f4f 4065{
007583c9 4066 raid5_conf_t *conf = mddev_to_conf(mddev);
96de1e66
N
4067 if (conf)
4068 return sprintf(page, "%d\n", conf->max_nr_stripes);
4069 else
4070 return 0;
3f294f4f
N
4071}
4072
4073static ssize_t
007583c9 4074raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
3f294f4f 4075{
007583c9 4076 raid5_conf_t *conf = mddev_to_conf(mddev);
4ef197d8 4077 unsigned long new;
b5470dc5
DW
4078 int err;
4079
3f294f4f
N
4080 if (len >= PAGE_SIZE)
4081 return -EINVAL;
96de1e66
N
4082 if (!conf)
4083 return -ENODEV;
3f294f4f 4084
4ef197d8 4085 if (strict_strtoul(page, 10, &new))
3f294f4f
N
4086 return -EINVAL;
4087 if (new <= 16 || new > 32768)
4088 return -EINVAL;
4089 while (new < conf->max_nr_stripes) {
4090 if (drop_one_stripe(conf))
4091 conf->max_nr_stripes--;
4092 else
4093 break;
4094 }
b5470dc5
DW
4095 err = md_allow_write(mddev);
4096 if (err)
4097 return err;
3f294f4f
N
4098 while (new > conf->max_nr_stripes) {
4099 if (grow_one_stripe(conf))
4100 conf->max_nr_stripes++;
4101 else break;
4102 }
4103 return len;
4104}
007583c9 4105
96de1e66
N
4106static struct md_sysfs_entry
4107raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4108 raid5_show_stripe_cache_size,
4109 raid5_store_stripe_cache_size);
3f294f4f 4110
8b3e6cdc
DW
4111static ssize_t
4112raid5_show_preread_threshold(mddev_t *mddev, char *page)
4113{
4114 raid5_conf_t *conf = mddev_to_conf(mddev);
4115 if (conf)
4116 return sprintf(page, "%d\n", conf->bypass_threshold);
4117 else
4118 return 0;
4119}
4120
4121static ssize_t
4122raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4123{
4124 raid5_conf_t *conf = mddev_to_conf(mddev);
4ef197d8 4125 unsigned long new;
8b3e6cdc
DW
4126 if (len >= PAGE_SIZE)
4127 return -EINVAL;
4128 if (!conf)
4129 return -ENODEV;
4130
4ef197d8 4131 if (strict_strtoul(page, 10, &new))
8b3e6cdc 4132 return -EINVAL;
4ef197d8 4133 if (new > conf->max_nr_stripes)
8b3e6cdc
DW
4134 return -EINVAL;
4135 conf->bypass_threshold = new;
4136 return len;
4137}
4138
4139static struct md_sysfs_entry
4140raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4141 S_IRUGO | S_IWUSR,
4142 raid5_show_preread_threshold,
4143 raid5_store_preread_threshold);
4144
3f294f4f 4145static ssize_t
96de1e66 4146stripe_cache_active_show(mddev_t *mddev, char *page)
3f294f4f 4147{
007583c9 4148 raid5_conf_t *conf = mddev_to_conf(mddev);
96de1e66
N
4149 if (conf)
4150 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4151 else
4152 return 0;
3f294f4f
N
4153}
4154
96de1e66
N
4155static struct md_sysfs_entry
4156raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
3f294f4f 4157
007583c9 4158static struct attribute *raid5_attrs[] = {
3f294f4f
N
4159 &raid5_stripecache_size.attr,
4160 &raid5_stripecache_active.attr,
8b3e6cdc 4161 &raid5_preread_bypass_threshold.attr,
3f294f4f
N
4162 NULL,
4163};
007583c9
N
4164static struct attribute_group raid5_attrs_group = {
4165 .name = NULL,
4166 .attrs = raid5_attrs,
3f294f4f
N
4167};
4168
91adb564 4169static raid5_conf_t *setup_conf(mddev_t *mddev)
1da177e4
LT
4170{
4171 raid5_conf_t *conf;
4172 int raid_disk, memory;
4173 mdk_rdev_t *rdev;
4174 struct disk_info *disk;
1da177e4 4175
91adb564
N
4176 if (mddev->new_level != 5
4177 && mddev->new_level != 4
4178 && mddev->new_level != 6) {
16a53ecc 4179 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
91adb564
N
4180 mdname(mddev), mddev->new_level);
4181 return ERR_PTR(-EIO);
1da177e4 4182 }
91adb564
N
4183 if ((mddev->new_level == 5
4184 && !algorithm_valid_raid5(mddev->new_layout)) ||
4185 (mddev->new_level == 6
4186 && !algorithm_valid_raid6(mddev->new_layout))) {
99c0fb5f 4187 printk(KERN_ERR "raid5: %s: layout %d not supported\n",
91adb564
N
4188 mdname(mddev), mddev->new_layout);
4189 return ERR_PTR(-EIO);
99c0fb5f 4190 }
91adb564
N
4191 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4192 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
4193 mdname(mddev), mddev->raid_disks);
4194 return ERR_PTR(-EINVAL);
4bbf3771
N
4195 }
4196
91adb564
N
4197 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
4198 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4199 mddev->new_chunk, mdname(mddev));
4200 return ERR_PTR(-EINVAL);
f6705578
N
4201 }
4202
91adb564
N
4203 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4204 if (conf == NULL)
1da177e4 4205 goto abort;
91adb564
N
4206
4207 conf->raid_disks = mddev->raid_disks;
4208 if (mddev->reshape_position == MaxSector)
4209 conf->previous_raid_disks = mddev->raid_disks;
4210 else
f6705578 4211 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
f6705578
N
4212
4213 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
b55e6bfc
N
4214 GFP_KERNEL);
4215 if (!conf->disks)
4216 goto abort;
9ffae0cf 4217
1da177e4
LT
4218 conf->mddev = mddev;
4219
fccddba0 4220 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1da177e4 4221 goto abort;
1da177e4 4222
91adb564 4223 if (mddev->new_level == 6) {
16a53ecc
N
4224 conf->spare_page = alloc_page(GFP_KERNEL);
4225 if (!conf->spare_page)
4226 goto abort;
4227 }
1da177e4
LT
4228 spin_lock_init(&conf->device_lock);
4229 init_waitqueue_head(&conf->wait_for_stripe);
4230 init_waitqueue_head(&conf->wait_for_overlap);
4231 INIT_LIST_HEAD(&conf->handle_list);
8b3e6cdc 4232 INIT_LIST_HEAD(&conf->hold_list);
1da177e4 4233 INIT_LIST_HEAD(&conf->delayed_list);
72626685 4234 INIT_LIST_HEAD(&conf->bitmap_list);
1da177e4
LT
4235 INIT_LIST_HEAD(&conf->inactive_list);
4236 atomic_set(&conf->active_stripes, 0);
4237 atomic_set(&conf->preread_active_stripes, 0);
46031f9a 4238 atomic_set(&conf->active_aligned_reads, 0);
8b3e6cdc 4239 conf->bypass_threshold = BYPASS_THRESHOLD;
1da177e4 4240
45b4233c 4241 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
1da177e4 4242
159ec1fc 4243 list_for_each_entry(rdev, &mddev->disks, same_set) {
1da177e4 4244 raid_disk = rdev->raid_disk;
f6705578 4245 if (raid_disk >= conf->raid_disks
1da177e4
LT
4246 || raid_disk < 0)
4247 continue;
4248 disk = conf->disks + raid_disk;
4249
4250 disk->rdev = rdev;
4251
b2d444d7 4252 if (test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
4253 char b[BDEVNAME_SIZE];
4254 printk(KERN_INFO "raid5: device %s operational as raid"
4255 " disk %d\n", bdevname(rdev->bdev,b),
4256 raid_disk);
8c2e870a
NB
4257 } else
4258 /* Cannot rely on bitmap to complete recovery */
4259 conf->fullsync = 1;
1da177e4
LT
4260 }
4261
91adb564
N
4262 conf->chunk_size = mddev->new_chunk;
4263 conf->level = mddev->new_level;
16a53ecc
N
4264 if (conf->level == 6)
4265 conf->max_degraded = 2;
4266 else
4267 conf->max_degraded = 1;
91adb564 4268 conf->algorithm = mddev->new_layout;
1da177e4 4269 conf->max_nr_stripes = NR_STRIPES;
f6705578 4270 conf->expand_progress = mddev->reshape_position;
1da177e4 4271
91adb564
N
4272 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4273 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4274 if (grow_stripes(conf, conf->max_nr_stripes)) {
4275 printk(KERN_ERR
4276 "raid5: couldn't allocate %dkB for buffers\n", memory);
4277 goto abort;
4278 } else
4279 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
4280 memory, mdname(mddev));
1da177e4 4281
91adb564
N
4282 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
4283 if (!conf->thread) {
4284 printk(KERN_ERR
4285 "raid5: couldn't allocate thread for %s\n",
4286 mdname(mddev));
16a53ecc
N
4287 goto abort;
4288 }
91adb564
N
4289
4290 return conf;
4291
4292 abort:
4293 if (conf) {
4294 shrink_stripes(conf);
4295 safe_put_page(conf->spare_page);
4296 kfree(conf->disks);
4297 kfree(conf->stripe_hashtbl);
4298 kfree(conf);
4299 return ERR_PTR(-EIO);
4300 } else
4301 return ERR_PTR(-ENOMEM);
4302}
4303
4304static int run(mddev_t *mddev)
4305{
4306 raid5_conf_t *conf;
4307 int working_disks = 0;
4308 mdk_rdev_t *rdev;
4309
4310 if (mddev->reshape_position != MaxSector) {
4311 /* Check that we can continue the reshape.
4312 * Currently only disks can change, it must
4313 * increase, and we must be past the point where
4314 * a stripe over-writes itself
4315 */
4316 sector_t here_new, here_old;
4317 int old_disks;
4318 int max_degraded = (mddev->level == 5 ? 1 : 2);
4319
4320 if (mddev->new_level != mddev->level ||
4321 mddev->new_layout != mddev->layout ||
4322 mddev->new_chunk != mddev->chunk_size) {
4323 printk(KERN_ERR "raid5: %s: unsupported reshape "
4324 "required - aborting.\n",
4325 mdname(mddev));
4326 return -EINVAL;
4327 }
4328 if (mddev->delta_disks <= 0) {
4329 printk(KERN_ERR "raid5: %s: unsupported reshape "
4330 "(reduce disks) required - aborting.\n",
4331 mdname(mddev));
4332 return -EINVAL;
4333 }
4334 old_disks = mddev->raid_disks - mddev->delta_disks;
4335 /* reshape_position must be on a new-stripe boundary, and one
4336 * further up in new geometry must map after here in old
4337 * geometry.
4338 */
4339 here_new = mddev->reshape_position;
4340 if (sector_div(here_new, (mddev->chunk_size>>9)*
4341 (mddev->raid_disks - max_degraded))) {
4342 printk(KERN_ERR "raid5: reshape_position not "
4343 "on a stripe boundary\n");
4344 return -EINVAL;
4345 }
4346 /* here_new is the stripe we will write to */
4347 here_old = mddev->reshape_position;
4348 sector_div(here_old, (mddev->chunk_size>>9)*
4349 (old_disks-max_degraded));
4350 /* here_old is the first stripe that we might need to read
4351 * from */
4352 if (here_new >= here_old) {
4353 /* Reading from the same stripe as writing to - bad */
4354 printk(KERN_ERR "raid5: reshape_position too early for "
4355 "auto-recovery - aborting.\n");
4356 return -EINVAL;
4357 }
4358 printk(KERN_INFO "raid5: reshape will continue\n");
4359 /* OK, we should be able to continue; */
4360 } else {
4361 BUG_ON(mddev->level != mddev->new_level);
4362 BUG_ON(mddev->layout != mddev->new_layout);
4363 BUG_ON(mddev->chunk_size != mddev->new_chunk);
4364 BUG_ON(mddev->delta_disks != 0);
1da177e4 4365 }
91adb564 4366
245f46c2
N
4367 if (mddev->private == NULL)
4368 conf = setup_conf(mddev);
4369 else
4370 conf = mddev->private;
4371
91adb564
N
4372 if (IS_ERR(conf))
4373 return PTR_ERR(conf);
4374
4375 mddev->thread = conf->thread;
4376 conf->thread = NULL;
4377 mddev->private = conf;
4378
4379 /*
4380 * 0 for a fully functional array, 1 or 2 for a degraded array.
4381 */
4382 list_for_each_entry(rdev, &mddev->disks, same_set)
4383 if (rdev->raid_disk >= 0 &&
4384 test_bit(In_sync, &rdev->flags))
4385 working_disks++;
4386
4387 mddev->degraded = conf->raid_disks - working_disks;
4388
16a53ecc 4389 if (mddev->degraded > conf->max_degraded) {
1da177e4
LT
4390 printk(KERN_ERR "raid5: not enough operational devices for %s"
4391 " (%d/%d failed)\n",
02c2de8c 4392 mdname(mddev), mddev->degraded, conf->raid_disks);
1da177e4
LT
4393 goto abort;
4394 }
4395
91adb564
N
4396 /* device size must be a multiple of chunk size */
4397 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
4398 mddev->resync_max_sectors = mddev->dev_sectors;
4399
16a53ecc 4400 if (mddev->degraded > 0 &&
1da177e4 4401 mddev->recovery_cp != MaxSector) {
6ff8d8ec
N
4402 if (mddev->ok_start_degraded)
4403 printk(KERN_WARNING
4404 "raid5: starting dirty degraded array: %s"
4405 "- data corruption possible.\n",
4406 mdname(mddev));
4407 else {
4408 printk(KERN_ERR
4409 "raid5: cannot start dirty degraded array for %s\n",
4410 mdname(mddev));
4411 goto abort;
4412 }
1da177e4
LT
4413 }
4414
1da177e4
LT
4415 if (mddev->degraded == 0)
4416 printk("raid5: raid level %d set %s active with %d out of %d"
4417 " devices, algorithm %d\n", conf->level, mdname(mddev),
4418 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4419 conf->algorithm);
4420 else
4421 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4422 " out of %d devices, algorithm %d\n", conf->level,
4423 mdname(mddev), mddev->raid_disks - mddev->degraded,
4424 mddev->raid_disks, conf->algorithm);
4425
4426 print_raid5_conf(conf);
4427
f6705578
N
4428 if (conf->expand_progress != MaxSector) {
4429 printk("...ok start reshape thread\n");
b578d55f 4430 conf->expand_lo = conf->expand_progress;
f6705578
N
4431 atomic_set(&conf->reshape_stripes, 0);
4432 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4433 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4434 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4435 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4436 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4437 "%s_reshape");
f6705578
N
4438 }
4439
1da177e4 4440 /* read-ahead size must cover two whole stripes, which is
16a53ecc 4441	 * 2 * (datadisks) * chunksize, where datadisks is raid_disks - max_degraded
1da177e4
LT
4442 */
4443 {
16a53ecc
N
4444 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4445 int stripe = data_disks *
8932c2e0 4446 (mddev->chunk_size / PAGE_SIZE);
1da177e4
LT
4447 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4448 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4449 }
4450
4451 /* Ok, everything is just fine now */
5e55e2f5
N
4452 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4453 printk(KERN_WARNING
4454 "raid5: failed to create sysfs attributes for %s\n",
4455 mdname(mddev));
7a5febe9 4456
91adb564
N
4457 mddev->queue->queue_lock = &conf->device_lock;
4458
7a5febe9 4459 mddev->queue->unplug_fn = raid5_unplug_device;
f022b2fd 4460 mddev->queue->backing_dev_info.congested_data = mddev;
041ae52e 4461 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
f022b2fd 4462
58c0fed4
AN
4463 mddev->array_sectors = mddev->dev_sectors *
4464 (conf->previous_raid_disks - conf->max_degraded);
7a5febe9 4465
23032a0e
RBJ
4466 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4467
1da177e4
LT
4468 return 0;
4469abort:
e0cf8f04 4470 md_unregister_thread(mddev->thread);
91adb564 4471 mddev->thread = NULL;
1da177e4 4472 if (conf) {
91adb564 4473 shrink_stripes(conf);
1da177e4 4474 print_raid5_conf(conf);
16a53ecc 4475 safe_put_page(conf->spare_page);
b55e6bfc 4476 kfree(conf->disks);
fccddba0 4477 kfree(conf->stripe_hashtbl);
1da177e4
LT
4478 kfree(conf);
4479 }
4480 mddev->private = NULL;
4481 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
4482 return -EIO;
4483}
4484
4485
4486
3f294f4f 4487static int stop(mddev_t *mddev)
1da177e4
LT
4488{
4489 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4490
4491 md_unregister_thread(mddev->thread);
4492 mddev->thread = NULL;
4493 shrink_stripes(conf);
fccddba0 4494 kfree(conf->stripe_hashtbl);
041ae52e 4495 mddev->queue->backing_dev_info.congested_fn = NULL;
1da177e4 4496 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
007583c9 4497 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
b55e6bfc 4498 kfree(conf->disks);
96de1e66 4499 kfree(conf);
1da177e4
LT
4500 mddev->private = NULL;
4501 return 0;
4502}
4503
45b4233c 4504#ifdef DEBUG
d710e138 4505static void print_sh(struct seq_file *seq, struct stripe_head *sh)
1da177e4
LT
4506{
4507 int i;
4508
16a53ecc
N
4509 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4510 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4511 seq_printf(seq, "sh %llu, count %d.\n",
4512 (unsigned long long)sh->sector, atomic_read(&sh->count));
4513 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
7ecaa1e6 4514 for (i = 0; i < sh->disks; i++) {
16a53ecc
N
4515 seq_printf(seq, "(cache%d: %p %ld) ",
4516 i, sh->dev[i].page, sh->dev[i].flags);
1da177e4 4517 }
16a53ecc 4518 seq_printf(seq, "\n");
1da177e4
LT
4519}
4520
d710e138 4521static void printall(struct seq_file *seq, raid5_conf_t *conf)
1da177e4
LT
4522{
4523 struct stripe_head *sh;
fccddba0 4524 struct hlist_node *hn;
1da177e4
LT
4525 int i;
4526
4527 spin_lock_irq(&conf->device_lock);
4528 for (i = 0; i < NR_HASH; i++) {
fccddba0 4529 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
1da177e4
LT
4530 if (sh->raid_conf != conf)
4531 continue;
16a53ecc 4532 print_sh(seq, sh);
1da177e4
LT
4533 }
4534 }
4535 spin_unlock_irq(&conf->device_lock);
4536}
4537#endif
4538
d710e138 4539static void status(struct seq_file *seq, mddev_t *mddev)
1da177e4
LT
4540{
4541 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4542 int i;
4543
4544 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
02c2de8c 4545 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
1da177e4
LT
4546 for (i = 0; i < conf->raid_disks; i++)
4547 seq_printf (seq, "%s",
4548 conf->disks[i].rdev &&
b2d444d7 4549 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
1da177e4 4550 seq_printf (seq, "]");
45b4233c 4551#ifdef DEBUG
16a53ecc
N
4552 seq_printf (seq, "\n");
4553 printall(seq, conf);
1da177e4
LT
4554#endif
4555}
4556
4557static void print_raid5_conf (raid5_conf_t *conf)
4558{
4559 int i;
4560 struct disk_info *tmp;
4561
4562 printk("RAID5 conf printout:\n");
4563 if (!conf) {
4564 printk("(conf==NULL)\n");
4565 return;
4566 }
02c2de8c
N
4567 printk(" --- rd:%d wd:%d\n", conf->raid_disks,
4568 conf->raid_disks - conf->mddev->degraded);
1da177e4
LT
4569
4570 for (i = 0; i < conf->raid_disks; i++) {
4571 char b[BDEVNAME_SIZE];
4572 tmp = conf->disks + i;
4573 if (tmp->rdev)
4574 printk(" disk %d, o:%d, dev:%s\n",
b2d444d7 4575 i, !test_bit(Faulty, &tmp->rdev->flags),
1da177e4
LT
4576 bdevname(tmp->rdev->bdev,b));
4577 }
4578}
4579
4580static int raid5_spare_active(mddev_t *mddev)
4581{
4582 int i;
4583 raid5_conf_t *conf = mddev->private;
4584 struct disk_info *tmp;
4585
4586 for (i = 0; i < conf->raid_disks; i++) {
4587 tmp = conf->disks + i;
4588 if (tmp->rdev
b2d444d7 4589 && !test_bit(Faulty, &tmp->rdev->flags)
c04be0aa
N
4590 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
4591 unsigned long flags;
4592 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 4593 mddev->degraded--;
c04be0aa 4594 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4
LT
4595 }
4596 }
4597 print_raid5_conf(conf);
4598 return 0;
4599}
4600
4601static int raid5_remove_disk(mddev_t *mddev, int number)
4602{
4603 raid5_conf_t *conf = mddev->private;
4604 int err = 0;
4605 mdk_rdev_t *rdev;
4606 struct disk_info *p = conf->disks + number;
4607
4608 print_raid5_conf(conf);
4609 rdev = p->rdev;
4610 if (rdev) {
b2d444d7 4611 if (test_bit(In_sync, &rdev->flags) ||
1da177e4
LT
4612 atomic_read(&rdev->nr_pending)) {
4613 err = -EBUSY;
4614 goto abort;
4615 }
dfc70645
N
4616 /* Only remove non-faulty devices if recovery
4617 * isn't possible.
4618 */
4619 if (!test_bit(Faulty, &rdev->flags) &&
4620 mddev->degraded <= conf->max_degraded) {
4621 err = -EBUSY;
4622 goto abort;
4623 }
1da177e4 4624 p->rdev = NULL;
fbd568a3 4625 synchronize_rcu();
1da177e4
LT
4626 if (atomic_read(&rdev->nr_pending)) {
4627 /* lost the race, try later */
4628 err = -EBUSY;
4629 p->rdev = rdev;
4630 }
4631 }
4632abort:
4633
4634 print_raid5_conf(conf);
4635 return err;
4636}
4637
4638static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
4639{
4640 raid5_conf_t *conf = mddev->private;
199050ea 4641 int err = -EEXIST;
1da177e4
LT
4642 int disk;
4643 struct disk_info *p;
6c2fce2e
NB
4644 int first = 0;
4645 int last = conf->raid_disks - 1;
1da177e4 4646
16a53ecc 4647 if (mddev->degraded > conf->max_degraded)
1da177e4 4648 /* no point adding a device */
199050ea 4649 return -EINVAL;
1da177e4 4650
6c2fce2e
NB
4651 if (rdev->raid_disk >= 0)
4652 first = last = rdev->raid_disk;
1da177e4
LT
4653
4654 /*
16a53ecc
N
4655 * find the disk ... but prefer rdev->saved_raid_disk
4656 * if possible.
1da177e4 4657 */
16a53ecc 4658 if (rdev->saved_raid_disk >= 0 &&
6c2fce2e 4659 rdev->saved_raid_disk >= first &&
16a53ecc
N
4660 conf->disks[rdev->saved_raid_disk].rdev == NULL)
4661 disk = rdev->saved_raid_disk;
4662 else
6c2fce2e
NB
4663 disk = first;
4664 for ( ; disk <= last ; disk++)
1da177e4 4665 if ((p=conf->disks + disk)->rdev == NULL) {
b2d444d7 4666 clear_bit(In_sync, &rdev->flags);
1da177e4 4667 rdev->raid_disk = disk;
199050ea 4668 err = 0;
72626685
N
4669 if (rdev->saved_raid_disk != disk)
4670 conf->fullsync = 1;
d6065f7b 4671 rcu_assign_pointer(p->rdev, rdev);
1da177e4
LT
4672 break;
4673 }
4674 print_raid5_conf(conf);
199050ea 4675 return err;
1da177e4
LT
4676}
4677
4678static int raid5_resize(mddev_t *mddev, sector_t sectors)
4679{
4680 /* no resync is happening, and there is enough space
4681 * on all devices, so we can resize.
4682 * We need to make sure resync covers any new space.
4683 * If the array is shrinking we should possibly wait until
4684 * any io in the removed space completes, but it hardly seems
4685 * worth it.
4686 */
16a53ecc
N
4687 raid5_conf_t *conf = mddev_to_conf(mddev);
4688
1da177e4 4689 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
f233ea5c
AN
4690 mddev->array_sectors = sectors * (mddev->raid_disks
4691 - conf->max_degraded);
4692 set_capacity(mddev->gendisk, mddev->array_sectors);
44ce6294 4693 mddev->changed = 1;
58c0fed4
AN
4694 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
4695 mddev->recovery_cp = mddev->dev_sectors;
1da177e4
LT
4696 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4697 }
58c0fed4 4698 mddev->dev_sectors = sectors;
4b5c7ae8 4699 mddev->resync_max_sectors = sectors;
1da177e4
LT
4700 return 0;
4701}
4702
29269553 4703#ifdef CONFIG_MD_RAID5_RESHAPE
63c70c4f 4704static int raid5_check_reshape(mddev_t *mddev)
29269553
N
4705{
4706 raid5_conf_t *conf = mddev_to_conf(mddev);
4707 int err;
29269553 4708
63c70c4f
N
4709 if (mddev->delta_disks < 0 ||
4710 mddev->new_level != mddev->level)
4711 return -EINVAL; /* Cannot shrink array or change level yet */
4712 if (mddev->delta_disks == 0)
29269553 4713 return 0; /* nothing to do */
dba034ee
N
4714 if (mddev->bitmap)
4715 /* Cannot grow a bitmap yet */
4716 return -EBUSY;
29269553
N
4717
4718 /* Can only proceed if there are plenty of stripe_heads.
4719	 * We need a minimum of one full stripe, and for sensible progress
4720 * it is best to have about 4 times that.
4721 * If we require 4 times, then the default 256 4K stripe_heads will
4722 * allow for chunk sizes up to 256K, which is probably OK.
4723 * If the chunk size is greater, user-space should request more
4724 * stripe_heads first.
4725 */
63c70c4f
N
4726 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
4727 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
29269553
N
4728 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4729 (mddev->chunk_size / STRIPE_SIZE)*4);
4730 return -ENOSPC;
4731 }
4732
63c70c4f
N
4733 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
4734 if (err)
4735 return err;
4736
b4c4c7b8
N
4737 if (mddev->degraded > conf->max_degraded)
4738 return -EINVAL;
63c70c4f
N
4739 /* looks like we might be able to manage this */
4740 return 0;
4741}
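With the defaults at the top of this file (256 stripe_heads, 4K STRIPE_SIZE), the budget check above works out as below; this throwaway sketch just tabulates it:

#include <stdio.h>

int main(void)
{
	int max_nr_stripes = 256;	/* NR_STRIPES default */
	int stripe_size = 4096;		/* STRIPE_SIZE on 4K pages */
	int chunk;

	for (chunk = 64 << 10; chunk <= 512 << 10; chunk <<= 1)
		printf("%3dK chunk needs %3d stripe_heads -> %s\n",
		       chunk >> 10, (chunk / stripe_size) * 4,
		       (chunk / stripe_size) * 4 > max_nr_stripes
			       ? "ENOSPC" : "ok");
	return 0;
}

A 256K chunk needs exactly 256 stripe_heads and still passes; 512K is the first size to fail, matching the comment's 256K limit.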
4742
4743static int raid5_start_reshape(mddev_t *mddev)
4744{
4745 raid5_conf_t *conf = mddev_to_conf(mddev);
4746 mdk_rdev_t *rdev;
63c70c4f
N
4747 int spares = 0;
4748 int added_devices = 0;
c04be0aa 4749 unsigned long flags;
63c70c4f 4750
f416885e 4751 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
63c70c4f
N
4752 return -EBUSY;
4753
159ec1fc 4754 list_for_each_entry(rdev, &mddev->disks, same_set)
29269553
N
4755 if (rdev->raid_disk < 0 &&
4756 !test_bit(Faulty, &rdev->flags))
4757 spares++;
63c70c4f 4758
f416885e 4759 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
29269553
N
4760 /* Not enough devices even to make a degraded array
4761 * of that size
4762 */
4763 return -EINVAL;
4764
f6705578 4765 atomic_set(&conf->reshape_stripes, 0);
29269553
N
4766 spin_lock_irq(&conf->device_lock);
4767 conf->previous_raid_disks = conf->raid_disks;
63c70c4f 4768 conf->raid_disks += mddev->delta_disks;
29269553 4769 conf->expand_progress = 0;
b578d55f 4770 conf->expand_lo = 0;
29269553
N
4771 spin_unlock_irq(&conf->device_lock);
4772
4773 /* Add some new drives, as many as will fit.
4774 * We know there are enough to make the newly sized array work.
4775 */
159ec1fc 4776 list_for_each_entry(rdev, &mddev->disks, same_set)
29269553
N
4777 if (rdev->raid_disk < 0 &&
4778 !test_bit(Faulty, &rdev->flags)) {
199050ea 4779 if (raid5_add_disk(mddev, rdev) == 0) {
29269553
N
4780 char nm[20];
4781 set_bit(In_sync, &rdev->flags);
29269553 4782 added_devices++;
5fd6c1dc 4783 rdev->recovery_offset = 0;
29269553 4784 sprintf(nm, "rd%d", rdev->raid_disk);
5e55e2f5
N
4785 if (sysfs_create_link(&mddev->kobj,
4786 &rdev->kobj, nm))
4787 printk(KERN_WARNING
4788 "raid5: failed to create "
4789 " link %s for %s\n",
4790 nm, mdname(mddev));
29269553
N
4791 } else
4792 break;
4793 }
4794
c04be0aa 4795 spin_lock_irqsave(&conf->device_lock, flags);
63c70c4f 4796 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
c04be0aa 4797 spin_unlock_irqrestore(&conf->device_lock, flags);
63c70c4f 4798 mddev->raid_disks = conf->raid_disks;
f6705578 4799 mddev->reshape_position = 0;
850b2b42 4800 set_bit(MD_CHANGE_DEVS, &mddev->flags);
f6705578 4801
29269553
N
4802 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4803 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4804 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4805 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4806 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4807 "%s_reshape");
4808 if (!mddev->sync_thread) {
4809 mddev->recovery = 0;
4810 spin_lock_irq(&conf->device_lock);
4811 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
4812 conf->expand_progress = MaxSector;
4813 spin_unlock_irq(&conf->device_lock);
4814 return -EAGAIN;
4815 }
4816 md_wakeup_thread(mddev->sync_thread);
4817 md_new_event(mddev);
4818 return 0;
4819}
4820#endif
4821
4822static void end_reshape(raid5_conf_t *conf)
4823{
4824 struct block_device *bdev;
4825
f6705578 4826 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
58c0fed4 4827 conf->mddev->array_sectors = conf->mddev->dev_sectors *
f416885e 4828 (conf->raid_disks - conf->max_degraded);
f233ea5c 4829 set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
44ce6294 4830 conf->mddev->changed = 1;
f6705578
N
4831
4832 bdev = bdget_disk(conf->mddev->gendisk, 0);
4833 if (bdev) {
4834 mutex_lock(&bdev->bd_inode->i_mutex);
f233ea5c
AN
4835 i_size_write(bdev->bd_inode,
4836 (loff_t)conf->mddev->array_sectors << 9);
f6705578
N
4837 mutex_unlock(&bdev->bd_inode->i_mutex);
4838 bdput(bdev);
4839 }
4840 spin_lock_irq(&conf->device_lock);
4841 conf->expand_progress = MaxSector;
4842 spin_unlock_irq(&conf->device_lock);
4843 conf->mddev->reshape_position = MaxSector;
16a53ecc
N
4844
4845 /* read-ahead size must cover two whole stripes, which is
4846	 * 2 * (datadisks) * chunksize, where datadisks is raid_disks - max_degraded
4847 */
4848 {
4849 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4850 int stripe = data_disks *
4851 (conf->mddev->chunk_size / PAGE_SIZE);
4852 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4853 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4854 }
29269553 4855 }
29269553
N
4856}
4857
72626685
N
4858static void raid5_quiesce(mddev_t *mddev, int state)
4859{
4860 raid5_conf_t *conf = mddev_to_conf(mddev);
4861
4862 switch(state) {
e464eafd
N
4863 case 2: /* resume for a suspend */
4864 wake_up(&conf->wait_for_overlap);
4865 break;
4866
72626685
N
4867 case 1: /* stop all writes */
4868 spin_lock_irq(&conf->device_lock);
4869 conf->quiesce = 1;
4870 wait_event_lock_irq(conf->wait_for_stripe,
46031f9a
RBJ
4871 atomic_read(&conf->active_stripes) == 0 &&
4872 atomic_read(&conf->active_aligned_reads) == 0,
72626685
N
4873 conf->device_lock, /* nothing */);
4874 spin_unlock_irq(&conf->device_lock);
4875 break;
4876
4877 case 0: /* re-enable writes */
4878 spin_lock_irq(&conf->device_lock);
4879 conf->quiesce = 0;
4880 wake_up(&conf->wait_for_stripe);
e464eafd 4881 wake_up(&conf->wait_for_overlap);
72626685
N
4882 spin_unlock_irq(&conf->device_lock);
4883 break;
4884 }
72626685 4885}
b15c2e57 4886
d562b0c4
N
4887
4888static void *raid5_takeover_raid1(mddev_t *mddev)
4889{
4890 int chunksect;
4891
4892 if (mddev->raid_disks != 2 ||
4893 mddev->degraded > 1)
4894 return ERR_PTR(-EINVAL);
4895
4896 /* Should check if there are write-behind devices? */
4897
4898 chunksect = 64*2; /* 64K by default */
4899
4900 /* The array must be an exact multiple of chunksize */
4901 while (chunksect && (mddev->array_sectors & (chunksect-1)))
4902 chunksect >>= 1;
4903
4904 if ((chunksect<<9) < STRIPE_SIZE)
4905 /* array size does not allow a suitable chunk size */
4906 return ERR_PTR(-EINVAL);
4907
4908 mddev->new_level = 5;
4909 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
4910 mddev->new_chunk = chunksect << 9;
4911
4912 return setup_conf(mddev);
4913}
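The fitting loop above just walks down the powers of two until the chunk divides the array exactly. A stand-alone sketch with an invented 500 MiB array (STRIPE_SIZE is taken as 4K, the 4K-page value):

#include <stdio.h>

int main(void)
{
	unsigned long long array_sectors = 500 * 2048;	/* 500 MiB, made up */
	int chunksect = 64 * 2;				/* start at 64K */

	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if (chunksect == 0 || (chunksect << 9) < 4096)
		printf("array size does not allow a suitable chunk size\n");
	else
		printf("chunk = %dK\n", chunksect / 2);
	return 0;
}

For this array size the loop stops immediately at 128 sectors, giving the default 64K chunk.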
4914
4915
4916static void *raid5_takeover(mddev_t *mddev)
4917{
4918 /* raid5 can take over:
4919 * raid0 - if all devices are the same - make it a raid4 layout
4920 * raid1 - if there are two drives. We need to know the chunk size
4921 * raid4 - trivial - just use a raid4 layout.
4922 * raid6 - Providing it is a *_6 layout
4923 *
4924 * For now, just do raid1
4925 */
4926
4927 if (mddev->level == 1)
4928 return raid5_takeover_raid1(mddev);
4929
4930 return ERR_PTR(-EINVAL);
4931}
4932
4933
245f46c2
N
4934static struct mdk_personality raid5_personality;
4935
4936static void *raid6_takeover(mddev_t *mddev)
4937{
4938 /* Currently can only take over a raid5. We map the
4939 * personality to an equivalent raid6 personality
4940 * with the Q block at the end.
4941 */
4942 int new_layout;
4943
4944 if (mddev->pers != &raid5_personality)
4945 return ERR_PTR(-EINVAL);
4946 if (mddev->degraded > 1)
4947 return ERR_PTR(-EINVAL);
4948 if (mddev->raid_disks > 253)
4949 return ERR_PTR(-EINVAL);
4950 if (mddev->raid_disks < 3)
4951 return ERR_PTR(-EINVAL);
4952
4953 switch (mddev->layout) {
4954 case ALGORITHM_LEFT_ASYMMETRIC:
4955 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
4956 break;
4957 case ALGORITHM_RIGHT_ASYMMETRIC:
4958 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
4959 break;
4960 case ALGORITHM_LEFT_SYMMETRIC:
4961 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
4962 break;
4963 case ALGORITHM_RIGHT_SYMMETRIC:
4964 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
4965 break;
4966 case ALGORITHM_PARITY_0:
4967 new_layout = ALGORITHM_PARITY_0_6;
4968 break;
4969 case ALGORITHM_PARITY_N:
4970 new_layout = ALGORITHM_PARITY_N;
4971 break;
4972 default:
4973 return ERR_PTR(-EINVAL);
4974 }
4975 mddev->new_level = 6;
4976 mddev->new_layout = new_layout;
4977 mddev->delta_disks = 1;
4978 mddev->raid_disks += 1;
4979 return setup_conf(mddev);
4980}
4981
4982
16a53ecc
N
4983static struct mdk_personality raid6_personality =
4984{
4985 .name = "raid6",
4986 .level = 6,
4987 .owner = THIS_MODULE,
4988 .make_request = make_request,
4989 .run = run,
4990 .stop = stop,
4991 .status = status,
4992 .error_handler = error,
4993 .hot_add_disk = raid5_add_disk,
4994 .hot_remove_disk= raid5_remove_disk,
4995 .spare_active = raid5_spare_active,
4996 .sync_request = sync_request,
4997 .resize = raid5_resize,
f416885e
N
4998#ifdef CONFIG_MD_RAID5_RESHAPE
4999 .check_reshape = raid5_check_reshape,
5000 .start_reshape = raid5_start_reshape,
5001#endif
16a53ecc 5002 .quiesce = raid5_quiesce,
245f46c2 5003 .takeover = raid6_takeover,
16a53ecc 5004};
2604b703 5005static struct mdk_personality raid5_personality =
1da177e4
LT
5006{
5007 .name = "raid5",
2604b703 5008 .level = 5,
1da177e4
LT
5009 .owner = THIS_MODULE,
5010 .make_request = make_request,
5011 .run = run,
5012 .stop = stop,
5013 .status = status,
5014 .error_handler = error,
5015 .hot_add_disk = raid5_add_disk,
5016 .hot_remove_disk= raid5_remove_disk,
5017 .spare_active = raid5_spare_active,
5018 .sync_request = sync_request,
5019 .resize = raid5_resize,
29269553 5020#ifdef CONFIG_MD_RAID5_RESHAPE
63c70c4f
N
5021 .check_reshape = raid5_check_reshape,
5022 .start_reshape = raid5_start_reshape,
29269553 5023#endif
72626685 5024 .quiesce = raid5_quiesce,
d562b0c4 5025 .takeover = raid5_takeover,
1da177e4
LT
5026};
5027
2604b703 5028static struct mdk_personality raid4_personality =
1da177e4 5029{
2604b703
N
5030 .name = "raid4",
5031 .level = 4,
5032 .owner = THIS_MODULE,
5033 .make_request = make_request,
5034 .run = run,
5035 .stop = stop,
5036 .status = status,
5037 .error_handler = error,
5038 .hot_add_disk = raid5_add_disk,
5039 .hot_remove_disk= raid5_remove_disk,
5040 .spare_active = raid5_spare_active,
5041 .sync_request = sync_request,
5042 .resize = raid5_resize,
3d37890b
N
5043#ifdef CONFIG_MD_RAID5_RESHAPE
5044 .check_reshape = raid5_check_reshape,
5045 .start_reshape = raid5_start_reshape,
5046#endif
2604b703
N
5047 .quiesce = raid5_quiesce,
5048};
5049
5050static int __init raid5_init(void)
5051{
16a53ecc
N
5052 int e;
5053
5054 e = raid6_select_algo();
5055 if ( e )
5056 return e;
5057 register_md_personality(&raid6_personality);
2604b703
N
5058 register_md_personality(&raid5_personality);
5059 register_md_personality(&raid4_personality);
5060 return 0;
1da177e4
LT
5061}
5062
2604b703 5063static void raid5_exit(void)
1da177e4 5064{
16a53ecc 5065 unregister_md_personality(&raid6_personality);
2604b703
N
5066 unregister_md_personality(&raid5_personality);
5067 unregister_md_personality(&raid4_personality);
1da177e4
LT
5068}
5069
5070module_init(raid5_init);
5071module_exit(raid5_exit);
5072MODULE_LICENSE("GPL");
5073MODULE_ALIAS("md-personality-4"); /* RAID5 */
d9d166c2
N
5074MODULE_ALIAS("md-raid5");
5075MODULE_ALIAS("md-raid4");
2604b703
N
5076MODULE_ALIAS("md-level-5");
5077MODULE_ALIAS("md-level-4");
16a53ecc
N
5078MODULE_ALIAS("md-personality-8"); /* RAID6 */
5079MODULE_ALIAS("md-raid6");
5080MODULE_ALIAS("md-level-6");
5081
5082/* This used to be two separate modules, they were: */
5083MODULE_ALIAS("raid5");
5084MODULE_ALIAS("raid6");